data
dict |
|---|
{
"proceeding": {
"id": "12OmNAY79oC",
"title": "Computer Vision, Graphics & Image Processing, Indian Conference on",
"acronym": "icvgip",
"groupId": "1800020",
"volume": "0",
"displayVolume": "0",
"year": "2008",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNz5JBRO",
"doi": "10.1109/ICVGIP.2008.98",
"title": "Explosion Simulation Using Compressible Fluids",
"normalizedTitle": "Explosion Simulation Using Compressible Fluids",
"abstract": "We propose a novel physically based method to simulate explosions and other compressible fluid phenomena. Themethod uses compressible Navier Stokes equations for modeling the explosion with a Semi-Lagrangian integration method. The proposed integration method addresses the issues of stability and larger timesteps. This is achieved by modifying the Semi-Lagrangian method to reduce dissipation and increase accuracy, using improved interpolation and an error correction method. The proposed method allows the rendering of related phenomena like a fireball, dust and smoke clouds, and the simulation of solid interaction - like rigid fracture and rigid body simulation. Our method is flexible enough to afford substantial artistic control over the behavior of the explosion.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We propose a novel physically based method to simulate explosions and other compressible fluid phenomena. Themethod uses compressible Navier Stokes equations for modeling the explosion with a Semi-Lagrangian integration method. The proposed integration method addresses the issues of stability and larger timesteps. This is achieved by modifying the Semi-Lagrangian method to reduce dissipation and increase accuracy, using improved interpolation and an error correction method. The proposed method allows the rendering of related phenomena like a fireball, dust and smoke clouds, and the simulation of solid interaction - like rigid fracture and rigid body simulation. Our method is flexible enough to afford substantial artistic control over the behavior of the explosion.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We propose a novel physically based method to simulate explosions and other compressible fluid phenomena. Themethod uses compressible Navier Stokes equations for modeling the explosion with a Semi-Lagrangian integration method. The proposed integration method addresses the issues of stability and larger timesteps. This is achieved by modifying the Semi-Lagrangian method to reduce dissipation and increase accuracy, using improved interpolation and an error correction method. The proposed method allows the rendering of related phenomena like a fireball, dust and smoke clouds, and the simulation of solid interaction - like rigid fracture and rigid body simulation. Our method is flexible enough to afford substantial artistic control over the behavior of the explosion.",
"fno": "3476a063",
"keywords": [
"Animation",
"Computational Fluid Dynamics",
"Physically Based Animation",
"Navier Stokes",
"Fluid Simulation",
"Explosions",
"Compressible Fluids"
],
"authors": [
{
"affiliation": null,
"fullName": "Abhinav Golas",
"givenName": "Abhinav",
"surname": "Golas",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Akram Khan",
"givenName": "Akram",
"surname": "Khan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Prem Kalra",
"givenName": "Prem",
"surname": "Kalra",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Subodh Kumar",
"givenName": "Subodh",
"surname": "Kumar",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icvgip",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2008-12-01T00:00:00",
"pubType": "proceedings",
"pages": "63-70",
"year": "2008",
"issn": null,
"isbn": "978-0-7695-3476-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "3476a055",
"articleId": "12OmNwx3Qao",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "3476a071",
"articleId": "12OmNzwZ6u8",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/isvri/2011/0054/0/05759668",
"title": "Real-time, directable smoke simulation",
"doi": null,
"abstractUrl": "/proceedings-article/isvri/2011/05759668/12OmNAY79cY",
"parentPublication": {
"id": "proceedings/isvri/2011/0054/0",
"title": "2011 IEEE International Symposium on VR Innovation (ISVRI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hpcasia/1997/7901/0/79010093",
"title": "Simulation of aerodynamics problem on a distributed shared-memory machine",
"doi": null,
"abstractUrl": "/proceedings-article/hpcasia/1997/79010093/12OmNBKmXo4",
"parentPublication": {
"id": "proceedings/hpcasia/1997/7901/0",
"title": "High Performance Computing and Grid in Asia Pacific Region, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/viz/2009/3734/0/3734a076",
"title": "VOF Method for Fluids and Solids on Octree Structure",
"doi": null,
"abstractUrl": "/proceedings-article/viz/2009/3734a076/12OmNBkxspY",
"parentPublication": {
"id": "proceedings/viz/2009/3734/0",
"title": "Visualisation, International Conference in",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iptc/2010/4196/0/4196a153",
"title": "Numerical Methods and Analysis for Compressible Miscible Displacement",
"doi": null,
"abstractUrl": "/proceedings-article/iptc/2010/4196a153/12OmNC0PGMu",
"parentPublication": {
"id": "proceedings/iptc/2010/4196/0",
"title": "Intelligence Information Processing and Trusted Computing, International Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cse/2014/7981/0/7981a121",
"title": "A GPU-Based Method for Weakly Compressible Fluids",
"doi": null,
"abstractUrl": "/proceedings-article/cse/2014/7981a121/12OmNqGA55c",
"parentPublication": {
"id": "proceedings/cse/2014/7981/0",
"title": "2014 IEEE 17th International Conference on Computational Science and Engineering (CSE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdh/2012/4899/0/4899a309",
"title": "Physics Based Real-Time Explosion Simulation",
"doi": null,
"abstractUrl": "/proceedings-article/icdh/2012/4899a309/12OmNvrvj7p",
"parentPublication": {
"id": "proceedings/icdh/2012/4899/0",
"title": "4th International Conference on Digital Home (ICDH)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hipc/1996/7557/0/75570010",
"title": "Comparison of parallelization strategies for simulation of aerodynamics problem",
"doi": null,
"abstractUrl": "/proceedings-article/hipc/1996/75570010/12OmNz3bdPb",
"parentPublication": {
"id": "proceedings/hipc/1996/7557/0",
"title": "Proceedings of 3rd International Conference on High Performance Computing (HiPC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdh/2012/4899/0/4899a374",
"title": "SPH-Based Real-Time Wall-Fountain Simulation",
"doi": null,
"abstractUrl": "/proceedings-article/icdh/2012/4899a374/12OmNzuZUoG",
"parentPublication": {
"id": "proceedings/icdh/2012/4899/0",
"title": "4th International Conference on Digital Home (ICDH)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2010/01/ttg2010010070",
"title": "Fluid Simulation with Articulated Bodies",
"doi": null,
"abstractUrl": "/journal/tg/2010/01/ttg2010010070/13rRUxDqS8f",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/01/ttg2014010004",
"title": "Multiphase Flow of Immiscible Fluids on Unstructured Moving Meshes",
"doi": null,
"abstractUrl": "/journal/tg/2014/01/ttg2014010004/13rRUxcbnCr",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNAXxXaK",
"title": "2017 IEEE International Conference on Computer Vision (ICCV)",
"acronym": "iccv",
"groupId": "1000149",
"volume": "0",
"displayVolume": "0",
"year": "2017",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNxy4MYd",
"doi": "10.1109/ICCV.2017.207",
"title": "See the Glass Half Full: Reasoning About Liquid Containers, Their Volume and Content",
"normalizedTitle": "See the Glass Half Full: Reasoning About Liquid Containers, Their Volume and Content",
"abstract": "Humans have rich understanding of liquid containers and their contents; for example, we can effortlessly pour water from a pitcher to a cup. Doing so requires estimating the volume of the cup, approximating the amount of water in the pitcher, and predicting the behavior of water when we tilt the pitcher. Very little attention in computer vision has been made to liquids and their containers. In this paper, we study liquid containers and their contents, and propose methods to estimate the volume of containers, approximate the amount of liquid in them, and perform comparative volume estimations all from a single RGB image. Furthermore, we show the results of the proposed model for predicting the behavior of liquids inside containers when one tilts the containers. We also introduce a new dataset of Containers Of liQuid contEnt (COQE) that contains more than 5,000 images of 10,000 liquid containers in context labelled with volume, amount of content, bounding box annotation, and corresponding similar 3D CAD models.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Humans have rich understanding of liquid containers and their contents; for example, we can effortlessly pour water from a pitcher to a cup. Doing so requires estimating the volume of the cup, approximating the amount of water in the pitcher, and predicting the behavior of water when we tilt the pitcher. Very little attention in computer vision has been made to liquids and their containers. In this paper, we study liquid containers and their contents, and propose methods to estimate the volume of containers, approximate the amount of liquid in them, and perform comparative volume estimations all from a single RGB image. Furthermore, we show the results of the proposed model for predicting the behavior of liquids inside containers when one tilts the containers. We also introduce a new dataset of Containers Of liQuid contEnt (COQE) that contains more than 5,000 images of 10,000 liquid containers in context labelled with volume, amount of content, bounding box annotation, and corresponding similar 3D CAD models.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Humans have rich understanding of liquid containers and their contents; for example, we can effortlessly pour water from a pitcher to a cup. Doing so requires estimating the volume of the cup, approximating the amount of water in the pitcher, and predicting the behavior of water when we tilt the pitcher. Very little attention in computer vision has been made to liquids and their containers. In this paper, we study liquid containers and their contents, and propose methods to estimate the volume of containers, approximate the amount of liquid in them, and perform comparative volume estimations all from a single RGB image. Furthermore, we show the results of the proposed model for predicting the behavior of liquids inside containers when one tilts the containers. We also introduce a new dataset of Containers Of liQuid contEnt (COQE) that contains more than 5,000 images of 10,000 liquid containers in context labelled with volume, amount of content, bounding box annotation, and corresponding similar 3D CAD models.",
"fno": "1032b889",
"keywords": [
"Containers",
"Liquids",
"Solid Modeling",
"Estimation",
"Three Dimensional Displays",
"Cognition"
],
"authors": [
{
"affiliation": null,
"fullName": "Roozbeh Mottaghi",
"givenName": "Roozbeh",
"surname": "Mottaghi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Connor Schenck",
"givenName": "Connor",
"surname": "Schenck",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Dieter Fox",
"givenName": "Dieter",
"surname": "Fox",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Ali Farhadi",
"givenName": "Ali",
"surname": "Farhadi",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iccv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2017-10-01T00:00:00",
"pubType": "proceedings",
"pages": "1889-1898",
"year": "2017",
"issn": "2380-7504",
"isbn": "978-1-5386-1032-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "1032b879",
"articleId": "12OmNym2bTM",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "1032b899",
"articleId": "12OmNyKJiAQ",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icdl/2002/7350/0/01022697",
"title": "Charged particles in the bulk and near the surface of a non-polar liquid dielectric",
"doi": null,
"abstractUrl": "/proceedings-article/icdl/2002/01022697/12OmNAfPIPF",
"parentPublication": {
"id": "proceedings/icdl/2002/7350/0",
"title": "Proceedings of 14th International Conference on Dielectric Liquids",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2015/1727/0/07223430",
"title": "Presentation of virtual liquid by modeling vibration of a Japanese sake bottle",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2015/07223430/12OmNviHKdt",
"parentPublication": {
"id": "proceedings/vr/2015/1727/0",
"title": "2015 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2015/8391/0/8391a711",
"title": "Fill and Transfer: A Simple Physics-Based Approach for Containability Reasoning",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2015/8391a711/12OmNyoiZc7",
"parentPublication": {
"id": "proceedings/iccv/2015/8391/0",
"title": "2015 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cgiv/2016/0811/0/0811a012",
"title": "Surface Tension and Wettability Modeling for Flowing Liquids",
"doi": null,
"abstractUrl": "/proceedings-article/cgiv/2016/0811a012/12OmNyuya8j",
"parentPublication": {
"id": "proceedings/cgiv/2016/0811/0",
"title": "2016 13th International Conference on Computer Graphics, Imaging and Visualization (CGiV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isvd/2009/3781/0/3781a271",
"title": "Delaunay Simplexes in Liquid Cyclohexane",
"doi": null,
"abstractUrl": "/proceedings-article/isvd/2009/3781a271/12OmNzXnNCd",
"parentPublication": {
"id": "proceedings/isvd/2009/3781/0",
"title": "2009 Sixth International Symposium on Voronoi Diagrams",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/01/ttg2014010017",
"title": "Mass-Conserving Eulerian Liquid Simulation",
"doi": null,
"abstractUrl": "/journal/tg/2014/01/ttg2014010017/13rRUB7a1fQ",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2015/01/mcg2015010046",
"title": "Simulating Drops Settling in a Still Liquid",
"doi": null,
"abstractUrl": "/magazine/cg/2015/01/mcg2015010046/13rRUwjXZMj",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a988",
"title": "Liquid Digital Twins Based on Magnetic Fluid Toys",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a988/1CJeOcQf4K4",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600n3801",
"title": "Image Based Reconstruction of Liquids from 2D Surface Detections",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600n3801/1H0LsB06x7q",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tm/2023/04/09582731",
"title": "A Portable and Convenient System for Unknown Liquid Identification With Smartphone Vibration",
"doi": null,
"abstractUrl": "/journal/tm/2023/04/09582731/1xR2TmxY1MI",
"parentPublication": {
"id": "trans/tm",
"title": "IEEE Transactions on Mobile Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNzn395d",
"title": "2017 IEEE 67th Electronic Components and Technology Conference (ECTC)",
"acronym": "ectc",
"groupId": "1000248",
"volume": "0",
"displayVolume": "0",
"year": "2017",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNy3AgwI",
"doi": "10.1109/ECTC.2017.147",
"title": "Ga Liquid Metal Embrittlement for Fine Pitch Interconnect Rework",
"normalizedTitle": "Ga Liquid Metal Embrittlement for Fine Pitch Interconnect Rework",
"abstract": "Heterogeneous integration is considered to be essential to maximal exploitation of the densification, performance and system cost potential of semiconductor packaging. To ensure high yields of these multiple device systems without sacrificing packaging integrity and reliability, a rework process that can effectively and locally remove defective chips is a sought-after element. Motivated by chip removal challenges on fine pitch interconnects with low volumes of Pb-free solder and relatively high intermetallic content, a novel method of die separation that minimizes the need for high heat is proposed and validated. The approach exploits a known failure mechanism for metal systems known as Liquid Metal Embrittlement (LME) to weaken chip level interconnects and facilitate separation. Specifically, this work investigated the use of liquid Gallium (Ga) to effect SAC solder interconnect embrittlement. To study the LME effect, SAC BGA samples were exposed to liquid Ga. Subsequent shear force testing demonstrated the embrittlement phenomenon and showed a dependence on exposure time and temperature as well as surface coverage. Characterization of specimens at various stages of liquid Ga exposure proposes an embrittlement mechanism that comprises both intergranular and transgranular diffusion as exhibited by the progressive creation of multiple grain-like structures defined by the liquid Ga penetration. To enable practical application in flip chip rework processes, a number of means were explored to promote liquid Ga infiltration into the chip to substrate gap. The most effective approach, using liquid Ga micro particles dispersed and suspended in a flux solution, resulted in a solder exposure that enabled some degree of embrittlement with interface separation predominantly within the solder and efficient removal of excess liquid Ga. 
These results recommend further work to optimize the size and concentration of liquid Ga in the suspension in order to improve exposure conditions and accelerate embrittlement, followed by chip replacement tests to validate wetting and reliability of the newly formed interconnects.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Heterogeneous integration is considered to be essential to maximal exploitation of the densification, performance and system cost potential of semiconductor packaging. To ensure high yields of these multiple device systems without sacrificing packaging integrity and reliability, a rework process that can effectively and locally remove defective chips is a sought-after element. Motivated by chip removal challenges on fine pitch interconnects with low volumes of Pb-free solder and relatively high intermetallic content, a novel method of die separation that minimizes the need for high heat is proposed and validated. The approach exploits a known failure mechanism for metal systems known as Liquid Metal Embrittlement (LME) to weaken chip level interconnects and facilitate separation. Specifically, this work investigated the use of liquid Gallium (Ga) to effect SAC solder interconnect embrittlement. To study the LME effect, SAC BGA samples were exposed to liquid Ga. Subsequent shear force testing demonstrated the embrittlement phenomenon and showed a dependence on exposure time and temperature as well as surface coverage. Characterization of specimens at various stages of liquid Ga exposure proposes an embrittlement mechanism that comprises both intergranular and transgranular diffusion as exhibited by the progressive creation of multiple grain-like structures defined by the liquid Ga penetration. To enable practical application in flip chip rework processes, a number of means were explored to promote liquid Ga infiltration into the chip to substrate gap. The most effective approach, using liquid Ga micro particles dispersed and suspended in a flux solution, resulted in a solder exposure that enabled some degree of embrittlement with interface separation predominantly within the solder and efficient removal of excess liquid Ga. 
These results recommend further work to optimize the size and concentration of liquid Ga in the suspension in order to improve exposure conditions and accelerate embrittlement, followed by chip replacement tests to validate wetting and reliability of the newly formed interconnects.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Heterogeneous integration is considered to be essential to maximal exploitation of the densification, performance and system cost potential of semiconductor packaging. To ensure high yields of these multiple device systems without sacrificing packaging integrity and reliability, a rework process that can effectively and locally remove defective chips is a sought-after element. Motivated by chip removal challenges on fine pitch interconnects with low volumes of Pb-free solder and relatively high intermetallic content, a novel method of die separation that minimizes the need for high heat is proposed and validated. The approach exploits a known failure mechanism for metal systems known as Liquid Metal Embrittlement (LME) to weaken chip level interconnects and facilitate separation. Specifically, this work investigated the use of liquid Gallium (Ga) to effect SAC solder interconnect embrittlement. To study the LME effect, SAC BGA samples were exposed to liquid Ga. Subsequent shear force testing demonstrated the embrittlement phenomenon and showed a dependence on exposure time and temperature as well as surface coverage. Characterization of specimens at various stages of liquid Ga exposure proposes an embrittlement mechanism that comprises both intergranular and transgranular diffusion as exhibited by the progressive creation of multiple grain-like structures defined by the liquid Ga penetration. To enable practical application in flip chip rework processes, a number of means were explored to promote liquid Ga infiltration into the chip to substrate gap. The most effective approach, using liquid Ga micro particles dispersed and suspended in a flux solution, resulted in a solder exposure that enabled some degree of embrittlement with interface separation predominantly within the solder and efficient removal of excess liquid Ga. 
These results recommend further work to optimize the size and concentration of liquid Ga in the suspension in order to improve exposure conditions and accelerate embrittlement, followed by chip replacement tests to validate wetting and reliability of the newly formed interconnects.",
"fno": "07999893",
"keywords": [
"Ball Grid Arrays",
"Fine Pitch Technology",
"Flip Chip Devices",
"Gallium",
"Integrated Circuit Interconnections",
"Liquid Metal Embrittlement",
"Solders",
"Ga Liquid Metal Embrittlement",
"Fine Pitch Interconnect Rework",
"Heterogeneous Integration",
"Semiconductor Packaging",
"Chip Removal Challenges",
"Pb Free Solder",
"Intermetallic Content",
"Die Separation",
"Chip Level Interconnects",
"Liquid Gallium",
"SAC Solder Interconnect Embrittlement",
"SAC BGA Samples",
"Shear Force Testing",
"Intergranular Diffusion",
"Transgranular Diffusion",
"Multiple Grain Like Structures",
"Flip Chip Rework Process",
"Liquid Ga Infiltration",
"Liquid Ga Microparticles",
"Chip Replacement Tests",
"Ga",
"Liquids",
"Gallium",
"Solids",
"Intermetallic",
"Substrates",
"Surface Cracks",
"Rework",
"Pb Free Solder",
"Flip Chip",
"Heterogeneous Integration",
"2 5",
"3 0",
"Liquid Metal Embrittlement"
],
"authors": [
{
"affiliation": null,
"fullName": "E. Nguena",
"givenName": "E.",
"surname": "Nguena",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "D. Danovitch",
"givenName": "D.",
"surname": "Danovitch",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "M. Kanso",
"givenName": "M.",
"surname": "Kanso",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "R. Langlois",
"givenName": "R.",
"surname": "Langlois",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ectc",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2017-05-01T00:00:00",
"pubType": "proceedings",
"pages": "1584-1591",
"year": "2017",
"issn": "2377-5726",
"isbn": "978-1-5090-6315-4",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07999892",
"articleId": "12OmNx7G63k",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07999894",
"articleId": "12OmNzYNN26",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ectc/2017/6315/0/07999724",
"title": "Low Temperature Ni/Sn/Ni Transient Liquid Phase Bonding for High Temperature Packaging Applications by Imposing Temperature Gradient",
"doi": null,
"abstractUrl": "/proceedings-article/ectc/2017/07999724/12OmNAMtAOA",
"parentPublication": {
"id": "proceedings/ectc/2017/6315/0",
"title": "2017 IEEE 67th Electronic Components and Technology Conference (ECTC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ectc/2017/6315/0/07999720",
"title": "Scaling Cu Pillars to 20um Pitch and Below: Critical Role of Surface Finish and Barrier Layers",
"doi": null,
"abstractUrl": "/proceedings-article/ectc/2017/07999720/12OmNqJHFDN",
"parentPublication": {
"id": "proceedings/ectc/2017/6315/0",
"title": "2017 IEEE 67th Electronic Components and Technology Conference (ECTC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ectc/2017/6315/0/07999970",
"title": "Cu-In-Microbumps for Low-Temperature Bonding of Fine-Pitch-Interconnects",
"doi": null,
"abstractUrl": "/proceedings-article/ectc/2017/07999970/12OmNxRnvVO",
"parentPublication": {
"id": "proceedings/ectc/2017/6315/0",
"title": "2017 IEEE 67th Electronic Components and Technology Conference (ECTC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdl/2002/7350/0/01022745",
"title": "Surface conductivity in liquid-solid interface due to image force",
"doi": null,
"abstractUrl": "/proceedings-article/icdl/2002/01022745/12OmNxzMnWM",
"parentPublication": {
"id": "proceedings/icdl/2002/7350/0",
"title": "Proceedings of 14th International Conference on Dielectric Liquids",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mcsoc/2017/3441/0/3441a037",
"title": "Thermal Management in 3D Homogeneous NoC Systems Using Optimized Placement of Liquid Microchannels",
"doi": null,
"abstractUrl": "/proceedings-article/mcsoc/2017/3441a037/12OmNySG3SN",
"parentPublication": {
"id": "proceedings/mcsoc/2017/3441/0",
"title": "2017 IEEE 11th International Symposium on Embedded Multicore/Many-core Systems-on-Chip (MCSoC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ectc/2017/6315/0/07999683",
"title": "Fine Pitch Interconnect Rework for Lead-Free Flip Chip Packages",
"doi": null,
"abstractUrl": "/proceedings-article/ectc/2017/07999683/12OmNzxyiF5",
"parentPublication": {
"id": "proceedings/ectc/2017/6315/0",
"title": "2017 IEEE 67th Electronic Components and Technology Conference (ECTC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/01/ttg2014010017",
"title": "Mass-Conserving Eulerian Liquid Simulation",
"doi": null,
"abstractUrl": "/journal/tg/2014/01/ttg2014010017/13rRUB7a1fQ",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/10/06747389",
"title": "Large-Scale Liquid Simulation on Adaptive Hexahedral Grids",
"doi": null,
"abstractUrl": "/journal/tg/2014/10/06747389/13rRUxYrbMj",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmtma/2022/9978/0/997800a503",
"title": "Design of Frequency Reconfigurable Antenna Based on Liquid Metal Foam",
"doi": null,
"abstractUrl": "/proceedings-article/icmtma/2022/997800a503/1ByeOrRaA1i",
"parentPublication": {
"id": "proceedings/icmtma/2022/9978/0",
"title": "2022 14th International Conference on Measuring Technology and Mechatronics Automation (ICMTMA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icscde/2021/0142/0/014200a121",
"title": "Design of a simple and multifunctional liquid parameter measuring instrument based on MSP430 single chip microcomputer",
"doi": null,
"abstractUrl": "/proceedings-article/icscde/2021/014200a121/1xtSBmePeco",
"parentPublication": {
"id": "proceedings/icscde/2021/0142/0",
"title": "2021 International Conference of Social Computing and Digital Economy (ICSCDE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNyugz58",
"title": "2016 13th International Conference on Computer Graphics, Imaging and Visualization (CGiV)",
"acronym": "cgiv",
"groupId": "1001775",
"volume": "0",
"displayVolume": "0",
"year": "2016",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNyuya8j",
"doi": "10.1109/CGiV.2016.12",
"title": "Surface Tension and Wettability Modeling for Flowing Liquids",
"normalizedTitle": "Surface Tension and Wettability Modeling for Flowing Liquids",
"abstract": "The presented simulation model of surface tension and wettability based on physical properties of liquids is designed for use in computer graphics. Due to the relatively small surface tension forces the model is useful for simulating liquid of small volume such as droplets. This model can be used in conjunction with various fluid simulation methods, one of the most popular - Marker and Cell - has been selected for this paper. The paper describes also a simple and rapid method of determining the liquid surface as a mesh of triangles. The presented method improves the final visual effect and is well suited for determining the surface of the droplets. The simulation method was applied to create realistic animations of flowing liquid droplets of different types.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The presented simulation model of surface tension and wettability based on physical properties of liquids is designed for use in computer graphics. Due to the relatively small surface tension forces the model is useful for simulating liquid of small volume such as droplets. This model can be used in conjunction with various fluid simulation methods, one of the most popular - Marker and Cell - has been selected for this paper. The paper describes also a simple and rapid method of determining the liquid surface as a mesh of triangles. The presented method improves the final visual effect and is well suited for determining the surface of the droplets. The simulation method was applied to create realistic animations of flowing liquid droplets of different types.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The presented simulation model of surface tension and wettability based on physical properties of liquids is designed for use in computer graphics. Due to the relatively small surface tension forces the model is useful for simulating liquid of small volume such as droplets. This model can be used in conjunction with various fluid simulation methods, one of the most popular - Marker and Cell - has been selected for this paper. The paper describes also a simple and rapid method of determining the liquid surface as a mesh of triangles. The presented method improves the final visual effect and is well suited for determining the surface of the droplets. The simulation method was applied to create realistic animations of flowing liquid droplets of different types.",
"fno": "0811a012",
"keywords": [
"Liquids",
"Surface Tension",
"Solids",
"Computational Modeling",
"Force",
"Solid Modeling",
"Shape",
"Natural Phenomena",
"Animation",
"Computational Fluid Dynamics",
"Liquid Simulation",
"Liquid Surface"
],
"authors": [
{
"affiliation": null,
"fullName": "Mariusz Zubrzycki",
"givenName": "Mariusz",
"surname": "Zubrzycki",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Jacek Raczkowski",
"givenName": "Jacek",
"surname": "Raczkowski",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cgiv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2016-03-01T00:00:00",
"pubType": "proceedings",
"pages": "12-17",
"year": "2016",
"issn": null,
"isbn": "978-1-5090-0811-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "0811a006",
"articleId": "12OmNBOCWhd",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "0811a018",
"articleId": "12OmNwx3Q8F",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icvrv/2015/7673/0/7673a295",
"title": "SPH-based Fluid Simulation with a New Surface Tension Formulation",
"doi": null,
"abstractUrl": "/proceedings-article/icvrv/2015/7673a295/12OmNAS9zo4",
"parentPublication": {
"id": "proceedings/icvrv/2015/7673/0",
"title": "2015 International Conference on Virtual Reality and Visualization (ICVRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdl/2002/7350/0/01022697",
"title": "Charged particles in the bulk and near the surface of a non-polar liquid dielectric",
"doi": null,
"abstractUrl": "/proceedings-article/icdl/2002/01022697/12OmNAfPIPF",
"parentPublication": {
"id": "proceedings/icdl/2002/7350/0",
"title": "Proceedings of 14th International Conference on Dielectric Liquids",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cad-graphics/2015/8020/0/07450421",
"title": "A New Surface Tension Formulation for SPH",
"doi": null,
"abstractUrl": "/proceedings-article/cad-graphics/2015/07450421/12OmNButq4h",
"parentPublication": {
"id": "proceedings/cad-graphics/2015/8020/0",
"title": "2015 14th International Conference on Computer-Aided Design and Computer Graphics (CAD/Graphics)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmens/2003/1947/0/19470197",
"title": "Equilibrium and Dynamic Behavior of Micro Flows Under Electrically Induced Surface Tension Actuation Forces",
"doi": null,
"abstractUrl": "/proceedings-article/icmens/2003/19470197/12OmNwGqBnl",
"parentPublication": {
"id": "proceedings/icmens/2003/1947/0",
"title": "MEMS, NANO, and Smart Systems, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isatp/2003/7770/0/01217187",
"title": "Capillary and surface tension forces in the manipulation of small parts",
"doi": null,
"abstractUrl": "/proceedings-article/isatp/2003/01217187/12OmNwpXRWO",
"parentPublication": {
"id": "proceedings/isatp/2003/7770/0",
"title": "ISATP'03: 5th IEEE International Symposium on Assembly and Task Planning",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/acomp/2015/8234/0/8234a112",
"title": "A New Hllem-Type Riemann Solver for Compressible Multi-phase Flows with Surface Tension",
"doi": null,
"abstractUrl": "/proceedings-article/acomp/2015/8234a112/12OmNzWx02Y",
"parentPublication": {
"id": "proceedings/acomp/2015/8234/0",
"title": "2015 International Conference on Advanced Computing and Applications (ACOMP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/05/07414526",
"title": "A Unified Detail-Preserving Liquid Simulation by Two-Phase Lattice Boltzmann Modeling",
"doi": null,
"abstractUrl": "/journal/tg/2017/05/07414526/13rRUwdIOUS",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/08/05999663",
"title": "A Deformable Surface Model for Real-Time Water Drop Animation",
"doi": null,
"abstractUrl": "/journal/tg/2012/08/05999663/13rRUyY28Ys",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/10/07932108",
"title": "Pairwise Force SPH Model for Real-Time Multi-Interaction Applications",
"doi": null,
"abstractUrl": "/journal/tg/2017/10/07932108/13rRUyYjKan",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a550",
"title": "Semi-Analytical Surface Tension Model for Free Surface Flows",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a550/1CJdPv2AGJi",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNCeaPZI",
"title": "2016 IEEE First International Conference on Data Science in Cyberspace (DSC)",
"acronym": "dsc",
"groupId": "1815424",
"volume": "0",
"displayVolume": "0",
"year": "2016",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNzBOhHv",
"doi": "10.1109/DSC.2016.69",
"title": "A Hybrid Modeling Method for Dynamic Liquid Simulation",
"normalizedTitle": "A Hybrid Modeling Method for Dynamic Liquid Simulation",
"abstract": "Natural phenomena simulation attracts a lot of research attention and interest in virtual reality. We introduce the main research achievements in recent years with regard of liquid, which is a common natural phenomenon. A hybrid modeling approach for dynamic liquid simulation is proposed, followed by a surface reconstruction method using simulated results. Experiment shows that satisfactory visual effect is achieved and this application can be used in computer games, movie making and virtual simulation in medical area.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Natural phenomena simulation attracts a lot of research attention and interest in virtual reality. We introduce the main research achievements in recent years with regard of liquid, which is a common natural phenomenon. A hybrid modeling approach for dynamic liquid simulation is proposed, followed by a surface reconstruction method using simulated results. Experiment shows that satisfactory visual effect is achieved and this application can be used in computer games, movie making and virtual simulation in medical area.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Natural phenomena simulation attracts a lot of research attention and interest in virtual reality. We introduce the main research achievements in recent years with regard of liquid, which is a common natural phenomenon. A hybrid modeling approach for dynamic liquid simulation is proposed, followed by a surface reconstruction method using simulated results. Experiment shows that satisfactory visual effect is achieved and this application can be used in computer games, movie making and virtual simulation in medical area.",
"fno": "1192a489",
"keywords": [
"Mathematical Model",
"Computational Modeling",
"Liquids",
"Surface Reconstruction",
"Solid Modeling",
"Numerical Models",
"Fluid Simulation",
"Particle",
"Navier Stokes Equations",
"Fluids",
"Natural Phenomena Simulation"
],
"authors": [
{
"affiliation": null,
"fullName": "Ling Zou",
"givenName": "Ling",
"surname": "Zou",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Guoping Wang",
"givenName": "Guoping",
"surname": "Wang",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "dsc",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2016-06-01T00:00:00",
"pubType": "proceedings",
"pages": "489-492",
"year": "2016",
"issn": null,
"isbn": "978-1-5090-1192-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "1192a484",
"articleId": "12OmNwp74wc",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "1192a493",
"articleId": "12OmNz6iOw5",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ectc/2017/6315/0/07999893",
"title": "Ga Liquid Metal Embrittlement for Fine Pitch Interconnect Rework",
"doi": null,
"abstractUrl": "/proceedings-article/ectc/2017/07999893/12OmNy3AgwI",
"parentPublication": {
"id": "proceedings/ectc/2017/6315/0",
"title": "2017 IEEE 67th Electronic Components and Technology Conference (ECTC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cgiv/2016/0811/0/0811a012",
"title": "Surface Tension and Wettability Modeling for Flowing Liquids",
"doi": null,
"abstractUrl": "/proceedings-article/cgiv/2016/0811a012/12OmNyuya8j",
"parentPublication": {
"id": "proceedings/cgiv/2016/0811/0",
"title": "2016 13th International Conference on Computer Graphics, Imaging and Visualization (CGiV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cbi/2016/3231/1/3231a104",
"title": "Towards Liquid Models: An Evolutionary Modeling Approach",
"doi": null,
"abstractUrl": "/proceedings-article/cbi/2016/3231a104/12OmNyyO8Ju",
"parentPublication": {
"id": "proceedings/cbi/2016/3231/2",
"title": "2016 IEEE 18th Conference on Business Informatics (CBI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/01/ttg2014010017",
"title": "Mass-Conserving Eulerian Liquid Simulation",
"doi": null,
"abstractUrl": "/journal/tg/2014/01/ttg2014010017/13rRUB7a1fQ",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/05/07414526",
"title": "A Unified Detail-Preserving Liquid Simulation by Two-Phase Lattice Boltzmann Modeling",
"doi": null,
"abstractUrl": "/journal/tg/2017/05/07414526/13rRUwdIOUS",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/10/06747389",
"title": "Large-Scale Liquid Simulation on Adaptive Hexahedral Grids",
"doi": null,
"abstractUrl": "/journal/tg/2014/10/06747389/13rRUxYrbMj",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccbd/2022/5716/0/10080788",
"title": "Numerical Simulation on Drag Reduction of Micro-grooved Surface",
"doi": null,
"abstractUrl": "/proceedings-article/iccbd/2022/10080788/1LSP5NEHXq0",
"parentPublication": {
"id": "proceedings/iccbd/2022/5716/0",
"title": "2022 5th International Conference on Computing and Big Data (ICCBD)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2020/5608/0/09089636",
"title": "Multiple-scale Simulation Method for Liquid with Trapped Air under Particle-based Framework",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2020/09089636/1jIx9StwsnK",
"parentPublication": {
"id": "proceedings/vr/2020/5608/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aiam/2020/9986/0/998600a349",
"title": "Simulation study on consequence of leakage accident of liquid chlorine storage tank",
"doi": null,
"abstractUrl": "/proceedings-article/aiam/2020/998600a349/1tweOAfoTJe",
"parentPublication": {
"id": "proceedings/aiam/2020/9986/0",
"title": "2020 2nd International Conference on Artificial Intelligence and Advanced Manufacture (AIAM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/12/09524524",
"title": "Simulating Multi-Scale, Granular Materials and Their Transitions With a Hybrid Euler-Lagrange Solver",
"doi": null,
"abstractUrl": "/journal/tg/2021/12/09524524/1wpqubOKAne",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNxE2mWa",
"title": "2009 Sixth International Symposium on Voronoi Diagrams",
"acronym": "isvd",
"groupId": "1001201",
"volume": "0",
"displayVolume": "0",
"year": "2009",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNzXnNCd",
"doi": "10.1109/ISVD.2009.10",
"title": "Delaunay Simplexes in Liquid Cyclohexane",
"normalizedTitle": "Delaunay Simplexes in Liquid Cyclohexane",
"abstract": "We study the structure of computer models of liquid cyclohexane and its noncyclic analogue 2,3-dimethylbutane. It is shown that regardless of the different chemical structure of these molecules, they both behave in liquid like spherical particles. It emerges insimilarity of pair correlation functions calculated forcenters of mass of molecules with the functions for simple liquids. An analogy with simple liquids also follows fromthe analysis of Delaunay simplexes. Structural difference between our molecular liquids is the same as betweendisordered packings of spheres with correspondingdensities. More dense systems contain more Delaunaysimplexes of shape close to the perfect tetrahedron. They associate by faces and produce clusters with morphologyalien for crystalline lattices, Phys. Rev. Lett 98, 235504 (2007). Thus the geometrical principle for the formationof non-crystalline packings is valid not only for sphericalatoms or hard spheres, where the tetrahedron is a natural geometrical element, but also for molecules of morecomplex shape, which, because of thermal motion andlack of significant orientational correlation occupy aspherical volume only on average.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We study the structure of computer models of liquid cyclohexane and its noncyclic analogue 2,3-dimethylbutane. It is shown that regardless of the different chemical structure of these molecules, they both behave in liquid like spherical particles. It emerges insimilarity of pair correlation functions calculated forcenters of mass of molecules with the functions for simple liquids. An analogy with simple liquids also follows fromthe analysis of Delaunay simplexes. Structural difference between our molecular liquids is the same as betweendisordered packings of spheres with correspondingdensities. More dense systems contain more Delaunaysimplexes of shape close to the perfect tetrahedron. They associate by faces and produce clusters with morphologyalien for crystalline lattices, Phys. Rev. Lett 98, 235504 (2007). Thus the geometrical principle for the formationof non-crystalline packings is valid not only for sphericalatoms or hard spheres, where the tetrahedron is a natural geometrical element, but also for molecules of morecomplex shape, which, because of thermal motion andlack of significant orientational correlation occupy aspherical volume only on average.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We study the structure of computer models of liquid cyclohexane and its noncyclic analogue 2,3-dimethylbutane. It is shown that regardless of the different chemical structure of these molecules, they both behave in liquid like spherical particles. It emerges insimilarity of pair correlation functions calculated forcenters of mass of molecules with the functions for simple liquids. An analogy with simple liquids also follows fromthe analysis of Delaunay simplexes. Structural difference between our molecular liquids is the same as betweendisordered packings of spheres with correspondingdensities. More dense systems contain more Delaunaysimplexes of shape close to the perfect tetrahedron. They associate by faces and produce clusters with morphologyalien for crystalline lattices, Phys. Rev. Lett 98, 235504 (2007). Thus the geometrical principle for the formationof non-crystalline packings is valid not only for sphericalatoms or hard spheres, where the tetrahedron is a natural geometrical element, but also for molecules of morecomplex shape, which, because of thermal motion andlack of significant orientational correlation occupy aspherical volume only on average.",
"fno": "3781a271",
"keywords": [
"Delaunay Simplexes",
"Structure Of Simple Liquids",
"Molecular Liquids",
"Cyclohexane"
],
"authors": [
{
"affiliation": null,
"fullName": "Alexey Anikeenko",
"givenName": "Alexey",
"surname": "Anikeenko",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Alexandra Kim",
"givenName": "Alexandra",
"surname": "Kim",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Nikolai Medvedev",
"givenName": "Nikolai",
"surname": "Medvedev",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "isvd",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2009-06-01T00:00:00",
"pubType": "proceedings",
"pages": "271-277",
"year": "2009",
"issn": null,
"isbn": "978-0-7695-3781-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "3781a265",
"articleId": "12OmNy7QfoW",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "3781a278",
"articleId": "12OmNxveNRN",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/conielecomp/2008/3120/0/3120a099",
"title": "Spectrometer to Measure the Steady-State Fluorescence Emitted by Liquid and Solid Samples",
"doi": null,
"abstractUrl": "/proceedings-article/conielecomp/2008/3120a099/12OmNrHjqRe",
"parentPublication": {
"id": "proceedings/conielecomp/2008/3120/0",
"title": "Electronics, Communications, and Computers, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccrd/2010/4043/0/4043a326",
"title": "Delaunay Triangulation Based Three Dimensional Anatomical Facial Reconstruction from 2D CT Slices",
"doi": null,
"abstractUrl": "/proceedings-article/iccrd/2010/4043a326/12OmNvTk00T",
"parentPublication": {
"id": "proceedings/iccrd/2010/4043/0",
"title": "Computer Research and Development, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isvd/2009/3781/0/3781a254",
"title": "A Single Beta-Complex Solves All Geometry Problems in a Molecule",
"doi": null,
"abstractUrl": "/proceedings-article/isvd/2009/3781a254/12OmNwKYbuu",
"parentPublication": {
"id": "proceedings/isvd/2009/3781/0",
"title": "2009 Sixth International Symposium on Voronoi Diagrams",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isvd/2007/2869/0/28690130",
"title": "The Role of Quasi-Regular Tetrahedra in Dense Disordered Packings of Hard Spheres",
"doi": null,
"abstractUrl": "/proceedings-article/isvd/2007/28690130/12OmNxeM46S",
"parentPublication": {
"id": "proceedings/isvd/2007/2869/0",
"title": "4th International Symposium on Voronoi Diagrams in Science and Engineering (ISVD 2007)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hicss/1995/6921/0/69210237",
"title": "Computer-aided design of crystalline drugs",
"doi": null,
"abstractUrl": "/proceedings-article/hicss/1995/69210237/12OmNzVoBJw",
"parentPublication": {
"id": "proceedings/hicss/1995/6921/0",
"title": "28th Hawaii International Conference on System Sciences (HICSS'95)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdl/2002/7350/0/01022750",
"title": "Spectroscopic investigation of electrical discharges in liquids",
"doi": null,
"abstractUrl": "/proceedings-article/icdl/2002/01022750/12OmNzmLxSE",
"parentPublication": {
"id": "proceedings/icdl/2002/7350/0",
"title": "Proceedings of 14th International Conference on Dielectric Liquids",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2015/01/mcg2015010046",
"title": "Simulating Drops Settling in a Still Liquid",
"doi": null,
"abstractUrl": "/magazine/cg/2015/01/mcg2015010046/13rRUwjXZMj",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isvd/2006/2630/0/04124815",
"title": "Critical densities in hard sphere packings. Delaunay simplex analysis.",
"doi": null,
"abstractUrl": "/proceedings-article/isvd/2006/04124815/17D45XH89pT",
"parentPublication": {
"id": "proceedings/isvd/2006/2630/0",
"title": "2006 3rd International Symposium on Voronoi Diagrams in Science and Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bigmm/2022/5963/0/596300a053",
"title": "FLUID: Formulated LiqUid Instability Dataset",
"doi": null,
"abstractUrl": "/proceedings-article/bigmm/2022/596300a053/1JvaK0pPmlq",
"parentPublication": {
"id": "proceedings/bigmm/2022/5963/0",
"title": "2022 IEEE Eighth International Conference on Multimedia Big Data (BigMM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tm/2023/04/09582731",
"title": "A Portable and Convenient System for Unknown Liquid Identification With Smartphone Vibration",
"doi": null,
"abstractUrl": "/journal/tm/2023/04/09582731/1xR2TmxY1MI",
"parentPublication": {
"id": "trans/tm",
"title": "IEEE Transactions on Mobile Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNBDyAaZ",
"title": "2015 IEEE International Conference on Computer Vision (ICCV)",
"acronym": "iccv",
"groupId": "1000149",
"volume": "0",
"displayVolume": "0",
"year": "2015",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNCcKQCl",
"doi": "10.1109/ICCV.2015.247",
"title": "3D Fragment Reassembly Using Integrated Template Guidance and Fracture-Region Matching",
"normalizedTitle": "3D Fragment Reassembly Using Integrated Template Guidance and Fracture-Region Matching",
"abstract": "This paper studies matching of fragmented objects to recompose their original geometry. Solving this geometric reassembly problem has direct applications in archaeology and forensic investigation in the computer-aided restoration of damaged artifacts and evidence. We develop a new algorithm to effectively integrate both guidance from a template and from matching of adjacent pieces' fracture-regions. First, we compute partial matchings between fragments and a template, and pairwise matchings among fragments. Many potential matches are obtained and then selected/refined in a multi-piece matching stage to maximize global groupwise matching consistency. This pipeline is effective in composing fragmented thin-shell objects containing small pieces, whose pairwise matching is usually unreliable and ambiguous and hence their reassembly remains challenging to the existing algorithms.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper studies matching of fragmented objects to recompose their original geometry. Solving this geometric reassembly problem has direct applications in archaeology and forensic investigation in the computer-aided restoration of damaged artifacts and evidence. We develop a new algorithm to effectively integrate both guidance from a template and from matching of adjacent pieces' fracture-regions. First, we compute partial matchings between fragments and a template, and pairwise matchings among fragments. Many potential matches are obtained and then selected/refined in a multi-piece matching stage to maximize global groupwise matching consistency. This pipeline is effective in composing fragmented thin-shell objects containing small pieces, whose pairwise matching is usually unreliable and ambiguous and hence their reassembly remains challenging to the existing algorithms.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper studies matching of fragmented objects to recompose their original geometry. Solving this geometric reassembly problem has direct applications in archaeology and forensic investigation in the computer-aided restoration of damaged artifacts and evidence. We develop a new algorithm to effectively integrate both guidance from a template and from matching of adjacent pieces' fracture-regions. First, we compute partial matchings between fragments and a template, and pairwise matchings among fragments. Many potential matches are obtained and then selected/refined in a multi-piece matching stage to maximize global groupwise matching consistency. This pipeline is effective in composing fragmented thin-shell objects containing small pieces, whose pairwise matching is usually unreliable and ambiguous and hence their reassembly remains challenging to the existing algorithms.",
"fno": "8391c138",
"keywords": [
"Feature Extraction",
"Three Dimensional Displays",
"Shape",
"Geometry",
"Forensics",
"Pipelines",
"Surface Cracks"
],
"authors": [
{
"affiliation": null,
"fullName": "Kang Zhang",
"givenName": "Kang",
"surname": "Zhang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Wuyi Yu",
"givenName": "Wuyi",
"surname": "Yu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Mary Manhein",
"givenName": "Mary",
"surname": "Manhein",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Warren Waggenspack",
"givenName": "Warren",
"surname": "Waggenspack",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Xin Li",
"givenName": "Xin",
"surname": "Li",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iccv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2015-12-01T00:00:00",
"pubType": "proceedings",
"pages": "2138-2146",
"year": "2015",
"issn": "2380-7504",
"isbn": "978-1-4673-8391-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "8391c129",
"articleId": "12OmNyOHFZQ",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "8391c147",
"articleId": "12OmNAlvHzS",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iccet/2009/3521/1/3521a495",
"title": "A Fast Reassembly Methodology for Polygon Fragment",
"doi": null,
"abstractUrl": "/proceedings-article/iccet/2009/3521a495/12OmNAXPykE",
"parentPublication": {
"id": null,
"title": null,
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2007/2794/0/27940014",
"title": "Feature-based Part Retrieval for Interactive 3D Reassembly",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2007/27940014/12OmNC17hUs",
"parentPublication": {
"id": "proceedings/wacv/2007/2794/0",
"title": "Applications of Computer Vision, IEEE Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icicta/2009/3804/4/3804e485",
"title": "An Approach for Polygon Fragment Reassembly Based on Multiple Features",
"doi": null,
"abstractUrl": "/proceedings-article/icicta/2009/3804e485/12OmNC3FGhg",
"parentPublication": {
"id": "proceedings/icicta/2009/3804/4",
"title": "Intelligent Computation Technology and Automation, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2003/1900/1/04624516",
"title": "Fast Fragment Assemblage Using Boundary Line and Surface Matching",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2003/04624516/12OmNvlPkvQ",
"parentPublication": {
"id": "proceedings/cvprw/2003/1900/1",
"title": "2003 Conference on Computer Vision and Pattern Recognition Workshop (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2008/2174/0/04761648",
"title": "3D image analysis for evaluating internal deformation/fracture characteristics of materials",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2008/04761648/12OmNyTOssx",
"parentPublication": {
"id": "proceedings/icpr/2008/2174/0",
"title": "ICPR 2008 19th International Conference on Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2010/7029/0/05543532",
"title": "HINDSITE: A user-interactive framework for fragment assembly",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2010/05543532/12OmNzRZq1F",
"parentPublication": {
"id": "proceedings/cvprw/2010/7029/0",
"title": "2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition - Workshops",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wi-iat/2014/4143/3/4143c344",
"title": "Content-Based Feature Matching for Fragment Reassembly of Ceramic Reconstruction",
"doi": null,
"abstractUrl": "/proceedings-article/wi-iat/2014/4143c344/12OmNznkK64",
"parentPublication": {
"id": "proceedings/wi-iat/2014/4143/3",
"title": "2014 IEEE/WIC/ACM International Joint Conferences on Web Intelligence (WI) and Intelligent Agent Technologies (IAT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2009/4442/0/05457502",
"title": "Virtual 3D bone fracture reconstruction via inter-fragmentary surface alignment",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2009/05457502/12OmNzvQHMO",
"parentPublication": {
"id": "proceedings/iccvw/2009/4442/0",
"title": "2009 IEEE 12th International Conference on Computer Vision Workshops, ICCV Workshops",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200f423",
"title": "Structure-from-Sherds: Incremental 3D Reassembly of Axially Symmetric Pots from Unordered and Mixed Fragment Collections",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200f423/1BmJIOiqxNe",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2021/8808/0/09412372",
"title": "3D Pots Configuration System by Optimizing over Geometric Constraints",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2021/09412372/1tmhOBeNpbW",
"parentPublication": {
"id": "proceedings/icpr/2021/8808/0",
"title": "2020 25th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNqAU6sQ",
"title": "Computer Research and Development, International Conference on",
"acronym": "iccrd",
"groupId": "1800063",
"volume": "0",
"displayVolume": "0",
"year": "2010",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNvEhfYT",
"doi": "10.1109/ICCRD.2010.13",
"title": "Fast Traversal Algorithm for Detecting Object Interference Using Hierarchical Representation between Rigid Bodies",
"normalizedTitle": "Fast Traversal Algorithm for Detecting Object Interference Using Hierarchical Representation between Rigid Bodies",
"abstract": "Searching for fast and efficient algorithm to perform collision detection between static and moving objects is always fundamental problems in virtual environment. Most of previous method seems trying to tackle the problems of involving specific geometric models colliding pairs with restricted rules and guidelines. For example, convex hull bounding-volume tends to solve the collision detection problems by make the collision more accurate. However, its limitation of performing fast collision detection method must be left behind. In this paper, we introduce new traversal scheme using depth first search algorithm called earlier node detection algorithm. This algorithm automatically performs simultaneously for child nodes intersection compared to root intersection test. Result shows that our algorithm works perfectly for rigid models when performing collision detection. In practice, the new traversal algorithms helps improve the situation of determine two or more collision between rigid models especially in urban simulation.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Searching for fast and efficient algorithm to perform collision detection between static and moving objects is always fundamental problems in virtual environment. Most of previous method seems trying to tackle the problems of involving specific geometric models colliding pairs with restricted rules and guidelines. For example, convex hull bounding-volume tends to solve the collision detection problems by make the collision more accurate. However, its limitation of performing fast collision detection method must be left behind. In this paper, we introduce new traversal scheme using depth first search algorithm called earlier node detection algorithm. This algorithm automatically performs simultaneously for child nodes intersection compared to root intersection test. Result shows that our algorithm works perfectly for rigid models when performing collision detection. In practice, the new traversal algorithms helps improve the situation of determine two or more collision between rigid models especially in urban simulation.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Searching for fast and efficient algorithm to perform collision detection between static and moving objects is always fundamental problems in virtual environment. Most of previous method seems trying to tackle the problems of involving specific geometric models colliding pairs with restricted rules and guidelines. For example, convex hull bounding-volume tends to solve the collision detection problems by make the collision more accurate. However, its limitation of performing fast collision detection method must be left behind. In this paper, we introduce new traversal scheme using depth first search algorithm called earlier node detection algorithm. This algorithm automatically performs simultaneously for child nodes intersection compared to root intersection test. Result shows that our algorithm works perfectly for rigid models when performing collision detection. In practice, the new traversal algorithms helps improve the situation of determine two or more collision between rigid models especially in urban simulation.",
"fno": "4043a007",
"keywords": [
"Bounding Volume",
"Bounding Volume Hierarchies",
"Collision Detection",
"Urban Simulation"
],
"authors": [
{
"affiliation": null,
"fullName": "Hamzah Asyrani Sulaiman",
"givenName": "Hamzah Asyrani",
"surname": "Sulaiman",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Abdullah Bade",
"givenName": "Abdullah",
"surname": "Bade",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Norhaida Mohd Suaib",
"givenName": "Norhaida Mohd",
"surname": "Suaib",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iccrd",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2010-05-01T00:00:00",
"pubType": "proceedings",
"pages": "7-11",
"year": "2010",
"issn": null,
"isbn": "978-0-7695-4043-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "4043a003",
"articleId": "12OmNzAohPP",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "4043a012",
"articleId": "12OmNz5s0MO",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icecs/2009/3937/0/3937a436",
"title": "Bounding-Volume Hierarchies Technique for Detecting Object Interference in Urban Environment Simulation",
"doi": null,
"abstractUrl": "/proceedings-article/icecs/2009/3937a436/12OmNAndiev",
"parentPublication": {
"id": "proceedings/icecs/2009/3937/0",
"title": "Environmental and Computer Science, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmtma/2010/3962/2/3962c853",
"title": "Optimization of Collision Detection Algorithm Based on OBB",
"doi": null,
"abstractUrl": "/proceedings-article/icmtma/2010/3962c853/12OmNCwCLou",
"parentPublication": {
"id": "proceedings/icmtma/2010/3962/2",
"title": "2010 International Conference on Measuring Technology and Mechatronics Automation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccsee/2012/4647/3/4647c547",
"title": "The Algorithm of Fast Collision Detection Based on Hybrid Bounding Box",
"doi": null,
"abstractUrl": "/proceedings-article/iccsee/2012/4647c547/12OmNro0HX1",
"parentPublication": {
"id": "proceedings/iccsee/2012/4647/3",
"title": "Computer Science and Electronics Engineering, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cis/2010/4297/0/4297a109",
"title": "An Algorithm of Collision Detection Based on Hybrid Model",
"doi": null,
"abstractUrl": "/proceedings-article/cis/2010/4297a109/12OmNwbLVqf",
"parentPublication": {
"id": "proceedings/cis/2010/4297/0",
"title": "2010 International Conference on Computational Intelligence and Security",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ifcsta/2009/3930/3/3930c410",
"title": "A Collision Detection Method Based on the Virtual Occluders",
"doi": null,
"abstractUrl": "/proceedings-article/ifcsta/2009/3930c410/12OmNzVGcNi",
"parentPublication": {
"id": null,
"title": null,
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccsee/2012/4647/3/4647c538",
"title": "The Collision Detection Algorithm in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/iccsee/2012/4647c538/12OmNzWx0b7",
"parentPublication": {
"id": "proceedings/iccsee/2012/4647/3",
"title": "Computer Science and Electronics Engineering, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/achi/2010/3957/0/3957a107",
"title": "Detecting Self-Collisions Using a Hybrid Bounding Volume Algorithm",
"doi": null,
"abstractUrl": "/proceedings-article/achi/2010/3957a107/12OmNzyGH6A",
"parentPublication": {
"id": "proceedings/achi/2010/3957/0",
"title": "International Conference on Advances in Computer-Human Interaction",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icctd/2009/3892/2/3892b430",
"title": "On Faster Bounding Volume Hierarchy Construction for Avatar Collision Detection",
"doi": null,
"abstractUrl": "/proceedings-article/icctd/2009/3892b430/12OmNzyp63p",
"parentPublication": {
"id": "proceedings/icctd/2009/3892/2",
"title": "Computer Technology and Development, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/1998/01/v0021",
"title": "Efficient Collision Detection Using Bounding Volume Hierarchies of k-DOPs",
"doi": null,
"abstractUrl": "/journal/tg/1998/01/v0021/13rRUNvgyW9",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2009/04/ttg2009040544",
"title": "ICCD: Interactive Continuous Collision Detection between Deformable Models Using Connectivity-Based Culling",
"doi": null,
"abstractUrl": "/journal/tg/2009/04/ttg2009040544/13rRUyuegp3",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNxzMnU4",
"title": "2017 Nicograph International (NicoInt)",
"acronym": "nicoint",
"groupId": "1814784",
"volume": "0",
"displayVolume": "0",
"year": "2017",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNwCJOQ1",
"doi": "10.1109/NICOInt.2017.22",
"title": "A Study of Assembly Navigation Operation with 2-D Panel for Restoring Fractured Objects",
"normalizedTitle": "A Study of Assembly Navigation Operation with 2-D Panel for Restoring Fractured Objects",
"abstract": "In this paper, we propose an assembly navigation operation using a 2-D panel that takes into account flow patterns and outline information. The aim is to realize an efficient search for adjacent fragments using a fracture restoration interface. In this method, a point cloud representing the surface of the earthenware fragments is measured first by a 3-D measurement device. Then, the obtained data is analyzed to detect the feature points of outlines. The feature points are set to the end points of an outline and the outline is divided to obtain division lines. In addition, a flow pattern is extracted from the measured points and is approximated by straight lines. Finally, the point cloud data, extracted pattern flow, and outline information are input to the interface. Using the input dataset, a navigation function is employed to find adjacent candidates for the fragments. We implemented this system and examined its operation and usefulness.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this paper, we propose an assembly navigation operation using a 2-D panel that takes into account flow patterns and outline information. The aim is to realize an efficient search for adjacent fragments using a fracture restoration interface. In this method, a point cloud representing the surface of the earthenware fragments is measured first by a 3-D measurement device. Then, the obtained data is analyzed to detect the feature points of outlines. The feature points are set to the end points of an outline and the outline is divided to obtain division lines. In addition, a flow pattern is extracted from the measured points and is approximated by straight lines. Finally, the point cloud data, extracted pattern flow, and outline information are input to the interface. Using the input dataset, a navigation function is employed to find adjacent candidates for the fragments. We implemented this system and examined its operation and usefulness.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this paper, we propose an assembly navigation operation using a 2-D panel that takes into account flow patterns and outline information. The aim is to realize an efficient search for adjacent fragments using a fracture restoration interface. In this method, a point cloud representing the surface of the earthenware fragments is measured first by a 3-D measurement device. Then, the obtained data is analyzed to detect the feature points of outlines. The feature points are set to the end points of an outline and the outline is divided to obtain division lines. In addition, a flow pattern is extracted from the measured points and is approximated by straight lines. Finally, the point cloud data, extracted pattern flow, and outline information are input to the interface. Using the input dataset, a navigation function is employed to find adjacent candidates for the fragments. We implemented this system and examined its operation and usefulness.",
"fno": "5332a057",
"keywords": [
"Three Dimensional Displays",
"Navigation",
"Feature Extraction",
"Data Visualization",
"Data Mining",
"Pattern Matching",
"Surface Cracks",
"Interface",
"2 D Panel",
"Feature Value",
"Assembly Navigation Operation"
],
"authors": [
{
"affiliation": null,
"fullName": "Chunyuan Li",
"givenName": "Chunyuan",
"surname": "Li",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Katsutsugu Matsuyama",
"givenName": "Katsutsugu",
"surname": "Matsuyama",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Kouichi Konno",
"givenName": "Kouichi",
"surname": "Konno",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "nicoint",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2017-06-01T00:00:00",
"pubType": "proceedings",
"pages": "57-60",
"year": "2017",
"issn": null,
"isbn": "978-1-5090-5332-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "5332a053",
"articleId": "12OmNxwENlS",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "5332a061",
"articleId": "12OmNB8TU9V",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iccv/2015/8391/0/8391c138",
"title": "3D Fragment Reassembly Using Integrated Template Guidance and Fracture-Region Matching",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2015/8391c138/12OmNCcKQCl",
"parentPublication": {
"id": "proceedings/iccv/2015/8391/0",
"title": "2015 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmtma/2009/3583/1/3583a845",
"title": "Research of Operation Method of Automation Control Model in a Single Ice-Covered Canal",
"doi": null,
"abstractUrl": "/proceedings-article/icmtma/2009/3583a845/12OmNrAMEMN",
"parentPublication": {
"id": "proceedings/icmtma/2009/3583/3",
"title": "2009 International Conference on Measuring Technology and Mechatronics Automation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icccnt/2014/2696/0/06963004",
"title": "Virtual merging of fractured fragments based on constraint cluster",
"doi": null,
"abstractUrl": "/proceedings-article/icccnt/2014/06963004/12OmNwErpBv",
"parentPublication": {
"id": "proceedings/icccnt/2014/2696/0",
"title": "2014 5th International Conference on Computing, Communication and Networking Technologies (ICCCNT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/citworkshops/2008/3242/0/3242a342",
"title": "Quadric Polynomial Interpolation Based on Minimum Local Stretching Energy",
"doi": null,
"abstractUrl": "/proceedings-article/citworkshops/2008/3242a342/12OmNx8OuuI",
"parentPublication": {
"id": "proceedings/citworkshops/2008/3242/0",
"title": "Computer and Information Technology, IEEE 8th International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2003/1900/1/04624519",
"title": "Accurately Estimating Sherd 3D Surface Geometry with Application to Pot Reconstruction",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2003/04624519/12OmNzVXNPd",
"parentPublication": {
"id": "proceedings/cvprw/2003/1900/1",
"title": "2003 Conference on Computer Vision and Pattern Recognition Workshop (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/11/ttg2012111969",
"title": "3D Scatterplot Navigation",
"doi": null,
"abstractUrl": "/journal/tg/2012/11/ttg2012111969/13rRUB6Sq0y",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/01/ttg2014010030",
"title": "Fast Collision Detection for Fracturing Rigid Bodies",
"doi": null,
"abstractUrl": "/journal/tg/2014/01/ttg2014010030/13rRUxNW1TU",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2010/04/ttp2010040662",
"title": "Ricci Flow for 3D Shape Analysis",
"doi": null,
"abstractUrl": "/journal/tp/2010/04/ttp2010040662/13rRUyv53Gz",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2018/3788/0/08545439",
"title": "A Fast Local Analysis by Thresholding applied to image matching",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2018/08545439/17D45WXIkG2",
"parentPublication": {
"id": "proceedings/icpr/2018/3788/0",
"title": "2018 24th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iri/2020/1054/0/09191582",
"title": "Multimodal Information Integration for Indoor Navigation Using a Smartphone",
"doi": null,
"abstractUrl": "/proceedings-article/iri/2020/09191582/1n0IyhMjgFa",
"parentPublication": {
"id": "proceedings/iri/2020/1054/0",
"title": "2020 IEEE 21st International Conference on Information Reuse and Integration for Data Science (IRI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNBSBk6z",
"title": "2014 5th International Conference on Computing, Communication and Networking Technologies (ICCCNT)",
"acronym": "icccnt",
"groupId": "1802177",
"volume": "0",
"displayVolume": "0",
"year": "2014",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNwErpBv",
"doi": "10.1109/ICCCNT.2014.6963004",
"title": "Virtual merging of fractured fragments based on constraint cluster",
"normalizedTitle": "Virtual merging of fractured fragments based on constraint cluster",
"abstract": "This paper proposes a fracture surface matching algorithm based on integral invariants. We compute points' volume descriptor on multi-scale, search similar feature points to build matching constraint clusters. Extracting constrained cluster feature and giving a representation, according to convex and concave correspondence of cluster surface, it obtains initial matching cluster pairs. We use spatial geometric consistency constraint's vote method and search the maximum independent set to prune non-matching pairs, so this coarse registration becomes an optimization problem. At last two fragments can be precisely aligned by ICPIF method based on the result of coarse matching. Experimental results show that the algorithm can achieve better matching and reassemble.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper proposes a fracture surface matching algorithm based on integral invariants. We compute points' volume descriptor on multi-scale, search similar feature points to build matching constraint clusters. Extracting constrained cluster feature and giving a representation, according to convex and concave correspondence of cluster surface, it obtains initial matching cluster pairs. We use spatial geometric consistency constraint's vote method and search the maximum independent set to prune non-matching pairs, so this coarse registration becomes an optimization problem. At last two fragments can be precisely aligned by ICPIF method based on the result of coarse matching. Experimental results show that the algorithm can achieve better matching and reassemble.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper proposes a fracture surface matching algorithm based on integral invariants. We compute points' volume descriptor on multi-scale, search similar feature points to build matching constraint clusters. Extracting constrained cluster feature and giving a representation, according to convex and concave correspondence of cluster surface, it obtains initial matching cluster pairs. We use spatial geometric consistency constraint's vote method and search the maximum independent set to prune non-matching pairs, so this coarse registration becomes an optimization problem. At last two fragments can be precisely aligned by ICPIF method based on the result of coarse matching. Experimental results show that the algorithm can achieve better matching and reassemble.",
"fno": "06963004",
"keywords": [
"Surface Cracks",
"Feature Extraction",
"Surface Reconstruction",
"Clustering Algorithms",
"Electronic Mail",
"Computer Vision",
"Search Problems",
"MIS",
"Integral Invariants",
"Multi Scale",
"Constraint Cluster",
"Geometric Consistency"
],
"authors": [
{
"affiliation": "Department of Information Science and Technology, the Northwest University, Xi'an, China",
"fullName": "Li Jijunnan",
"givenName": "Li",
"surname": "Jijunnan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Department of Information Science and Technology, the Northwest University, Xi'an, China",
"fullName": "Geng Guohua",
"givenName": "Geng",
"surname": "Guohua",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Department of Information Science and Technology, the Northwest University, Xi'an, China",
"fullName": "Liu Lunchun",
"givenName": "Liu",
"surname": "Lunchun",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icccnt",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2014-07-01T00:00:00",
"pubType": "proceedings",
"pages": "1-4",
"year": "2014",
"issn": null,
"isbn": "978-1-4799-2696-1",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "06963003",
"articleId": "12OmNx4gUmu",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "06963005",
"articleId": "12OmNAQanwV",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cvpr/1988/0862/0/00196231",
"title": "Local constraint integration in a connectionist model of stereo vision",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/1988/00196231/12OmNC4eSmg",
"parentPublication": {
"id": "proceedings/cvpr/1988/0862/0",
"title": "Proceedings CVPR '88: The Computer Society Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/crv/2007/2786/0/04228542",
"title": "Dense Stereo Range Sensing with Marching Pseudo-Random Patterns",
"doi": null,
"abstractUrl": "/proceedings-article/crv/2007/04228542/12OmNrHSD0U",
"parentPublication": {
"id": "proceedings/crv/2007/2786/0",
"title": "Fourth Canadian Conference on Computer and Robot Vision (CRV '07)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icisa/2010/5942/0/05480532",
"title": "Detection of Image Fragments Related by Affine Transforms: Matching Triangles and Ellipses",
"doi": null,
"abstractUrl": "/proceedings-article/icisa/2010/05480532/12OmNrIrPfH",
"parentPublication": {
"id": "proceedings/icisa/2010/5942/0",
"title": "2010 International Conference on Information Science and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2002/1695/3/01047853",
"title": "Bayesian pot-assembly from fragments as problems in perceptual-grouping and geometric-learning",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2002/01047853/12OmNwCsdJL",
"parentPublication": {
"id": "proceedings/icpr/2002/1695/3",
"title": "Pattern Recognition, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2012/2216/0/06460716",
"title": "Robust and accurate multi-view reconstruction by prioritized matching",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2012/06460716/12OmNwsNRgD",
"parentPublication": {
"id": "proceedings/icpr/2012/2216/0",
"title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icic/2010/4047/2/4047b003",
"title": "Image Retrieval Based on an Optimum Matched Cluster-pairs Set",
"doi": null,
"abstractUrl": "/proceedings-article/icic/2010/4047b003/12OmNyaXPPd",
"parentPublication": {
"id": "proceedings/icic/2010/4047/1",
"title": "2010 Third International Conference on Information and Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/avss/2012/4797/0/4797a172",
"title": "CLUMOC: Multiple Motion Estimation by Cluster Motion Consensus",
"doi": null,
"abstractUrl": "/proceedings-article/avss/2012/4797a172/12OmNzBOhOK",
"parentPublication": {
"id": "proceedings/avss/2012/4797/0",
"title": "2012 IEEE Ninth International Conference on Advanced Video and Signal-Based Surveillance",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/acsc/2001/0963/0/09630054",
"title": "A Cluster-based Geophysical Template Matching System",
"doi": null,
"abstractUrl": "/proceedings-article/acsc/2001/09630054/12OmNzTH0GR",
"parentPublication": {
"id": "proceedings/acsc/2001/0963/0",
"title": "Australasian Computer Science Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2010/4109/0/4109a577",
"title": "Cluster-Pairwise Discriminant Analysis",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2010/4109a577/12OmNzYNN7j",
"parentPublication": {
"id": "proceedings/icpr/2010/4109/0",
"title": "Pattern Recognition, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2009/4442/0/05457502",
"title": "Virtual 3D bone fracture reconstruction via inter-fragmentary surface alignment",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2009/05457502/12OmNzvQHMO",
"parentPublication": {
"id": "proceedings/iccvw/2009/4442/0",
"title": "2009 IEEE 12th International Conference on Computer Vision Workshops, ICCV Workshops",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNy9Prj1",
"title": "2017 IEEE International Conference on Computer Vision Workshop (ICCVW)",
"acronym": "iccvw",
"groupId": "1800041",
"volume": "0",
"displayVolume": "0",
"year": "2017",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNwK7obr",
"doi": "10.1109/ICCVW.2017.346",
"title": "Geometry Based Faceting of 3D Digitized Archaeological Fragments",
"normalizedTitle": "Geometry Based Faceting of 3D Digitized Archaeological Fragments",
"abstract": "We present a robust pipeline for segmenting digital cultural heritage fragments into distinct facets, with few tunable yet archaeologically meaningful parameters. Given a terracotta broken artifact, digitally scanned in the form of irregularly sampled 3D mesh, our method first estimates the local angles of fractures by applying weighted eigenanalysis of the local neighborhoods. Using 3D fit of a quadratic polynomial, we estimate the directional derivative of the angle function along the maximum bending direction for accurate localization of the fracture lines across the mesh. Then, the salient fracture lines are detected and incidental possible gaps between them are closed in order to extract a set of closed facets. Finally, the facets are categorized into fracture and skin. The method is tested on two different datasets of the GRAVITATE project.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We present a robust pipeline for segmenting digital cultural heritage fragments into distinct facets, with few tunable yet archaeologically meaningful parameters. Given a terracotta broken artifact, digitally scanned in the form of irregularly sampled 3D mesh, our method first estimates the local angles of fractures by applying weighted eigenanalysis of the local neighborhoods. Using 3D fit of a quadratic polynomial, we estimate the directional derivative of the angle function along the maximum bending direction for accurate localization of the fracture lines across the mesh. Then, the salient fracture lines are detected and incidental possible gaps between them are closed in order to extract a set of closed facets. Finally, the facets are categorized into fracture and skin. The method is tested on two different datasets of the GRAVITATE project.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We present a robust pipeline for segmenting digital cultural heritage fragments into distinct facets, with few tunable yet archaeologically meaningful parameters. Given a terracotta broken artifact, digitally scanned in the form of irregularly sampled 3D mesh, our method first estimates the local angles of fractures by applying weighted eigenanalysis of the local neighborhoods. Using 3D fit of a quadratic polynomial, we estimate the directional derivative of the angle function along the maximum bending direction for accurate localization of the fracture lines across the mesh. Then, the salient fracture lines are detected and incidental possible gaps between them are closed in order to extract a set of closed facets. Finally, the facets are categorized into fracture and skin. The method is tested on two different datasets of the GRAVITATE project.",
"fno": "1034c934",
"keywords": [
"Three Dimensional Displays",
"Surface Cracks",
"Face",
"Pipelines",
"Skin",
"Cultural Differences",
"Covariance Matrices"
],
"authors": [
{
"affiliation": null,
"fullName": "Hanan ElNaghy",
"givenName": "Hanan",
"surname": "ElNaghy",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Leo Dorst",
"givenName": "Leo",
"surname": "Dorst",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iccvw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2017-10-01T00:00:00",
"pubType": "proceedings",
"pages": "2934-2942",
"year": "2017",
"issn": "2473-9944",
"isbn": "978-1-5386-1034-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "1034c925",
"articleId": "12OmNwtEEFm",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "1034c943",
"articleId": "12OmNyL0Tpq",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iccvw/2009/4442/0/05457607",
"title": "3D line drawing for archaeological illustration",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2009/05457607/12OmNrNh0I5",
"parentPublication": {
"id": "proceedings/iccvw/2009/4442/0",
"title": "2009 IEEE 12th International Conference on Computer Vision Workshops, ICCV Workshops",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798294",
"title": "Archaeological Excavation Simulation for Interaction in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798294/1cJ0TNjjJp6",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNs0kyru",
"title": "2007 IEEE Virtual Reality Conference",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2007",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNyQYttV",
"doi": "10.1109/VR.2007.352467",
"title": "Balanced Hierarchies for Collision Detection between Fracturing Objects",
"normalizedTitle": "Balanced Hierarchies for Collision Detection between Fracturing Objects",
"abstract": "The simulation of fracture leads to collision-intensive situations that call for efficient collision detection algorithms and data structures. Bounding volume hierarchies (BVHs) are a popular approach for accelerating collision detection, but they rarely see application in fracture simulations, due to the dynamic creation and deletion of geometric primitives. We propose the use of balanced trees for storing BVHs, as well as novel algorithms for dynamically restructuring them in the presence of progressive or instantaneous fracture. By paying a small loss of fitting quality compared with complete reconstruction, we achieve more than one order of magnitude speedup in the update of BVHs",
"abstracts": [
{
"abstractType": "Regular",
"content": "The simulation of fracture leads to collision-intensive situations that call for efficient collision detection algorithms and data structures. Bounding volume hierarchies (BVHs) are a popular approach for accelerating collision detection, but they rarely see application in fracture simulations, due to the dynamic creation and deletion of geometric primitives. We propose the use of balanced trees for storing BVHs, as well as novel algorithms for dynamically restructuring them in the presence of progressive or instantaneous fracture. By paying a small loss of fitting quality compared with complete reconstruction, we achieve more than one order of magnitude speedup in the update of BVHs",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The simulation of fracture leads to collision-intensive situations that call for efficient collision detection algorithms and data structures. Bounding volume hierarchies (BVHs) are a popular approach for accelerating collision detection, but they rarely see application in fracture simulations, due to the dynamic creation and deletion of geometric primitives. We propose the use of balanced trees for storing BVHs, as well as novel algorithms for dynamically restructuring them in the presence of progressive or instantaneous fracture. By paying a small loss of fitting quality compared with complete reconstruction, we achieve more than one order of magnitude speedup in the update of BVHs",
"fno": "04161009",
"keywords": [
"Computational Geometry",
"Path Planning",
"Balanced Hierarchies",
"Collision Detection",
"Fracturing Objects",
"Collision Intensive Situations",
"Data Structures",
"Bounding Volume Hierarchies",
"Instantaneous Fracture",
"Computational Geometry",
"Object Modeling",
"Object Detection",
"Data Structures",
"Costs",
"Computer Graphics",
"Acceleration",
"Computational Modeling",
"Solid Modeling",
"Heuristic Algorithms",
"Computational Geometry",
"Surface Cracks",
"Collision Detection",
"AVL Trees",
"Fracture",
"I 3 5 Computer Graphics Computational Geometry And Object Modeling Object Hierarchies"
],
"authors": [
{
"affiliation": "Computer Graphics Laboratory, ETH Zurich, e-mail: otaduy@inf.ethz.ch",
"fullName": "Miguel A. Otaduy",
"givenName": "Miguel A.",
"surname": "Otaduy",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Computer Graphics Laboratory, ETH Zurich, e-mail: olivier.chassot@alumni.ethz.ch",
"fullName": "Olivier Chassot",
"givenName": "Olivier",
"surname": "Chassot",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Computer Graphics Laboratory, ETH Zurich, e-mail: deniss@inf.ethz.ch",
"fullName": "Denis Steinemann",
"givenName": "Denis",
"surname": "Steinemann",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Computer Graphics Laboratory, ETH Zurich, e-mail: grossm@inf.ethz.ch",
"fullName": "Markus Gross",
"givenName": "Markus",
"surname": "Gross",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2007-03-01T00:00:00",
"pubType": "proceedings",
"pages": "83-90",
"year": "2007",
"issn": "1087-8270",
"isbn": "1-4244-0905-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "04160993",
"articleId": "12OmNBZYTov",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "04160994",
"articleId": "12OmNwHyZTc",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icicse/2012/4705/0/4705a042",
"title": "An Efficiency Collision Detection Algorithm for Rigid Objects",
"doi": null,
"abstractUrl": "/proceedings-article/icicse/2012/4705a042/12OmNvwC5uA",
"parentPublication": {
"id": "proceedings/icicse/2012/4705/0",
"title": "2012 Sixth International Conference on Internet Computing for Science and Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/rt/2007/1629/0/04342593",
"title": "Early Split Clipping for Bounding Volume Hierarchies",
"doi": null,
"abstractUrl": "/proceedings-article/rt/2007/04342593/12OmNvxKu0c",
"parentPublication": {
"id": "proceedings/rt/2007/1629/0",
"title": "IEEE/ EG Symposium on Interactive Ray Tracing 2007",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cad-graphics/2015/8020/0/07450272",
"title": "An Adaptive Spherical Collision Detection and Resolution Method for Deformable Object Simulation",
"doi": null,
"abstractUrl": "/proceedings-article/cad-graphics/2015/07450272/12OmNwqft4m",
"parentPublication": {
"id": "proceedings/cad-graphics/2015/8020/0",
"title": "2015 14th International Conference on Computer-Aided Design and Computer Graphics (CAD/Graphics)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibe/2015/7983/0/07367668",
"title": "Customized anatomically adjusted plate for fixation of mandible internal fractures",
"doi": null,
"abstractUrl": "/proceedings-article/bibe/2015/07367668/12OmNxRF70T",
"parentPublication": {
"id": "proceedings/bibe/2015/7983/0",
"title": "2015 IEEE 15th International Conference on Bioinformatics and Bioengineering (BIBE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/robot/1992/2720/0/00220063",
"title": "Fast distance computation for on-line collision detection with multi-arm robots",
"doi": null,
"abstractUrl": "/proceedings-article/robot/1992/00220063/12OmNz61d1V",
"parentPublication": {
"id": "proceedings/robot/1992/2720/0",
"title": "Proceedings 1992 IEEE International Conference on Robotics and Automation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/08/08419292",
"title": "Real-Time Collision Detection for Deformable Characters with Radial Fields",
"doi": null,
"abstractUrl": "/journal/tg/2019/08/08419292/13rRUwInvsZ",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2007/02/v0370",
"title": "Fracturing Rigid Materials",
"doi": null,
"abstractUrl": "/journal/tg/2007/02/v0370/13rRUwfZC07",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/01/ttg2014010030",
"title": "Fast Collision Detection for Fracturing Rigid Bodies",
"doi": null,
"abstractUrl": "/journal/tg/2014/01/ttg2014010030/13rRUxNW1TU",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2009/02/ttg2009020311",
"title": "Continuous Collision Detection for Ellipsoids",
"doi": null,
"abstractUrl": "/journal/tg/2009/02/ttg2009020311/13rRUxlgxOe",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/12/08219711",
"title": "Chunked Bounding Volume Hierarchies for Fast Digital Prototyping Using Volumetric Meshes",
"doi": null,
"abstractUrl": "/journal/tg/2018/12/08219711/14H4WN3R0By",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNxwWorE",
"title": "2009 IEEE 12th International Conference on Computer Vision Workshops, ICCV Workshops",
"acronym": "iccvw",
"groupId": "1800041",
"volume": "0",
"displayVolume": "0",
"year": "2009",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNzvQHMO",
"doi": "10.1109/ICCVW.2009.5457502",
"title": "Virtual 3D bone fracture reconstruction via inter-fragmentary surface alignment",
"normalizedTitle": "Virtual 3D bone fracture reconstruction via inter-fragmentary surface alignment",
"abstract": "This paper presents a system for virtual reconstruction of comminuted bone fractures. The system takes as input a collection of bone fragment models represented as surface meshes, typically segmented from CT data. Users interact with fragment models in a virtual environment to reconstruct the fracture. In contrast to other approaches that are either completely automatic or completely interactive, the system attempts to strike a balance between interaction and automation. There are two key fracture reconstruction interactions: (1) specifying matching surface regions between fragment pairs and (2) initiating pairwise and global fragment alignment optimizations. Each match includes two fragment surface patches hypothesized to correspond in the reconstruction. Each alignment optimization initialized by the user triggers a 3D surface registration which takes as input: (1) the specified matches and (2) the current position of the fragments. The proposed system leverages domain knowledge via user interaction, and incorporates recent advancements in surface registration, to generate fragment reconstructions that are more accurate than manual methods and more reliable than completely automatic methods.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper presents a system for virtual reconstruction of comminuted bone fractures. The system takes as input a collection of bone fragment models represented as surface meshes, typically segmented from CT data. Users interact with fragment models in a virtual environment to reconstruct the fracture. In contrast to other approaches that are either completely automatic or completely interactive, the system attempts to strike a balance between interaction and automation. There are two key fracture reconstruction interactions: (1) specifying matching surface regions between fragment pairs and (2) initiating pairwise and global fragment alignment optimizations. Each match includes two fragment surface patches hypothesized to correspond in the reconstruction. Each alignment optimization initialized by the user triggers a 3D surface registration which takes as input: (1) the specified matches and (2) the current position of the fragments. The proposed system leverages domain knowledge via user interaction, and incorporates recent advancements in surface registration, to generate fragment reconstructions that are more accurate than manual methods and more reliable than completely automatic methods.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper presents a system for virtual reconstruction of comminuted bone fractures. The system takes as input a collection of bone fragment models represented as surface meshes, typically segmented from CT data. Users interact with fragment models in a virtual environment to reconstruct the fracture. In contrast to other approaches that are either completely automatic or completely interactive, the system attempts to strike a balance between interaction and automation. There are two key fracture reconstruction interactions: (1) specifying matching surface regions between fragment pairs and (2) initiating pairwise and global fragment alignment optimizations. Each match includes two fragment surface patches hypothesized to correspond in the reconstruction. Each alignment optimization initialized by the user triggers a 3D surface registration which takes as input: (1) the specified matches and (2) the current position of the fragments. The proposed system leverages domain knowledge via user interaction, and incorporates recent advancements in surface registration, to generate fragment reconstructions that are more accurate than manual methods and more reliable than completely automatic methods.",
"fno": "05457502",
"keywords": [
"Bone",
"Computational Geometry",
"Medical Computing",
"Orthopaedics",
"Virtual Reality",
"Virtual 3 D Bone Fracture Reconstruction",
"Interfragmentary Surface Alignment",
"Virtual Reconstruction",
"Comminuted Bone Fractures",
"Bone Fragment Model",
"Surface Meshes",
"Virtual Environment",
"Surface Regions Matching",
"Pairwise Fragment Alignment Optimization",
"Global Fragment Alignment Optimization",
"3 D Surface Registration",
"Fragment Reconstruction",
"Bones",
"Surface Cracks",
"Surface Reconstruction",
"Surgery",
"Virtual Environment",
"Surges",
"Biological Tissues",
"Surface Treatment",
"Conferences",
"Cities And Towns"
],
"authors": [
{
"affiliation": "University of North Carolina at Charlotte, 28223, USA",
"fullName": "Beibei Zhou",
"givenName": null,
"surname": "Beibei Zhou",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of North Carolina at Charlotte, 28223, USA",
"fullName": "Andrew Willis",
"givenName": "Andrew",
"surname": "Willis",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of North Carolina at Charlotte, 28223, USA",
"fullName": "Yunfeng Sui",
"givenName": null,
"surname": "Yunfeng Sui",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "The University of Iowa, 2181 Westlawn Building, 52242, USA",
"fullName": "Donald D. Anderson",
"givenName": "Donald D.",
"surname": "Anderson",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "The University of Iowa, 2181 Westlawn Building, 52242, USA",
"fullName": "Thomas D. Brown",
"givenName": "Thomas D.",
"surname": "Brown",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "The University of Iowa, 2181 Westlawn Building, 52242, USA",
"fullName": "Thaddeus P. Thomas",
"givenName": "Thaddeus P.",
"surname": "Thomas",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iccvw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2009-09-01T00:00:00",
"pubType": "proceedings",
"pages": "1809-1816",
"year": "2009",
"issn": null,
"isbn": "978-1-4244-4442-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "05457501",
"articleId": "12OmNya72qU",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "05457503",
"articleId": "12OmNrK9q0D",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icccnt/2013/3926/0/06726590",
"title": "Quantitative analysis and fracture detection of pelvic bone X-ray images",
"doi": null,
"abstractUrl": "/proceedings-article/icccnt/2013/06726590/12OmNAFnCwS",
"parentPublication": {
"id": "proceedings/icccnt/2013/3926/0",
"title": "2013 Fourth International Conference on Computing, Communications and Networking Technologies (ICCCNT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cgiv/2016/0811/0/0811a196",
"title": "Trabecular Bone Radiograph Characterization Using Lacunarity Measurement",
"doi": null,
"abstractUrl": "/proceedings-article/cgiv/2016/0811a196/12OmNAkWvnp",
"parentPublication": {
"id": "proceedings/cgiv/2016/0811/0",
"title": "2016 13th International Conference on Computer Graphics, Imaging and Visualization (CGiV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmtma/2009/3583/3/3583c214",
"title": "Genetic Algorithm Based Feature Selection for Fracture Surface Images Classification",
"doi": null,
"abstractUrl": "/proceedings-article/icmtma/2009/3583c214/12OmNqJHFw9",
"parentPublication": {
"id": null,
"title": null,
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icccnt/2012/9999/0/06395889",
"title": "Multiple classification system for fracture detection in human bone x-ray images",
"doi": null,
"abstractUrl": "/proceedings-article/icccnt/2012/06395889/12OmNqzu6Pq",
"parentPublication": {
"id": "proceedings/icccnt/2012/9999/0",
"title": "2012 Third International Conference on Computing, Communication and Networking Technologies (ICCCNT 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/itme/2015/8302/0/8302a154",
"title": "Research Hotspots Analysis of Advanced Aged Patients Fracture Treatment by PubMed",
"doi": null,
"abstractUrl": "/proceedings-article/itme/2015/8302a154/12OmNvTTcc9",
"parentPublication": {
"id": "proceedings/itme/2015/8302/0",
"title": "2015 7th International Conference on Information Technology in Medicine and Education (ITME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/at-equal/2010/8842/0/05663576",
"title": "Intramedullary/Intraosseus Osteosynthesis of Pilon and Ankle Fractures",
"doi": null,
"abstractUrl": "/proceedings-article/at-equal/2010/05663576/12OmNwO5M0x",
"parentPublication": {
"id": "proceedings/at-equal/2010/8842/0",
"title": "2010 Advanced Technologies for Enhancing Quality of Life (ATEQUAL 2010)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sbec/2016/2132/0/07459016",
"title": "Confounding in Electrical Signals of Bone as the Fracture Heals",
"doi": null,
"abstractUrl": "/proceedings-article/sbec/2016/07459016/12OmNylKAI7",
"parentPublication": {
"id": "proceedings/sbec/2016/2132/0",
"title": "2016 32nd Southern Biomedical Engineering Conference (SBEC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cs/2013/02/mcs2013020010",
"title": "Automatically Assessing Limb Alignment and Hip Fracture Using 3D Models",
"doi": null,
"abstractUrl": "/magazine/cs/2013/02/mcs2013020010/13rRUx0xPDP",
"parentPublication": {
"id": "mags/cs",
"title": "Computing in Science & Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dasc-picom-cbdcom-cyberscitech/2021/2174/0/217400a385",
"title": "Measurement spatial deformation of bone loading based on 3D surface modeling",
"doi": null,
"abstractUrl": "/proceedings-article/dasc-picom-cbdcom-cyberscitech/2021/217400a385/1BLnsecgoQ8",
"parentPublication": {
"id": "proceedings/dasc-picom-cbdcom-cyberscitech/2021/2174/0",
"title": "2021 IEEE Intl Conf on Dependable, Autonomic and Secure Computing, Intl Conf on Pervasive Intelligence and Computing, Intl Conf on Cloud and Big Data Computing, Intl Conf on Cyber Science and Technology Congress (DASC/PiCom/CBDCom/CyberSciTech)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismvl/2020/5406/0/540600a012",
"title": "Ultrasonic Diagnosis for Bone Fracture Healing Process",
"doi": null,
"abstractUrl": "/proceedings-article/ismvl/2020/540600a012/1qciaUOvFIc",
"parentPublication": {
"id": "proceedings/ismvl/2020/5406/0",
"title": "2020 IEEE 50th International Symposium on Multiple-Valued Logic (ISMVL)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNy5hRcV",
"title": "Information Visualization, IEEE Symposium on",
"acronym": "ieee-infovis",
"groupId": "1000371",
"volume": "0",
"displayVolume": "0",
"year": "2000",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNB0nW9E",
"doi": "10.1109/INFVIS.2000.885094",
"title": "Redefining the Focus and Context of Focus+Context Visualizations",
"normalizedTitle": "Redefining the Focus and Context of Focus+Context Visualizations",
"abstract": "The increasing diversity of computers, especially among small mobile devices such as mobile phones and PDAs, raise new questions about information visualization techniques developed for the desktop computer. Using a series of examples ranging from applications for ordinary desktop displays to web-browsers and other applications for PDAs, we describe how a focus+context technique, Flip Zooming, is changed due to the situation it is used in. Based on these examples, we discuss how the use of \"focus\" and \"context\" in focus+context techniques change in order to fit new areas of use for information visualization.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The increasing diversity of computers, especially among small mobile devices such as mobile phones and PDAs, raise new questions about information visualization techniques developed for the desktop computer. Using a series of examples ranging from applications for ordinary desktop displays to web-browsers and other applications for PDAs, we describe how a focus+context technique, Flip Zooming, is changed due to the situation it is used in. Based on these examples, we discuss how the use of \"focus\" and \"context\" in focus+context techniques change in order to fit new areas of use for information visualization.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The increasing diversity of computers, especially among small mobile devices such as mobile phones and PDAs, raise new questions about information visualization techniques developed for the desktop computer. Using a series of examples ranging from applications for ordinary desktop displays to web-browsers and other applications for PDAs, we describe how a focus+context technique, Flip Zooming, is changed due to the situation it is used in. Based on these examples, we discuss how the use of \"focus\" and \"context\" in focus+context techniques change in order to fit new areas of use for information visualization.",
"fno": "08040085",
"keywords": [
"Information Visualization",
"Personal Digital Assistants",
"Focus Context Techniques"
],
"authors": [
{
"affiliation": "The Interactive Institute",
"fullName": "Staffan Björk",
"givenName": "Staffan",
"surname": "Björk",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "The Interactive Institute",
"fullName": "Johan Redström",
"givenName": "Johan",
"surname": "Redström",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ieee-infovis",
"isOpenAccess": false,
"showRecommendedArticles": false,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2000-10-01T00:00:00",
"pubType": "proceedings",
"pages": "85",
"year": "2000",
"issn": "1522-404X",
"isbn": "0-7695-0804-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08040077",
"articleId": "12OmNx7ov0k",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08040091",
"articleId": "12OmNy314jP",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNBQ2VPO",
"title": "Information Visualization, IEEE Symposium on",
"acronym": "ieee-infovis",
"groupId": "1000371",
"volume": "0",
"displayVolume": "0",
"year": "1999",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNBhpS0L",
"doi": "10.1109/INFVIS.1999.801857",
"title": "A Framework for Focus+Context Visualization",
"normalizedTitle": "A Framework for Focus+Context Visualization",
"abstract": "This paper aims to give a systematic account of focus+context visualization techniques. i.e. visualizations which aim to give users integrated visual access to details and context in a data set. We introduce the notion that there are different orders of information visualization, with focus+context being a second-order visualization and provide a formal framework for describing and constructing focus+context visualizations.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper aims to give a systematic account of focus+context visualization techniques. i.e. visualizations which aim to give users integrated visual access to details and context in a data set. We introduce the notion that there are different orders of information visualization, with focus+context being a second-order visualization and provide a formal framework for describing and constructing focus+context visualizations.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper aims to give a systematic account of focus+context visualization techniques. i.e. visualizations which aim to give users integrated visual access to details and context in a data set. We introduce the notion that there are different orders of information visualization, with focus+context being a second-order visualization and provide a formal framework for describing and constructing focus+context visualizations.",
"fno": "04310053",
"keywords": [],
"authors": [
{
"affiliation": "The Viktoria Institute",
"fullName": "Staffan Björk",
"givenName": "Staffan",
"surname": "Björk",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "The Viktoria Institute",
"fullName": "Lars Erik Holmquist",
"givenName": "Lars Erik",
"surname": "Holmquist",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "The Viktoria Institute",
"fullName": "Johan Redström",
"givenName": "Johan",
"surname": "Redström",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ieee-infovis",
"isOpenAccess": false,
"showRecommendedArticles": false,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "1999-10-01T00:00:00",
"pubType": "proceedings",
"pages": "53",
"year": "1999",
"issn": "1522-404X",
"isbn": "0-7695-0431-0",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "04310044",
"articleId": "12OmNzA6GHc",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "04310058",
"articleId": "12OmNBO3KhU",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNxWcH18",
"title": "2014 International Conference on Cyberworlds (CW)",
"acronym": "cw",
"groupId": "1000175",
"volume": "0",
"displayVolume": "0",
"year": "2014",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNwGZNGM",
"doi": "10.1109/CW.2014.28",
"title": "Multilevel Focus+Context Visualization Using Balanced Multiresolution",
"normalizedTitle": "Multilevel Focus+Context Visualization Using Balanced Multiresolution",
"abstract": "In this paper, we present the construction of a multilevel focus context visualization framework for the navigation and exploration of large-scale 2D and 3D images. The presented framework utilizes a balanced multiresolution (BMR) technique supported by a balanced wavelet transform (BWT). This devised framework extends the mode of focus context visualization, where spatially separate magnification of regions of interest (ROIs) is performed, as opposed to in-place magnification. Each resulting visualization scenario resembles a tree structure, where the root constitutes the main context, each non-root internal node plays the dual roles of both focus and context, and each leaf solely represents a focus. We use the local multiresolution filters of quadratic B-spline to construct the BWT. Our developed prototype supports interactive manipulation of the visualization hierarchy, such as addition and deletion of ROIs and desired changes in their resolutions at any level of the hierarchy on the fly. Changes in the spatial locations of query windows that define the ROIs trigger on-demand reconstruction queries. We describe in detail how to efficiently process such reconstruction queries within the hierarchy of details (wavelet coefficients) contained in the BWT in order to ensure real-time feedback. As the BWT need only be constructed once in a preprocessing phase on the server-side and robust on-demand reconstruction queries require minimal data communication overhead, our presented framework is a suitable candidate for efficient web-based visualization and exploration of complex large-scale imagery.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this paper, we present the construction of a multilevel focus context visualization framework for the navigation and exploration of large-scale 2D and 3D images. The presented framework utilizes a balanced multiresolution (BMR) technique supported by a balanced wavelet transform (BWT). This devised framework extends the mode of focus context visualization, where spatially separate magnification of regions of interest (ROIs) is performed, as opposed to in-place magnification. Each resulting visualization scenario resembles a tree structure, where the root constitutes the main context, each non-root internal node plays the dual roles of both focus and context, and each leaf solely represents a focus. We use the local multiresolution filters of quadratic B-spline to construct the BWT. Our developed prototype supports interactive manipulation of the visualization hierarchy, such as addition and deletion of ROIs and desired changes in their resolutions at any level of the hierarchy on the fly. Changes in the spatial locations of query windows that define the ROIs trigger on-demand reconstruction queries. We describe in detail how to efficiently process such reconstruction queries within the hierarchy of details (wavelet coefficients) contained in the BWT in order to ensure real-time feedback. As the BWT need only be constructed once in a preprocessing phase on the server-side and robust on-demand reconstruction queries require minimal data communication overhead, our presented framework is a suitable candidate for efficient web-based visualization and exploration of complex large-scale imagery.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this paper, we present the construction of a multilevel focus context visualization framework for the navigation and exploration of large-scale 2D and 3D images. The presented framework utilizes a balanced multiresolution (BMR) technique supported by a balanced wavelet transform (BWT). This devised framework extends the mode of focus context visualization, where spatially separate magnification of regions of interest (ROIs) is performed, as opposed to in-place magnification. Each resulting visualization scenario resembles a tree structure, where the root constitutes the main context, each non-root internal node plays the dual roles of both focus and context, and each leaf solely represents a focus. We use the local multiresolution filters of quadratic B-spline to construct the BWT. Our developed prototype supports interactive manipulation of the visualization hierarchy, such as addition and deletion of ROIs and desired changes in their resolutions at any level of the hierarchy on the fly. Changes in the spatial locations of query windows that define the ROIs trigger on-demand reconstruction queries. We describe in detail how to efficiently process such reconstruction queries within the hierarchy of details (wavelet coefficients) contained in the BWT in order to ensure real-time feedback. As the BWT need only be constructed once in a preprocessing phase on the server-side and robust on-demand reconstruction queries require minimal data communication overhead, our presented framework is a suitable candidate for efficient web-based visualization and exploration of complex large-scale imagery.",
"fno": "4677a145",
"keywords": [
"Image Reconstruction",
"Data Visualization",
"Context",
"Image Resolution",
"Vectors",
"Approximation Methods",
"Wavelet Transforms",
"Balanced Wavelet Transform",
"Focus Context Visualization",
"Contextual Close Up",
"Multilevel Visualization",
"Multilevel Hierarchy",
"Multiscale Visualization",
"Balanced Multiresolution",
"Balanced Decomposition",
"Perfect Reconstruction"
],
"authors": [
{
"affiliation": null,
"fullName": "Mahmudul Hasan",
"givenName": "Mahmudul",
"surname": "Hasan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Faramarz F. Samavati",
"givenName": "Faramarz F.",
"surname": "Samavati",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Christian Jacob",
"givenName": "Christian",
"surname": "Jacob",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2014-10-01T00:00:00",
"pubType": "proceedings",
"pages": "145-152",
"year": "2014",
"issn": null,
"isbn": "978-1-4799-4677-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "4677a137",
"articleId": "12OmNvonIME",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "4677a153",
"articleId": "12OmNqBKTZb",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cgiv/2005/2392/0/23920162",
"title": "DualView: A Focus+Context Technique for Navigating Large Graphs",
"doi": null,
"abstractUrl": "/proceedings-article/cgiv/2005/23920162/12OmNBBQZqd",
"parentPublication": {
"id": "proceedings/cgiv/2005/2392/0",
"title": "International Conference on Computer Graphics, Imaging and Visualization (CGIV'05)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/eait/2014/4272/0/4272a215",
"title": "Image Super Resolution with Direct Mapping and De-Noising",
"doi": null,
"abstractUrl": "/proceedings-article/eait/2014/4272a215/12OmNvw2Tf3",
"parentPublication": {
"id": "proceedings/eait/2014/4272/0",
"title": "2014 Fourth International Conference of Emerging Applications of Information Technology (EAIT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vg/2005/26/0/01500519",
"title": "A multiresolution volume rendering framework for large-scale time-varying data visualization",
"doi": null,
"abstractUrl": "/proceedings-article/vg/2005/01500519/12OmNwDACgs",
"parentPublication": {
"id": "proceedings/vg/2005/26/0",
"title": "Volume Graphics 2005",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icassp/2004/8484/3/01326475",
"title": "A multiresolution directional filter bank for image applications",
"doi": null,
"abstractUrl": "/proceedings-article/icassp/2004/01326475/12OmNwekjEG",
"parentPublication": {
"id": "proceedings/icassp/2004/8484/3",
"title": "2004 IEEE International Conference on Acoustics, Speech, and Signal Processing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icip/1994/6952/2/00413687",
"title": "Multiresolution tomographic reconstruction using wavelets",
"doi": null,
"abstractUrl": "/proceedings-article/icip/1994/00413687/12OmNyRxFIT",
"parentPublication": {
"id": "proceedings/icip/1994/6952/2",
"title": "Proceedings of 1st International Conference on Image Processing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdip/2009/3565/0/3565a027",
"title": "Multilevel Image Reconstruction by Interpolating Wavelet Coefficients",
"doi": null,
"abstractUrl": "/proceedings-article/icdip/2009/3565a027/12OmNz5s0L2",
"parentPublication": {
"id": "proceedings/icdip/2009/3565/0",
"title": "Digital Image Processing, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/acssc/1993/4120/0/00342460",
"title": "A multiresolution approach for image morphing",
"doi": null,
"abstractUrl": "/proceedings-article/acssc/1993/00342460/12OmNzmclLS",
"parentPublication": {
"id": "proceedings/acssc/1993/4120/0",
"title": "Proceedings of 27th Asilomar Conference on Signals, Systems and Computers",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/05/07120994",
"title": "Multiperspective Focus+Context Visualization",
"doi": null,
"abstractUrl": "/journal/tg/2016/05/07120994/13rRUyft7D5",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2018/6100/0/610000a947",
"title": "IRGUN : Improved Residue Based Gradual Up-Scaling Network for Single Image Super Resolution",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2018/610000a947/17D45Wda7hb",
"parentPublication": {
"id": "proceedings/cvprw/2018/6100/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isspit/2019/5341/0/09001874",
"title": "Fast Enhanced DWT based Video Micro Movement Magnification",
"doi": null,
"abstractUrl": "/proceedings-article/isspit/2019/09001874/1hHMj6Ir68g",
"parentPublication": {
"id": "proceedings/isspit/2019/5341/0",
"title": "2019 IEEE International Symposium on Signal Processing and Information Technology (ISSPIT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNqAU6tm",
"title": "Intelligent Systems Design and Applications, International Conference on",
"acronym": "isda",
"groupId": "1001454",
"volume": "1",
"displayVolume": "1",
"year": "2008",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNy5R3sk",
"doi": "10.1109/ISDA.2008.232",
"title": "Intelligent Focus+Context Volume Visualization",
"normalizedTitle": "Intelligent Focus+Context Volume Visualization",
"abstract": "Although graphics processing unit (GPU) acceleration makes possible interactive volume rendering, successful volume visualization relies on the ability to quickly and correctly classify the volume into different materials or features. Among various classification techniques, one very attractive and effective method is employing machine learning to classify the whole volume according to some minimum user input through an interactive brushing interface, where users paint directly on slices of the volume. For routine visualization tasks, we can thus reduce their cost if the visualization system can learn the tasks and apply the captured knowledge in future tasks. This paper presents an intelligent, interactive visualization system that supports Focus+Context viewing of volume data. Features of interest should be the focal point of the visualization, and by applying appropriate rendering methods we are able to enhance these features and create more illustrative visualizations in a Focus+Context style. We show with a set of case studies that it is possible to use machine learning to not only help classify volume but also better present the classified results. This new capability makes visualization a more usable tool.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Although graphics processing unit (GPU) acceleration makes possible interactive volume rendering, successful volume visualization relies on the ability to quickly and correctly classify the volume into different materials or features. Among various classification techniques, one very attractive and effective method is employing machine learning to classify the whole volume according to some minimum user input through an interactive brushing interface, where users paint directly on slices of the volume. For routine visualization tasks, we can thus reduce their cost if the visualization system can learn the tasks and apply the captured knowledge in future tasks. This paper presents an intelligent, interactive visualization system that supports Focus+Context viewing of volume data. Features of interest should be the focal point of the visualization, and by applying appropriate rendering methods we are able to enhance these features and create more illustrative visualizations in a Focus+Context style. We show with a set of case studies that it is possible to use machine learning to not only help classify volume but also better present the classified results. This new capability makes visualization a more usable tool.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Although graphics processing unit (GPU) acceleration makes possible interactive volume rendering, successful volume visualization relies on the ability to quickly and correctly classify the volume into different materials or features. Among various classification techniques, one very attractive and effective method is employing machine learning to classify the whole volume according to some minimum user input through an interactive brushing interface, where users paint directly on slices of the volume. For routine visualization tasks, we can thus reduce their cost if the visualization system can learn the tasks and apply the captured knowledge in future tasks. This paper presents an intelligent, interactive visualization system that supports Focus+Context viewing of volume data. Features of interest should be the focal point of the visualization, and by applying appropriate rendering methods we are able to enhance these features and create more illustrative visualizations in a Focus+Context style. We show with a set of case studies that it is possible to use machine learning to not only help classify volume but also better present the classified results. This new capability makes visualization a more usable tool.",
"fno": "3382a368",
"keywords": [
"Volume Rendering",
"Volume Graphics",
"Visualization",
"Intelligent System",
"Interaction",
"Focus Context"
],
"authors": [
{
"affiliation": null,
"fullName": "Cheng-Kai Chen",
"givenName": "Cheng-Kai",
"surname": "Chen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Russell Thomason",
"givenName": "Russell",
"surname": "Thomason",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Kwan-Liu Ma",
"givenName": "Kwan-Liu",
"surname": "Ma",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "isda",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2008-11-01T00:00:00",
"pubType": "proceedings",
"pages": "368-374",
"year": "2008",
"issn": null,
"isbn": "978-0-7695-3382-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "3382a356",
"articleId": "12OmNBzAck2",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "3382a375",
"articleId": "12OmNz5apL3",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/miar/2001/1113/0/11130021",
"title": "Interactive Surgical Planning Using Context Based Volume Visualization Techniques",
"doi": null,
"abstractUrl": "/proceedings-article/miar/2001/11130021/12OmNqBtiRo",
"parentPublication": {
"id": "proceedings/miar/2001/1113/0",
"title": "Medical Imaging and Augmented Reality, International Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-vis/2004/8788/0/87880385",
"title": "The VesselGlyph: Focus & Context Visualization in CT-Angiography",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/2004/87880385/12OmNqFJhQU",
"parentPublication": {
"id": "proceedings/ieee-vis/2004/8788/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cadgraphics/2011/4497/0/4497a381",
"title": "Depth-Based Feature Enhancement for Volume Visualization",
"doi": null,
"abstractUrl": "/proceedings-article/cadgraphics/2011/4497a381/12OmNvAS4oS",
"parentPublication": {
"id": "proceedings/cadgraphics/2011/4497/0",
"title": "Computer-Aided Design and Computer Graphics, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-vis/2005/2766/0/27660085",
"title": "VolumeShop: An Interactive System for Direct Volume Illustration",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/2005/27660085/12OmNwIpNlJ",
"parentPublication": {
"id": "proceedings/ieee-vis/2005/2766/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sccompanion/2012/4956/0/4956b479",
"title": "Abstract: Remote Visualization for Large-Scale Simulation Using Particle-Based Volume Rendering",
"doi": null,
"abstractUrl": "/proceedings-article/sccompanion/2012/4956b479/12OmNxGSmft",
"parentPublication": {
"id": "proceedings/sccompanion/2012/4956/0",
"title": "2012 SC Companion: High Performance Computing, Networking Storage and Analysis",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-vis/2005/2766/0/27660042",
"title": "High Dynamic Range Volume Visualization",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/2005/27660042/12OmNzUxO6V",
"parentPublication": {
"id": "proceedings/ieee-vis/2005/2766/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-vis/2005/2766/0/27660047",
"title": "The Magic Volume Lens: An Interactive Focus+Context Technique for Volume Rendering",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/2005/27660047/12OmNzmLxM5",
"parentPublication": {
"id": "proceedings/ieee-vis/2005/2766/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2011/02/ttg2011020171",
"title": "Feature-Preserving Volume Data Reduction and Focus+Context Visualization",
"doi": null,
"abstractUrl": "/journal/tg/2011/02/ttg2011020171/13rRUx0xPTN",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2010/06/ttg2010061551",
"title": "Visualization by Proxy: A Novel Framework for Deferred Interaction with Volume Data",
"doi": null,
"abstractUrl": "/journal/tg/2010/06/ttg2010061551/13rRUxjyX3V",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2005/03/mcg2005030031",
"title": "Illustration Motifs for Effective Medical Volume Illustration",
"doi": null,
"abstractUrl": "/magazine/cg/2005/03/mcg2005030031/13rRUyft7x2",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNBSBk5H",
"title": "Computer-Aided Design and Computer Graphics, International Conference on",
"acronym": "cadgraphics",
"groupId": "1001488",
"volume": "0",
"displayVolume": "0",
"year": "2011",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNyKrHen",
"doi": "10.1109/CAD/Graphics.2011.43",
"title": "2.5D Focus+Context Map Visualization",
"normalizedTitle": "2.5D Focus+Context Map Visualization",
"abstract": "Many applications involve map visualization. Nevertheless, almost all of them display the map in the same style, which often cause information overloading. In this paper, we present a novel user-centered 2.5D focus context map visualization technique. Instead of treating all the information equally, we can custom the map according to user requirements. The main contributions of our technique are in three aspects. Firstly, our system can automatically construct a hierarchical representation of the city according to the focus point of the users by using the R-trees data structure, then presenting users the map in multiple rendering styles, highlighting the user concerned information and deemphasizing the less important information. Secondly, a landmark margin is automatically added to the original map according to the points-of-interest(POI), which illustrates the context information and enables users to maintain a macro view of the city. Moreover, by applying the 2.5D technique, we can reduce the amount of data and accelerate the transport of the information. The results show that our user-centered map visualization technique can greatly improve the efficiency for users to get their concerned information.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Many applications involve map visualization. Nevertheless, almost all of them display the map in the same style, which often cause information overloading. In this paper, we present a novel user-centered 2.5D focus context map visualization technique. Instead of treating all the information equally, we can custom the map according to user requirements. The main contributions of our technique are in three aspects. Firstly, our system can automatically construct a hierarchical representation of the city according to the focus point of the users by using the R-trees data structure, then presenting users the map in multiple rendering styles, highlighting the user concerned information and deemphasizing the less important information. Secondly, a landmark margin is automatically added to the original map according to the points-of-interest(POI), which illustrates the context information and enables users to maintain a macro view of the city. Moreover, by applying the 2.5D technique, we can reduce the amount of data and accelerate the transport of the information. The results show that our user-centered map visualization technique can greatly improve the efficiency for users to get their concerned information.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Many applications involve map visualization. Nevertheless, almost all of them display the map in the same style, which often cause information overloading. In this paper, we present a novel user-centered 2.5D focus context map visualization technique. Instead of treating all the information equally, we can custom the map according to user requirements. The main contributions of our technique are in three aspects. Firstly, our system can automatically construct a hierarchical representation of the city according to the focus point of the users by using the R-trees data structure, then presenting users the map in multiple rendering styles, highlighting the user concerned information and deemphasizing the less important information. Secondly, a landmark margin is automatically added to the original map according to the points-of-interest(POI), which illustrates the context information and enables users to maintain a macro view of the city. Moreover, by applying the 2.5D technique, we can reduce the amount of data and accelerate the transport of the information. The results show that our user-centered map visualization technique can greatly improve the efficiency for users to get their concerned information.",
"fno": "4497a389",
"keywords": [
"2 5 D",
"Focus Context",
"Map",
"Visualization"
],
"authors": [
{
"affiliation": null,
"fullName": "Shuai Wang",
"givenName": "Shuai",
"surname": "Wang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Bin Pan",
"givenName": "Bin",
"surname": "Pan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Xiaoming Guo",
"givenName": "Xiaoming",
"surname": "Guo",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Zhangye Wang",
"givenName": "Zhangye",
"surname": "Wang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Qunsheng Peng",
"givenName": "Qunsheng",
"surname": "Peng",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cadgraphics",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2011-09-01T00:00:00",
"pubType": "proceedings",
"pages": "389-396",
"year": "2011",
"issn": null,
"isbn": "978-0-7695-4497-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "4497a381",
"articleId": "12OmNvAS4oS",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "4497a397",
"articleId": "12OmNyRxFyS",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ieee-infovis/2000/0804/0/08040085",
"title": "Redefining the Focus and Context of Focus+Context Visualizations",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-infovis/2000/08040085/12OmNB0nW9E",
"parentPublication": {
"id": "proceedings/ieee-infovis/2000/0804/0",
"title": "Information Visualization, IEEE Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2012/4771/0/4771a270",
"title": "Heat Map Scope Technique for Stacked Time-series Data Visualization",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2012/4771a270/12OmNBtl1sa",
"parentPublication": {
"id": "proceedings/iv/2012/4771/0",
"title": "2012 16th International Conference on Information Visualisation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-vis/2004/8788/0/87880385",
"title": "The VesselGlyph: Focus & Context Visualization in CT-Angiography",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/2004/87880385/12OmNqFJhQU",
"parentPublication": {
"id": "proceedings/ieee-vis/2004/8788/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2011/0394/0/05995611",
"title": "2.5D building modeling with topology control",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2011/05995611/12OmNy2agUI",
"parentPublication": {
"id": "proceedings/cvpr/2011/0394/0",
"title": "CVPR 2011",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isda/2008/3382/1/3382a368",
"title": "Intelligent Focus+Context Volume Visualization",
"doi": null,
"abstractUrl": "/proceedings-article/isda/2008/3382a368/12OmNy5R3sk",
"parentPublication": {
"id": "proceedings/isda/2008/3382/1",
"title": "Intelligent Systems Design and Applications, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2008/3268/0/3268a356",
"title": "3D Generalization Lenses for Interactive Focus + Context Visualization of Virtual City Models",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2008/3268a356/12OmNzaQoEB",
"parentPublication": {
"id": "proceedings/iv/2008/3268/0",
"title": "2008 12th International Conference Information Visualisation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2011/02/ttg2011020171",
"title": "Feature-Preserving Volume Data Reduction and Focus+Context Visualization",
"doi": null,
"abstractUrl": "/journal/tg/2011/02/ttg2011020171/13rRUx0xPTN",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2011/12/ttg2011122528",
"title": "Focus+Context Metro Maps",
"doi": null,
"abstractUrl": "/journal/tg/2011/12/ttg2011122528/13rRUyY294B",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2021/0158/0/015800a080",
"title": "BDLoc: Global Localization from 2.5D Building Map",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2021/015800a080/1yeCVFqROFO",
"parentPublication": {
"id": "proceedings/ismar/2021/0158/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNAQJzKb",
"title": "2015 IEEE Pacific Visualization Symposium (PacificVis)",
"acronym": "pacificvis",
"groupId": "1001657",
"volume": "0",
"displayVolume": "0",
"year": "2015",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNznkJUv",
"doi": "10.1109/PACIFICVIS.2015.7156349",
"title": "Interactive streamline exploration and manipulation using deformation",
"normalizedTitle": "Interactive streamline exploration and manipulation using deformation",
"abstract": "Occlusion presents a major challenge in visualizing 3D flow fields using streamlines. Displaying too many streamlines creates a dense visualization filled with occluded structures, but displaying too few streams risks losing important features. A more ideal streamline exploration approach is to visually manipulate the cluttered streamlines by pulling visible layers apart and revealing the hidden structures underneath. This paper presents a customized deformation algorithm and an interactive visualization tool to minimize visual cluttering. The algorithm is able to maintain the overall integrity of the flow field and expose the previously hidden structures. Our system supports both mouse and direct-touch interactions to manipulate the viewing perspectives and visualize the streamlines in depth. By using a lens metaphor of different shapes to select the transition zone of the targeted area interactively, the users can move their focus and examine the flow field freely.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Occlusion presents a major challenge in visualizing 3D flow fields using streamlines. Displaying too many streamlines creates a dense visualization filled with occluded structures, but displaying too few streams risks losing important features. A more ideal streamline exploration approach is to visually manipulate the cluttered streamlines by pulling visible layers apart and revealing the hidden structures underneath. This paper presents a customized deformation algorithm and an interactive visualization tool to minimize visual cluttering. The algorithm is able to maintain the overall integrity of the flow field and expose the previously hidden structures. Our system supports both mouse and direct-touch interactions to manipulate the viewing perspectives and visualize the streamlines in depth. By using a lens metaphor of different shapes to select the transition zone of the targeted area interactively, the users can move their focus and examine the flow field freely.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Occlusion presents a major challenge in visualizing 3D flow fields using streamlines. Displaying too many streamlines creates a dense visualization filled with occluded structures, but displaying too few streams risks losing important features. A more ideal streamline exploration approach is to visually manipulate the cluttered streamlines by pulling visible layers apart and revealing the hidden structures underneath. This paper presents a customized deformation algorithm and an interactive visualization tool to minimize visual cluttering. The algorithm is able to maintain the overall integrity of the flow field and expose the previously hidden structures. Our system supports both mouse and direct-touch interactions to manipulate the viewing perspectives and visualize the streamlines in depth. By using a lens metaphor of different shapes to select the transition zone of the targeted area interactively, the users can move their focus and examine the flow field freely.",
"fno": "07156349",
"keywords": [
"Shape",
"Lenses",
"Deformable Models",
"Context",
"Three Dimensional Displays",
"Streaming Media",
"Visualization",
"I 3 8 Computer Graphics Applications",
"I 3 6 Computer Graphics Methodology And Techniques Interaction Techniques"
],
"authors": [
{
"affiliation": "The Ohio State University, USA",
"fullName": "Xin Tong",
"givenName": null,
"surname": "Xin Tong",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "The Ohio State University, USA",
"fullName": "Chun-Ming Chen",
"givenName": null,
"surname": "Chun-Ming Chen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "The Ohio State University, USA",
"fullName": "Han-Wei Shen",
"givenName": "Han-Wei",
"surname": "Shen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Pacific Northwest National Laboratory, USA",
"fullName": "Pak Chung Wong",
"givenName": null,
"surname": "Pak Chung Wong",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "pacificvis",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2015-04-01T00:00:00",
"pubType": "proceedings",
"pages": "1-8",
"year": "2015",
"issn": null,
"isbn": "978-1-4673-6879-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07156348",
"articleId": "12OmNvlPkAl",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07156350",
"articleId": "12OmNBpEeTF",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/pacificvis/2011/935/0/05742376",
"title": "View point evaluation and streamline filtering for flow visualization",
"doi": null,
"abstractUrl": "/proceedings-article/pacificvis/2011/05742376/12OmNqyDjoV",
"parentPublication": {
"id": "proceedings/pacificvis/2011/935/0",
"title": "2011 IEEE Pacific Visualization Symposium (PacificVis)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pacificvis/2013/4797/0/06596153",
"title": "Exploring vector fields with distribution-based streamline analysis",
"doi": null,
"abstractUrl": "/proceedings-article/pacificvis/2013/06596153/12OmNvAiSjV",
"parentPublication": {
"id": "proceedings/pacificvis/2013/4797/0",
"title": "2013 IEEE Pacific Visualization Symposium (PacificVis)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pacificvis/2008/1966/0/04475462",
"title": "Illustrative Streamline Placement and Visualization",
"doi": null,
"abstractUrl": "/proceedings-article/pacificvis/2008/04475462/12OmNwoPty5",
"parentPublication": {
"id": "proceedings/pacificvis/2008/1966/0",
"title": "IEEE Pacific Visualization Symposium 2008",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sibgrapi/2013/5099/0/5099a296",
"title": "Streamline-Based Topological Graph Construction with Application to Self-Animated Images",
"doi": null,
"abstractUrl": "/proceedings-article/sibgrapi/2013/5099a296/12OmNyo1nYM",
"parentPublication": {
"id": "proceedings/sibgrapi/2013/5099/0",
"title": "2013 XXVI Conference on Graphics, Patterns and Images",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cis/2011/4584/0/4584b174",
"title": "Multiresolution Streamline Placement for 2D Flow Fields",
"doi": null,
"abstractUrl": "/proceedings-article/cis/2011/4584b174/12OmNz6iOml",
"parentPublication": {
"id": "proceedings/cis/2011/4584/0",
"title": "2011 Seventh International Conference on Computational Intelligence and Security",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/07/07332955",
"title": "View-Dependent Streamline Deformation and Exploration",
"doi": null,
"abstractUrl": "/journal/tg/2016/07/07332955/13rRUB7a1fV",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2006/05/v0965",
"title": "An Advanced Evenly-Spaced Streamline Placement Algorithm",
"doi": null,
"abstractUrl": "/journal/tg/2006/05/v0965/13rRUIM2VBz",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/08/ttg2013081342",
"title": "Similarity Measures for Enhancing Interactive Streamline Seeding",
"doi": null,
"abstractUrl": "/journal/tg/2013/08/ttg2013081342/13rRUwInvB3",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/08/06025348",
"title": "Hierarchical Streamline Bundles",
"doi": null,
"abstractUrl": "/journal/tg/2012/08/06025348/13rRUyY28Yt",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/07/ttg2013071185",
"title": "Parallel Streamline Placement for 2D Flow Fields",
"doi": null,
"abstractUrl": "/journal/tg/2013/07/ttg2013071185/13rRUyfbwqG",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNzBOhX2",
"title": "Proceedings. 2004 IEEE Symposium on Visual Languages and Human Centric Computing",
"acronym": "vlhcc",
"groupId": "1001007",
"volume": "0",
"displayVolume": "0",
"year": "2004",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNAgoV6r",
"doi": "10.1109/VLHCC.2004.21",
"title": "Dynamic Euler Diagram Drawing",
"normalizedTitle": "Dynamic Euler Diagram Drawing",
"abstract": "In this paper we describe a method to lay out a graph enhanced Euler diagram so that it looks similar to a previously drawn graph enhanced Euler diagram. This task is non-trivial when the underlying structures of the diagrams differ. In particular, if a structural change is made to an existing drawn diagram, our work enables the presentation of the new diagram with minor disruption to the user's mental map. As the new diagram can be generated from an abstract representation, its initial embedding may be very different from that of the original. We have developed comparison measures for Euler diagrams, integrated into a multicriteria optimizer, and applied a force model for associated graphs that attempts to move nodes towards their positions in the original layout. To further enhance the usability of the system, the transition between diagrams can be animated.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this paper we describe a method to lay out a graph enhanced Euler diagram so that it looks similar to a previously drawn graph enhanced Euler diagram. This task is non-trivial when the underlying structures of the diagrams differ. In particular, if a structural change is made to an existing drawn diagram, our work enables the presentation of the new diagram with minor disruption to the user's mental map. As the new diagram can be generated from an abstract representation, its initial embedding may be very different from that of the original. We have developed comparison measures for Euler diagrams, integrated into a multicriteria optimizer, and applied a force model for associated graphs that attempts to move nodes towards their positions in the original layout. To further enhance the usability of the system, the transition between diagrams can be animated.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this paper we describe a method to lay out a graph enhanced Euler diagram so that it looks similar to a previously drawn graph enhanced Euler diagram. This task is non-trivial when the underlying structures of the diagrams differ. In particular, if a structural change is made to an existing drawn diagram, our work enables the presentation of the new diagram with minor disruption to the user's mental map. As the new diagram can be generated from an abstract representation, its initial embedding may be very different from that of the original. We have developed comparison measures for Euler diagrams, integrated into a multicriteria optimizer, and applied a force model for associated graphs that attempts to move nodes towards their positions in the original layout. To further enhance the usability of the system, the transition between diagrams can be animated.",
"fno": "86960147",
"keywords": [],
"authors": [
{
"affiliation": "University of Kent, UK",
"fullName": "Peter Rodgers",
"givenName": "Peter",
"surname": "Rodgers",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Kent, UK",
"fullName": "Paul Mutton",
"givenName": "Paul",
"surname": "Mutton",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Brighton, UK",
"fullName": "Jean Flower",
"givenName": "Jean",
"surname": "Flower",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vlhcc",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2004-09-01T00:00:00",
"pubType": "proceedings",
"pages": "147-156",
"year": "2004",
"issn": null,
"isbn": "0-7803-8696-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "86960139",
"articleId": "12OmNAKcNOc",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "86960157",
"articleId": "12OmNywxlNr",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vlhcc/2010/8485/0/05635206",
"title": "Euler Graph Transformations for Euler Diagram Layout",
"doi": null,
"abstractUrl": "/proceedings-article/vlhcc/2010/05635206/12OmNA0vnUl",
"parentPublication": {
"id": "proceedings/vlhcc/2010/8485/0",
"title": "2010 IEEE Symposium on Visual Languages and Human-Centric Computing (VL/HCC 2010)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vlhcc/2009/4876/0/05295268",
"title": "Changing euler diagram properties by edge transformation of euler dual graphs",
"doi": null,
"abstractUrl": "/proceedings-article/vlhcc/2009/05295268/12OmNAIdBQa",
"parentPublication": {
"id": "proceedings/vlhcc/2009/4876/0",
"title": "2009 IEEE Symposium on Visual Languages and Human-Centric Computing (VL/HCC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2008/3268/0/3268a594",
"title": "Visualise Undrawable Euler Diagrams",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2008/3268a594/12OmNBOllkb",
"parentPublication": {
"id": "proceedings/iv/2008/3268/0",
"title": "2008 12th International Conference Information Visualisation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vlhcc/2011/1246/0/06070401",
"title": "Drawing Euler diagrams with circles and ellipses",
"doi": null,
"abstractUrl": "/proceedings-article/vlhcc/2011/06070401/12OmNvpew49",
"parentPublication": {
"id": "proceedings/vlhcc/2011/1246/0",
"title": "2011 IEEE Symposium on Visual Languages and Human-Centric Computing (VL/HCC 2011)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vlhcc/2011/1246/0/06070382",
"title": "SketchSet: Creating Euler diagrams using pen or mouse",
"doi": null,
"abstractUrl": "/proceedings-article/vlhcc/2011/06070382/12OmNx965CA",
"parentPublication": {
"id": "proceedings/vlhcc/2011/1246/0",
"title": "2011 IEEE Symposium on Visual Languages and Human-Centric Computing (VL/HCC 2011)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2008/3268/0/3268a585",
"title": "Embedding Wellformed Euler Diagrams",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2008/3268a585/12OmNyuya3M",
"parentPublication": {
"id": "proceedings/iv/2008/3268/0",
"title": "2008 12th International Conference Information Visualisation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2011/07/ttg2011071020",
"title": "Drawing Euler Diagrams with Circles: The Theory of Piercings",
"doi": null,
"abstractUrl": "/journal/tg/2011/07/ttg2011071020/13rRUEgarBq",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2011/01/ttg2011010088",
"title": "Inductively Generating Euler Diagrams",
"doi": null,
"abstractUrl": "/journal/tg/2011/01/ttg2011010088/13rRUNvgziB",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/01/ttg2014010056",
"title": "Drawing Area-Proportional Euler Diagrams Representing Up To Three Sets",
"doi": null,
"abstractUrl": "/journal/tg/2014/01/ttg2014010056/13rRUx0Pqpy",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2018/7202/0/720200a561",
"title": "A New Diagram for Amino Acids: User Study Comparing Rainbow Boxes to Venn/Euler Diagram",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2018/720200a561/17D45WaTkc1",
"parentPublication": {
"id": "proceedings/iv/2018/7202/0",
"title": "2018 22nd International Conference Information Visualisation (IV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNyqRne6",
"title": "Visual Languages, IEEE Symposium on",
"acronym": "vl",
"groupId": "1000793",
"volume": "0",
"displayVolume": "0",
"year": "2000",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNB8Cj3l",
"doi": "10.1109/VL.2000.874375",
"title": "Projections in Venn-Euler Diagrams",
"normalizedTitle": "Projections in Venn-Euler Diagrams",
"abstract": "Venn diagrams and Euler circles have long been used to express constraints on sets and their relationships with other sets. However, these notations can get very cluttered when we consider many closed curves or contours. In order to reduce this clutter, and to focus attention within the diagram appropriately, the notion of a projected contour, or projection, is introduced. Informally, a projected contour is a contour that describes a set of elements limited to a certain context. Through a series of examples, we develop a formal semantics of projections and discuss the issues involved in introducing these.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Venn diagrams and Euler circles have long been used to express constraints on sets and their relationships with other sets. However, these notations can get very cluttered when we consider many closed curves or contours. In order to reduce this clutter, and to focus attention within the diagram appropriately, the notion of a projected contour, or projection, is introduced. Informally, a projected contour is a contour that describes a set of elements limited to a certain context. Through a series of examples, we develop a formal semantics of projections and discuss the issues involved in introducing these.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Venn diagrams and Euler circles have long been used to express constraints on sets and their relationships with other sets. However, these notations can get very cluttered when we consider many closed curves or contours. In order to reduce this clutter, and to focus attention within the diagram appropriately, the notion of a projected contour, or projection, is introduced. Informally, a projected contour is a contour that describes a set of elements limited to a certain context. Through a series of examples, we develop a formal semantics of projections and discuss the issues involved in introducing these.",
"fno": "08400119",
"keywords": [
"Visual Formalisms",
"Diagrammatic Notations"
],
"authors": [
{
"affiliation": "Technion-IIT",
"fullName": "Joseph (Yossi) Gil",
"givenName": "Joseph (Yossi)",
"surname": "Gil",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Brighton",
"fullName": "John Howse",
"givenName": "John",
"surname": "Howse",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Brighton",
"fullName": "John Taylor",
"givenName": "John",
"surname": "Taylor",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Kent at Canterbury",
"fullName": "Stuart Kent",
"givenName": "Stuart",
"surname": "Kent",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vl",
"isOpenAccess": false,
"showRecommendedArticles": false,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2000-09-01T00:00:00",
"pubType": "proceedings",
"pages": "119",
"year": "2000",
"issn": "1049-2615",
"isbn": "0-7695-0840-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08400111",
"articleId": "12OmNBKEynE",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08400127",
"articleId": "12OmNz5JC3C",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNvs4vpI",
"title": "Visual Languages, IEEE Symposium on",
"acronym": "vl",
"groupId": "1000793",
"volume": "0",
"displayVolume": "0",
"year": "1999",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNBQkwZS",
"doi": "10.1109/VL.1999.795885",
"title": "Reasoning with Spider Diagrams",
"normalizedTitle": "Reasoning with Spider Diagrams",
"abstract": "Spider diagrams combine and extend Venn diagrams and Euler circles to express constraints on sets and their relationships with other sets. These diagrams can usefully be used in conjunction with object-oriented modeling notations such as the Unified Modeling Language. This paper summaries the main syntax and semantics of spider diagrams and introduces four inference rules for reasoning with spider diagrams and a rule governing the equivalence of Venn and Euler forms of spider diagrams. This paper also details rules for combining two spider diagrams to produce a single diagram which retains as much of their combined semantic information as possible and discusses disjunctive diagrams as one possible way of enriching the system in order to combine spider diagrams so that no semantic information is lost.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Spider diagrams combine and extend Venn diagrams and Euler circles to express constraints on sets and their relationships with other sets. These diagrams can usefully be used in conjunction with object-oriented modeling notations such as the Unified Modeling Language. This paper summaries the main syntax and semantics of spider diagrams and introduces four inference rules for reasoning with spider diagrams and a rule governing the equivalence of Venn and Euler forms of spider diagrams. This paper also details rules for combining two spider diagrams to produce a single diagram which retains as much of their combined semantic information as possible and discusses disjunctive diagrams as one possible way of enriching the system in order to combine spider diagrams so that no semantic information is lost.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Spider diagrams combine and extend Venn diagrams and Euler circles to express constraints on sets and their relationships with other sets. These diagrams can usefully be used in conjunction with object-oriented modeling notations such as the Unified Modeling Language. This paper summaries the main syntax and semantics of spider diagrams and introduces four inference rules for reasoning with spider diagrams and a rule governing the equivalence of Venn and Euler forms of spider diagrams. This paper also details rules for combining two spider diagrams to produce a single diagram which retains as much of their combined semantic information as possible and discusses disjunctive diagrams as one possible way of enriching the system in order to combine spider diagrams so that no semantic information is lost.",
"fno": "02160138",
"keywords": [
"Diagrammatic Reasoning",
"Visual Formalisms"
],
"authors": [
{
"affiliation": "University of Brighton",
"fullName": "John Howse",
"givenName": "John",
"surname": "Howse",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Brighton",
"fullName": "Fernando Molina",
"givenName": "Fernando",
"surname": "Molina",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Brighton",
"fullName": "John Taylor",
"givenName": "John",
"surname": "Taylor",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Kent",
"fullName": "Stuart Kent",
"givenName": "Stuart",
"surname": "Kent",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vl",
"isOpenAccess": false,
"showRecommendedArticles": false,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "1999-09-01T00:00:00",
"pubType": "proceedings",
"pages": "138",
"year": "1999",
"issn": "1049-2615",
"isbn": "0-7695-0216-4",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "02160130",
"articleId": "12OmNCd2rEH",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "02160148",
"articleId": "12OmNyqRn57",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNAsTgXa",
"title": "2017 IEEE 17th International Conference on Advanced Learning Technologies (ICALT)",
"acronym": "icalt",
"groupId": "1000009",
"volume": "0",
"displayVolume": "0",
"year": "2017",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNwF0BKj",
"doi": "10.1109/ICALT.2017.107",
"title": "Automatic Assessment of Student Answers Consisting of Venn and Euler Diagrams",
"normalizedTitle": "Automatic Assessment of Student Answers Consisting of Venn and Euler Diagrams",
"abstract": "Venn and Euler diagrams are well-defined mathematical diagram types, which are the major representation methods of Set Theory. Venn and Euler diagrams are part of major Mathematics examinations in secondary education such as London Ordinary Level and SAT. Although computer assessment of different diagram types has been addressed, no such research has been done for Venn and Euler diagrams. In this research, we present a system capable of automatically assessing student answers consisting of Venn and Euler diagrams. The student answer is compared against a model answer, and marks are allocated according to a marking rubric.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Venn and Euler diagrams are well-defined mathematical diagram types, which are the major representation methods of Set Theory. Venn and Euler diagrams are part of major Mathematics examinations in secondary education such as London Ordinary Level and SAT. Although computer assessment of different diagram types has been addressed, no such research has been done for Venn and Euler diagrams. In this research, we present a system capable of automatically assessing student answers consisting of Venn and Euler diagrams. The student answer is compared against a model answer, and marks are allocated according to a marking rubric.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Venn and Euler diagrams are well-defined mathematical diagram types, which are the major representation methods of Set Theory. Venn and Euler diagrams are part of major Mathematics examinations in secondary education such as London Ordinary Level and SAT. Although computer assessment of different diagram types has been addressed, no such research has been done for Venn and Euler diagrams. In this research, we present a system capable of automatically assessing student answers consisting of Venn and Euler diagrams. The student answer is compared against a model answer, and marks are allocated according to a marking rubric.",
"fno": "3870a243",
"keywords": [
"Data Structures",
"Syntactics",
"Data Models",
"Set Theory",
"Data Mining",
"Generators",
"Diagram Assessment",
"Venn Diagram",
"Euler Diagram",
"Set Theory",
"Diagram Rubric"
],
"authors": [
{
"affiliation": null,
"fullName": "Diunuge Buddhika Wijesinghe",
"givenName": "Diunuge Buddhika",
"surname": "Wijesinghe",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Jcs Kadupitiya",
"givenName": "Jcs",
"surname": "Kadupitiya",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Surangika Ranathunga",
"givenName": "Surangika",
"surname": "Ranathunga",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Gihan Dias",
"givenName": "Gihan",
"surname": "Dias",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icalt",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2017-07-01T00:00:00",
"pubType": "proceedings",
"pages": "243-247",
"year": "2017",
"issn": "2161-377X",
"isbn": "978-1-5386-3870-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "3870a238",
"articleId": "12OmNzZEAoU",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "3870a248",
"articleId": "12OmNvjQ91p",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vl/2000/0840/0/08400119",
"title": "Projections in Venn-Euler Diagrams",
"doi": null,
"abstractUrl": "/proceedings-article/vl/2000/08400119/12OmNB8Cj3l",
"parentPublication": {
"id": "proceedings/vl/2000/0840/0",
"title": "Visual Languages, IEEE Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2008/3268/0/3268a594",
"title": "Visualise Undrawable Euler Diagrams",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2008/3268a594/12OmNBOllkb",
"parentPublication": {
"id": "proceedings/iv/2008/3268/0",
"title": "2008 12th International Conference Information Visualisation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vl/1999/0216/0/02160138",
"title": "Reasoning with Spider Diagrams",
"doi": null,
"abstractUrl": "/proceedings-article/vl/1999/02160138/12OmNBQkwZS",
"parentPublication": {
"id": "proceedings/vl/1999/0216/0",
"title": "Visual Languages, IEEE Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/synasc/2015/0461/0/0461a181",
"title": "Simple Venn Diagrams for Multisets",
"doi": null,
"abstractUrl": "/proceedings-article/synasc/2015/0461a181/12OmNC4eSvg",
"parentPublication": {
"id": "proceedings/synasc/2015/0461/0",
"title": "2015 17th International Symposium on Symbolic and Numeric Algorithms for Scientific Computing (SYNASC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vlhcc/2011/1246/0/06070401",
"title": "Drawing Euler diagrams with circles and ellipses",
"doi": null,
"abstractUrl": "/proceedings-article/vlhcc/2011/06070401/12OmNvpew49",
"parentPublication": {
"id": "proceedings/vlhcc/2011/1246/0",
"title": "2011 IEEE Symposium on Visual Languages and Human-Centric Computing (VL/HCC 2011)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vlhcc/2013/0369/0/06645262",
"title": "Improving user comprehension of Euler diagrams",
"doi": null,
"abstractUrl": "/proceedings-article/vlhcc/2013/06645262/12OmNxveNOL",
"parentPublication": {
"id": "proceedings/vlhcc/2013/0369/0",
"title": "2013 IEEE Symposium on Visual Languages and Human-Centric Computing (VL/HCC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2008/3268/0/3268a585",
"title": "Embedding Wellformed Euler Diagrams",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2008/3268a585/12OmNyuya3M",
"parentPublication": {
"id": "proceedings/iv/2008/3268/0",
"title": "2008 12th International Conference Information Visualisation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2011/01/ttg2011010088",
"title": "Inductively Generating Euler Diagrams",
"doi": null,
"abstractUrl": "/journal/tg/2011/01/ttg2011010088/13rRUNvgziB",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/01/ttg2014010056",
"title": "Drawing Area-Proportional Euler Diagrams Representing Up To Three Sets",
"doi": null,
"abstractUrl": "/journal/tg/2014/01/ttg2014010056/13rRUx0Pqpy",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2018/7202/0/720200a561",
"title": "A New Diagram for Amino Acids: User Study Comparing Rainbow Boxes to Venn/Euler Diagram",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2018/720200a561/17D45WaTkc1",
"parentPublication": {
"id": "proceedings/iv/2018/7202/0",
"title": "2018 22nd International Conference Information Visualisation (IV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNxFJXDg",
"title": "2010 Second World Congress on Software Engineering",
"acronym": "wcse",
"groupId": "1002945",
"volume": "2",
"displayVolume": "2",
"year": "2010",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNCb3ftt",
"doi": "10.1109/WCSE.2010.126",
"title": "A Combined Finite-Element and Finite-Volume Method in Reservoir Simulation",
"normalizedTitle": "A Combined Finite-Element and Finite-Volume Method in Reservoir Simulation",
"abstract": "A decoupled algorithm which combines finite-element and finite-volume method (FEM & FVM) for reservoir simulation is presented in the paper. The governing equations of fluid flowing in porous medium are decoupled to solve unknown parameters of pressure and saturation. The convective term of saturation equation is calculated by FVM, while the other terms and the pressure equation are solved by FEM, new method is proposed to make the two methods compatible in one FEM program: the flux are calculated element by element, that is, in each element, local flux of all the nods belong to the element are calculated, then the superposition principle is applied to obtain total flux of every nods as load term, then the saturation can be solved by classic FEM. A water displacing oil problem is calculated based on the method mentioned above. Results show that the method combines the advantages of FEM and FVM, the difficulties of solving saturation with FEM can be consequently solved. Also, the application of decoupled algorithms can greatly reduce storage demands and computational complexity, thus make the simulation more efficiency.",
"abstracts": [
{
"abstractType": "Regular",
"content": "A decoupled algorithm which combines finite-element and finite-volume method (FEM & FVM) for reservoir simulation is presented in the paper. The governing equations of fluid flowing in porous medium are decoupled to solve unknown parameters of pressure and saturation. The convective term of saturation equation is calculated by FVM, while the other terms and the pressure equation are solved by FEM, new method is proposed to make the two methods compatible in one FEM program: the flux are calculated element by element, that is, in each element, local flux of all the nods belong to the element are calculated, then the superposition principle is applied to obtain total flux of every nods as load term, then the saturation can be solved by classic FEM. A water displacing oil problem is calculated based on the method mentioned above. Results show that the method combines the advantages of FEM and FVM, the difficulties of solving saturation with FEM can be consequently solved. Also, the application of decoupled algorithms can greatly reduce storage demands and computational complexity, thus make the simulation more efficiency.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "A decoupled algorithm which combines finite-element and finite-volume method (FEM & FVM) for reservoir simulation is presented in the paper. The governing equations of fluid flowing in porous medium are decoupled to solve unknown parameters of pressure and saturation. The convective term of saturation equation is calculated by FVM, while the other terms and the pressure equation are solved by FEM, new method is proposed to make the two methods compatible in one FEM program: the flux are calculated element by element, that is, in each element, local flux of all the nods belong to the element are calculated, then the superposition principle is applied to obtain total flux of every nods as load term, then the saturation can be solved by classic FEM. A water displacing oil problem is calculated based on the method mentioned above. Results show that the method combines the advantages of FEM and FVM, the difficulties of solving saturation with FEM can be consequently solved. Also, the application of decoupled algorithms can greatly reduce storage demands and computational complexity, thus make the simulation more efficiency.",
"fno": "4303b325",
"keywords": [
"Finite Element",
"Finite Volume",
"Decoupled Algorithms",
"Reservoir Simulation"
],
"authors": [
{
"affiliation": null,
"fullName": "Junzheng Yang",
"givenName": "Junzheng",
"surname": "Yang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Xugang Wang",
"givenName": "Xugang",
"surname": "Wang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Honglan Zou",
"givenName": "Honglan",
"surname": "Zou",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Guoping Liang",
"givenName": "Guoping",
"surname": "Liang",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "wcse",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2010-12-01T00:00:00",
"pubType": "proceedings",
"pages": "325-328",
"year": "2010",
"issn": null,
"isbn": "978-0-7695-4303-1",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "4303b318",
"articleId": "12OmNCd2ryd",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "4303b329",
"articleId": "12OmNxRF74V",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icmtma/2010/3962/2/3962c278",
"title": "Finite Element Analysis of Mechanical and Temperature Field for a Rolling Tire",
"doi": null,
"abstractUrl": "/proceedings-article/icmtma/2010/3962c278/12OmNBTs7od",
"parentPublication": {
"id": "proceedings/icmtma/2010/3962/2",
"title": "2010 International Conference on Measuring Technology and Mechatronics Automation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dac/2002/2402/0/24020771",
"title": "Combined BEM/FEM Substrate Resistance Modeling",
"doi": null,
"abstractUrl": "/proceedings-article/dac/2002/24020771/12OmNBt3qmg",
"parentPublication": {
"id": "proceedings/dac/2002/2402/0",
"title": "Design Automation Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/etcs/2010/3987/1/3987a206",
"title": "A New Non-rigid Image Registration Algorithm Using the Finite-Element Method",
"doi": null,
"abstractUrl": "/proceedings-article/etcs/2010/3987a206/12OmNCesr3f",
"parentPublication": {
"id": "proceedings/etcs/2010/3987/1",
"title": "Education Technology and Computer Science, International Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icsc/2016/0662/0/0662a174",
"title": "Demonstrating a Linked Data Visualiser for Finite Element Biosimulations",
"doi": null,
"abstractUrl": "/proceedings-article/icsc/2016/0662a174/12OmNqBbHvz",
"parentPublication": {
"id": "proceedings/icsc/2016/0662/0",
"title": "2016 IEEE Tenth International Conference on Semantic Computing (ICSC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icecs/2009/3937/0/3937a161",
"title": "Mathematical Modeling of Deflection of a Beam: A Finite Element Approach",
"doi": null,
"abstractUrl": "/proceedings-article/icecs/2009/3937a161/12OmNroij0U",
"parentPublication": {
"id": "proceedings/icecs/2009/3937/0",
"title": "Environmental and Computer Science, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/saahpc/2012/4838/0/4838a141",
"title": "Automatically Optimized GPU Acceleration of Element Subroutines in Finite Element Method",
"doi": null,
"abstractUrl": "/proceedings-article/saahpc/2012/4838a141/12OmNx6xHly",
"parentPublication": {
"id": "proceedings/saahpc/2012/4838/0",
"title": "Application Accelerators in High-Performance Computing, Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isdea/2014/4261/0/4261b036",
"title": "The Discuss of the Finite Element Simulation Method of the Combined Connection with Bolts and Welds",
"doi": null,
"abstractUrl": "/proceedings-article/isdea/2014/4261b036/12OmNxdDFRn",
"parentPublication": {
"id": "proceedings/isdea/2014/4261/0",
"title": "2014 Fifth International Conference on Intelligent Systems Design and Engineering Applications (ISDEA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cicsyn/2011/4482/0/4482a173",
"title": "OFEM: An Optimum Finite Element Algorithm for Heat Transfer Problem in Two-dimensional Insulated-tip Rectangular Fin",
"doi": null,
"abstractUrl": "/proceedings-article/cicsyn/2011/4482a173/12OmNyen1ym",
"parentPublication": {
"id": "proceedings/cicsyn/2011/4482/0",
"title": "Computational Intelligence, Communication Systems and Networks, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ssst/1995/6985/0/69850330",
"title": "Magnetostatic analysis using a general purpose finite element program",
"doi": null,
"abstractUrl": "/proceedings-article/ssst/1995/69850330/12OmNylboLr",
"parentPublication": {
"id": "proceedings/ssst/1995/6985/0",
"title": "Southeastern Symposium on System Theory",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/01/ttg2014010070",
"title": "GPU-Based Volume Visualization from High-Order Finite Element Fields",
"doi": null,
"abstractUrl": "/journal/tg/2014/01/ttg2014010070/13rRUEgs2M1",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNzlD94W",
"title": "2013 Fourth World Congress on Software Engineering (WCSE)",
"acronym": "wcse",
"groupId": "1002945",
"volume": "0",
"displayVolume": "0",
"year": "2013",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNx3q6V3",
"doi": "10.1109/WCSE.2013.33",
"title": "A Flexible Transfer Function Model for the Volume Rendering of Finite Element Data",
"normalizedTitle": "A Flexible Transfer Function Model for the Volume Rendering of Finite Element Data",
"abstract": "As a technique exploring the data inside geometric models, volume rendering is becoming a hot topic in graphics. Obtaining a well-designed transfer function is a challenging task in general volume rendering. This paper constructs a flexible transfer function model for the rendering of finite element data. In finite element analysis, the information in a critical region, where the concerned properties have the extreme value, is generally most interesting. The extreme region and the embedded data are displayed by adjusting the opacity and the color parameters of the transfer function, respectively. The transfer function is beneficial for the generation of cloud pictures inside the finite element model. The flexible is tested and verified through a case study.",
"abstracts": [
{
"abstractType": "Regular",
"content": "As a technique exploring the data inside geometric models, volume rendering is becoming a hot topic in graphics. Obtaining a well-designed transfer function is a challenging task in general volume rendering. This paper constructs a flexible transfer function model for the rendering of finite element data. In finite element analysis, the information in a critical region, where the concerned properties have the extreme value, is generally most interesting. The extreme region and the embedded data are displayed by adjusting the opacity and the color parameters of the transfer function, respectively. The transfer function is beneficial for the generation of cloud pictures inside the finite element model. The flexible is tested and verified through a case study.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "As a technique exploring the data inside geometric models, volume rendering is becoming a hot topic in graphics. Obtaining a well-designed transfer function is a challenging task in general volume rendering. This paper constructs a flexible transfer function model for the rendering of finite element data. In finite element analysis, the information in a critical region, where the concerned properties have the extreme value, is generally most interesting. The extreme region and the embedded data are displayed by adjusting the opacity and the color parameters of the transfer function, respectively. The transfer function is beneficial for the generation of cloud pictures inside the finite element model. The flexible is tested and verified through a case study.",
"fno": "06754283",
"keywords": [
"Transfer Functions",
"Rendering Computer Graphics",
"Finite Element Analysis",
"Data Models",
"Data Visualization",
"Solid Modeling",
"Mathematical Model",
"Volume Rendering",
"Transfer Function",
"Finite Element Data"
],
"authors": [
{
"affiliation": null,
"fullName": "Yi Tang",
"givenName": "Yi",
"surname": "Tang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Zhong Ning Guo",
"givenName": "Zhong Ning",
"surname": "Guo",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Zhi Gang Huang",
"givenName": "Zhi Gang",
"surname": "Huang",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "wcse",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2013-12-01T00:00:00",
"pubType": "proceedings",
"pages": "185-189",
"year": "2013",
"issn": null,
"isbn": "978-1-4799-2882-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "06754282",
"articleId": "12OmNxd4tyG",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "06754284",
"articleId": "12OmNAo45N7",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iccsee/2012/4647/3/4647c098",
"title": "Structural Finite Element Method Based on Cloud Computing",
"doi": null,
"abstractUrl": "/proceedings-article/iccsee/2012/4647c098/12OmNBqdrcu",
"parentPublication": {
"id": "proceedings/iccsee/2012/4647/3",
"title": "Computer Science and Electronics Engineering, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wcse/2010/4303/2/4303b325",
"title": "A Combined Finite-Element and Finite-Volume Method in Reservoir Simulation",
"doi": null,
"abstractUrl": "/proceedings-article/wcse/2010/4303b325/12OmNCb3ftt",
"parentPublication": {
"id": "proceedings/wcse/2010/4303/2",
"title": "2010 Second World Congress on Software Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icisa/2013/0602/0/06579369",
"title": "A Finite Element Analysis of Conjugate Heat Transfer inside a Cavity with a Heat Generating Conducting Body",
"doi": null,
"abstractUrl": "/proceedings-article/icisa/2013/06579369/12OmNCulYls",
"parentPublication": {
"id": "proceedings/icisa/2013/0602/0",
"title": "2013 International Conference on Information Science and Applications (ICISA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-vis/1997/8262/0/82620387",
"title": "The multilevel finite element method for adaptive mesh optimization and visualization of volume data",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/1997/82620387/12OmNrAMEQ3",
"parentPublication": {
"id": "proceedings/ieee-vis/1997/8262/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icicta/2008/3357/2/3357c889",
"title": "Finite Element Analysis Application in the Long Distance Pipeline Maintenance",
"doi": null,
"abstractUrl": "/proceedings-article/icicta/2008/3357c889/12OmNyz5K0Z",
"parentPublication": {
"id": "icicta/2008/3357/2",
"title": "Intelligent Computation Technology and Automation, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-vis/1997/8262/0/82620213",
"title": "Efficient subdivision of finite-element datasets into consistent tetrahedra",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/1997/82620213/12OmNzG4gA4",
"parentPublication": {
"id": "proceedings/ieee-vis/1997/8262/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sibgrapi/2010/8420/0/05720337",
"title": "Comparison of Finite Element Bases for Global Illumination in Image Synthesis",
"doi": null,
"abstractUrl": "/proceedings-article/sibgrapi/2010/05720337/12OmNzYeB35",
"parentPublication": {
"id": "proceedings/sibgrapi/2010/8420/0",
"title": "2010 23rd SIBGRAPI Conference on Graphics, Patterns and Images",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/01/ttg2014010070",
"title": "GPU-Based Volume Visualization from High-Order Finite Element Fields",
"doi": null,
"abstractUrl": "/journal/tg/2014/01/ttg2014010070/13rRUEgs2M1",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tc/1995/04/t0531",
"title": "SPAR: A New Architecture for Large Finite Element Computations",
"doi": null,
"abstractUrl": "/journal/tc/1995/04/t0531/13rRUwbs1RD",
"parentPublication": {
"id": "trans/tc",
"title": "IEEE Transactions on Computers",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/12/ttg2012122315",
"title": "Coherency-Based Curve Compression for High-Order Finite Element Model Visualization",
"doi": null,
"abstractUrl": "/journal/tg/2012/12/ttg2012122315/13rRUxYIMUY",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNxQOjzD",
"title": "Visualization Conference, IEEE",
"acronym": "ieee-vis",
"groupId": "1000796",
"volume": "0",
"displayVolume": "0",
"year": "1997",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNzG4gA4",
"doi": "10.1109/VISUAL.1997.663885",
"title": "Efficient subdivision of finite-element datasets into consistent tetrahedra",
"normalizedTitle": "Efficient subdivision of finite-element datasets into consistent tetrahedra",
"abstract": "The paper discusses the problem of subdividing unstructured mesh topologies containing hexahedra, prisms, pyramids and tetrahedra into a consistent set of only tetrahedra, while preserving the overall mesh topology. Efficient algorithms for volume rendering, iso-contouring and particle advection exist for mesh topologies comprised solely of tetrahedra. General finite-element simulations however, consist mainly of hexahedra, and possibly prisms, pyramids and tetrahedra. Arbitrary subdivision of these mesh topologies into tetrahedra can lead to discontinuous behaviour across element faces. This will show up as visible artifacts in the iso-contouring and volume rendering algorithms, and lead to impossible face adjacency graphs for many algorithms. The authors present various properties of tetrahedral subdivisions, and an algorithm SOP determining a consistent subdivision containing a minimal set of tetrahedra.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The paper discusses the problem of subdividing unstructured mesh topologies containing hexahedra, prisms, pyramids and tetrahedra into a consistent set of only tetrahedra, while preserving the overall mesh topology. Efficient algorithms for volume rendering, iso-contouring and particle advection exist for mesh topologies comprised solely of tetrahedra. General finite-element simulations however, consist mainly of hexahedra, and possibly prisms, pyramids and tetrahedra. Arbitrary subdivision of these mesh topologies into tetrahedra can lead to discontinuous behaviour across element faces. This will show up as visible artifacts in the iso-contouring and volume rendering algorithms, and lead to impossible face adjacency graphs for many algorithms. The authors present various properties of tetrahedral subdivisions, and an algorithm SOP determining a consistent subdivision containing a minimal set of tetrahedra.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The paper discusses the problem of subdividing unstructured mesh topologies containing hexahedra, prisms, pyramids and tetrahedra into a consistent set of only tetrahedra, while preserving the overall mesh topology. Efficient algorithms for volume rendering, iso-contouring and particle advection exist for mesh topologies comprised solely of tetrahedra. General finite-element simulations however, consist mainly of hexahedra, and possibly prisms, pyramids and tetrahedra. Arbitrary subdivision of these mesh topologies into tetrahedra can lead to discontinuous behaviour across element faces. This will show up as visible artifacts in the iso-contouring and volume rendering algorithms, and lead to impossible face adjacency graphs for many algorithms. The authors present various properties of tetrahedral subdivisions, and an algorithm SOP determining a consistent subdivision containing a minimal set of tetrahedra.",
"fno": "82620213",
"keywords": [
"Finite Element Analysis Efficient Subdivision Finite Element Datasets Consistent Tetrahedra Unstructured Mesh Topology Hexahedra Prisms Pyramids Tetrahedra Mesh Topology Preservation Efficient Algorithms Volume Rendering Iso Contouring Particle Advection Finite Element Simulations Discontinuous Behaviour Element Faces Visible Artifacts Face Adjacency Graphs"
],
"authors": [
{
"affiliation": "Dept. of Comput. & Inf. Sci., Ohio State Univ., Columbus, OH, USA",
"fullName": "G. Albertelli",
"givenName": "G.",
"surname": "Albertelli",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Dept. of Comput. & Inf. Sci., Ohio State Univ., Columbus, OH, USA",
"fullName": "R.A. Crawfis",
"givenName": "R.A.",
"surname": "Crawfis",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ieee-vis",
"isOpenAccess": false,
"showRecommendedArticles": false,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "1997-10-01T00:00:00",
"pubType": "proceedings",
"pages": "213",
"year": "1997",
"issn": "1070-2385",
"isbn": "0-8186-8262-0",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "82620205",
"articleId": "12OmNzQhP7p",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "82620221",
"articleId": "12OmNC4eSCM",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNwdbUZd",
"title": "Geometric Modeling and Processing",
"acronym": "gmp",
"groupId": "1000306",
"volume": "0",
"displayVolume": "0",
"year": "2002",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNAhxjFi",
"doi": "10.1109/GMAP.2002.1027502",
"title": "Fair Triangle Mesh Generation with Discrete Elastica",
"normalizedTitle": "Fair Triangle Mesh Generation with Discrete Elastica",
"abstract": "Surface fairing, generating free-form surfaces satisfying aesthetic requirements, is important for many computer graphics and geometric modeling applications. A common approach for fair surface design consists of minimization of fairness measures penalizing large curvature values and curvature oscillations. The paper develops a numerical approach for fair surface modeling via curvature-driven evolutions of triangle meshes. Consider a smooth surface each point of which moves in the normal direction with speed equal to a function of curvature and curvature derivatives. Chosen the speed function properly, the evolving surface converges to a desired shape minimizing a given fairness measure. Smooth surface evolutions are approximated by evolutions of triangle meshes. A tangent speed component is used to improve the quality of the evolving mesh and to increase computational stability. Contributions of the paper include also an improved method for estimating the mean curvature.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Surface fairing, generating free-form surfaces satisfying aesthetic requirements, is important for many computer graphics and geometric modeling applications. A common approach for fair surface design consists of minimization of fairness measures penalizing large curvature values and curvature oscillations. The paper develops a numerical approach for fair surface modeling via curvature-driven evolutions of triangle meshes. Consider a smooth surface each point of which moves in the normal direction with speed equal to a function of curvature and curvature derivatives. Chosen the speed function properly, the evolving surface converges to a desired shape minimizing a given fairness measure. Smooth surface evolutions are approximated by evolutions of triangle meshes. A tangent speed component is used to improve the quality of the evolving mesh and to increase computational stability. Contributions of the paper include also an improved method for estimating the mean curvature.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Surface fairing, generating free-form surfaces satisfying aesthetic requirements, is important for many computer graphics and geometric modeling applications. A common approach for fair surface design consists of minimization of fairness measures penalizing large curvature values and curvature oscillations. The paper develops a numerical approach for fair surface modeling via curvature-driven evolutions of triangle meshes. Consider a smooth surface each point of which moves in the normal direction with speed equal to a function of curvature and curvature derivatives. Chosen the speed function properly, the evolving surface converges to a desired shape minimizing a given fairness measure. Smooth surface evolutions are approximated by evolutions of triangle meshes. A tangent speed component is used to improve the quality of the evolving mesh and to increase computational stability. Contributions of the paper include also an improved method for estimating the mean curvature.",
"fno": "16740119",
"keywords": [
"Mesh Fairing",
"Elastica Surfaces",
"Discrete Surface Flow",
"Laplace Beltrami Operator"
],
"authors": [
{
"affiliation": "Max-Planck-Institut für Informatik and University of Aizu",
"fullName": "Shin Yoshizawa",
"givenName": "Shin",
"surname": "Yoshizawa",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Max-Planck-Institut für Informatik and University of Aizu",
"fullName": "Alexander G. Belyaev",
"givenName": "Alexander G.",
"surname": "Belyaev",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "gmp",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2002-07-01T00:00:00",
"pubType": "proceedings",
"pages": "119",
"year": "2002",
"issn": null,
"isbn": "0-7695-1674-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "16740107",
"articleId": "12OmNqIzgYd",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "16740124",
"articleId": "12OmNy6qfHR",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/3dpvt/2002/1521/0/15210588",
"title": "Triangle Mesh-Based Surface Modeling Using Adaptive Smoothing and Implicit Surface Texture Integration",
"doi": null,
"abstractUrl": "/proceedings-article/3dpvt/2002/15210588/12OmNAObbH4",
"parentPublication": {
"id": "proceedings/3dpvt/2002/1521/0",
"title": "3D Data Processing Visualization and Transmission, International Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cad-cg/2005/2473/0/24730269",
"title": "A Global Laplacian Smoothing Approach with Feature Preservation",
"doi": null,
"abstractUrl": "/proceedings-article/cad-cg/2005/24730269/12OmNBTJIAu",
"parentPublication": {
"id": "proceedings/cad-cg/2005/2473/0",
"title": "Ninth International Conference on Computer Aided Design and Computer Graphics (CAD-CG'05)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccsit/2008/3308/0/3308a669",
"title": "A Hybrid Approach to Surface Segmentation of Sparse Triangle Meshes",
"doi": null,
"abstractUrl": "/proceedings-article/iccsit/2008/3308a669/12OmNBp52IP",
"parentPublication": {
"id": "proceedings/iccsit/2008/3308/0",
"title": "2008 International Conference on Computer Science and Information Technology",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cgi/2004/2171/0/21710010",
"title": "Curvature Tensor Based Triangle Mesh Segmentation with Boundary Rectification",
"doi": null,
"abstractUrl": "/proceedings-article/cgi/2004/21710010/12OmNvT2oXj",
"parentPublication": {
"id": "proceedings/cgi/2004/2171/0",
"title": "Proceedings. Computer Graphics International",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vg/2005/26/0/01500538",
"title": "Robust generation of signed distance fields from triangle meshes",
"doi": null,
"abstractUrl": "/proceedings-article/vg/2005/01500538/12OmNzT7Otj",
"parentPublication": {
"id": "proceedings/vg/2005/26/0",
"title": "Volume Graphics 2005",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/01/ttg2014010084",
"title": "Grouper: A Compact, Streamable Triangle Mesh Data Structure",
"doi": null,
"abstractUrl": "/journal/tg/2014/01/ttg2014010084/13rRUxBa562",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/1998/02/v0145",
"title": "Constructing Hierarchies for Triangle Meshes",
"doi": null,
"abstractUrl": "/journal/tg/1998/02/v0145/13rRUy0qnGc",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/10/08423201",
"title": "Empirical Comparison of Curvature Estimators on Volume Images and Triangle Meshes",
"doi": null,
"abstractUrl": "/journal/tg/2019/10/08423201/1cYd4WbM0mY",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNBqMDoJ",
"title": "2008 International Symposium on Information Science and Engineering (ISISE)",
"acronym": "isise",
"groupId": "1002561",
"volume": "2",
"displayVolume": "2",
"year": "2008",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNBUS7cC",
"doi": "10.1109/ISISE.2008.230",
"title": "A Tetrahedral Mesh Generation Algorithm from Medical Images",
"normalizedTitle": "A Tetrahedral Mesh Generation Algorithm from Medical Images",
"abstract": "Contour based finite element modeling techniques allow flexible control of the geometric accuracy and of the mesh density. A tetrahedral meshing algorithm is proposed to create tetrahedral meshes from medical images in two steps, the first is surface triangulation, and the second is volume triangulation, both performed between adjacent sections. After the side surfaces between adjacent sections and the planar domains enclosed by sectional contours are triangulated, the solid domains between adjacent sections are tetrahedralized. An advancing front method is used, in which a front consisting of boundary triangles from the sections is maintained, and two group operators are used to construct a group of tetrahedra at a time. The solid domain is discretized efficiently, and the quality of the generated tetrahedra is flexibly controlled due to the strategy of choosing a generation triangle.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Contour based finite element modeling techniques allow flexible control of the geometric accuracy and of the mesh density. A tetrahedral meshing algorithm is proposed to create tetrahedral meshes from medical images in two steps, the first is surface triangulation, and the second is volume triangulation, both performed between adjacent sections. After the side surfaces between adjacent sections and the planar domains enclosed by sectional contours are triangulated, the solid domains between adjacent sections are tetrahedralized. An advancing front method is used, in which a front consisting of boundary triangles from the sections is maintained, and two group operators are used to construct a group of tetrahedra at a time. The solid domain is discretized efficiently, and the quality of the generated tetrahedra is flexibly controlled due to the strategy of choosing a generation triangle.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Contour based finite element modeling techniques allow flexible control of the geometric accuracy and of the mesh density. A tetrahedral meshing algorithm is proposed to create tetrahedral meshes from medical images in two steps, the first is surface triangulation, and the second is volume triangulation, both performed between adjacent sections. After the side surfaces between adjacent sections and the planar domains enclosed by sectional contours are triangulated, the solid domains between adjacent sections are tetrahedralized. An advancing front method is used, in which a front consisting of boundary triangles from the sections is maintained, and two group operators are used to construct a group of tetrahedra at a time. The solid domain is discretized efficiently, and the quality of the generated tetrahedra is flexibly controlled due to the strategy of choosing a generation triangle.",
"fno": "3494b414",
"keywords": [
"Mesh Generation",
"Tetrahedra",
"Contours"
],
"authors": [
{
"affiliation": null,
"fullName": "Xin Chen",
"givenName": "Xin",
"surname": "Chen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Jie Shen",
"givenName": "Jie",
"surname": "Shen",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "isise",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2008-12-01T00:00:00",
"pubType": "proceedings",
"pages": "414-417",
"year": "2008",
"issn": null,
"isbn": "978-0-7695-3494-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "3494b410",
"articleId": "12OmNs0C9TV",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "3494b418",
"articleId": "12OmNviZlxy",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/isvd/2010/4112/0/4112a215",
"title": "Guaranteed Quality Tetrahedral Delaunay Meshing for Medical Images",
"doi": null,
"abstractUrl": "/proceedings-article/isvd/2010/4112a215/12OmNBOCWjK",
"parentPublication": {
"id": "proceedings/isvd/2010/4112/0",
"title": "2010 International Symposium on Voronoi Diagrams in Science and Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/visual/1999/5897/0/00809868",
"title": "Tetrahedral mesh compression with the cut-border machine",
"doi": null,
"abstractUrl": "/proceedings-article/visual/1999/00809868/12OmNBr4ev1",
"parentPublication": {
"id": "proceedings/visual/1999/5897/0",
"title": "Proceedings Visualization '99",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icicta/2009/3804/4/3804e525",
"title": "Generating Tetrahedral Mesh for Manifold Surface Model",
"doi": null,
"abstractUrl": "/proceedings-article/icicta/2009/3804e525/12OmNBvkdky",
"parentPublication": {
"id": "proceedings/icicta/2009/3804/4",
"title": "Intelligent Computation Technology and Automation, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/smi/2001/0853/0/08530286",
"title": "Constant-Time Neighbor Finding in Hierarchical Tetrahedral Meshes",
"doi": null,
"abstractUrl": "/proceedings-article/smi/2001/08530286/12OmNC943F8",
"parentPublication": {
"id": "proceedings/smi/2001/0853/0",
"title": "Shape Modeling and Applications, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-vis/1998/9176/0/91760287",
"title": "Simplification of Tetrahedral Meshes",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/1998/91760287/12OmNCeK2eN",
"parentPublication": {
"id": "proceedings/ieee-vis/1998/9176/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pdcat/2016/5081/0/07943387",
"title": "Tetrahedral Mesh Segmentation Based on Quality Criteria",
"doi": null,
"abstractUrl": "/proceedings-article/pdcat/2016/07943387/12OmNwM6A1x",
"parentPublication": {
"id": "proceedings/pdcat/2016/5081/0",
"title": "2016 17th International Conference on Parallel and Distributed Computing, Applications and Technologies (PDCAT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-vis/1999/5897/0/58970009",
"title": "Tetrahedral Mesh Compression with the Cut-Border Machine",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/1999/58970009/12OmNxeM46m",
"parentPublication": {
"id": "proceedings/ieee-vis/1999/5897/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-vis/2002/7498/0/7498chopra",
"title": "TetFusion: An Algorithm For Rapid Tetrahedral Mesh Simplification",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/2002/7498chopra/12OmNyQphf1",
"parentPublication": {
"id": "proceedings/ieee-vis/2002/7498/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-vis/1997/8262/0/82620213",
"title": "Efficient subdivision of finite-element datasets into consistent tetrahedra",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/1997/82620213/12OmNzG4gA4",
"parentPublication": {
"id": "proceedings/ieee-vis/1997/8262/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/1999/03/v0224",
"title": "Simplification of Tetrahedral Meshes with Error Bounds",
"doi": null,
"abstractUrl": "/journal/tg/1999/03/v0224/13rRUxly95r",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNzcxYUS",
"title": "2008 International Conference on Computer Science and Information Technology",
"acronym": "iccsit",
"groupId": "1002437",
"volume": "0",
"displayVolume": "0",
"year": "2008",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNBp52IP",
"doi": "10.1109/ICCSIT.2008.41",
"title": "A Hybrid Approach to Surface Segmentation of Sparse Triangle Meshes",
"normalizedTitle": "A Hybrid Approach to Surface Segmentation of Sparse Triangle Meshes",
"abstract": "A hybrid approach for the surface segmentation of sparse triangle meshes is presented. The algorithm realizes the segmentation of different regions of triangle meshes through twice segmentations phases: in the first phase, for sparse mesh regions, the edge-based method is used after combination of planes according to the variance of the dihedral angle of normal vector; in the second phase, for other surfaces, the vertex-based method is used to calculate approximate curvature of every triangle mesh according to the evaluated curvature of vertices, and then to realize segmentation through region growing method. Satisfactory results have been achieved for sparse meshes of mechanical CAD models.",
"abstracts": [
{
"abstractType": "Regular",
"content": "A hybrid approach for the surface segmentation of sparse triangle meshes is presented. The algorithm realizes the segmentation of different regions of triangle meshes through twice segmentations phases: in the first phase, for sparse mesh regions, the edge-based method is used after combination of planes according to the variance of the dihedral angle of normal vector; in the second phase, for other surfaces, the vertex-based method is used to calculate approximate curvature of every triangle mesh according to the evaluated curvature of vertices, and then to realize segmentation through region growing method. Satisfactory results have been achieved for sparse meshes of mechanical CAD models.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "A hybrid approach for the surface segmentation of sparse triangle meshes is presented. The algorithm realizes the segmentation of different regions of triangle meshes through twice segmentations phases: in the first phase, for sparse mesh regions, the edge-based method is used after combination of planes according to the variance of the dihedral angle of normal vector; in the second phase, for other surfaces, the vertex-based method is used to calculate approximate curvature of every triangle mesh according to the evaluated curvature of vertices, and then to realize segmentation through region growing method. Satisfactory results have been achieved for sparse meshes of mechanical CAD models.",
"fno": "3308a669",
"keywords": [
"Surface Segmentation",
"Sparse Meshes",
"Triangle Mesh",
"Algorithm"
],
"authors": [
{
"affiliation": null,
"fullName": "Fangmin Dong",
"givenName": "Fangmin",
"surname": "Dong",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Rui Zhang",
"givenName": "Rui",
"surname": "Zhang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Yong Liu",
"givenName": "Yong",
"surname": "Liu",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iccsit",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2008-08-01T00:00:00",
"pubType": "proceedings",
"pages": "669-674",
"year": "2008",
"issn": null,
"isbn": "978-0-7695-3308-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "3308a665",
"articleId": "12OmNzkuKLl",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "3308a675",
"articleId": "12OmNwekjws",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/gmp/2002/1674/0/16740119",
"title": "Fair Triangle Mesh Generation with Discrete Elastica",
"doi": null,
"abstractUrl": "/proceedings-article/gmp/2002/16740119/12OmNAhxjFi",
"parentPublication": {
"id": "proceedings/gmp/2002/1674/0",
"title": "Geometric Modeling and Processing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-vis/1997/8262/0/82620379",
"title": "Smooth hierarchical surface triangulations",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/1997/82620379/12OmNqH9hgj",
"parentPublication": {
"id": "proceedings/ieee-vis/1997/8262/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dcc/2009/3592/0/3592a420",
"title": "Out-of-Core Progressive Lossless Compression and Selective Decompression of Large Triangle Meshes",
"doi": null,
"abstractUrl": "/proceedings-article/dcc/2009/3592a420/12OmNs5rkQI",
"parentPublication": {
"id": "proceedings/dcc/2009/3592/0",
"title": "2009 Data Compression Conference. DCC 2009",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sibgrapi/2012/4829/0/4829a110",
"title": "ESQ: Editable SQuad Representation for Triangle Meshes",
"doi": null,
"abstractUrl": "/proceedings-article/sibgrapi/2012/4829a110/12OmNxR5UPg",
"parentPublication": {
"id": "proceedings/sibgrapi/2012/4829/0",
"title": "2012 25th SIBGRAPI Conference on Graphics, Patterns and Images",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sibgrapi/2009/3813/0/3813a072",
"title": "Surface Reconstruction: An Improved Marching Triangle Algorithm for Scalar and Vector Implicit Field Representations",
"doi": null,
"abstractUrl": "/proceedings-article/sibgrapi/2009/3813a072/12OmNy2Jt55",
"parentPublication": {
"id": "proceedings/sibgrapi/2009/3813/0",
"title": "2009 XXII Brazilian Symposium on Computer Graphics and Image Processing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fskd/2008/3305/4/3305d590",
"title": "Partition Triangle Meshes into Coarsely Quadrangular Segmentation",
"doi": null,
"abstractUrl": "/proceedings-article/fskd/2008/3305d590/12OmNzBOi0N",
"parentPublication": {
"id": "proceedings/fskd/2008/3305/4",
"title": "Fuzzy Systems and Knowledge Discovery, Fourth International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icig/2009/3883/0/3883a577",
"title": "Construct G1 Smooth Surface by Using Triangular Gregory Patches",
"doi": null,
"abstractUrl": "/proceedings-article/icig/2009/3883a577/12OmNzdoMtX",
"parentPublication": {
"id": "proceedings/icig/2009/3883/0",
"title": "Image and Graphics, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2000/01/v0079",
"title": "Compressed Progressive Meshes",
"doi": null,
"abstractUrl": "/journal/tg/2000/01/v0079/13rRUwhpBNZ",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/1999/04/v0308",
"title": "Partitioning 3D Surface Meshes Using Watershed Segmentation",
"doi": null,
"abstractUrl": "/journal/tg/1999/04/v0308/13rRUxly9dI",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/1998/02/v0145",
"title": "Constructing Hierarchies for Triangle Meshes",
"doi": null,
"abstractUrl": "/journal/tg/1998/02/v0145/13rRUy0qnGc",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNxQOjzD",
"title": "Visualization Conference, IEEE",
"acronym": "ieee-vis",
"groupId": "1000796",
"volume": "0",
"displayVolume": "0",
"year": "1997",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNqH9hgj",
"doi": "10.1109/VISUAL.1997.663906",
"title": "Smooth hierarchical surface triangulations",
"normalizedTitle": "Smooth hierarchical surface triangulations",
"abstract": "Presents a new method to produce a hierarchical set of triangle meshes that can be used to blend different levels of detail in a smooth fashion. The algorithm produces a sequence of meshes /spl Mscr//sub 0/, /spl Mscr//sub 1/, /spl Mscr//sub 2/..., /spl Mscr//sub n/, where each mesh /spl Mscr//sub i/ can be transformed to mesh /spl Mscr//sub i+1/ through a set of triangle-collapse operations. For each triangle, a function is generated that approximates the underlying surface in the area of the triangle, and this function serves as a basis for assigning a weight to the triangle in the ordering operation, and for supplying the point to which the triangles are collapsed. This technique allows us to view a triangulated surface model at varying levels of detail while insuring that the simplified mesh approximates the original surface well.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Presents a new method to produce a hierarchical set of triangle meshes that can be used to blend different levels of detail in a smooth fashion. The algorithm produces a sequence of meshes /spl Mscr//sub 0/, /spl Mscr//sub 1/, /spl Mscr//sub 2/..., /spl Mscr//sub n/, where each mesh /spl Mscr//sub i/ can be transformed to mesh /spl Mscr//sub i+1/ through a set of triangle-collapse operations. For each triangle, a function is generated that approximates the underlying surface in the area of the triangle, and this function serves as a basis for assigning a weight to the triangle in the ordering operation, and for supplying the point to which the triangles are collapsed. This technique allows us to view a triangulated surface model at varying levels of detail while insuring that the simplified mesh approximates the original surface well.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Presents a new method to produce a hierarchical set of triangle meshes that can be used to blend different levels of detail in a smooth fashion. The algorithm produces a sequence of meshes /spl Mscr//sub 0/, /spl Mscr//sub 1/, /spl Mscr//sub 2/..., /spl Mscr//sub n/, where each mesh /spl Mscr//sub i/ can be transformed to mesh /spl Mscr//sub i+1/ through a set of triangle-collapse operations. For each triangle, a function is generated that approximates the underlying surface in the area of the triangle, and this function serves as a basis for assigning a weight to the triangle in the ordering operation, and for supplying the point to which the triangles are collapsed. This technique allows us to view a triangulated surface model at varying levels of detail while insuring that the simplified mesh approximates the original surface well.",
"fno": "82620379",
"keywords": [
"Mesh Generation Smooth Hierarchical Surface Triangulations Triangle Meshes Level Of Detail Blending Mesh Sequence Mesh Transformation Triangle Collapse Operations Function Generation Underlying Surface Approximation Weight Assignment Ordering Operation Triangulated Surface Model Mesh Simplification Level Of Detail Representation Shape Approximation Data Visualization"
],
"authors": [
{
"affiliation": "Center for Image Process. & Integrated Comput., California Univ., Davis, CA, USA",
"fullName": "T.S. Gieng",
"givenName": "T.S.",
"surname": "Gieng",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Center for Image Process. & Integrated Comput., California Univ., Davis, CA, USA",
"fullName": "B. Hamann",
"givenName": "B.",
"surname": "Hamann",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Center for Image Process. & Integrated Comput., California Univ., Davis, CA, USA",
"fullName": "K.I. Joy",
"givenName": "K.I.",
"surname": "Joy",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Center for Image Process. & Integrated Comput., California Univ., Davis, CA, USA",
"fullName": "G.L. Schussman",
"givenName": "G.L.",
"surname": "Schussman",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Center for Image Process. & Integrated Comput., California Univ., Davis, CA, USA",
"fullName": "I.J. Trotts",
"givenName": "I.J.",
"surname": "Trotts",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ieee-vis",
"isOpenAccess": false,
"showRecommendedArticles": false,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "1997-10-01T00:00:00",
"pubType": "proceedings",
"pages": "379",
"year": "1997",
"issn": "1070-2385",
"isbn": "0-8186-8262-0",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "82620371",
"articleId": "12OmNCbCrIi",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "82620387",
"articleId": "12OmNrAMEQ3",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNx8wTfK",
"title": "Proceedings. Computer Graphics International",
"acronym": "cgi",
"groupId": "1000132",
"volume": "0",
"displayVolume": "0",
"year": "2004",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNvA1h9i",
"doi": "10.1109/CGI.2004.1309222",
"title": "Memory Efficient Adjacent Triangle Connectivity of a Vertex Using Triangle Strips",
"normalizedTitle": "Memory Efficient Adjacent Triangle Connectivity of a Vertex Using Triangle Strips",
"abstract": "We often need to refer to adjacent elements (e.g., vertices, edges and triangles) in triangle meshes for rendering, mesh simplification and other processes. It is, however, sometimes impossible to prepare the enormous memory needed to represent element connectivity in gigantic triangle meshes. This paper proposes a new scheme for referring to adjacent triangles around a vertex in non-manifold triangle meshes. First, we introduce the constraints to allow random access to a triangle in a sequence of triangle strips. Then, for each vertex, we construct a list of references to its adjacent strips as a representation of triangle connectivity. Experimental results show that, compared to conventional methods, our scheme can reduce the total size of a triangle mesh and adjacent triangle connectivity to about 50%.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We often need to refer to adjacent elements (e.g., vertices, edges and triangles) in triangle meshes for rendering, mesh simplification and other processes. It is, however, sometimes impossible to prepare the enormous memory needed to represent element connectivity in gigantic triangle meshes. This paper proposes a new scheme for referring to adjacent triangles around a vertex in non-manifold triangle meshes. First, we introduce the constraints to allow random access to a triangle in a sequence of triangle strips. Then, for each vertex, we construct a list of references to its adjacent strips as a representation of triangle connectivity. Experimental results show that, compared to conventional methods, our scheme can reduce the total size of a triangle mesh and adjacent triangle connectivity to about 50%.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We often need to refer to adjacent elements (e.g., vertices, edges and triangles) in triangle meshes for rendering, mesh simplification and other processes. It is, however, sometimes impossible to prepare the enormous memory needed to represent element connectivity in gigantic triangle meshes. This paper proposes a new scheme for referring to adjacent triangles around a vertex in non-manifold triangle meshes. First, we introduce the constraints to allow random access to a triangle in a sequence of triangle strips. Then, for each vertex, we construct a list of references to its adjacent strips as a representation of triangle connectivity. Experimental results show that, compared to conventional methods, our scheme can reduce the total size of a triangle mesh and adjacent triangle connectivity to about 50%.",
"fno": "21710278",
"keywords": [],
"authors": [
{
"affiliation": "Ricoh Company, Ltd.",
"fullName": "Hidekuni Annaka",
"givenName": "Hidekuni",
"surname": "Annaka",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Ricoh Company, Ltd.",
"fullName": "Tsukasa Matsuoka",
"givenName": "Tsukasa",
"surname": "Matsuoka",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cgi",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2004-06-01T00:00:00",
"pubType": "proceedings",
"pages": "278-281",
"year": "2004",
"issn": "1530-1052",
"isbn": "0-7695-2171-1",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "21710274",
"articleId": "12OmNrkjVpC",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "21710284",
"articleId": "12OmNBOllne",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/rt/2007/1629/0/04342586",
"title": "Ray-Strips: A Compact Mesh Representation for Interactive Ray Tracing",
"doi": null,
"abstractUrl": "/proceedings-article/rt/2007/04342586/12OmNASILFQ",
"parentPublication": {
"id": "proceedings/rt/2007/1629/0",
"title": "IEEE/ EG Symposium on Interactive Ray Tracing 2007",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dpvt/2002/1521/0/15210380",
"title": "Concentric Strips: Algorithms and Architecture for the Compression/Decompression of Triangle Meshes",
"doi": null,
"abstractUrl": "/proceedings-article/3dpvt/2002/15210380/12OmNAoUTv6",
"parentPublication": {
"id": "proceedings/3dpvt/2002/1521/0",
"title": "3D Data Processing Visualization and Transmission, International Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-vis/1996/3673/0/36730319",
"title": "Optimizing Triangle Strips for Fast Rendering",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/1996/36730319/12OmNBDgZ3b",
"parentPublication": {
"id": "proceedings/ieee-vis/1996/3673/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-vis/1999/5897/0/58970019",
"title": "Skip Strips: Maintaining Triangle Strips for View-dependent Rendering",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/1999/58970019/12OmNBkxswI",
"parentPublication": {
"id": "proceedings/ieee-vis/1999/5897/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pg/2003/2028/0/20280271",
"title": "DStrips: Dynamic Triangle Strips for Real-Time Mesh Simplification and Rendering",
"doi": null,
"abstractUrl": "/proceedings-article/pg/2003/20280271/12OmNCdTeN9",
"parentPublication": {
"id": "proceedings/pg/2003/2028/0",
"title": "Computer Graphics and Applications, Pacific Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccsa/2010/3999/0/3999a135",
"title": "Adaptive Keyframing Animation on the GPU Using Triangle Strips",
"doi": null,
"abstractUrl": "/proceedings-article/iccsa/2010/3999a135/12OmNx0RIZW",
"parentPublication": {
"id": "proceedings/iccsa/2010/3999/0",
"title": "2010 International Conference on Computational Science and Its Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vv/2004/8781/0/87810071",
"title": "Texture-Encoded Tetrahedral Strips",
"doi": null,
"abstractUrl": "/proceedings-article/vv/2004/87810071/12OmNxwnctV",
"parentPublication": {
"id": "proceedings/vv/2004/8781/0",
"title": "Volume Visualization and Graphics, IEEE Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cgi/2004/2171/0/21710002",
"title": "Multi-Path Algorithm for Triangle Strips",
"doi": null,
"abstractUrl": "/proceedings-article/cgi/2004/21710002/12OmNykkB84",
"parentPublication": {
"id": "proceedings/cgi/2004/2171/0",
"title": "Proceedings. Computer Graphics International",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icig/2007/2929/0/29290966",
"title": "Connectivity Compression for Stripified Triangle Meshes Using Basic Strips",
"doi": null,
"abstractUrl": "/proceedings-article/icig/2007/29290966/12OmNzzxuuQ",
"parentPublication": {
"id": "proceedings/icig/2007/2929/0",
"title": "Image and Graphics, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/1999/01/v0047",
"title": "Edgebreaker: Connectivity Compression for Triangle Meshes",
"doi": null,
"abstractUrl": "/journal/tg/1999/01/v0047/13rRUILLkve",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNqG0SXH",
"title": "Computer Graphics and Applications, Pacific Conference on",
"acronym": "pg",
"groupId": "1000130",
"volume": "0",
"displayVolume": "0",
"year": "2000",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNwdtwaQ",
"doi": "10.1109/PCCGA.2000.883946",
"title": "Efficient Coding of Non-Triangular Mesh Connectivity",
"normalizedTitle": "Efficient Coding of Non-Triangular Mesh Connectivity",
"abstract": "We describe an efficient algorithm for coding the connectivity information of general polygon meshes. In contrast to most existing algorithms which are suitable only for triangular meshes, and pay a penalty for treatment of non-triangular faces, this algorithm codes the connectivity information in a direct manner. Our treatment of the special case of triangular meshes is shown to be equivalent to the Edgebreaker algorithm. Using our methods, any triangle mesh may be coded in no more than 2 bits/triangle (approximately 4 bits/vertex), a quadrilateral mesh in no more than 3.5 bits/quad (approximately 3.5 bits/vertex), and the most common case of a quad mesh with few triangles in no more than 4 bits/polygon.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We describe an efficient algorithm for coding the connectivity information of general polygon meshes. In contrast to most existing algorithms which are suitable only for triangular meshes, and pay a penalty for treatment of non-triangular faces, this algorithm codes the connectivity information in a direct manner. Our treatment of the special case of triangular meshes is shown to be equivalent to the Edgebreaker algorithm. Using our methods, any triangle mesh may be coded in no more than 2 bits/triangle (approximately 4 bits/vertex), a quadrilateral mesh in no more than 3.5 bits/quad (approximately 3.5 bits/vertex), and the most common case of a quad mesh with few triangles in no more than 4 bits/polygon.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We describe an efficient algorithm for coding the connectivity information of general polygon meshes. In contrast to most existing algorithms which are suitable only for triangular meshes, and pay a penalty for treatment of non-triangular faces, this algorithm codes the connectivity information in a direct manner. Our treatment of the special case of triangular meshes is shown to be equivalent to the Edgebreaker algorithm. Using our methods, any triangle mesh may be coded in no more than 2 bits/triangle (approximately 4 bits/vertex), a quadrilateral mesh in no more than 3.5 bits/quad (approximately 3.5 bits/vertex), and the most common case of a quad mesh with few triangles in no more than 4 bits/polygon.",
"fno": "08680235",
"keywords": [
"Mesh Compression",
"Coding"
],
"authors": [
{
"affiliation": "Technion- Israel Institute of Technology",
"fullName": "Boris Kronrod",
"givenName": "Boris",
"surname": "Kronrod",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Technion- Israel Institute of Technology",
"fullName": "Craig Gotsman",
"givenName": "Craig",
"surname": "Gotsman",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "pg",
"isOpenAccess": false,
"showRecommendedArticles": false,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2000-10-01T00:00:00",
"pubType": "proceedings",
"pages": "235",
"year": "2000",
"issn": null,
"isbn": "0-7695-0868-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08680225",
"articleId": "12OmNrNh0BN",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08680243",
"articleId": "12OmNBZpH8t",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNzzxuxr",
"title": "4th International Conference on Digital Home (ICDH)",
"acronym": "icdh",
"groupId": "1802037",
"volume": "0",
"displayVolume": "0",
"year": "2012",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNxETa4O",
"doi": "10.1109/ICDH.2012.16",
"title": "2D Shape Manipulation Using Equilateral Triangle Mesh",
"normalizedTitle": "2D Shape Manipulation Using Equilateral Triangle Mesh",
"abstract": "This paper presents an improved algorithm to achieve 2D image shape manipulation by real-time control. The shape is discretized as regular equilateral triangles mesh. User places handles on triangle vertices and manipulates the shape by dragging the handles to the desired positions. Meanwhile the concept of mesh-step is proposed to control mesh density. At last, based on the algorithm of Igarashi et al. [1], a better approach of shape deformation is provided because of the equilateral triangle attributes. Our algorithm could provide a better user experience when placing handles and a more reasonable result of 2D shape manipulation will be obtained especially when the shape contains regular factors. Simultaneously, the running speed advantage of our algorithm is more obviously with the mesh vertices increasing.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper presents an improved algorithm to achieve 2D image shape manipulation by real-time control. The shape is discretized as regular equilateral triangles mesh. User places handles on triangle vertices and manipulates the shape by dragging the handles to the desired positions. Meanwhile the concept of mesh-step is proposed to control mesh density. At last, based on the algorithm of Igarashi et al. [1], a better approach of shape deformation is provided because of the equilateral triangle attributes. Our algorithm could provide a better user experience when placing handles and a more reasonable result of 2D shape manipulation will be obtained especially when the shape contains regular factors. Simultaneously, the running speed advantage of our algorithm is more obviously with the mesh vertices increasing.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper presents an improved algorithm to achieve 2D image shape manipulation by real-time control. The shape is discretized as regular equilateral triangles mesh. User places handles on triangle vertices and manipulates the shape by dragging the handles to the desired positions. Meanwhile the concept of mesh-step is proposed to control mesh density. At last, based on the algorithm of Igarashi et al. [1], a better approach of shape deformation is provided because of the equilateral triangle attributes. Our algorithm could provide a better user experience when placing handles and a more reasonable result of 2D shape manipulation will be obtained especially when the shape contains regular factors. Simultaneously, the running speed advantage of our algorithm is more obviously with the mesh vertices increasing.",
"fno": "4899a432",
"keywords": [
"Mesh Edit",
"Interaction",
"Deformation",
"Shape Manipulation",
"Equilateral Triangle"
],
"authors": [
{
"affiliation": null,
"fullName": "Yong Jie Shi",
"givenName": "Yong Jie",
"surname": "Shi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Dan Xu",
"givenName": "Dan",
"surname": "Xu",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icdh",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2012-11-01T00:00:00",
"pubType": "proceedings",
"pages": "432-437",
"year": "2012",
"issn": null,
"isbn": "978-1-4673-1348-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "4899a426",
"articleId": "12OmNxiKrZZ",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "4899a438",
"articleId": "12OmNs0TL2p",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/gmp/2002/1674/0/16740119",
"title": "Fair Triangle Mesh Generation with Discrete Elastica",
"doi": null,
"abstractUrl": "/proceedings-article/gmp/2002/16740119/12OmNAhxjFi",
"parentPublication": {
"id": "proceedings/gmp/2002/1674/0",
"title": "Geometric Modeling and Processing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cmsp/2011/4356/1/4356a184",
"title": "Consistent Mesh Segmentation Using Protrusion Function and Graph Cut",
"doi": null,
"abstractUrl": "/proceedings-article/cmsp/2011/4356a184/12OmNBW0vCO",
"parentPublication": {
"id": "proceedings/cmsp/2011/4356/1",
"title": "Multimedia and Signal Processing, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccsit/2008/3308/0/3308a669",
"title": "A Hybrid Approach to Surface Segmentation of Sparse Triangle Meshes",
"doi": null,
"abstractUrl": "/proceedings-article/iccsit/2008/3308a669/12OmNBp52IP",
"parentPublication": {
"id": "proceedings/iccsit/2008/3308/0",
"title": "2008 International Conference on Computer Science and Information Technology",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icycs/2008/3398/0/3398b373",
"title": "Robust Feature Extraction for the Composite Surface Mesh from STL File",
"doi": null,
"abstractUrl": "/proceedings-article/icycs/2008/3398b373/12OmNCesr9G",
"parentPublication": {
"id": "proceedings/icycs/2008/3398/0",
"title": "2008 9th International Conference for Young Computer Scientists",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isise/2010/4360/0/4360a202",
"title": "Direct Manipulation of 3D Mesh Deformation",
"doi": null,
"abstractUrl": "/proceedings-article/isise/2010/4360a202/12OmNx1qUZ2",
"parentPublication": {
"id": "proceedings/isise/2010/4360/0",
"title": "2010 Third International Symposium on Information Science and Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/crv/2004/2127/0/21270076",
"title": "Shape from Contours and Multiple Stereo — A Hierarchical, Mesh-Based Approach",
"doi": null,
"abstractUrl": "/proceedings-article/crv/2004/21270076/12OmNxEjXWf",
"parentPublication": {
"id": "proceedings/crv/2004/2127/0",
"title": "First Canadian Conference on Computer and Robot Vision, 2004. Proceedings.",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sibgrapi/2009/3813/0/3813a072",
"title": "Surface Reconstruction: An Improved Marching Triangle Algorithm for Scalar and Vector Implicit Field Representations",
"doi": null,
"abstractUrl": "/proceedings-article/sibgrapi/2009/3813a072/12OmNy2Jt55",
"parentPublication": {
"id": "proceedings/sibgrapi/2009/3813/0",
"title": "2009 XXII Brazilian Symposium on Computer Graphics and Image Processing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/1999/0210/0/02100280",
"title": "Triangle Mesh Compression for Fast Rendering",
"doi": null,
"abstractUrl": "/proceedings-article/iv/1999/02100280/12OmNyqRnhM",
"parentPublication": {
"id": "proceedings/iv/1999/0210/0",
"title": "1999 IEEE International Conference on Information Visualization (Cat. No. PR00210)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/01/ttg2013010045",
"title": "Linear Correlations between Spatial and Normal Noise in Triangle Meshes",
"doi": null,
"abstractUrl": "/journal/tg/2013/01/ttg2013010045/13rRUxASuGj",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/1998/02/v0145",
"title": "Constructing Hierarchies for Triangle Meshes",
"doi": null,
"abstractUrl": "/journal/tg/1998/02/v0145/13rRUy0qnGc",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNro0Ib9",
"title": "Volume Graphics 2005",
"acronym": "vg",
"groupId": "1002149",
"volume": "0",
"displayVolume": "0",
"year": "2005",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNzT7Otj",
"doi": "10.1109/VG.2005.194111",
"title": "Robust generation of signed distance fields from triangle meshes",
"normalizedTitle": "Robust generation of signed distance fields from triangle meshes",
"abstract": "A new method for robust generation of distance fields from triangle meshes is presented. Graphics hardware is used to accelerate a technique for generating layered depth images. From multiple layered depth images, a binary volume and a point representation are extracted. The point information is then used to convert the binary volume into a distance field. The method is robust and handles holes, spurious triangles and ambiguities. Moreover, the method lends itself to Boolean operations between solids. Since a point cloud as well as a signed distance is generated, it is possible to extract an iso-surface of the distance field and fit it to the point set. Using this method, one may recover sharp edge information. Examples are given where the method for generating distance fields coupled with mesh fitting is used to perform Boolean and morphological operations on triangle meshes.",
"abstracts": [
{
"abstractType": "Regular",
"content": "A new method for robust generation of distance fields from triangle meshes is presented. Graphics hardware is used to accelerate a technique for generating layered depth images. From multiple layered depth images, a binary volume and a point representation are extracted. The point information is then used to convert the binary volume into a distance field. The method is robust and handles holes, spurious triangles and ambiguities. Moreover, the method lends itself to Boolean operations between solids. Since a point cloud as well as a signed distance is generated, it is possible to extract an iso-surface of the distance field and fit it to the point set. Using this method, one may recover sharp edge information. Examples are given where the method for generating distance fields coupled with mesh fitting is used to perform Boolean and morphological operations on triangle meshes.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "A new method for robust generation of distance fields from triangle meshes is presented. Graphics hardware is used to accelerate a technique for generating layered depth images. From multiple layered depth images, a binary volume and a point representation are extracted. The point information is then used to convert the binary volume into a distance field. The method is robust and handles holes, spurious triangles and ambiguities. Moreover, the method lends itself to Boolean operations between solids. Since a point cloud as well as a signed distance is generated, it is possible to extract an iso-surface of the distance field and fit it to the point set. Using this method, one may recover sharp edge information. Examples are given where the method for generating distance fields coupled with mesh fitting is used to perform Boolean and morphological operations on triangle meshes.",
"fno": "01500538",
"keywords": [
"Mesh Generation",
"Boolean Algebra",
"Computational Geometry",
"Computer Graphics",
"Data Structures",
"Robust Generation",
"Signed Distance Fields",
"Triangle Meshes",
"Robust Distance Field Generation",
"Mesh Generation",
"Graphics Hardware",
"Multiple Layered Depth Images",
"Binary Volume Extraction",
"Point Representation Extraction",
"Boolean Operations",
"Edge Information",
"Mesh Fitting",
"Boolean Algebra",
"Computational Geometry",
"Computer Graphics",
"Data Structures",
"Robustness",
"Data Mining",
"Graphics",
"Hardware",
"Acceleration",
"Image Generation",
"Image Converters",
"Solids",
"Clouds",
"Mesh Generation"
],
"authors": [
{
"affiliation": "Dept. of Informatics & Math. Modelling, Denmark Tech. Univ., Denmark",
"fullName": "J.A. Baerentzen",
"givenName": "J.A.",
"surname": "Baerentzen",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vg",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2005-03-01T00:00:00",
"pubType": "proceedings",
"pages": "167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239",
"year": "2005",
"issn": "1727-8376",
"isbn": "3-905673-26-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "01500537",
"articleId": "12OmNyywxC8",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "01500539",
"articleId": "12OmNC2OSFK",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/3dpvt/2002/1521/0/15210380",
"title": "Concentric Strips: Algorithms and Architecture for the Compression/Decompression of Triangle Meshes",
"doi": null,
"abstractUrl": "/proceedings-article/3dpvt/2002/15210380/12OmNAoUTv6",
"parentPublication": {
"id": "proceedings/3dpvt/2002/1521/0",
"title": "3D Data Processing Visualization and Transmission, International Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dpvt/2006/2825/0/282500358",
"title": "Linking Feature Lines on 3D Triangle Meshes with Artificial Potential Fields",
"doi": null,
"abstractUrl": "/proceedings-article/3dpvt/2006/282500358/12OmNBlXs3j",
"parentPublication": {
"id": "proceedings/3dpvt/2006/2825/0",
"title": "Third International Symposium on 3D Data Processing, Visualization, and Transmission (3DPVT'06)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccsit/2008/3308/0/3308a669",
"title": "A Hybrid Approach to Surface Segmentation of Sparse Triangle Meshes",
"doi": null,
"abstractUrl": "/proceedings-article/iccsit/2008/3308a669/12OmNBp52IP",
"parentPublication": {
"id": "proceedings/iccsit/2008/3308/0",
"title": "2008 International Conference on Computer Science and Information Technology",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-vis/2003/2030/0/20300012",
"title": "Signed Distance Transform Using Graphics Hardware",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/2003/20300012/12OmNCwCLtC",
"parentPublication": {
"id": "proceedings/ieee-vis/2003/2030/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fskd/2008/3305/4/3305d590",
"title": "Partition Triangle Meshes into Coarsely Quadrangular Segmentation",
"doi": null,
"abstractUrl": "/proceedings-article/fskd/2008/3305d590/12OmNzBOi0N",
"parentPublication": {
"id": "proceedings/fskd/2008/3305/4",
"title": "Fuzzy Systems and Knowledge Discovery, Fourth International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icig/2007/2929/0/29290966",
"title": "Connectivity Compression for Stripified Triangle Meshes Using Basic Strips",
"doi": null,
"abstractUrl": "/proceedings-article/icig/2007/29290966/12OmNzzxuuQ",
"parentPublication": {
"id": "proceedings/icig/2007/2929/0",
"title": "Image and Graphics, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/1999/01/v0047",
"title": "Edgebreaker: Connectivity Compression for Triangle Meshes",
"doi": null,
"abstractUrl": "/journal/tg/1999/01/v0047/13rRUILLkve",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2005/03/v0243",
"title": "Signed Distance Computation Using the Angle Weighted Pseudonormal",
"doi": null,
"abstractUrl": "/journal/tg/2005/03/v0243/13rRUxYINf3",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/1998/02/v0145",
"title": "Constructing Hierarchies for Triangle Meshes",
"doi": null,
"abstractUrl": "/journal/tg/1998/02/v0145/13rRUy0qnGc",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNx0A7K1",
"title": "Face and Gesture 2011",
"acronym": "fg",
"groupId": "1000065",
"volume": "0",
"displayVolume": "0",
"year": "2011",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNAoUTky",
"doi": "10.1109/FG.2011.5771428",
"title": "Prop-free pointing detection in dynamic cluttered environments",
"normalizedTitle": "Prop-free pointing detection in dynamic cluttered environments",
"abstract": "Vision-based prop-free pointing detection is challenging both from an algorithmic and a systems standpoint. From a computer vision perspective, accurately determining where multiple users are pointing is difficult in cluttered environments with dynamic scene content. Standard approaches relying on appearance models or background subtraction to segment users operate poorly in this domain. We propose a method that focuses on motion analysis to detect pointing gestures and robustly estimate the pointing direction. Our algorithm is self-initializing; as the user points, we analyze the observed motion from two cameras and infer rotation centers that best explain the observed motion. From these, we group pixel-level flow into dominant pointing vectors that each originate from a rotation center and merge across views to obtain 3D pointing vectors. However, our proposed algorithm is computationally expensive, posing systems challenges even with current computing infrastructure. We achieve interactive speeds by exploiting coarse-grained parallelization over a cluster of computers. In unconstrained environments, we obtain an average angular precision of 2.7°.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Vision-based prop-free pointing detection is challenging both from an algorithmic and a systems standpoint. From a computer vision perspective, accurately determining where multiple users are pointing is difficult in cluttered environments with dynamic scene content. Standard approaches relying on appearance models or background subtraction to segment users operate poorly in this domain. We propose a method that focuses on motion analysis to detect pointing gestures and robustly estimate the pointing direction. Our algorithm is self-initializing; as the user points, we analyze the observed motion from two cameras and infer rotation centers that best explain the observed motion. From these, we group pixel-level flow into dominant pointing vectors that each originate from a rotation center and merge across views to obtain 3D pointing vectors. However, our proposed algorithm is computationally expensive, posing systems challenges even with current computing infrastructure. We achieve interactive speeds by exploiting coarse-grained parallelization over a cluster of computers. In unconstrained environments, we obtain an average angular precision of 2.7°.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Vision-based prop-free pointing detection is challenging both from an algorithmic and a systems standpoint. From a computer vision perspective, accurately determining where multiple users are pointing is difficult in cluttered environments with dynamic scene content. Standard approaches relying on appearance models or background subtraction to segment users operate poorly in this domain. We propose a method that focuses on motion analysis to detect pointing gestures and robustly estimate the pointing direction. Our algorithm is self-initializing; as the user points, we analyze the observed motion from two cameras and infer rotation centers that best explain the observed motion. From these, we group pixel-level flow into dominant pointing vectors that each originate from a rotation center and merge across views to obtain 3D pointing vectors. However, our proposed algorithm is computationally expensive, posing systems challenges even with current computing infrastructure. We achieve interactive speeds by exploiting coarse-grained parallelization over a cluster of computers. In unconstrained environments, we obtain an average angular precision of 2.7°.",
"fno": "05771428",
"keywords": [
"Cameras",
"Computer Vision",
"Gesture Recognition",
"Image Recognition",
"Image Segmentation",
"Motion Estimation",
"Pattern Clustering",
"Dynamic Cluttered Environment",
"Vision Based Prop Free Pointing Detection",
"Computer Vision",
"Dynamic Scene Content",
"Appearance Model",
"Motion Analysis",
"Pointing Gesture",
"Pointing Direction",
"Cameras",
"Group Pixel Level Flow",
"3 D Dominant Pointing Vectors",
"Posing System",
"Current Computing Infrastructure",
"Coarse Grained Parallelization",
"Computer Cluster",
"Cameras",
"Three Dimensional Displays",
"Tracking",
"Robustness",
"Trajectory",
"Noise Measurement",
"Streaming Media"
],
"authors": [
{
"affiliation": "Robotics Institute, Carnegie Mellon University",
"fullName": "Pyry Matikainen",
"givenName": "Pyry",
"surname": "Matikainen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Intel Labs Pittsburgh",
"fullName": "Padmanabhan Pillai",
"givenName": "Padmanabhan",
"surname": "Pillai",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Intel Labs Pittsburgh",
"fullName": "Lily Mummert",
"givenName": "Lily",
"surname": "Mummert",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Intel Labs Pittsburgh",
"fullName": "Rahul Sukthankar",
"givenName": "Rahul",
"surname": "Sukthankar",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Robotics Institute, Carnegie Mellon University",
"fullName": "Martial Hebert",
"givenName": "Martial",
"surname": "Hebert",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "fg",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2011-03-01T00:00:00",
"pubType": "proceedings",
"pages": "374-381",
"year": "2011",
"issn": null,
"isbn": "978-1-4244-9140-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "05771427",
"articleId": "12OmNqzcvNs",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "05771430",
"articleId": "12OmNyGtjd1",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/3dpvt/2006/2825/0/282500065",
"title": "Motion Parallax without Motion Compensation in 3D Cluttered Scenes",
"doi": null,
"abstractUrl": "/proceedings-article/3dpvt/2006/282500065/12OmNANBZiX",
"parentPublication": {
"id": "proceedings/3dpvt/2006/2825/0",
"title": "Third International Symposium on 3D Data Processing, Visualization, and Transmission (3DPVT'06)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2014/5118/0/5118b274",
"title": "Subspace Tracking under Dynamic Dimensionality for Online Background Subtraction",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2014/5118b274/12OmNCmGNXY",
"parentPublication": {
"id": "proceedings/cvpr/2014/5118/0",
"title": "2014 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2014/4985/0/06836015",
"title": "Robust tracking of articulated human movements through Component-Based Multiple Instance Learning with particle filtering",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2014/06836015/12OmNCyTyo0",
"parentPublication": {
"id": "proceedings/wacv/2014/4985/0",
"title": "2014 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2007/1016/0/04284817",
"title": "A Vision-Based Real-Time Pointing Arm Gesture Tracking and Recognition System",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2007/04284817/12OmNyRPgHT",
"parentPublication": {
"id": "proceedings/icme/2007/1016/0",
"title": "2007 International Conference on Multimedia & Expo",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fg/2008/2153/0/04813448",
"title": "Real-time 3D pointing gesture recognition in mobile space",
"doi": null,
"abstractUrl": "/proceedings-article/fg/2008/04813448/12OmNzahc3t",
"parentPublication": {
"id": "proceedings/fg/2008/2153/0",
"title": "2008 8th IEEE International Conference on Automatic Face & Gesture Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08447552",
"title": "Pointing at Wiggle 3D Displays",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08447552/13bd1tl2oml",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2009/06/mcg2009060034",
"title": "Efficient 3D Pointing Selection in Cluttered Virtual Environments",
"doi": null,
"abstractUrl": "/magazine/cg/2009/06/mcg2009060034/13rRUx0xPCF",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icisce/2018/5500/0/550000a922",
"title": "Research of Three Axis Tracking and Pointing Platform Servo Control System",
"doi": null,
"abstractUrl": "/proceedings-article/icisce/2018/550000a922/17D45WHONm2",
"parentPublication": {
"id": "proceedings/icisce/2018/5500/0",
"title": "2018 5th International Conference on Information Science and Control Engineering (ICISCE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/crv/2019/1838/0/183800a174",
"title": "Commodifying Pointing in HRI: Simple and Fast Pointing Gesture Detection from RGB-D Images",
"doi": null,
"abstractUrl": "/proceedings-article/crv/2019/183800a174/1cMGvsmiKKk",
"parentPublication": {
"id": "proceedings/crv/2019/1838/0",
"title": "2019 16th Conference on Computer and Robot Vision (CRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2020/9360/0/09150769",
"title": "Fine grained pointing recognition for natural drone guidance",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2020/09150769/1lPHtf3JGb6",
"parentPublication": {
"id": "proceedings/cvprw/2020/9360/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNynsbxl",
"title": "2014 2nd International Conference on 3D Vision (3DV)",
"acronym": "3dv",
"groupId": "1800494",
"volume": "1",
"displayVolume": "1",
"year": "2014",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNBOCWxW",
"doi": "10.1109/3DV.2014.91",
"title": "Fast 3D Object Alignment from Depth Image with 3D Fourier Moment Matching on GPU",
"normalizedTitle": "Fast 3D Object Alignment from Depth Image with 3D Fourier Moment Matching on GPU",
"abstract": "In this paper, we develop a fast and accurate 3D object alignment system which can be applied to detect objects and estimate their 3D pose from a depth image containing cluttered background. The proposed 3D alignment system consists of two main algorithms: the first is the 3D detection algorithm to detect the top-level object from a depth map of the cluttered 3D objects, and the second is the 3D Fourier based point-set alignment algorithm to estimate the 3D object pose from an input depth image. We also implement the proposed 3D alignment algorithm on a GPU computing platform to speed up the computation of the object detection and Fourier-based image alignment algorithms in order to align the 3D object in real time.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this paper, we develop a fast and accurate 3D object alignment system which can be applied to detect objects and estimate their 3D pose from a depth image containing cluttered background. The proposed 3D alignment system consists of two main algorithms: the first is the 3D detection algorithm to detect the top-level object from a depth map of the cluttered 3D objects, and the second is the 3D Fourier based point-set alignment algorithm to estimate the 3D object pose from an input depth image. We also implement the proposed 3D alignment algorithm on a GPU computing platform to speed up the computation of the object detection and Fourier-based image alignment algorithms in order to align the 3D object in real time.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this paper, we develop a fast and accurate 3D object alignment system which can be applied to detect objects and estimate their 3D pose from a depth image containing cluttered background. The proposed 3D alignment system consists of two main algorithms: the first is the 3D detection algorithm to detect the top-level object from a depth map of the cluttered 3D objects, and the second is the 3D Fourier based point-set alignment algorithm to estimate the 3D object pose from an input depth image. We also implement the proposed 3D alignment algorithm on a GPU computing platform to speed up the computation of the object detection and Fourier-based image alignment algorithms in order to align the 3D object in real time.",
"fno": "7000a179",
"keywords": [
"Three Dimensional Displays",
"Graphics Processing Units",
"Image Segmentation",
"Object Detection",
"Fourier Transforms",
"Parallel Processing",
"3 D Object Alignment",
"Fourier Moment",
"GPU"
],
"authors": [
{
"affiliation": "Inst. of Inf. Syst. & Applic., Nat. Tsing Hua Univ., Hsinchu, Taiwan",
"fullName": "Hong-Ren Su",
"givenName": null,
"surname": "Hong-Ren Su",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Inst. of Inf. Syst. & Applic., Nat. Tsing Hua Univ., Hsinchu, Taiwan",
"fullName": "Hao-Yuan Kuo",
"givenName": null,
"surname": "Hao-Yuan Kuo",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Dept. of Comput. Sci., Nat. Tsing Hua Univ., Hsinchu, Taiwan",
"fullName": "Shang-Hong Lai",
"givenName": null,
"surname": "Shang-Hong Lai",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Ind. Technol. Res. Inst., Hsinchu, Taiwan",
"fullName": "Chin-Chia Wu",
"givenName": null,
"surname": "Chin-Chia Wu",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "3dv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2014-12-01T00:00:00",
"pubType": "proceedings",
"pages": "179-186",
"year": "2014",
"issn": null,
"isbn": "978-1-4799-7000-1",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "7000a171",
"articleId": "12OmNCf1DwE",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "7000a187",
"articleId": "12OmNx4Q6A8",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iccv/1993/3870/0/00378229",
"title": "Fast and robust 3D recognition by alignment",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/1993/00378229/12OmNBVIUzX",
"parentPublication": {
"id": "proceedings/iccv/1993/3870/0",
"title": "1993 (4th) International Conference on Computer Vision",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iiki/2016/5952/0/5952a409",
"title": "The Range Alignment Algorithm for High-Resolution Range Profile Based on Image Segmentation",
"doi": null,
"abstractUrl": "/proceedings-article/iiki/2016/5952a409/12OmNCctfbD",
"parentPublication": {
"id": "proceedings/iiki/2016/5952/0",
"title": "2016 International Conference on Identification, Information and Knowledge in the Internet of Things (IIKI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ism/2014/4311/0/4311a082",
"title": "Seam Carving for Color-Plus-Depth 3D Image",
"doi": null,
"abstractUrl": "/proceedings-article/ism/2014/4311a082/12OmNwDj0Y7",
"parentPublication": {
"id": "proceedings/ism/2014/4311/0",
"title": "2014 IEEE International Symposium on Multimedia (ISM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pacificvis/2017/5738/0/08031594",
"title": "FFTEB: Edge bundling of huge graphs by the Fast Fourier Transform",
"doi": null,
"abstractUrl": "/proceedings-article/pacificvis/2017/08031594/12OmNzuZUEs",
"parentPublication": {
"id": "proceedings/pacificvis/2017/5738/0",
"title": "2017 IEEE Pacific Visualization Symposium (PacificVis)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tb/2017/03/07407334",
"title": "A Modified Multiple Alignment Fast Fourier Transform with Higher Efficiency",
"doi": null,
"abstractUrl": "/journal/tb/2017/03/07407334/13rRUwInvdz",
"parentPublication": {
"id": "trans/tb",
"title": "IEEE/ACM Transactions on Computational Biology and Bioinformatics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2013/05/ttp2013051039",
"title": "Algorithms for 3D Shape Scanning with a Depth Camera",
"doi": null,
"abstractUrl": "/journal/tp/2013/05/ttp2013051039/13rRUxNW1UZ",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2018/09/08031057",
"title": "Viewpoint-Consistent 3D Face Alignment",
"doi": null,
"abstractUrl": "/journal/tp/2018/09/08031057/13rRUxcbnDR",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpads/2005/2281/2/01524317",
"title": "Rotational and Translational Alignment Errors in 3D Reconstruction of Virus Structures at High Resolution",
"doi": null,
"abstractUrl": "/proceedings-article/icpads/2005/01524317/1iES3DRBECA",
"parentPublication": {
"id": "proceedings/icpads/2005/2281/1",
"title": "Parallel and Distributed Systems, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/itca/2020/0378/0/037800a698",
"title": "Research on 3D Reconstruction Method Based on Temporal Fourier Transform Profilometry",
"doi": null,
"abstractUrl": "/proceedings-article/itca/2020/037800a698/1tpBlEK5j0Y",
"parentPublication": {
"id": "proceedings/itca/2020/0378/0",
"title": "2020 2nd International Conference on Information Technology and Computer Application (ITCA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sibgrapi/2021/2354/0/235400a041",
"title": "Gravity Alignment for Single Panorama Depth Inference",
"doi": null,
"abstractUrl": "/proceedings-article/sibgrapi/2021/235400a041/1zurnPhyJig",
"parentPublication": {
"id": "proceedings/sibgrapi/2021/2354/0",
"title": "2021 34th SIBGRAPI Conference on Graphics, Patterns and Images (SIBGRAPI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNqH9hnp",
"title": "2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"acronym": "cvpr",
"groupId": "1000147",
"volume": "0",
"displayVolume": "0",
"year": "2016",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNxA3Z6n",
"doi": "10.1109/CVPR.2016.608",
"title": "Motion from Structure (MfS): Searching for 3D Objects in Cluttered Point Trajectories",
"normalizedTitle": "Motion from Structure (MfS): Searching for 3D Objects in Cluttered Point Trajectories",
"abstract": "Object detection has been a long standing problem in computer vision, and state-of-the-art approaches rely on the use of sophisticated features and/or classifiers. However, these learning-based approaches heavily depend on the quality and quantity of labeled data, and do not generalize well to extreme poses or textureless objects. In this work, we explore the use of 3D shape models to detect objects in videos in an unsupervised manner. We call this problem Motion from Structure (MfS): given a set of point trajectories and a 3D model of the object of interest, find a subset of trajectories that correspond to the 3D model and estimate its alignment (i.e., compute the motion matrix). MfS is related to Structure from Motion (SfM) and motion segmentation problems: unlike SfM, the structure of the object is known but the correspondence between the trajectories and the object is unknown, unlike motion segmentation, the MfS problem incorporates 3D structure, providing robustness to tracking mismatches and outliers. Experiments illustrate how our MfS algorithm outperforms alternative approaches in both synthetic data and real videos extracted from YouTube.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Object detection has been a long standing problem in computer vision, and state-of-the-art approaches rely on the use of sophisticated features and/or classifiers. However, these learning-based approaches heavily depend on the quality and quantity of labeled data, and do not generalize well to extreme poses or textureless objects. In this work, we explore the use of 3D shape models to detect objects in videos in an unsupervised manner. We call this problem Motion from Structure (MfS): given a set of point trajectories and a 3D model of the object of interest, find a subset of trajectories that correspond to the 3D model and estimate its alignment (i.e., compute the motion matrix). MfS is related to Structure from Motion (SfM) and motion segmentation problems: unlike SfM, the structure of the object is known but the correspondence between the trajectories and the object is unknown, unlike motion segmentation, the MfS problem incorporates 3D structure, providing robustness to tracking mismatches and outliers. Experiments illustrate how our MfS algorithm outperforms alternative approaches in both synthetic data and real videos extracted from YouTube.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Object detection has been a long standing problem in computer vision, and state-of-the-art approaches rely on the use of sophisticated features and/or classifiers. However, these learning-based approaches heavily depend on the quality and quantity of labeled data, and do not generalize well to extreme poses or textureless objects. In this work, we explore the use of 3D shape models to detect objects in videos in an unsupervised manner. We call this problem Motion from Structure (MfS): given a set of point trajectories and a 3D model of the object of interest, find a subset of trajectories that correspond to the 3D model and estimate its alignment (i.e., compute the motion matrix). MfS is related to Structure from Motion (SfM) and motion segmentation problems: unlike SfM, the structure of the object is known but the correspondence between the trajectories and the object is unknown, unlike motion segmentation, the MfS problem incorporates 3D structure, providing robustness to tracking mismatches and outliers. Experiments illustrate how our MfS algorithm outperforms alternative approaches in both synthetic data and real videos extracted from YouTube.",
"fno": "8851f639",
"keywords": [
"Computer Vision",
"Image Motion Analysis",
"Image Segmentation",
"Matrix Algebra",
"Object Detection",
"Shape Recognition",
"Solid Modelling",
"Video Signal Processing",
"Motion From Structure",
"Mf S",
"3 D Objects Searching",
"Cluttered Point Trajectories",
"Object Detection",
"Computer Vision",
"3 D Shape Models",
"Videos",
"Alignment Estimation",
"Motion Matrix",
"Structure From Motion",
"Sf M",
"Motion Segmentation",
"Object Trajectories",
"3 D Structure",
"Three Dimensional Displays",
"Solid Modeling",
"Trajectory",
"Tracking",
"Shape",
"Videos",
"Computational Modeling"
],
"authors": [
{
"affiliation": "Inst. Super. Tecnico, Lisbon, Portugal",
"fullName": "Jayakorn Vongkulbhisal",
"givenName": "Jayakorn",
"surname": "Vongkulbhisal",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Inst. Super. Tecnico, Lisbon, Portugal",
"fullName": "Ricardo Cabral",
"givenName": "Ricardo",
"surname": "Cabral",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Carnegie Mellon Univ., Pittsburgh, PA, USA",
"fullName": "Fernando De la Torre",
"givenName": "Fernando",
"surname": "De la Torre",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Inst. Super. Tecnico, Lisbon, Portugal",
"fullName": "João P. Costeira",
"givenName": "João P.",
"surname": "Costeira",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2016-06-01T00:00:00",
"pubType": "proceedings",
"pages": "5639-5647",
"year": "2016",
"issn": "1063-6919",
"isbn": "978-1-4673-8851-1",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "8851f629",
"articleId": "12OmNxGja3K",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "8851f648",
"articleId": "12OmNwcUk34",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iccv/2015/8391/0/8391b653",
"title": "Unsupervised Tube Extraction Using Transductive Learning and Dense Trajectories",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2015/8391b653/12OmNB6UIbH",
"parentPublication": {
"id": "proceedings/iccv/2015/8391/0",
"title": "2015 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/avss/2016/3811/0/07738057",
"title": "Multiple object tracking based on motion segmentation of point trajectories",
"doi": null,
"abstractUrl": "/proceedings-article/avss/2016/07738057/12OmNC4eSzl",
"parentPublication": {
"id": "proceedings/avss/2016/3811/0",
"title": "2016 13th IEEE International Conference on Advanced Video and Signal Based Surveillance (AVSS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mnrao/1994/6435/0/00346247",
"title": "A general approach for determining 3D motion and structure of multiple objects from image trajectories",
"doi": null,
"abstractUrl": "/proceedings-article/mnrao/1994/00346247/12OmNCgJebE",
"parentPublication": {
"id": "proceedings/mnrao/1994/6435/0",
"title": "Proceedings of 1994 IEEE Workshop on Motion of Non-rigid and Articulated Objects",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sitis/2016/5698/0/07907440",
"title": "Action Recognition for Videos by Long-Term Point Trajectory Analysis with Background Removal",
"doi": null,
"abstractUrl": "/proceedings-article/sitis/2016/07907440/12OmNy5zsrN",
"parentPublication": {
"id": "proceedings/sitis/2016/5698/0",
"title": "2016 12th International Conference on Signal-Image Technology & Internet-Based Systems (SITIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2009/4442/0/05457649",
"title": "Acquiring 3D motion trajectories of large numbers of swarming animals",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2009/05457649/12OmNyTfg8y",
"parentPublication": {
"id": "proceedings/iccvw/2009/4442/0",
"title": "2009 IEEE 12th International Conference on Computer Vision Workshops, ICCV Workshops",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2016/5407/0/5407a148",
"title": "Multi-Body Non-Rigid Structure-from-Motion",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2016/5407a148/12OmNzIl3Bz",
"parentPublication": {
"id": "proceedings/3dv/2016/5407/0",
"title": "2016 Fourth International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2011/10/ttp2011102051",
"title": "Computing Smooth Time Trajectories for Camera and Deformable Shape in Structure from Motion with Occlusion",
"doi": null,
"abstractUrl": "/journal/tp/2011/10/ttp2011102051/13rRUwkxc6D",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2015/04/06888516",
"title": "Stereo Reconstruction of Droplet Flight Trajectories",
"doi": null,
"abstractUrl": "/journal/tp/2015/04/06888516/13rRUx0gegA",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2016/08/07399418",
"title": "Spatio-Temporal Matching for Human Pose Estimation in Video",
"doi": null,
"abstractUrl": "/journal/tp/2016/08/07399418/13rRUxd2aZV",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2019/4803/0/480300f389",
"title": "Joint Monocular 3D Vehicle Detection and Tracking",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2019/480300f389/1hVlG5kXmso",
"parentPublication": {
"id": "proceedings/iccv/2019/4803/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNqH9hnp",
"title": "2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"acronym": "cvpr",
"groupId": "1000147",
"volume": "0",
"displayVolume": "0",
"year": "2016",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNyxXlju",
"doi": "10.1109/CVPR.2016.67",
"title": "Detection and Accurate Localization of Circular Fiducials under Highly Challenging Conditions",
"normalizedTitle": "Detection and Accurate Localization of Circular Fiducials under Highly Challenging Conditions",
"abstract": "Using fiducial markers ensures reliable detection and identification of planar features in images. Fiducials are used in a wide range of applications, especially when a reliable visual reference is needed, e.g., to track the camera in cluttered or textureless environments. A marker designed for such applications must be robust to partial occlusions, varying distances and angles of view, and fast camera motions. In this paper, we present a robust, highly accurate fiducial system, whose markers consist of concentric rings, along with its theoretical foundations. Relying on projective properties, it allows to robustly localize the imaged marker and to accurately detect the position of the image of the (common) circle center. We demonstrate that our system can detect and accurately localize these circular fiducials under very challenging conditions and the experimental results reveal that it outperforms other recent fiducial systems.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Using fiducial markers ensures reliable detection and identification of planar features in images. Fiducials are used in a wide range of applications, especially when a reliable visual reference is needed, e.g., to track the camera in cluttered or textureless environments. A marker designed for such applications must be robust to partial occlusions, varying distances and angles of view, and fast camera motions. In this paper, we present a robust, highly accurate fiducial system, whose markers consist of concentric rings, along with its theoretical foundations. Relying on projective properties, it allows to robustly localize the imaged marker and to accurately detect the position of the image of the (common) circle center. We demonstrate that our system can detect and accurately localize these circular fiducials under very challenging conditions and the experimental results reveal that it outperforms other recent fiducial systems.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Using fiducial markers ensures reliable detection and identification of planar features in images. Fiducials are used in a wide range of applications, especially when a reliable visual reference is needed, e.g., to track the camera in cluttered or textureless environments. A marker designed for such applications must be robust to partial occlusions, varying distances and angles of view, and fast camera motions. In this paper, we present a robust, highly accurate fiducial system, whose markers consist of concentric rings, along with its theoretical foundations. Relying on projective properties, it allows to robustly localize the imaged marker and to accurately detect the position of the image of the (common) circle center. We demonstrate that our system can detect and accurately localize these circular fiducials under very challenging conditions and the experimental results reveal that it outperforms other recent fiducial systems.",
"fno": "8851a562",
"keywords": [
"Cameras",
"Robustness",
"Image Edge Detection",
"Computer Vision",
"Feature Extraction",
"Detection Algorithms"
],
"authors": [
{
"affiliation": null,
"fullName": "Lilian Calvet",
"givenName": "Lilian",
"surname": "Calvet",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Pierre Gurdjos",
"givenName": "Pierre",
"surname": "Gurdjos",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Carsten Griwodz",
"givenName": "Carsten",
"surname": "Griwodz",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Simone Gasparini",
"givenName": "Simone",
"surname": "Gasparini",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2016-06-01T00:00:00",
"pubType": "proceedings",
"pages": "562-570",
"year": "2016",
"issn": "1063-6919",
"isbn": "978-1-4673-8851-1",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "8851a553",
"articleId": "12OmNxFaLpl",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "8851a571",
"articleId": "12OmNC0PGMZ",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/wmsvm/2010/7077/0/05558353",
"title": "An Extended Marker-Based Tracking System for Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/wmsvm/2010/05558353/12OmNAiFI7k",
"parentPublication": {
"id": "proceedings/wmsvm/2010/7077/0",
"title": "2010 Second International Conference on Modeling, Simulation and Visualization Methods (WMSVM 2010)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2015/6886/0/07131765",
"title": "Spatially-multiplexed MIMO markers",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2015/07131765/12OmNB836Ih",
"parentPublication": {
"id": "proceedings/3dui/2015/6886/0",
"title": "2015 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2017/1032/0/1032b481",
"title": "ChromaTag: A Colored Marker and Fast Detection Algorithm",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2017/1032b481/12OmNC2OSIa",
"parentPublication": {
"id": "proceedings/iccv/2017/1032/0",
"title": "2017 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/istcs/1993/3630/0/00253484",
"title": "Subpixel image registration using circular fiducials",
"doi": null,
"abstractUrl": "/proceedings-article/istcs/1993/00253484/12OmNCfSqOK",
"parentPublication": {
"id": "proceedings/istcs/1993/3630/0",
"title": "The 2nd Israel Symposium on Theory and Computing Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sibgrapi/2010/8420/0/05720368",
"title": "On the Design and Evaluation of a Precise Scalable Fiducial Marker Framework",
"doi": null,
"abstractUrl": "/proceedings-article/sibgrapi/2010/05720368/12OmNwNwzQw",
"parentPublication": {
"id": "proceedings/sibgrapi/2010/8420/0",
"title": "2010 23rd SIBGRAPI Conference on Graphics, Patterns and Images",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2016/5407/0/5407a556",
"title": "X-Tag: A Fiducial Tag for Flexible and Accurate Bundle Adjustment",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2016/5407a556/12OmNxWLTHl",
"parentPublication": {
"id": "proceedings/3dv/2016/5407/0",
"title": "2016 Fourth International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2010/07/ttp2010071317",
"title": "Designing Highly Reliable Fiducial Markers",
"doi": null,
"abstractUrl": "/journal/tp/2010/07/ttp2010071317/13rRUwIF6eS",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2023/03/09773975",
"title": "DeepTag: A General Framework for Fiducial Marker Design and Detection",
"doi": null,
"abstractUrl": "/journal/tp/2023/03/09773975/1DjDnSMD9n2",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2020/9360/0/09150618",
"title": "ArUcOmni: detection of highly reliable fiducial markers in panoramic images",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2020/09150618/1lPH30Y2Fqw",
"parentPublication": {
"id": "proceedings/cvprw/2020/9360/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2009/3943/0/04810997",
"title": "JanusVF: Accurate Navigation Using SCAAT and Virtual Fiducials",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2009/04810997/1t2n7JaRIQw",
"parentPublication": {
"id": "proceedings/vr/2009/3943/0",
"title": "2009 IEEE Virtual Reality Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNBDyAaZ",
"title": "2015 IEEE International Conference on Computer Vision (ICCV)",
"acronym": "iccv",
"groupId": "1000149",
"volume": "0",
"displayVolume": "0",
"year": "2015",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNzYNNab",
"doi": "10.1109/ICCV.2015.499",
"title": "A Novel Representation of Parts for Accurate 3D Object Detection and Tracking in Monocular Images",
"normalizedTitle": "A Novel Representation of Parts for Accurate 3D Object Detection and Tracking in Monocular Images",
"abstract": "We present a method that estimates in real-time and under challenging conditions the 3D pose of a known object. Our method relies only on grayscale images since depth cameras fail on metallic objects, it can handle poorly textured objects, and cluttered, changing environments, the pose it predicts degrades gracefully in presence of large occlusions. As a result, by contrast with the state-of-the-art, our method is suitable for practical Augmented Reality applications even in industrial environments. To be robust to occlusions, we first learn to detect some parts of the target object. Our key idea is to then predict the 3D pose of each part in the form of the 2D projections of a few control points. The advantages of this representation is three-fold: We can predict the 3D pose of the object even when only one part is visible, when several parts are visible, we can combine them easily to compute a better pose of the object, the 3D pose we obtain is usually very accurate, even when only few parts are visible.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We present a method that estimates in real-time and under challenging conditions the 3D pose of a known object. Our method relies only on grayscale images since depth cameras fail on metallic objects, it can handle poorly textured objects, and cluttered, changing environments, the pose it predicts degrades gracefully in presence of large occlusions. As a result, by contrast with the state-of-the-art, our method is suitable for practical Augmented Reality applications even in industrial environments. To be robust to occlusions, we first learn to detect some parts of the target object. Our key idea is to then predict the 3D pose of each part in the form of the 2D projections of a few control points. The advantages of this representation is three-fold: We can predict the 3D pose of the object even when only one part is visible, when several parts are visible, we can combine them easily to compute a better pose of the object, the 3D pose we obtain is usually very accurate, even when only few parts are visible.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We present a method that estimates in real-time and under challenging conditions the 3D pose of a known object. Our method relies only on grayscale images since depth cameras fail on metallic objects, it can handle poorly textured objects, and cluttered, changing environments, the pose it predicts degrades gracefully in presence of large occlusions. As a result, by contrast with the state-of-the-art, our method is suitable for practical Augmented Reality applications even in industrial environments. To be robust to occlusions, we first learn to detect some parts of the target object. Our key idea is to then predict the 3D pose of each part in the form of the 2D projections of a few control points. The advantages of this representation is three-fold: We can predict the 3D pose of the object even when only one part is visible, when several parts are visible, we can combine them easily to compute a better pose of the object, the 3D pose we obtain is usually very accurate, even when only few parts are visible.",
"fno": "8391e391",
"keywords": [
"Three Dimensional Displays",
"Cameras",
"Robustness",
"Training",
"Object Detection",
"Augmented Reality",
"Image Edge Detection"
],
"authors": [
{
"affiliation": null,
"fullName": "Alberto Crivellaro",
"givenName": "Alberto",
"surname": "Crivellaro",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Mahdi Rad",
"givenName": "Mahdi",
"surname": "Rad",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Yannick Verdie",
"givenName": "Yannick",
"surname": "Verdie",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Kwang Moo Yi",
"givenName": "Kwang Moo",
"surname": "Yi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Pascal Fua",
"givenName": "Pascal",
"surname": "Fua",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Vincent Lepetit",
"givenName": "Vincent",
"surname": "Lepetit",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iccv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2015-12-01T00:00:00",
"pubType": "proceedings",
"pages": "4391-4399",
"year": "2015",
"issn": "2380-7504",
"isbn": "978-1-4673-8391-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "8391e382",
"articleId": "12OmNzw8ja2",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "8391e400",
"articleId": "12OmNApcupb",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/wmsvm/2010/7077/0/05558353",
"title": "An Extended Marker-Based Tracking System for Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/wmsvm/2010/05558353/12OmNAiFI7k",
"parentPublication": {
"id": "proceedings/wmsvm/2010/7077/0",
"title": "2010 Second International Conference on Modeling, Simulation and Visualization Methods (WMSVM 2010)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2015/8391/0/8391a136",
"title": "Learning Where to Position Parts in 3D",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2015/8391a136/12OmNB7tUtc",
"parentPublication": {
"id": "proceedings/iccv/2015/8391/0",
"title": "2015 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2017/0457/0/0457b827",
"title": "Deep MANTA: A Coarse-to-Fine Many-Task Network for Joint 2D and 3D Vehicle Analysis from Monocular Image",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2017/0457b827/12OmNBSBkiT",
"parentPublication": {
"id": "proceedings/cvpr/2017/0457/0",
"title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2005/2459/0/01544665",
"title": "Adaptive line tracking with multiple hypotheses for augmented reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2005/01544665/12OmNx7G5Xj",
"parentPublication": {
"id": "proceedings/ismar/2005/2459/0",
"title": "Fourth IEEE and ACM International Symposium on Mixed and Augmented Reality (ISMAR'05)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2013/2869/0/06671808",
"title": "Augmenting markerless complex 3D objects by combining geometrical and color edge information",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2013/06671808/12OmNxYbSX3",
"parentPublication": {
"id": "proceedings/ismar/2013/2869/0",
"title": "2013 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2007/1749/0/04538839",
"title": "Real-Time Object Tracking for Augmented Reality Combining Graph Cuts and Optical Flow",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2007/04538839/12OmNyU63tI",
"parentPublication": {
"id": "proceedings/ismar/2007/1749/0",
"title": "2007 6th IEEE and ACM International Symposium on Mixed and Augmented Reality",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2008/2174/0/04761575",
"title": "Spatio-temporal 3D pose estimation and tracking of human body parts using the Shape Flow algorithm",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2008/04761575/12OmNzVoBHi",
"parentPublication": {
"id": "proceedings/icpr/2008/2174/0",
"title": "ICPR 2008 19th International Conference on Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/snpd/2018/5889/0/08441049",
"title": "An Improvement on ArUco Marker for Pose Tracking Using Kalman Filter",
"doi": null,
"abstractUrl": "/proceedings-article/snpd/2018/08441049/13bd1fph1yr",
"parentPublication": {
"id": "proceedings/snpd/2018/5889/0",
"title": "2018 19th IEEE/ACIS International Conference on Software Engineering, Artificial Intelligence, Networking and Parallel/Distributed Computing (SNPD)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2011/11/ttg2011111728",
"title": "Extended Keyframe Detection with Stable Tracking for Multiple 3D Object Tracking",
"doi": null,
"abstractUrl": "/journal/tg/2011/11/ttg2011111728/13rRUwIF6l5",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2018/06/07934426",
"title": "Robust 3D Object Tracking from Monocular Images Using Stable Parts",
"doi": null,
"abstractUrl": "/journal/tp/2018/06/07934426/13rRUxASuOq",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNx8wTfL",
"title": "ICPR 2008 19th International Conference on Pattern Recognition",
"acronym": "icpr",
"groupId": "1000545",
"volume": "0",
"displayVolume": "0",
"year": "2008",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNzd7bG3",
"doi": "10.1109/ICPR.2008.4761614",
"title": "Double-edge-model based character stroke extraction from complex backgrounds",
"normalizedTitle": "Double-edge-model based character stroke extraction from complex backgrounds",
"abstract": "Global gray-level thresholding techniques such as Otsupsilas method, and local gray-level thresholding techniques such as adaptive thresholding method are powerful in extracting character objects from simple or slowly varying backgrounds. However, they are found to be insufficient when the backgrounds include sharply varying contours or fonts in different sizes. In this paper, we propose a double-edge model insensitive to stroke width to extract character strokes with an unknown stroke width from complex or sharply varying backgrounds. Also, we propose a novel postprocessing method combining 2-level global thresholding and Canny edge detection to keep the character object in integrality and remove the background simultaneously. Experiment results show that the proposed method can extract character objects from complex backgrounds with satisfactory quality.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Global gray-level thresholding techniques such as Otsupsilas method, and local gray-level thresholding techniques such as adaptive thresholding method are powerful in extracting character objects from simple or slowly varying backgrounds. However, they are found to be insufficient when the backgrounds include sharply varying contours or fonts in different sizes. In this paper, we propose a double-edge model insensitive to stroke width to extract character strokes with an unknown stroke width from complex or sharply varying backgrounds. Also, we propose a novel postprocessing method combining 2-level global thresholding and Canny edge detection to keep the character object in integrality and remove the background simultaneously. Experiment results show that the proposed method can extract character objects from complex backgrounds with satisfactory quality.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Global gray-level thresholding techniques such as Otsu's method, and local gray-level thresholding techniques such as adaptive thresholding method are powerful in extracting character objects from simple or slowly varying backgrounds. However, they are found to be insufficient when the backgrounds include sharply varying contours or fonts in different sizes. In this paper, we propose a double-edge model insensitive to stroke width to extract character strokes with an unknown stroke width from complex or sharply varying backgrounds. Also, we propose a novel postprocessing method combining 2-level global thresholding and Canny edge detection to keep the character object in integrality and remove the background simultaneously. Experiment results show that the proposed method can extract character objects from complex backgrounds with satisfactory quality.",
"fno": "04761614",
"keywords": [
"Edge Detection",
"Handwritten Character Recognition",
"Image Retrieval",
"Optical Character Recognition",
"Double Edge Model",
"Character Stroke Extraction",
"Global Gray Level Thresholding Techniques",
"Otsu Method",
"Local Gray Level Thresholding Techniques",
"Adaptive Thresholding Method",
"Canny Edge Detection",
"Optical Character Recognition",
"Optical Character Recognition Software",
"Character Recognition",
"Image Edge Detection",
"Gray Scale",
"Pixel",
"Image Recognition",
"Engines",
"Automation",
"Feature Extraction",
"Shape"
],
"authors": [
{
"affiliation": "Hanwang Technology, Beijing, China",
"fullName": "Jing Yu",
"givenName": null,
"surname": "Jing Yu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Hanwang Technology, Beijing, China",
"fullName": "Lei Huang",
"givenName": null,
"surname": "Lei Huang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Hanwang Technology, Beijing, China",
"fullName": "Changping Liu",
"givenName": null,
"surname": "Changping Liu",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2008-12-01T00:00:00",
"pubType": "proceedings",
"pages": "",
"year": "2008",
"issn": "1051-4651",
"isbn": "978-1-4244-2174-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "04761613",
"articleId": "12OmNqAU6ER",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "04761615",
"articleId": "12OmNBBQZuI",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iita/2009/3859/1/3859a626",
"title": "Optical Character Recognition Based on Least Square Support Vector Machine",
"doi": null,
"abstractUrl": "/proceedings-article/iita/2009/3859a626/12OmNClQ0xC",
"parentPublication": {
"id": "proceedings/iita/2009/3859/1",
"title": "2009 Third International Symposium on Intelligent Information Technology Application",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/1994/6270/2/00576954",
"title": "Experimental comparisons of binarization and multi-thresholding methods on document images",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/1994/00576954/12OmNvAiSGd",
"parentPublication": {
"id": "proceedings/icpr/1994/6270/2",
"title": "Proceedings of 12th International Conference on Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fcst/2010/7779/0/05575533",
"title": "Research of Improving the Accuracy of License Plate Character Segmentation",
"doi": null,
"abstractUrl": "/proceedings-article/fcst/2010/05575533/12OmNvUsop0",
"parentPublication": {
"id": "proceedings/fcst/2010/7779/0",
"title": "2010 Fifth International Conference on Frontier of Computer Science and Technology (FCST 2010)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdar/1993/4960/0/00395618",
"title": "Multifont Chinese character recognition using side-stroke-end feature",
"doi": null,
"abstractUrl": "/proceedings-article/icdar/1993/00395618/12OmNwLfMCl",
"parentPublication": {
"id": "proceedings/icdar/1993/4960/0",
"title": "Proceedings of 2nd International Conference on Document Analysis and Recognition (ICDAR '93)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdar/1993/4960/0/00395782",
"title": "A character image enhancement method from characters with various background images",
"doi": null,
"abstractUrl": "/proceedings-article/icdar/1993/00395782/12OmNx7G5T9",
"parentPublication": {
"id": "proceedings/icdar/1993/4960/0",
"title": "Proceedings of 2nd International Conference on Document Analysis and Recognition (ICDAR '93)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2002/1695/3/169530180",
"title": "Character Pattern Extraction from Colorful Documents with Complex Backgrounds",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2002/169530180/12OmNxWui8H",
"parentPublication": {
"id": "proceedings/icpr/2002/1695/3",
"title": "Pattern Recognition, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdar/1999/0318/0/03180511",
"title": "Model-Based Character Extraction from Complex Backgrounds",
"doi": null,
"abstractUrl": "/proceedings-article/icdar/1999/03180511/12OmNypIYwO",
"parentPublication": {
"id": "proceedings/icdar/1999/0318/0",
"title": "Proceedings of the Fifth International Conference on Document Analysis and Recognition. ICDAR '99 (Cat. No.PR00318)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/1988/0862/0/00196270",
"title": "A relaxational extracting method for character recognition in scene images",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/1988/00196270/12OmNz3bdNd",
"parentPublication": {
"id": "proceedings/cvpr/1988/0862/0",
"title": "Proceedings CVPR '88: The Computer Society Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tm/2021/07/09036986",
"title": "<sc>SmartSO</sc>: Chinese Character and Stroke Order Recognition With Smartwatch",
"doi": null,
"abstractUrl": "/journal/tm/2021/07/09036986/1igMQsEQrOo",
"parentPublication": {
"id": "trans/tm",
"title": "IEEE Transactions on Mobile Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2020/1331/0/09102785",
"title": "Character Region Awareness Network For Scene Text Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2020/09102785/1kwr6XVks00",
"parentPublication": {
"id": "proceedings/icme/2020/1331/0",
"title": "2020 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1gyshXRzHpK",
"title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"acronym": "ismar-adjunct",
"groupId": "1810084",
"volume": "0",
"displayVolume": "0",
"year": "2019",
"__typename": "ProceedingType"
},
"article": {
"id": "1gysmAGFrlC",
"doi": "10.1109/ISMAR-Adjunct.2019.00-11",
"title": "6DoF Pose Estimation with Object Cutout based on a Deep Autoencoder",
"normalizedTitle": "6DoF Pose Estimation with Object Cutout based on a Deep Autoencoder",
"abstract": "The six degree-of-freedom (6DoF) pose estimation is an important task in Augmented Reality, especially for initializing or recovering from the failure of 3D tracking for the textureless object, since it still encounters the insufficient accuracy problems because of cluttered backgrounds, occasionally quick movements, and other factors. We propose a simple but effective method to cutout the interested textureless object in a single RGB image with clear contour, which can be employed to directly estimate 6Dof poses with relatively high precision, and then help to remove most of the disturbing edges of clutter background for further refinement of pose estimation. To achieve this task, we propose a novel convolutional neural network, similar to an autoencoder, to reconstruct arbitrary scenes containing the object of interest, and extract the object area. We evaluated our method on objects from the LINEMOD dataset, and the results show that our approach is superior to the baseline as well as some advanced methods.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The six degree-of-freedom (6DoF) pose estimation is an important task in Augmented Reality, especially for initializing or recovering from the failure of 3D tracking for the textureless object, since it still encounters the insufficient accuracy problems because of cluttered backgrounds, occasionally quick movements, and other factors. We propose a simple but effective method to cutout the interested textureless object in a single RGB image with clear contour, which can be employed to directly estimate 6Dof poses with relatively high precision, and then help to remove most of the disturbing edges of clutter background for further refinement of pose estimation. To achieve this task, we propose a novel convolutional neural network, similar to an autoencoder, to reconstruct arbitrary scenes containing the object of interest, and extract the object area. We evaluated our method on objects from the LINEMOD dataset, and the results show that our approach is superior to the baseline as well as some advanced methods.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The six degree-of-freedom (6DoF) pose estimation is an important task in Augmented Reality, especially for initializing or recovering from the failure of 3D tracking for the textureless object, since it still encounters the insufficient accuracy problems because of cluttered backgrounds, occasionally quick movements, and other factors. We propose a simple but effective method to cutout the interested textureless object in a single RGB image with clear contour, which can be employed to directly estimate 6Dof poses with relatively high precision, and then help to remove most of the disturbing edges of clutter background for further refinement of pose estimation. To achieve this task, we propose a novel convolutional neural network, similar to an autoencoder, to reconstruct arbitrary scenes containing the object of interest, and extract the object area. We evaluated our method on objects from the LINEMOD dataset, and the results show that our approach is superior to the baseline as well as some advanced methods.",
"fno": "476500a360",
"keywords": [
"Augmented Reality",
"Clutter",
"Convolutional Neural Nets",
"Edge Detection",
"Image Colour Analysis",
"Image Reconstruction",
"Image Texture",
"Object Tracking",
"Pose Estimation",
"Augmented Reality",
"Interested Textureless Object",
"Single RGB Image",
"Clutter Background",
"Object Area",
"6 Do F Pose Estimation",
"Deep Autoencoder",
"Degree Of Freedom",
"Convolutional Neural Network",
"LINEMOD Dataset",
"Three Dimensional Displays",
"Pose Estimation",
"Image Reconstruction",
"Solid Modeling",
"Two Dimensional Displays",
"Task Analysis",
"Tracking"
],
"authors": [
{
"affiliation": "Shandong University",
"fullName": "Xin Liu",
"givenName": "Xin",
"surname": "Liu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Shandong University",
"fullName": "Jichao Zhang",
"givenName": "Jichao",
"surname": "Zhang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Shandong University",
"fullName": "Xian He",
"givenName": "Xian",
"surname": "He",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Shandong University",
"fullName": "Xiuqiang Song",
"givenName": "Xiuqiang",
"surname": "Song",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Shandong University",
"fullName": "Xueying Qin",
"givenName": "Xueying",
"surname": "Qin",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar-adjunct",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2019-10-01T00:00:00",
"pubType": "proceedings",
"pages": "360-365",
"year": "2019",
"issn": null,
"isbn": "978-1-7281-4765-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "476500a358",
"articleId": "1gysoroSMs8",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "476500a366",
"articleId": "1gysjSArEsM",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ismar-adjunct/2018/7592/0/08699254",
"title": "Learning 6DoF Object Poses from Synthetic Single Channel Images",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699254/19F1NG6YVO0",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2018/7592/0",
"title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2022/5670/0/567000a176",
"title": "Keypoint Cascade Voting for Point Cloud Based 6DoF Pose Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2022/567000a176/1KYsrDaZSLK",
"parentPublication": {
"id": "proceedings/3dv/2022/5670/0",
"title": "2022 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2019/5023/0/502300c758",
"title": "CullNet: Calibrated and Pose Aware Confidence Scores for Object Pose Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2019/502300c758/1i5mMsq7oYM",
"parentPublication": {
"id": "proceedings/iccvw/2019/5023/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision Workshop (ICCVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2019/2506/0/250600b238",
"title": "6D-VNet: End-To-End 6DoF Vehicle Pose Estimation From Monocular RGB Images",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2019/250600b238/1iTvddvXqdW",
"parentPublication": {
"id": "proceedings/cvprw/2019/2506/0",
"title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2020/6553/0/09093272",
"title": "PointPoseNet: Point Pose Network for Robust 6D Object Pose Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2020/09093272/1jPbzRH8MKc",
"parentPublication": {
"id": "proceedings/wacv/2020/6553/0",
"title": "2020 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800l1451",
"title": "PFRL: Pose-Free Reinforcement Learning for 6D Pose Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800l1451/1m3nHGv0jMA",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800l1629",
"title": "PVN3D: A Deep Point-Wise 3D Keypoints Voting Network for 6DoF Pose Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800l1629/1m3niqm4WcM",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sibgrapi/2020/9274/0/927400a332",
"title": "A Study on the Impact of Domain Randomization for Monocular Deep 6DoF Pose Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/sibgrapi/2020/927400a332/1p2VASaMsI8",
"parentPublication": {
"id": "proceedings/sibgrapi/2020/9274/0",
"title": "2020 33rd SIBGRAPI Conference on Graphics, Patterns and Images (SIBGRAPI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2022/06/09309178",
"title": "PVNet: Pixel-Wise Voting Network for 6DoF Object Pose Estimation",
"doi": null,
"abstractUrl": "/journal/tp/2022/06/09309178/1pQEe6zENaw",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900h613",
"title": "img2pose: Face Alignment and Detection via 6DoF, Face Pose Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900h613/1yeK4plxwKQ",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1i5mkDyiIUg",
"title": "2019 IEEE/CVF International Conference on Computer Vision Workshop (ICCVW)",
"acronym": "iccvw",
"groupId": "1800041",
"volume": "0",
"displayVolume": "0",
"year": "2019",
"__typename": "ProceedingType"
},
"article": {
"id": "1i5mva7fXQ4",
"doi": "10.1109/ICCVW.2019.00263",
"title": "Silhouette-Assisted 3D Object Instance Reconstruction from a Cluttered Scene",
"normalizedTitle": "Silhouette-Assisted 3D Object Instance Reconstruction from a Cluttered Scene",
"abstract": "The objective of our work is to reconstruct 3D object instances from a single RGB image of a cluttered scene. 3D object instance reconstruction is an ill-posed problem due to the presence of heavily occluded and truncated objects, and self-occlusions that lead to substantial regions of unseen areas. Previous works for 3D reconstruction take clues from object silhouettes to carve reconstructed outputs. In this paper, we explore two ways to include silhouette learnable in the network for 3D instance reconstruction from a single cluttered scene image. To this end, in the first approach, we automatically generate instance-specific silhouettes that are compactly encoded within our network design and used to improve the reconstructed 3D shapes; in the second approach, we find an efficient design to regularize object reconstruction explicitly. Experimental results on the SUNCG dataset show that our methods have better performance than the state-of-the-art.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The objective of our work is to reconstruct 3D object instances from a single RGB image of a cluttered scene. 3D object instance reconstruction is an ill-posed problem due to the presence of heavily occluded and truncated objects, and self-occlusions that lead to substantial regions of unseen areas. Previous works for 3D reconstruction take clues from object silhouettes to carve reconstructed outputs. In this paper, we explore two ways to include silhouette learnable in the network for 3D instance reconstruction from a single cluttered scene image. To this end, in the first approach, we automatically generate instance-specific silhouettes that are compactly encoded within our network design and used to improve the reconstructed 3D shapes; in the second approach, we find an efficient design to regularize object reconstruction explicitly. Experimental results on the SUNCG dataset show that our methods have better performance than the state-of-the-art.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The objective of our work is to reconstruct 3D object instances from a single RGB image of a cluttered scene. 3D object instance reconstruction is an ill-posed problem due to the presence of heavily occluded and truncated objects, and self-occlusions that lead to substantial regions of unseen areas. Previous works for 3D reconstruction take clues from object silhouettes to carve reconstructed outputs. In this paper, we explore two ways to include silhouette learnable in the network for 3D instance reconstruction from a single cluttered scene image. To this end, in the first approach, we automatically generate instance-specific silhouettes that are compactly encoded within our network design and used to improve the reconstructed 3D shapes; in the second approach, we find an efficient design to regularize object reconstruction explicitly. Experimental results on the SUNCG dataset show that our methods have better performance than the state-of-the-art.",
"fno": "502300c080",
"keywords": [
"Image Colour Analysis",
"Image Reconstruction",
"Object Detection",
"Shape Recognition",
"Silhouette Assisted 3 D Object Instance Reconstruction",
"RGB Image",
"Heavily Occluded Objects",
"Truncated Objects",
"Instance Specific Silhouettes",
"3 D Shape Reconstruction",
"Object Reconstruction",
"Cluttered Scene Image",
"SUNCG Dataset",
"Three Dimensional Displays",
"Shape",
"Image Reconstruction",
"Two Dimensional Displays",
"Training",
"Pose Estimation",
"Implicit Silhouette",
"Explicit Silhouette",
"3 D Reconstruction From Single Image",
"Perspective Projection"
],
"authors": [
{
"affiliation": "Data61-CSIRO, Australia & Australian National University, Australia",
"fullName": "Lin Li",
"givenName": "Lin",
"surname": "Li",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "IIIAI, UAE & Australian National University, Australia",
"fullName": "Salman Khan",
"givenName": "Salman",
"surname": "Khan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Australian National University, Australia",
"fullName": "Nick Barnes",
"givenName": "Nick",
"surname": "Barnes",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iccvw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2019-10-01T00:00:00",
"pubType": "proceedings",
"pages": "2080-2088",
"year": "2019",
"issn": null,
"isbn": "978-1-7281-5023-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "502300c070",
"articleId": "1i5mGsqvQiI",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "502300c089",
"articleId": "1i5msweb8CA",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/crv/2014/4337/0/4337a378",
"title": "3D Reconstruction by Fusioning Shadow and Silhouette Information",
"doi": null,
"abstractUrl": "/proceedings-article/crv/2014/4337a378/12OmNzfXawe",
"parentPublication": {
"id": "proceedings/crv/2014/4337/0",
"title": "2014 Canadian Conference on Computer and Robot Vision (CRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2018/1737/0/08486576",
"title": "Joint Multi-View People Tracking and Pose Estimation for 3D Scene Reconstruction",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2018/08486576/14jQfQj2fTp",
"parentPublication": {
"id": "proceedings/icme/2018/1737/0",
"title": "2018 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2019/4803/0/480300i647",
"title": "Holistic++ Scene Understanding: Single-View 3D Holistic Scene Parsing and Human Pose Estimation With Human-Object Interaction and Physical Commonsense",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2019/480300i647/1hQqhrIltn2",
"parentPublication": {
"id": "proceedings/iccv/2019/4803/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2019/4803/0/480300k0422",
"title": "U4D: Unsupervised 4D Dynamic Scene Understanding",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2019/480300k0422/1hQqutyasIU",
"parentPublication": {
"id": "proceedings/iccv/2019/4803/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2019/4803/0/480300j255",
"title": "3D Instance Segmentation via Multi-Task Metric Learning",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2019/480300j255/1hVlUFvBONO",
"parentPublication": {
"id": "proceedings/iccv/2019/4803/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2020/6553/0/09093611",
"title": "Silhouette Guided Point Cloud Reconstruction beyond Occlusion",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2020/09093611/1jPbfVd2c3S",
"parentPublication": {
"id": "proceedings/wacv/2020/6553/0",
"title": "2020 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2020/9360/0/09150931",
"title": "Geometry to the Rescue: 3D Instance Reconstruction from a Cluttered Scene",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2020/09150931/1lPHarhatd6",
"parentPublication": {
"id": "proceedings/cvprw/2020/9360/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800b129",
"title": "From Image Collections to Point Clouds With Self-Supervised Shape and Pose Networks",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800b129/1m3nOmkaLW8",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isctt/2020/8575/0/857500a158",
"title": "Monocular Instance Level 3D Object Reconstruction based on Mesh R-CNN",
"doi": null,
"abstractUrl": "/proceedings-article/isctt/2020/857500a158/1rHeLb3yn4I",
"parentPublication": {
"id": "proceedings/isctt/2020/8575/0",
"title": "2020 5th International Conference on Information Science, Computer Technology and Transportation (ISCTT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900e606",
"title": "RfD-Net: Point Scene Understanding by Semantic Instance Reconstruction",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900e606/1yeIJqzXldK",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNAolGQE",
"title": "2016 17th International Conference on Parallel and Distributed Computing, Applications and Technologies (PDCAT)",
"acronym": "pdcat",
"groupId": "1001048",
"volume": "0",
"displayVolume": "0",
"year": "2016",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNAXglK1",
"doi": "10.1109/PDCAT.2016.084",
"title": "Accumulative Energy-Based Seam Carving for Image Resizing",
"normalizedTitle": "Accumulative Energy-Based Seam Carving for Image Resizing",
"abstract": "With the diversified development of the digital devices, such as computer, mobile phone, pad and television, how to resize an image or video to adapt to different display screens has been attracting more and more people's attention. Seam carving has been an important method for image resizing. If multiple removed or inserted seams are located within a certain region, it can lead to discontinuity image content. Besides, the salient objects tend to be destroyed if the energy function only contains the gradient information. Therefore, we propose an accumulative energy-based seam carving method for image resizing. When removing a certain seam, we distribute the energy of each pixel on the seam to its adjacent 8-connected pixels in order to avoid the extreme concentration of seams, especially within a texture region. In addition, we add the image saliency and the edge information into the energy function to reduce the distortion. Since the computational complexity of seam carving method is very high, we use parallel computing environment to achieve efficient computation. Experimental results show that compared with the existing methods, our method can both avoid the discontinuity of image content and distortions as well as better maintain the shape of the salient objects.",
"abstracts": [
{
"abstractType": "Regular",
"content": "With the diversified development of the digital devices, such as computer, mobile phone, pad and television, how to resize an image or video to adapt to different display screens has been attracting more and more people's attention. Seam carving has been an important method for image resizing. If multiple removed or inserted seams are located within a certain region, it can lead to discontinuity image content. Besides, the salient objects tend to be destroyed if the energy function only contains the gradient information. Therefore, we propose an accumulative energy-based seam carving method for image resizing. When removing a certain seam, we distribute the energy of each pixel on the seam to its adjacent 8-connected pixels in order to avoid the extreme concentration of seams, especially within a texture region. In addition, we add the image saliency and the edge information into the energy function to reduce the distortion. Since the computational complexity of seam carving method is very high, we use parallel computing environment to achieve efficient computation. Experimental results show that compared with the existing methods, our method can both avoid the discontinuity of image content and distortions as well as better maintain the shape of the salient objects.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "With the diversified development of the digital devices, such as computer, mobile phone, pad and television, how to resize an image or video to adapt to different display screens has been attracting more and more people's attention. Seam carving has been an important method for image resizing. If multiple removed or inserted seams are located within a certain region, it can lead to discontinuity image content. Besides, the salient objects tend to be destroyed if the energy function only contains the gradient information. Therefore, we propose an accumulative energy-based seam carving method for image resizing. When removing a certain seam, we distribute the energy of each pixel on the seam to its adjacent 8-connected pixels in order to avoid the extreme concentration of seams, especially within a texture region. In addition, we add the image saliency and the edge information into the energy function to reduce the distortion. Since the computational complexity of seam carving method is very high, we use parallel computing environment to achieve efficient computation. Experimental results show that compared with the existing methods, our method can both avoid the discontinuity of image content and distortions as well as better maintain the shape of the salient objects.",
"fno": "07943389",
"keywords": [
"Computational Complexity",
"Computer Displays",
"Distortion",
"Image Reconstruction",
"Parallel Processing",
"Video Signal Processing",
"Accumulative Energy Based Seam Carving",
"Image Resizing",
"Digital Devices",
"Video Resizing",
"Display Screens",
"Discontinuity Image Content",
"Salient Objects",
"Energy Function",
"Gradient Information",
"Image Saliency",
"Edge Information",
"Distortion Reduction",
"Computational Complexity",
"Parallel Computing Environment",
"Image Content Discontinuity",
"Handheld Computers",
"Parallel Processing",
"Image Edge Detection",
"Distortion",
"Computational Efficiency",
"Distributed Computing",
"Image Resizing",
"Seam Carving",
"Optimal Seam",
"Accumulative Energy",
"Saliency Detection",
"Edge Detection"
],
"authors": [
{
"affiliation": null,
"fullName": "Yuqing Lin",
"givenName": "Yuqing",
"surname": "Lin",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Yuzhen Niu",
"givenName": "Yuzhen",
"surname": "Niu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Jiawen Lin",
"givenName": "Jiawen",
"surname": "Lin",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Haifeng Zhang",
"givenName": "Haifeng",
"surname": "Zhang",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "pdcat",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2016-12-01T00:00:00",
"pubType": "proceedings",
"pages": "366-371",
"year": "2016",
"issn": null,
"isbn": "978-1-5090-5081-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07943388",
"articleId": "12OmNqBbHSO",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07943390",
"articleId": "12OmNzFv4kn",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/isspit/2009/5949/0/05407481",
"title": "Fast seam carving using partial update and divide and conquer method",
"doi": null,
"abstractUrl": "/proceedings-article/isspit/2009/05407481/12OmNCbU33t",
"parentPublication": {
"id": "proceedings/isspit/2009/5949/0",
"title": "2009 IEEE International Symposium on Signal Processing and Information Technology (ISSPIT 2009)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpads/2014/7615/0/07097861",
"title": "Optimizing Seam Carving on multi-GPU systems for real-time image resizing",
"doi": null,
"abstractUrl": "/proceedings-article/icpads/2014/07097861/12OmNsbGvDS",
"parentPublication": {
"id": "proceedings/icpads/2014/7615/0",
"title": "2014 20th IEEE International Conference on Parallel and Distributed Systems (ICPADS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ism/2014/4311/0/4311a082",
"title": "Seam Carving for Color-Plus-Depth 3D Image",
"doi": null,
"abstractUrl": "/proceedings-article/ism/2014/4311a082/12OmNwDj0Y7",
"parentPublication": {
"id": "proceedings/ism/2014/4311/0",
"title": "2014 IEEE International Symposium on Multimedia (ISM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/trustcom/2012/4745/0/4745a596",
"title": "Image Resizing Based on Geometry Preservation with Seam Carving",
"doi": null,
"abstractUrl": "/proceedings-article/trustcom/2012/4745a596/12OmNwdbV2X",
"parentPublication": {
"id": "proceedings/trustcom/2012/4745/0",
"title": "2012 IEEE 11th International Conference on Trust, Security and Privacy in Computing and Communications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2008/2570/0/04607613",
"title": "Content-aware image resizing using perceptual seam carving with human attention model",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2008/04607613/12OmNxGALgl",
"parentPublication": {
"id": "proceedings/icme/2008/2570/0",
"title": "2008 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icassp/2009/2353/0/04959689",
"title": "Combined image plus depth seam carving for multiview 3D images",
"doi": null,
"abstractUrl": "/proceedings-article/icassp/2009/04959689/12OmNxYbT4y",
"parentPublication": {
"id": "proceedings/icassp/2009/2353/0",
"title": "Acoustics, Speech, and Signal Processing, IEEE International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icig/2011/4541/0/4541a050",
"title": "Reverse Seam Carving",
"doi": null,
"abstractUrl": "/proceedings-article/icig/2011/4541a050/12OmNxjjEkK",
"parentPublication": {
"id": "proceedings/icig/2011/4541/0",
"title": "Image and Graphics, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdma/2013/5016/0/5016a675",
"title": "Improved Adaptive Seam Carving for Image Retargeting",
"doi": null,
"abstractUrl": "/proceedings-article/icdma/2013/5016a675/12OmNzmclGc",
"parentPublication": {
"id": "proceedings/icdma/2013/5016/0",
"title": "2013 Fourth International Conference on Digital Manufacturing & Automation (ICDMA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2010/6984/0/05540165",
"title": "Discontinuous seam-carving for video retargeting",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2010/05540165/12OmNzwZ6vw",
"parentPublication": {
"id": "proceedings/cvpr/2010/6984/0",
"title": "2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2022/8739/0/873900a001",
"title": "SeeTheSeams: Localized Detection of Seam Carving based Image Forgery in Satellite Imagery",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2022/873900a001/1G56JOOi1CU",
"parentPublication": {
"id": "proceedings/cvprw/2022/8739/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNwdbV00",
"title": "2012 IEEE Conference on Computer Vision and Pattern Recognition",
"acronym": "cvpr",
"groupId": "1000147",
"volume": "0",
"displayVolume": "0",
"year": "2012",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNAiFI8D",
"doi": "10.1109/CVPR.2012.6247657",
"title": "Scene warping: Layer-based stereoscopic image resizing",
"normalizedTitle": "Scene warping: Layer-based stereoscopic image resizing",
"abstract": "This paper proposes scene warping, a layer-based stereoscopic image resizing method using image warping. The proposed method decomposes the input stereoscopic image pair into layers according to the depth and color information. A quad mesh is placed onto each layer to guide the image warping for resizing. The warped layers are composited by their depth orders to synthesize the resized stereoscopic image. We formulate an energy function to guide the warping for each layer so that the composited image avoids distortions and holes, maintains good stereoscopic properties and contains as many important pixels as possible in the reduced image space. The proposed method offers the advantages of less discontinuous artifacts, less-distorted objects, correct depth ordering and enhanced stereoscopic quality. Experiments show that our method compares favorably with existing methods.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper proposes scene warping, a layer-based stereoscopic image resizing method using image warping. The proposed method decomposes the input stereoscopic image pair into layers according to the depth and color information. A quad mesh is placed onto each layer to guide the image warping for resizing. The warped layers are composited by their depth orders to synthesize the resized stereoscopic image. We formulate an energy function to guide the warping for each layer so that the composited image avoids distortions and holes, maintains good stereoscopic properties and contains as many important pixels as possible in the reduced image space. The proposed method offers the advantages of less discontinuous artifacts, less-distorted objects, correct depth ordering and enhanced stereoscopic quality. Experiments show that our method compares favorably with existing methods.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper proposes scene warping, a layer-based stereoscopic image resizing method using image warping. The proposed method decomposes the input stereoscopic image pair into layers according to the depth and color information. A quad mesh is placed onto each layer to guide the image warping for resizing. The warped layers are composited by their depth orders to synthesize the resized stereoscopic image. We formulate an energy function to guide the warping for each layer so that the composited image avoids distortions and holes, maintains good stereoscopic properties and contains as many important pixels as possible in the reduced image space. The proposed method offers the advantages of less discontinuous artifacts, less-distorted objects, correct depth ordering and enhanced stereoscopic quality. Experiments show that our method compares favorably with existing methods.",
"fno": "007P1A07",
"keywords": [
"Stereo Image Processing",
"Image Colour Analysis",
"Image Enhancement",
"Stereoscopic Quality Enhancement",
"Scene Warping",
"Layer Based Stereoscopic Image Resizing",
"Image Warping",
"Stereoscopic Image Pair Decomposition",
"Color Information",
"Quad Mesh",
"Energy Function",
"Discontinuous Artifact",
"Less Distorted Object",
"Depth Ordering",
"Stereo Image Processing",
"Object Segmentation",
"Optimization",
"Image Quality",
"Energy Measurement",
"Image Color Analysis",
"Image Edge Detection"
],
"authors": [
{
"affiliation": "Nat. Taiwan Univ., Taipei, Taiwan",
"fullName": "Cheng-Da Chung",
"givenName": null,
"surname": "Cheng-Da Chung",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Nat. Taiwan Univ., Taipei, Taiwan",
"fullName": "Ken-Yi Lee",
"givenName": null,
"surname": "Ken-Yi Lee",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Nat. Taiwan Univ., Taipei, Taiwan",
"fullName": "Yung-Yu Chuang",
"givenName": null,
"surname": "Yung-Yu Chuang",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2012-06-01T00:00:00",
"pubType": "proceedings",
"pages": "49-56",
"year": "2012",
"issn": "1063-6919",
"isbn": "978-1-4673-1226-4",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "006P1A06",
"articleId": "12OmNwpXRWZ",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "008P1A08",
"articleId": "12OmNvUaNfz",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icmip/2017/5954/0/5954a123",
"title": "No-Reference Stereoscopic Image Quality Assessment Using Natural Scene Statistics",
"doi": null,
"abstractUrl": "/proceedings-article/icmip/2017/5954a123/12OmNAndiqC",
"parentPublication": {
"id": "proceedings/icmip/2017/5954/0",
"title": "2017 2nd International Conference on Multimedia and Image Processing (ICMIP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2012/4711/0/4711a302",
"title": "Warping-Based Novel View Synthesis from a Binocular Image for Autostereoscopic Displays",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2012/4711a302/12OmNAoUTsE",
"parentPublication": {
"id": "proceedings/icme/2012/4711/0",
"title": "2012 IEEE International Conference on Multimedia and Expo",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2011/348/0/06011901",
"title": "Stereoscopic image inpainting using scene geometry",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2011/06011901/12OmNyr8YlH",
"parentPublication": {
"id": "proceedings/icme/2011/348/0",
"title": "2011 IEEE International Conference on Multimedia and Expo",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmew/2016/1552/0/07574680",
"title": "Reducing perspective distortion for stereoscopic image stitching",
"doi": null,
"abstractUrl": "/proceedings-article/icmew/2016/07574680/12OmNzlUKBO",
"parentPublication": {
"id": "proceedings/icmew/2016/1552/0",
"title": "2016 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2007/04/v0686",
"title": "Stereoscopic Video Synthesis from a Monocular Video",
"doi": null,
"abstractUrl": "/journal/tg/2007/04/v0686/13rRUwcAqq7",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/08/ttg2013081288",
"title": "Changing Perspective in Stereoscopic Images",
"doi": null,
"abstractUrl": "/journal/tg/2013/08/ttg2013081288/13rRUwghd4Y",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/04/07833188",
"title": "Efficient Hybrid Image Warping for High Frame-Rate Stereoscopic Rendering",
"doi": null,
"abstractUrl": "/journal/tg/2017/04/07833188/13rRUxBrGh6",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/08/ttg2013081375",
"title": "StereoPasting: Interactive Composition in Stereoscopic Images",
"doi": null,
"abstractUrl": "/journal/tg/2013/08/ttg2013081375/13rRUxC0SWa",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2004/01/v0015",
"title": "Interactive Stereoscopic Rendering of Volumetric Environments",
"doi": null,
"abstractUrl": "/journal/tg/2004/01/v0015/13rRUyYBlgp",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2019/1975/0/197500b655",
"title": "Warping-Based Stereoscopic 3D Video Retargeting With Depth Remapping",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2019/197500b655/18j8LvV2AJG",
"parentPublication": {
"id": "proceedings/wacv/2019/1975/0",
"title": "2019 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNzVXNJe",
"title": "2009 IEEE International Symposium on Signal Processing and Information Technology (ISSPIT 2009)",
"acronym": "isspit",
"groupId": "1001026",
"volume": "0",
"displayVolume": "0",
"year": "2009",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNCbU33t",
"doi": "10.1109/ISSPIT.2009.5407481",
"title": "Fast seam carving using partial update and divide and conquer method",
"normalizedTitle": "Fast seam carving using partial update and divide and conquer method",
"abstract": "A variety of display devices with different resolutions and aspect ratios require image resizing while preserving the image's important information. Among the many available techniques, the seam carving method is the most promising due to its simplicity and effectiveness. However, the optimal seam carving method takes a long computation time due to its nature of sequential processing. This paper proposes two approaches to accelerate the original seam carving method. One is to update the cumulative minimum energy map (CMEM) partially around the seam and another is to process several seams using the divide and conquer (DNC) method. The proposed methods are about 2 ~ 5 times faster than the original seam carving method, while preserving the image's information as effectively as the original seam carving method.",
"abstracts": [
{
"abstractType": "Regular",
"content": "A variety of display devices with different resolutions and aspect ratios require image resizing while preserving the image's important information. Among the many available techniques, the seam carving method is the most promising due to its simplicity and effectiveness. However, the optimal seam carving method takes a long computation time due to its nature of sequential processing. This paper proposes two approaches to accelerate the original seam carving method. One is to update the cumulative minimum energy map (CMEM) partially around the seam and another is to process several seams using the divide and conquer (DNC) method. The proposed methods are about 2 ~ 5 times faster than the original seam carving method, while preserving the image's information as effectively as the original seam carving method.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "A variety of display devices with different resolutions and aspect ratios require image resizing while preserving the image's important information. Among the many available techniques, the seam carving method is the most promising due to its simplicity and effectiveness. However, the optimal seam carving method takes a long computation time due to its nature of sequential processing. This paper proposes two approaches to accelerate the original seam carving method. One is to update the cumulative minimum energy map (CMEM) partially around the seam and another is to process several seams using the divide and conquer (DNC) method. The proposed methods are about 2 ~ 5 times faster than the original seam carving method, while preserving the image's information as effectively as the original seam carving method.",
"fno": "05407481",
"keywords": [
"Divide And Conquer Methods",
"Image Processing",
"Image Resizing",
"Seam Carving Method",
"Cumulative Minimum Energy Map",
"Divide And Conquer Method",
"Partial Update",
"Image Information Preservation",
"Computer Science",
"Computer Displays",
"Image Resolution",
"Acceleration",
"Dynamic Programming",
"Performance Loss",
"Interpolation",
"Particle Measurements",
"Size Measurement",
"Laplace Equations",
"Content Aware Image Resizing",
"Image Seams",
"Cumulative Minimum Energy Map",
"Partial Update",
"Divide And Conquer"
],
"authors": [
{
"affiliation": "Department of Computer Science and Engineering, Pohang University of Science and Technology, Korea",
"fullName": "Jinseok Lee",
"givenName": "Jinseok",
"surname": "Lee",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Department of Computer Science and Engineering, Pohang University of Science and Technology, Korea",
"fullName": "Daijin Kim",
"givenName": "Daijin",
"surname": "Kim",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "isspit",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2009-12-01T00:00:00",
"pubType": "proceedings",
"pages": "",
"year": "2009",
"issn": "2162-7843",
"isbn": "978-1-4244-5949-0",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "05407484",
"articleId": "12OmNqJHFAw",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "05407545",
"articleId": "12OmNrJiCEV",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/pdcat/2016/5081/0/07943389",
"title": "Accumulative Energy-Based Seam Carving for Image Resizing",
"doi": null,
"abstractUrl": "/proceedings-article/pdcat/2016/07943389/12OmNAXglK1",
"parentPublication": {
"id": "proceedings/pdcat/2016/5081/0",
"title": "2016 17th International Conference on Parallel and Distributed Computing, Applications and Technologies (PDCAT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icfhr/2014/4335/0/06981003",
"title": "Text Line Segmentation for Handwritten Documents Using Constrained Seam Carving",
"doi": null,
"abstractUrl": "/proceedings-article/icfhr/2014/06981003/12OmNBU1jNg",
"parentPublication": {
"id": "proceedings/icfhr/2014/4335/0",
"title": "2014 14th International Conference on Frontiers in Handwriting Recognition (ICFHR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/trustcom/2012/4745/0/4745a596",
"title": "Image Resizing Based on Geometry Preservation with Seam Carving",
"doi": null,
"abstractUrl": "/proceedings-article/trustcom/2012/4745a596/12OmNwdbV2X",
"parentPublication": {
"id": "proceedings/trustcom/2012/4745/0",
"title": "2012 IEEE 11th International Conference on Trust, Security and Privacy in Computing and Communications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmtma/2014/3435/0/3435a060",
"title": "A Study of Image Retargeting Based on Seam Carving",
"doi": null,
"abstractUrl": "/proceedings-article/icmtma/2014/3435a060/12OmNwswg2d",
"parentPublication": {
"id": "proceedings/icmtma/2014/3435/0",
"title": "2014 Sixth International Conference on Measuring Technology and Mechatronics Automation (ICMTMA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2008/2570/0/04607613",
"title": "Content-aware image resizing using perceptual seam carving with human attention model",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2008/04607613/12OmNxGALgl",
"parentPublication": {
"id": "proceedings/icme/2008/2570/0",
"title": "2008 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icassp/2009/2353/0/04959689",
"title": "Combined image plus depth seam carving for multiview 3D images",
"doi": null,
"abstractUrl": "/proceedings-article/icassp/2009/04959689/12OmNxYbT4y",
"parentPublication": {
"id": "proceedings/icassp/2009/2353/0",
"title": "Acoustics, Speech, and Signal Processing, IEEE International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icig/2011/4541/0/4541a050",
"title": "Reverse Seam Carving",
"doi": null,
"abstractUrl": "/proceedings-article/icig/2011/4541a050/12OmNxjjEkK",
"parentPublication": {
"id": "proceedings/icig/2011/4541/0",
"title": "Image and Graphics, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ares/2010/3965/0/3965a702",
"title": "A Novel Image Hiding Scheme Using Content Aware Seam Carving Method",
"doi": null,
"abstractUrl": "/proceedings-article/ares/2010/3965a702/12OmNyKa5Y8",
"parentPublication": {
"id": "proceedings/ares/2010/3965/0",
"title": "2010 International Conference on Availability, Reliability and Security",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdma/2013/5016/0/5016a675",
"title": "Improved Adaptive Seam Carving for Image Retargeting",
"doi": null,
"abstractUrl": "/proceedings-article/icdma/2013/5016a675/12OmNzmclGc",
"parentPublication": {
"id": "proceedings/icdma/2013/5016/0",
"title": "2013 Fourth International Conference on Digital Manufacturing & Automation (ICDMA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2022/8739/0/873900a001",
"title": "SeeTheSeams: Localized Detection of Seam Carving based Image Forgery in Satellite Imagery",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2022/873900a001/1G56JOOi1CU",
"parentPublication": {
"id": "proceedings/cvprw/2022/8739/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNqH9hnl",
"title": "2008 37th IEEE Applied Imagery Pattern Recognition Workshop",
"acronym": "aipr",
"groupId": "1000046",
"volume": "0",
"displayVolume": "0",
"year": "2008",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNs0C9We",
"doi": "10.1109/AIPR.2008.4906465",
"title": "Hyper-spectral content aware resizing",
"normalizedTitle": "Hyper-spectral content aware resizing",
"abstract": "Image resizing is performed for many reasons in image processing. Often, it is done to reduce or enlarge an image for display. It is also done to reduce the bandwidth needed to transmit an image. Most image resizing algorithms work based on principles of spatial or spatial frequency interpolation. One drawback to these algorithms is that they are not image content aware and can fail to preserve relevant features in an image, especially during size reduction. Recently, a content aware image resizing algorithm, called seam carving, was developed. In this paper we discuss an extension of the seam carving algorithm to hyper-spectral imagery. For a hyper-spectral image with an MxN field of view and with P spectral layers, our algorithm identifies a one pixel wide path through the image field of view containing a minimum of information and then removes it. This process is repeated until the image size is reduced to the desired dimension. Information content is assessed using normalized spatial power metrics. Several such metrics have been tested with varying results. The resulting carved hyper-spectral image has the minimum reduction in information for the resizing based upon energy metrics used to quantify information. We will present the results of seam carving applied to imagery sets of: three spectra RGB imagery from a standard still camera, two spectra imagery generated synthetically, and three spectra imagery captured with VNIR, SWIR, and LWIR cameras.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Image resizing is performed for many reasons in image processing. Often, it is done to reduce or enlarge an image for display. It is also done to reduce the bandwidth needed to transmit an image. Most image resizing algorithms work based on principles of spatial or spatial frequency interpolation. One drawback to these algorithms is that they are not image content aware and can fail to preserve relevant features in an image, especially during size reduction. Recently, a content aware image resizing algorithm, called seam carving, was developed. In this paper we discuss an extension of the seam carving algorithm to hyper-spectral imagery. For a hyper-spectral image with an MxN field of view and with P spectral layers, our algorithm identifies a one pixel wide path through the image field of view containing a minimum of information and then removes it. This process is repeated until the image size is reduced to the desired dimension. Information content is assessed using normalized spatial power metrics. Several such metrics have been tested with varying results. The resulting carved hyper-spectral image has the minimum reduction in information for the resizing based upon energy metrics used to quantify information. We will present the results of seam carving applied to imagery sets of: three spectra RGB imagery from a standard still camera, two spectra imagery generated synthetically, and three spectra imagery captured with VNIR, SWIR, and LWIR cameras.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Image resizing is performed for many reasons in image processing. Often, it is done to reduce or enlarge an image for display. It is also done to reduce the bandwidth needed to transmit an image. Most image resizing algorithms work based on principles of spatial or spatial frequency interpolation. One drawback to these algorithms is that they are not image content aware and can fail to preserve relevant features in an image, especially during size reduction. Recently, a content aware image resizing algorithm, called seam carving, was developed. In this paper we discuss an extension of the seam carving algorithm to hyper-spectral imagery. For a hyper-spectral image with an MxN field of view and with P spectral layers, our algorithm identifies a one pixel wide path through the image field of view containing a minimum of information and then removes it. This process is repeated until the image size is reduced to the desired dimension. Information content is assessed using normalized spatial power metrics. Several such metrics have been tested with varying results. The resulting carved hyper-spectral image has the minimum reduction in information for the resizing based upon energy metrics used to quantify information. We will present the results of seam carving applied to imagery sets of: three spectra RGB imagery from a standard still camera, two spectra imagery generated synthetically, and three spectra imagery captured with VNIR, SWIR, and LWIR cameras.",
"fno": "04906465",
"keywords": [],
"authors": [
{
"affiliation": "Electronic and Computer Services, Pennsylvania State University, 149 Hammond Building, University Park, 16802, USA",
"fullName": "Jesse Scott",
"givenName": "Jesse",
"surname": "Scott",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Applied Research Laboratory, Pennsylvania State University, 450 Science Park Road, University Park, 16802, USA",
"fullName": "Richard Tutwiler",
"givenName": "Richard",
"surname": "Tutwiler",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Electronic and Computer Services, Pennsylvania State University, 149 Hammond Building, University Park, 16802, USA",
"fullName": "Michael Pusateri",
"givenName": "Michael",
"surname": "Pusateri",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "aipr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2008-10-01T00:00:00",
"pubType": "proceedings",
"pages": "1-7",
"year": "2008",
"issn": null,
"isbn": "978-1-4244-3125-0",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "04906464",
"articleId": "12OmNz61dqO",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "04906466",
"articleId": "12OmNyfdOLD",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/pdcat/2016/5081/0/07943389",
"title": "Accumulative Energy-Based Seam Carving for Image Resizing",
"doi": null,
"abstractUrl": "/proceedings-article/pdcat/2016/07943389/12OmNAXglK1",
"parentPublication": {
"id": "proceedings/pdcat/2016/5081/0",
"title": "2016 17th International Conference on Parallel and Distributed Computing, Applications and Technologies (PDCAT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isspit/2009/5949/0/05407481",
"title": "Fast seam carving using partial update and divide and conquer method",
"doi": null,
"abstractUrl": "/proceedings-article/isspit/2009/05407481/12OmNCbU33t",
"parentPublication": {
"id": "proceedings/isspit/2009/5949/0",
"title": "2009 IEEE International Symposium on Signal Processing and Information Technology (ISSPIT 2009)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpads/2014/7615/0/07097861",
"title": "Optimizing Seam Carving on multi-GPU systems for real-time image resizing",
"doi": null,
"abstractUrl": "/proceedings-article/icpads/2014/07097861/12OmNsbGvDS",
"parentPublication": {
"id": "proceedings/icpads/2014/7615/0",
"title": "2014 20th IEEE International Conference on Parallel and Distributed Systems (ICPADS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cgiv/2011/4484/0/4484a125",
"title": "A Projection Profile-Based Algorithm for Content-Aware Image Resizing",
"doi": null,
"abstractUrl": "/proceedings-article/cgiv/2011/4484a125/12OmNwBjP4m",
"parentPublication": {
"id": "proceedings/cgiv/2011/4484/0",
"title": "2011 Eighth International Conference Computer Graphics, Imaging and Visualization",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/trustcom/2012/4745/0/4745a596",
"title": "Image Resizing Based on Geometry Preservation with Seam Carving",
"doi": null,
"abstractUrl": "/proceedings-article/trustcom/2012/4745a596/12OmNwdbV2X",
"parentPublication": {
"id": "proceedings/trustcom/2012/4745/0",
"title": "2012 IEEE 11th International Conference on Trust, Security and Privacy in Computing and Communications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2008/2570/0/04607613",
"title": "Content-aware image resizing using perceptual seam carving with human attention model",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2008/04607613/12OmNxGALgl",
"parentPublication": {
"id": "proceedings/icme/2008/2570/0",
"title": "2008 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icassp/2009/2353/0/04959689",
"title": "Combined image plus depth seam carving for multiview 3D images",
"doi": null,
"abstractUrl": "/proceedings-article/icassp/2009/04959689/12OmNxYbT4y",
"parentPublication": {
"id": "proceedings/icassp/2009/2353/0",
"title": "Acoustics, Speech, and Signal Processing, IEEE International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ares/2010/3965/0/3965a702",
"title": "A Novel Image Hiding Scheme Using Content Aware Seam Carving Method",
"doi": null,
"abstractUrl": "/proceedings-article/ares/2010/3965a702/12OmNyKa5Y8",
"parentPublication": {
"id": "proceedings/ares/2010/3965/0",
"title": "2010 International Conference on Availability, Reliability and Security",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cad-graphics/2013/2576/0/06815054",
"title": "Saliency-Aware Volume Data Resizing by Surface Carving",
"doi": null,
"abstractUrl": "/proceedings-article/cad-graphics/2013/06815054/12OmNyUFfUQ",
"parentPublication": {
"id": "proceedings/cad-graphics/2013/2576/0",
"title": "2013 International Conference on Computer-Aided Design and Computer Graphics (CAD/Graphics)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/acpr/2017/3354/0/3354a852",
"title": "Adaptive Energy Selection for Content-Aware Image Resizing",
"doi": null,
"abstractUrl": "/proceedings-article/acpr/2017/3354a852/17D45Xq6dCY",
"parentPublication": {
"id": "proceedings/acpr/2017/3354/0",
"title": "2017 4th IAPR Asian Conference on Pattern Recognition (ACPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNzcxZeM",
"title": "2014 20th IEEE International Conference on Parallel and Distributed Systems (ICPADS)",
"acronym": "icpads",
"groupId": "1000534",
"volume": "0",
"displayVolume": "0",
"year": "2014",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNsbGvDS",
"doi": "10.1109/PADSW.2014.7097861",
"title": "Optimizing Seam Carving on multi-GPU systems for real-time image resizing",
"normalizedTitle": "Optimizing Seam Carving on multi-GPU systems for real-time image resizing",
"abstract": "Image resizing is increasingly important for picture sharing and exchanging between various personal electronic equipments. Seam Carving is a state-of-the-art approach for effective image resizing because of its content-aware characteristic. However, complex computation and memory access patterns make it time-consuming and prevent its wide usage in real-time image processing. To address these problems, we propose a novel algorithm, called Non-Cumulative Seam Carving (NCSC), which removes main computation bottleneck. Furthermore, we also propose an adaptive multi-seam algorithm for better parallelism on GPU platforms. Finally, we implement our algorithm on a multi-GPU platform. Results show that our approach achieves a maximum 140× speedup on a two-GPU system over the sequential version. It only takes 0.11 second to resize a 1024×640 image by half in width compared to 15.5 seconds with the traditional seam carving.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Image resizing is increasingly important for picture sharing and exchanging between various personal electronic equipments. Seam Carving is a state-of-the-art approach for effective image resizing because of its content-aware characteristic. However, complex computation and memory access patterns make it time-consuming and prevent its wide usage in real-time image processing. To address these problems, we propose a novel algorithm, called Non-Cumulative Seam Carving (NCSC), which removes main computation bottleneck. Furthermore, we also propose an adaptive multi-seam algorithm for better parallelism on GPU platforms. Finally, we implement our algorithm on a multi-GPU platform. Results show that our approach achieves a maximum 140× speedup on a two-GPU system over the sequential version. It only takes 0.11 second to resize a 1024×640 image by half in width compared to 15.5 seconds with the traditional seam carving.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Image resizing is increasingly important for picture sharing and exchanging between various personal electronic equipments. Seam Carving is a state-of-the-art approach for effective image resizing because of its content-aware characteristic. However, complex computation and memory access patterns make it time-consuming and prevent its wide usage in real-time image processing. To address these problems, we propose a novel algorithm, called Non-Cumulative Seam Carving (NCSC), which removes main computation bottleneck. Furthermore, we also propose an adaptive multi-seam algorithm for better parallelism on GPU platforms. Finally, we implement our algorithm on a multi-GPU platform. Results show that our approach achieves a maximum 140× speedup on a two-GPU system over the sequential version. It only takes 0.11 second to resize a 1024×640 image by half in width compared to 15.5 seconds with the traditional seam carving.",
"fno": "07097861",
"keywords": [
"Image Recognition",
"Streaming Media"
],
"authors": [
{
"affiliation": "Department of Computer Science and Technology, Tsinghua University, BeiJing, China",
"fullName": "Ikjoon Kim",
"givenName": "Ikjoon",
"surname": "Kim",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Department of Computer Science and Technology, Tsinghua University, BeiJing, China",
"fullName": "Jidong Zhai",
"givenName": "Jidong",
"surname": "Zhai",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Department of Computer Science and Technology, Tsinghua University, BeiJing, China",
"fullName": "Yan Li",
"givenName": "Yan",
"surname": "Li",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Department of Computer Science and Technology, Tsinghua University, BeiJing, China",
"fullName": "Wenguang Chen",
"givenName": "Wenguang",
"surname": "Chen",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icpads",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2014-12-01T00:00:00",
"pubType": "proceedings",
"pages": "616-623",
"year": "2014",
"issn": null,
"isbn": "978-1-4799-7615-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07097860",
"articleId": "12OmNASraSn",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07097862",
"articleId": "12OmNxecRVe",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/pdcat/2016/5081/0/07943389",
"title": "Accumulative Energy-Based Seam Carving for Image Resizing",
"doi": null,
"abstractUrl": "/proceedings-article/pdcat/2016/07943389/12OmNAXglK1",
"parentPublication": {
"id": "proceedings/pdcat/2016/5081/0",
"title": "2016 17th International Conference on Parallel and Distributed Computing, Applications and Technologies (PDCAT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isspit/2009/5949/0/05407481",
"title": "Fast seam carving using partial update and divide and conquer method",
"doi": null,
"abstractUrl": "/proceedings-article/isspit/2009/05407481/12OmNCbU33t",
"parentPublication": {
"id": "proceedings/isspit/2009/5949/0",
"title": "2009 IEEE International Symposium on Signal Processing and Information Technology (ISSPIT 2009)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aipr/2008/3125/0/04906465",
"title": "Hyper-spectral content aware resizing",
"doi": null,
"abstractUrl": "/proceedings-article/aipr/2008/04906465/12OmNs0C9We",
"parentPublication": {
"id": "proceedings/aipr/2008/3125/0",
"title": "2008 37th IEEE Applied Imagery Pattern Recognition Workshop",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ism/2014/4311/0/4311a082",
"title": "Seam Carving for Color-Plus-Depth 3D Image",
"doi": null,
"abstractUrl": "/proceedings-article/ism/2014/4311a082/12OmNwDj0Y7",
"parentPublication": {
"id": "proceedings/ism/2014/4311/0",
"title": "2014 IEEE International Symposium on Multimedia (ISM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/trustcom/2012/4745/0/4745a596",
"title": "Image Resizing Based on Geometry Preservation with Seam Carving",
"doi": null,
"abstractUrl": "/proceedings-article/trustcom/2012/4745a596/12OmNwdbV2X",
"parentPublication": {
"id": "proceedings/trustcom/2012/4745/0",
"title": "2012 IEEE 11th International Conference on Trust, Security and Privacy in Computing and Communications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2008/2570/0/04607613",
"title": "Content-aware image resizing using perceptual seam carving with human attention model",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2008/04607613/12OmNxGALgl",
"parentPublication": {
"id": "proceedings/icme/2008/2570/0",
"title": "2008 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icassp/2009/2353/0/04959689",
"title": "Combined image plus depth seam carving for multiview 3D images",
"doi": null,
"abstractUrl": "/proceedings-article/icassp/2009/04959689/12OmNxYbT4y",
"parentPublication": {
"id": "proceedings/icassp/2009/2353/0",
"title": "Acoustics, Speech, and Signal Processing, IEEE International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icig/2011/4541/0/4541a050",
"title": "Reverse Seam Carving",
"doi": null,
"abstractUrl": "/proceedings-article/icig/2011/4541a050/12OmNxjjEkK",
"parentPublication": {
"id": "proceedings/icig/2011/4541/0",
"title": "Image and Graphics, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ares/2010/3965/0/3965a702",
"title": "A Novel Image Hiding Scheme Using Content Aware Seam Carving Method",
"doi": null,
"abstractUrl": "/proceedings-article/ares/2010/3965a702/12OmNyKa5Y8",
"parentPublication": {
"id": "proceedings/ares/2010/3965/0",
"title": "2010 International Conference on Availability, Reliability and Security",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/01/ttg2014010111",
"title": "Summarization-Based Image Resizing by Intelligent Object Carving",
"doi": null,
"abstractUrl": "/journal/tg/2014/01/ttg2014010111/13rRUyYSWkY",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNxQOjzV",
"title": "2014 IEEE International Symposium on Multimedia (ISM)",
"acronym": "ism",
"groupId": "1001094",
"volume": "0",
"displayVolume": "0",
"year": "2014",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNwDj0Y7",
"doi": "10.1109/ISM.2014.41",
"title": "Seam Carving for Color-Plus-Depth 3D Image",
"normalizedTitle": "Seam Carving for Color-Plus-Depth 3D Image",
"abstract": "Color-plus-Depth 3D images are booming up with the advance of depth-sensing camera (e.g., Kinect). This new 3D visual content imposes new challenges on image resizing since we have to resize both the color and the depth images simultaneously. In order to resolve these newly introduced challenges of color-plus-depth 3D images, we propose a new energy function with salient depth cues consideration for seam carving operation. We further incorporate super-pixel over-segmentation and depth remapping for achieving an object-based and 3D viewing comfort zone aware resizing framework. Experimental results show the proposed framework can effectively generate the resized images by maintaining the salient regions and providing a comfortable 3D visual perception, at the same time.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Color-plus-Depth 3D images are booming up with the advance of depth-sensing camera (e.g., Kinect). This new 3D visual content imposes new challenges on image resizing since we have to resize both the color and the depth images simultaneously. In order to resolve these newly introduced challenges of color-plus-depth 3D images, we propose a new energy function with salient depth cues consideration for seam carving operation. We further incorporate super-pixel over-segmentation and depth remapping for achieving an object-based and 3D viewing comfort zone aware resizing framework. Experimental results show the proposed framework can effectively generate the resized images by maintaining the salient regions and providing a comfortable 3D visual perception, at the same time.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Color-plus-Depth 3D images are booming up with the advance of depth-sensing camera (e.g., Kinect). This new 3D visual content imposes new challenges on image resizing since we have to resize both the color and the depth images simultaneously. In order to resolve these newly introduced challenges of color-plus-depth 3D images, we propose a new energy function with salient depth cues consideration for seam carving operation. We further incorporate super-pixel over-segmentation and depth remapping for achieving an object-based and 3D viewing comfort zone aware resizing framework. Experimental results show the proposed framework can effectively generate the resized images by maintaining the salient regions and providing a comfortable 3D visual perception, at the same time.",
"fno": "4311a082",
"keywords": [
"Three Dimensional Displays",
"Visualization",
"Image Color Analysis",
"Lattices",
"Image Segmentation",
"Nonlinear Distortion",
"Multimedia Communication",
"Color Plus Depth 3 D Image",
"Seam Carving",
"Image Resizing"
],
"authors": [
{
"affiliation": null,
"fullName": "Wei-Cih Jhou",
"givenName": "Wei-Cih",
"surname": "Jhou",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Yu-Hsun Lin",
"givenName": "Yu-Hsun",
"surname": "Lin",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Ja-Ling Wu",
"givenName": "Ja-Ling",
"surname": "Wu",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ism",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2014-12-01T00:00:00",
"pubType": "proceedings",
"pages": "82-85",
"year": "2014",
"issn": null,
"isbn": "978-1-4799-4311-1",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "4311a078",
"articleId": "12OmNBOCWhR",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "4311a086",
"articleId": "12OmNzRHOS3",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/pdcat/2016/5081/0/07943389",
"title": "Accumulative Energy-Based Seam Carving for Image Resizing",
"doi": null,
"abstractUrl": "/proceedings-article/pdcat/2016/07943389/12OmNAXglK1",
"parentPublication": {
"id": "proceedings/pdcat/2016/5081/0",
"title": "2016 17th International Conference on Parallel and Distributed Computing, Applications and Technologies (PDCAT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2017/6067/0/08019542",
"title": "Quality assessment of multi-view-plus-depth images",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2017/08019542/12OmNBfZSmq",
"parentPublication": {
"id": "proceedings/icme/2017/6067/0",
"title": "2017 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iciev/2013/0400/0/06572627",
"title": "Depth image interpolation with the propagation of color geodesic neighborhoods",
"doi": null,
"abstractUrl": "/proceedings-article/iciev/2013/06572627/12OmNCesr6M",
"parentPublication": {
"id": "proceedings/iciev/2013/0400/0",
"title": "2013 2nd International Conference on Informatics, Electronics and Vision (ICIEV 2013)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2016/1437/0/1437a910",
"title": "Depth Camera Based on Color-Coded Aperture",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2016/1437a910/12OmNvm6VHm",
"parentPublication": {
"id": "proceedings/cvprw/2016/1437/0",
"title": "2016 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/trustcom/2012/4745/0/4745a596",
"title": "Image Resizing Based on Geometry Preservation with Seam Carving",
"doi": null,
"abstractUrl": "/proceedings-article/trustcom/2012/4745a596/12OmNwdbV2X",
"parentPublication": {
"id": "proceedings/trustcom/2012/4745/0",
"title": "2012 IEEE 11th International Conference on Trust, Security and Privacy in Computing and Communications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2018/4886/0/488601b377",
"title": "Depth Map Completion by Jointly Exploiting Blurry Color Images and Sparse Depth Maps",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2018/488601b377/12OmNwwuE12",
"parentPublication": {
"id": "proceedings/wacv/2018/4886/0",
"title": "2018 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2015/8332/0/8332a571",
"title": "On Preserving Structure in Stereo Seam Carving",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2015/8332a571/12OmNxGj9Uh",
"parentPublication": {
"id": "proceedings/3dv/2015/8332/0",
"title": "2015 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icassp/2009/2353/0/04959689",
"title": "Combined image plus depth seam carving for multiview 3D images",
"doi": null,
"abstractUrl": "/proceedings-article/icassp/2009/04959689/12OmNxYbT4y",
"parentPublication": {
"id": "proceedings/icassp/2009/2353/0",
"title": "Acoustics, Speech, and Signal Processing, IEEE International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icig/2011/4541/0/4541a050",
"title": "Reverse Seam Carving",
"doi": null,
"abstractUrl": "/proceedings-article/icig/2011/4541a050/12OmNxjjEkK",
"parentPublication": {
"id": "proceedings/icig/2011/4541/0",
"title": "Image and Graphics, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mipr/2020/4272/0/427200a135",
"title": "No-Reference Quality Prediction for DIBR-Synthesized Images Using Statistics of Fused Color-Depth Images",
"doi": null,
"abstractUrl": "/proceedings-article/mipr/2020/427200a135/1mAa1YjLS2Q",
"parentPublication": {
"id": "proceedings/mipr/2020/4272/0",
"title": "2020 IEEE Conference on Multimedia Information Processing and Retrieval (MIPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNqIhFPc",
"title": "2012 IEEE 11th International Conference on Trust, Security and Privacy in Computing and Communications",
"acronym": "trustcom",
"groupId": "1800729",
"volume": "0",
"displayVolume": "0",
"year": "2012",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNwdbV2X",
"doi": "10.1109/TrustCom.2012.181",
"title": "Image Resizing Based on Geometry Preservation with Seam Carving",
"normalizedTitle": "Image Resizing Based on Geometry Preservation with Seam Carving",
"abstract": "When an image or a video is transformed to an aspect ratio deferent from its original size, information lost is inevitable no matter what method is used, thus, how to keep the most attractive contents and minimize the visual distortion during the resizing process is the key issue. To address this problem, this paper proposes an object geometry preservation method based on the seam carving method. We first define a framework that measures the importance of geometry feature in the source material, then a new energy function is presented with object geometry constraint, according to the new energy function, an optimized seam carving method is used to minimize distortion while resizing the source material. The experiment results show that our method is better to transform a variety of source images to a different display size than conventional resizing methods.",
"abstracts": [
{
"abstractType": "Regular",
"content": "When an image or a video is transformed to an aspect ratio deferent from its original size, information lost is inevitable no matter what method is used, thus, how to keep the most attractive contents and minimize the visual distortion during the resizing process is the key issue. To address this problem, this paper proposes an object geometry preservation method based on the seam carving method. We first define a framework that measures the importance of geometry feature in the source material, then a new energy function is presented with object geometry constraint, according to the new energy function, an optimized seam carving method is used to minimize distortion while resizing the source material. The experiment results show that our method is better to transform a variety of source images to a different display size than conventional resizing methods.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "When an image or a video is transformed to an aspect ratio deferent from its original size, information lost is inevitable no matter what method is used, thus, how to keep the most attractive contents and minimize the visual distortion during the resizing process is the key issue. To address this problem, this paper proposes an object geometry preservation method based on the seam carving method. We first define a framework that measures the importance of geometry feature in the source material, then a new energy function is presented with object geometry constraint, according to the new energy function, an optimized seam carving method is used to minimize distortion while resizing the source material. The experiment results show that our method is better to transform a variety of source images to a different display size than conventional resizing methods.",
"fno": "4745a596",
"keywords": [
"Energy Function",
"Image Resizing",
"Seam Carving",
"Geometry Preservation"
],
"authors": [
{
"affiliation": null,
"fullName": "Fan Zhou",
"givenName": "Fan",
"surname": "Zhou",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Ruomei Wang",
"givenName": "Ruomei",
"surname": "Wang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Yu Liu",
"givenName": "Yu",
"surname": "Liu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Yun Liang",
"givenName": "Yun",
"surname": "Liang",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "trustcom",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2012-06-01T00:00:00",
"pubType": "proceedings",
"pages": "596-601",
"year": "2012",
"issn": null,
"isbn": "978-1-4673-2172-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "4745a588",
"articleId": "12OmNArtheh",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "4745a602",
"articleId": "12OmNy6qfHc",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/pdcat/2016/5081/0/07943389",
"title": "Accumulative Energy-Based Seam Carving for Image Resizing",
"doi": null,
"abstractUrl": "/proceedings-article/pdcat/2016/07943389/12OmNAXglK1",
"parentPublication": {
"id": "proceedings/pdcat/2016/5081/0",
"title": "2016 17th International Conference on Parallel and Distributed Computing, Applications and Technologies (PDCAT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isspit/2009/5949/0/05407481",
"title": "Fast seam carving using partial update and divide and conquer method",
"doi": null,
"abstractUrl": "/proceedings-article/isspit/2009/05407481/12OmNCbU33t",
"parentPublication": {
"id": "proceedings/isspit/2009/5949/0",
"title": "2009 IEEE International Symposium on Signal Processing and Information Technology (ISSPIT 2009)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdar/2011/4520/0/4520a563",
"title": "Language-Independent Text Lines Extraction Using Seam Carving",
"doi": null,
"abstractUrl": "/proceedings-article/icdar/2011/4520a563/12OmNrJ11Hl",
"parentPublication": {
"id": "proceedings/icdar/2011/4520/0",
"title": "2011 International Conference on Document Analysis and Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpads/2014/7615/0/07097861",
"title": "Optimizing Seam Carving on multi-GPU systems for real-time image resizing",
"doi": null,
"abstractUrl": "/proceedings-article/icpads/2014/07097861/12OmNsbGvDS",
"parentPublication": {
"id": "proceedings/icpads/2014/7615/0",
"title": "2014 20th IEEE International Conference on Parallel and Distributed Systems (ICPADS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2008/2570/0/04607613",
"title": "Content-aware image resizing using perceptual seam carving with human attention model",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2008/04607613/12OmNxGALgl",
"parentPublication": {
"id": "proceedings/icme/2008/2570/0",
"title": "2008 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icassp/2009/2353/0/04959689",
"title": "Combined image plus depth seam carving for multiview 3D images",
"doi": null,
"abstractUrl": "/proceedings-article/icassp/2009/04959689/12OmNxYbT4y",
"parentPublication": {
"id": "proceedings/icassp/2009/2353/0",
"title": "Acoustics, Speech, and Signal Processing, IEEE International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icig/2011/4541/0/4541a050",
"title": "Reverse Seam Carving",
"doi": null,
"abstractUrl": "/proceedings-article/icig/2011/4541a050/12OmNxjjEkK",
"parentPublication": {
"id": "proceedings/icig/2011/4541/0",
"title": "Image and Graphics, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ares/2010/3965/0/3965a702",
"title": "A Novel Image Hiding Scheme Using Content Aware Seam Carving Method",
"doi": null,
"abstractUrl": "/proceedings-article/ares/2010/3965a702/12OmNyKa5Y8",
"parentPublication": {
"id": "proceedings/ares/2010/3965/0",
"title": "2010 International Conference on Availability, Reliability and Security",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cisis/2012/4687/0/4687a693",
"title": "Study on Seam Carving for Image Fingerprint",
"doi": null,
"abstractUrl": "/proceedings-article/cisis/2012/4687a693/12OmNz61d3Q",
"parentPublication": {
"id": "proceedings/cisis/2012/4687/0",
"title": "2012 Sixth International Conference on Complex, Intelligent, and Software Intensive Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iih-msp/2010/4222/0/4222a615",
"title": "Content-Aware Video Seam Carving Based on Bag of Visual Cubes",
"doi": null,
"abstractUrl": "/proceedings-article/iih-msp/2010/4222a615/12OmNzmLxKW",
"parentPublication": {
"id": "proceedings/iih-msp/2010/4222/0",
"title": "Intelligent Information Hiding and Multimedia Signal Processing, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNyKa5Tk",
"title": "2008 IEEE International Conference on Multimedia and Expo (ICME)",
"acronym": "icme",
"groupId": "1000477",
"volume": "0",
"displayVolume": "0",
"year": "2008",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNxGALgl",
"doi": "10.1109/ICME.2008.4607613",
"title": "Content-aware image resizing using perceptual seam carving with human attention model",
"normalizedTitle": "Content-aware image resizing using perceptual seam carving with human attention model",
"abstract": "In this paper, a new image resizing technique, perceptual seam carving, is proposed. With considering both face map and saliency map as human attention model in the energy function, it can keep important information in perceptual when the image is downsized. Moreover, a switching scheme between seam carving and resampling is also proposed to avoid excessively distorting the images. Experiments show that the proposed algorithm can generate more desirable resized images than cropping, resampling, and conventional seam carving techniques.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this paper, a new image resizing technique, perceptual seam carving, is proposed. With considering both face map and saliency map as human attention model in the energy function, it can keep important information in perceptual when the image is downsized. Moreover, a switching scheme between seam carving and resampling is also proposed to avoid excessively distorting the images. Experiments show that the proposed algorithm can generate more desirable resized images than cropping, resampling, and conventional seam carving techniques.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this paper, a new image resizing technique, perceptual seam carving, is proposed. With considering both face map and saliency map as human attention model in the energy function, it can keep important information in perceptual when the image is downsized. Moreover, a switching scheme between seam carving and resampling is also proposed to avoid excessively distorting the images. Experiments show that the proposed algorithm can generate more desirable resized images than cropping, resampling, and conventional seam carving techniques.",
"fno": "04607613",
"keywords": [
"Image Reconstruction",
"Image Sampling",
"Content Aware Image Resizing",
"Perceptual Seam Carving",
"Human Attention Model",
"Image Resizing Technique",
"Face Map",
"Saliency Map",
"Image Distortion",
"Image Cropping",
"Image Resampling",
"Face",
"Switches",
"Visualization",
"Pixel",
"Face Detection",
"Object Detection",
"Computer Vision"
],
"authors": [
{
"affiliation": "Media IC and System Lab, Graduate Institute of Electronics Engineering and Department of Electrical Engineering, National Taiwan University, 1, Sec. 4, Roosevelt Rd., Taipei 106, Taiwan",
"fullName": "Daw-Sen Hwang",
"givenName": null,
"surname": "Daw-Sen Hwang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Media IC and System Lab, Graduate Institute of Electronics Engineering and Department of Electrical Engineering, National Taiwan University, 1, Sec. 4, Roosevelt Rd., Taipei 106, Taiwan",
"fullName": "Shao-Yi Chien",
"givenName": null,
"surname": "Shao-Yi Chien",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icme",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2008-06-01T00:00:00",
"pubType": "proceedings",
"pages": "",
"year": "2008",
"issn": "1945-7871",
"isbn": "978-1-4244-2570-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "04607612",
"articleId": "12OmNC4eSGz",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "04607614",
"articleId": "12OmNzSh18u",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/pdcat/2016/5081/0/07943389",
"title": "Accumulative Energy-Based Seam Carving for Image Resizing",
"doi": null,
"abstractUrl": "/proceedings-article/pdcat/2016/07943389/12OmNAXglK1",
"parentPublication": {
"id": "proceedings/pdcat/2016/5081/0",
"title": "2016 17th International Conference on Parallel and Distributed Computing, Applications and Technologies (PDCAT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isspit/2009/5949/0/05407481",
"title": "Fast seam carving using partial update and divide and conquer method",
"doi": null,
"abstractUrl": "/proceedings-article/isspit/2009/05407481/12OmNCbU33t",
"parentPublication": {
"id": "proceedings/isspit/2009/5949/0",
"title": "2009 IEEE International Symposium on Signal Processing and Information Technology (ISSPIT 2009)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpads/2014/7615/0/07097861",
"title": "Optimizing Seam Carving on multi-GPU systems for real-time image resizing",
"doi": null,
"abstractUrl": "/proceedings-article/icpads/2014/07097861/12OmNsbGvDS",
"parentPublication": {
"id": "proceedings/icpads/2014/7615/0",
"title": "2014 20th IEEE International Conference on Parallel and Distributed Systems (ICPADS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/trustcom/2012/4745/0/4745a596",
"title": "Image Resizing Based on Geometry Preservation with Seam Carving",
"doi": null,
"abstractUrl": "/proceedings-article/trustcom/2012/4745a596/12OmNwdbV2X",
"parentPublication": {
"id": "proceedings/trustcom/2012/4745/0",
"title": "2012 IEEE 11th International Conference on Trust, Security and Privacy in Computing and Communications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmtma/2014/3435/0/3435a060",
"title": "A Study of Image Retargeting Based on Seam Carving",
"doi": null,
"abstractUrl": "/proceedings-article/icmtma/2014/3435a060/12OmNwswg2d",
"parentPublication": {
"id": "proceedings/icmtma/2014/3435/0",
"title": "2014 Sixth International Conference on Measuring Technology and Mechatronics Automation (ICMTMA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icassp/2009/2353/0/04959689",
"title": "Combined image plus depth seam carving for multiview 3D images",
"doi": null,
"abstractUrl": "/proceedings-article/icassp/2009/04959689/12OmNxYbT4y",
"parentPublication": {
"id": "proceedings/icassp/2009/2353/0",
"title": "Acoustics, Speech, and Signal Processing, IEEE International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icig/2011/4541/0/4541a050",
"title": "Reverse Seam Carving",
"doi": null,
"abstractUrl": "/proceedings-article/icig/2011/4541a050/12OmNxjjEkK",
"parentPublication": {
"id": "proceedings/icig/2011/4541/0",
"title": "Image and Graphics, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iih-msp/2010/4222/0/4222a615",
"title": "Content-Aware Video Seam Carving Based on Bag of Visual Cubes",
"doi": null,
"abstractUrl": "/proceedings-article/iih-msp/2010/4222a615/12OmNzmLxKW",
"parentPublication": {
"id": "proceedings/iih-msp/2010/4222/0",
"title": "Intelligent Information Hiding and Multimedia Signal Processing, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdma/2013/5016/0/5016a675",
"title": "Improved Adaptive Seam Carving for Image Retargeting",
"doi": null,
"abstractUrl": "/proceedings-article/icdma/2013/5016a675/12OmNzmclGc",
"parentPublication": {
"id": "proceedings/icdma/2013/5016/0",
"title": "2013 Fourth International Conference on Digital Manufacturing & Automation (ICDMA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2022/8739/0/873900a001",
"title": "SeeTheSeams: Localized Detection of Seam Carving based Image Forgery in Satellite Imagery",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2022/873900a001/1G56JOOi1CU",
"parentPublication": {
"id": "proceedings/cvprw/2022/8739/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNqAU6sR",
"title": "Image and Graphics, International Conference on",
"acronym": "icig",
"groupId": "1001790",
"volume": "0",
"displayVolume": "0",
"year": "2011",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNxjjEkK",
"doi": "10.1109/ICIG.2011.172",
"title": "Reverse Seam Carving",
"normalizedTitle": "Reverse Seam Carving",
"abstract": "Seam carving is an effective operator supporting content-aware resizing for both image reduction and expansion. However, repeated seam removing and inserting processes lead to excessively distortion image when imposed on seam insertion then removal operations or the other way around. With considering the relationship between seam removing and inserting processes, we present an ameliorated energy function to minimize aliasing. \"Forward Energy\" is an effective improvement only to image reduction. Moreover, we propose a novel \"Visual Points\" structure which distinguish the \"Forward Energy\" of seam insertion from that of seam removal, and improve seam insertion operations greatly. Qualitative and quantitative experiments show that the proposed method can achieve high quality as compared to existed methods.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Seam carving is an effective operator supporting content-aware resizing for both image reduction and expansion. However, repeated seam removing and inserting processes lead to excessively distortion image when imposed on seam insertion then removal operations or the other way around. With considering the relationship between seam removing and inserting processes, we present an ameliorated energy function to minimize aliasing. \"Forward Energy\" is an effective improvement only to image reduction. Moreover, we propose a novel \"Visual Points\" structure which distinguish the \"Forward Energy\" of seam insertion from that of seam removal, and improve seam insertion operations greatly. Qualitative and quantitative experiments show that the proposed method can achieve high quality as compared to existed methods.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Seam carving is an effective operator supporting content-aware resizing for both image reduction and expansion. However, repeated seam removing and inserting processes lead to excessively distortion image when imposed on seam insertion then removal operations or the other way around. With considering the relationship between seam removing and inserting processes, we present an ameliorated energy function to minimize aliasing. \"Forward Energy\" is an effective improvement only to image reduction. Moreover, we propose a novel \"Visual Points\" structure which distinguish the \"Forward Energy\" of seam insertion from that of seam removal, and improve seam insertion operations greatly. Qualitative and quantitative experiments show that the proposed method can achieve high quality as compared to existed methods.",
"fno": "4541a050",
"keywords": [
"Seam Carving",
"Image Retargeting",
"Visual Points"
],
"authors": [
{
"affiliation": null,
"fullName": "Gang Pan",
"givenName": "Gang",
"surname": "Pan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Weishu Li",
"givenName": "Weishu",
"surname": "Li",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Wei Bai",
"givenName": "Wei",
"surname": "Bai",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Jinyan Chen",
"givenName": "Jinyan",
"surname": "Chen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Luyuan Li",
"givenName": "Luyuan",
"surname": "Li",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icig",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2011-08-01T00:00:00",
"pubType": "proceedings",
"pages": "50-55",
"year": "2011",
"issn": null,
"isbn": "978-0-7695-4541-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "4541a045",
"articleId": "12OmNAle6mU",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "4541a056",
"articleId": "12OmNxWuiur",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/pdcat/2016/5081/0/07943389",
"title": "Accumulative Energy-Based Seam Carving for Image Resizing",
"doi": null,
"abstractUrl": "/proceedings-article/pdcat/2016/07943389/12OmNAXglK1",
"parentPublication": {
"id": "proceedings/pdcat/2016/5081/0",
"title": "2016 17th International Conference on Parallel and Distributed Computing, Applications and Technologies (PDCAT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cinc/2009/3645/1/3645a343",
"title": "Research and Design of Coal Seam Water Infusion Control System",
"doi": null,
"abstractUrl": "/proceedings-article/cinc/2009/3645a343/12OmNAZfxEs",
"parentPublication": {
"id": "proceedings/cinc/2009/3645/1",
"title": "Computational Intelligence and Natural Computing, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdar/2011/4520/0/4520a563",
"title": "Language-Independent Text Lines Extraction Using Seam Carving",
"doi": null,
"abstractUrl": "/proceedings-article/icdar/2011/4520a563/12OmNrJ11Hl",
"parentPublication": {
"id": "proceedings/icdar/2011/4520/0",
"title": "2011 International Conference on Document Analysis and Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/trustcom/2012/4745/0/4745a596",
"title": "Image Resizing Based on Geometry Preservation with Seam Carving",
"doi": null,
"abstractUrl": "/proceedings-article/trustcom/2012/4745a596/12OmNwdbV2X",
"parentPublication": {
"id": "proceedings/trustcom/2012/4745/0",
"title": "2012 IEEE 11th International Conference on Trust, Security and Privacy in Computing and Communications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ares/2010/3965/0/3965a702",
"title": "A Novel Image Hiding Scheme Using Content Aware Seam Carving Method",
"doi": null,
"abstractUrl": "/proceedings-article/ares/2010/3965a702/12OmNyKa5Y8",
"parentPublication": {
"id": "proceedings/ares/2010/3965/0",
"title": "2010 International Conference on Availability, Reliability and Security",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvgip/2008/3476/0/3476a505",
"title": "Fast Content Aware Image Retargeting",
"doi": null,
"abstractUrl": "/proceedings-article/icvgip/2008/3476a505/12OmNyqiaRZ",
"parentPublication": {
"id": "proceedings/icvgip/2008/3476/0",
"title": "Computer Vision, Graphics & Image Processing, Indian Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cisis/2012/4687/0/4687a693",
"title": "Study on Seam Carving for Image Fingerprint",
"doi": null,
"abstractUrl": "/proceedings-article/cisis/2012/4687a693/12OmNz61d3Q",
"parentPublication": {
"id": "proceedings/cisis/2012/4687/0",
"title": "2012 Sixth International Conference on Complex, Intelligent, and Software Intensive Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isda/2006/2528/2/252820616",
"title": "Robotic Seam Tracking by Utilizing Arc Light",
"doi": null,
"abstractUrl": "/proceedings-article/isda/2006/252820616/12OmNzBOigb",
"parentPublication": {
"id": "proceedings/isda/2006/2528/2",
"title": "Intelligent Systems Design and Applications, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iih-msp/2010/4222/0/4222a615",
"title": "Content-Aware Video Seam Carving Based on Bag of Visual Cubes",
"doi": null,
"abstractUrl": "/proceedings-article/iih-msp/2010/4222a615/12OmNzmLxKW",
"parentPublication": {
"id": "proceedings/iih-msp/2010/4222/0",
"title": "Intelligent Information Hiding and Multimedia Signal Processing, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdma/2013/5016/0/5016a675",
"title": "Improved Adaptive Seam Carving for Image Retargeting",
"doi": null,
"abstractUrl": "/proceedings-article/icdma/2013/5016a675/12OmNzmclGc",
"parentPublication": {
"id": "proceedings/icdma/2013/5016/0",
"title": "2013 Fourth International Conference on Digital Manufacturing & Automation (ICDMA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNy2agRS",
"title": "2013 International Conference on Computer-Aided Design and Computer Graphics (CAD/Graphics)",
"acronym": "cad-graphics",
"groupId": "1001488",
"volume": "0",
"displayVolume": "0",
"year": "2013",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNyUFfUQ",
"doi": "10.1109/CADGraphics.2013.90",
"title": "Saliency-Aware Volume Data Resizing by Surface Carving",
"normalizedTitle": "Saliency-Aware Volume Data Resizing by Surface Carving",
"abstract": "We present a saliency-aware volume resizing operation called surface carving, which intelligently removes contextual voxels while preserving important features. By iteratively applying surface carving in all directions, we can create a volume of the desired size. For large volume data sets, a multilevel banded method is introduced to gracefully overcome the memory limit and speed up volume resizing. We compare our technique with traditionally cropping and scaling approaches and demonstrate the effectiveness and efficiency of our method with several volume data sets.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We present a saliency-aware volume resizing operation called surface carving, which intelligently removes contextual voxels while preserving important features. By iteratively applying surface carving in all directions, we can create a volume of the desired size. For large volume data sets, a multilevel banded method is introduced to gracefully overcome the memory limit and speed up volume resizing. We compare our technique with traditionally cropping and scaling approaches and demonstrate the effectiveness and efficiency of our method with several volume data sets.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We present a saliency-aware volume resizing operation called surface carving, which intelligently removes contextual voxels while preserving important features. By iteratively applying surface carving in all directions, we can create a volume of the desired size. For large volume data sets, a multilevel banded method is introduced to gracefully overcome the memory limit and speed up volume resizing. We compare our technique with traditionally cropping and scaling approaches and demonstrate the effectiveness and efficiency of our method with several volume data sets.",
"fno": "06815054",
"keywords": [
"Transfer Functions",
"Manifolds",
"Educational Institutions",
"Context",
"Memory Management",
"Energy Measurement",
"Weight Measurement",
"Data Reduction",
"Volume Resizing",
"Saliency Aware",
"Surface Carving"
],
"authors": [
{
"affiliation": null,
"fullName": "Qichao Wang",
"givenName": "Qichao",
"surname": "Wang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Yubo Tao",
"givenName": "Yubo",
"surname": "Tao",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Hai Lin",
"givenName": "Hai",
"surname": "Lin",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cad-graphics",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2013-11-01T00:00:00",
"pubType": "proceedings",
"pages": "447-448",
"year": "2013",
"issn": null,
"isbn": "978-1-4799-2576-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "06815053",
"articleId": "12OmNAWpyx7",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "06815055",
"articleId": "12OmNyS6RAH",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/pdcat/2016/5081/0/07943389",
"title": "Accumulative Energy-Based Seam Carving for Image Resizing",
"doi": null,
"abstractUrl": "/proceedings-article/pdcat/2016/07943389/12OmNAXglK1",
"parentPublication": {
"id": "proceedings/pdcat/2016/5081/0",
"title": "2016 17th International Conference on Parallel and Distributed Computing, Applications and Technologies (PDCAT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sibgrapi/2016/3568/0/3568a432",
"title": "An Evaluation of the Volume on Surface (VoS) Approach",
"doi": null,
"abstractUrl": "/proceedings-article/sibgrapi/2016/3568a432/12OmNAfgwza",
"parentPublication": {
"id": "proceedings/sibgrapi/2016/3568/0",
"title": "2016 29th SIBGRAPI Conference on Graphics, Patterns and Images (SIBGRAPI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isspit/2009/5949/0/05407481",
"title": "Fast seam carving using partial update and divide and conquer method",
"doi": null,
"abstractUrl": "/proceedings-article/isspit/2009/05407481/12OmNCbU33t",
"parentPublication": {
"id": "proceedings/isspit/2009/5949/0",
"title": "2009 IEEE International Symposium on Signal Processing and Information Technology (ISSPIT 2009)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpads/2014/7615/0/07097861",
"title": "Optimizing Seam Carving on multi-GPU systems for real-time image resizing",
"doi": null,
"abstractUrl": "/proceedings-article/icpads/2014/07097861/12OmNsbGvDS",
"parentPublication": {
"id": "proceedings/icpads/2014/7615/0",
"title": "2014 20th IEEE International Conference on Parallel and Distributed Systems (ICPADS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/trustcom/2012/4745/0/4745a596",
"title": "Image Resizing Based on Geometry Preservation with Seam Carving",
"doi": null,
"abstractUrl": "/proceedings-article/trustcom/2012/4745a596/12OmNwdbV2X",
"parentPublication": {
"id": "proceedings/trustcom/2012/4745/0",
"title": "2012 IEEE 11th International Conference on Trust, Security and Privacy in Computing and Communications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2008/2570/0/04607613",
"title": "Content-aware image resizing using perceptual seam carving with human attention model",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2008/04607613/12OmNxGALgl",
"parentPublication": {
"id": "proceedings/icme/2008/2570/0",
"title": "2008 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/avss/2013/0703/0/06636654",
"title": "Real-time online camera synchronization for volume carving on GPU",
"doi": null,
"abstractUrl": "/proceedings-article/avss/2013/06636654/12OmNyRxFhL",
"parentPublication": {
"id": "proceedings/avss/2013/0703/0",
"title": "2013 10th IEEE International Conference on Advanced Video and Signal Based Surveillance (AVSS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cad-graphics/2013/2576/0/06814998",
"title": "Visual Saliency Guided Global and Local Resizing for 3D Models",
"doi": null,
"abstractUrl": "/proceedings-article/cad-graphics/2013/06814998/12OmNyo1o1L",
"parentPublication": {
"id": "proceedings/cad-graphics/2013/2576/0",
"title": "2013 International Conference on Computer-Aided Design and Computer Graphics (CAD/Graphics)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/01/ttg2014010111",
"title": "Summarization-Based Image Resizing by Intelligent Object Carving",
"doi": null,
"abstractUrl": "/journal/tg/2014/01/ttg2014010111/13rRUyYSWkY",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/12/08226853",
"title": "Multi-Material Volume Rendering with a Physically-Based Surface Reflection Model",
"doi": null,
"abstractUrl": "/journal/tg/2018/12/08226853/14H4WMQegms",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNwLOYSu",
"title": "2017 International Conference on Cyberworlds (CW)",
"acronym": "cw",
"groupId": "1000175",
"volume": "0",
"displayVolume": "0",
"year": "2017",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNAfPISE",
"doi": "10.1109/CW.2017.14",
"title": "User Friendly Calibration for Tracking of Optical Stereo See-Through Head Worn Displays for Augmented Reality",
"normalizedTitle": "User Friendly Calibration for Tracking of Optical Stereo See-Through Head Worn Displays for Augmented Reality",
"abstract": "In recent time devices like Google Glass and Oculus Rift gained a lot of public attention. So the field of Virtual and Augmented Reality has become a more and more attractive field of study. Optical Stereo See-Through Head Worn Displays (OST-HWD or OST-HMD) can be used for Augmented Reality, but have to be calibrated. This means, that one has to find a configuration, that aligns the image shown on the displays with the environment, which is observed by the built-in camera. If this is not done, the augmented virtual image would not align with the real world. In this paper, the process of this calibration approach is divided into two stages, hardware and user calibration, but with less constraints for the positions of the cameras, which makes it easier to use. We aim at a more user friendly suite for the calibration of OST-HWD devices. Therefore both of the aforementioned stages are combined in a new quick step-by-step installation wizard, which is written in HTML and JavaScript to ensure easy usability. We apply a new minimization model in order to simplify and robustify the calculations of the virtual plane. In addition to that the required hardware components, including camera and calibration rig, were simplified. The implemented software has been evaluated for its results of the computed virtual plane, intrinsic data and eye positions of the user. Finally a user study was conducted to rate the usability of the calibration process.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In recent time devices like Google Glass and Oculus Rift gained a lot of public attention. So the field of Virtual and Augmented Reality has become a more and more attractive field of study. Optical Stereo See-Through Head Worn Displays (OST-HWD or OST-HMD) can be used for Augmented Reality, but have to be calibrated. This means, that one has to find a configuration, that aligns the image shown on the displays with the environment, which is observed by the built-in camera. If this is not done, the augmented virtual image would not align with the real world. In this paper, the process of this calibration approach is divided into two stages, hardware and user calibration, but with less constraints for the positions of the cameras, which makes it easier to use. We aim at a more user friendly suite for the calibration of OST-HWD devices. Therefore both of the aforementioned stages are combined in a new quick step-by-step installation wizard, which is written in HTML and JavaScript to ensure easy usability. We apply a new minimization model in order to simplify and robustify the calculations of the virtual plane. In addition to that the required hardware components, including camera and calibration rig, were simplified. The implemented software has been evaluated for its results of the computed virtual plane, intrinsic data and eye positions of the user. Finally a user study was conducted to rate the usability of the calibration process.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In recent time devices like Google Glass and Oculus Rift gained a lot of public attention. So the field of Virtual and Augmented Reality has become a more and more attractive field of study. Optical Stereo See-Through Head Worn Displays (OST-HWD or OST-HMD) can be used for Augmented Reality, but have to be calibrated. This means, that one has to find a configuration, that aligns the image shown on the displays with the environment, which is observed by the built-in camera. If this is not done, the augmented virtual image would not align with the real world. In this paper, the process of this calibration approach is divided into two stages, hardware and user calibration, but with less constraints for the positions of the cameras, which makes it easier to use. We aim at a more user friendly suite for the calibration of OST-HWD devices. Therefore both of the aforementioned stages are combined in a new quick step-by-step installation wizard, which is written in HTML and JavaScript to ensure easy usability. We apply a new minimization model in order to simplify and robustify the calculations of the virtual plane. In addition to that the required hardware components, including camera and calibration rig, were simplified. The implemented software has been evaluated for its results of the computed virtual plane, intrinsic data and eye positions of the user. Finally a user study was conducted to rate the usability of the calibration process.",
"fno": "2089a033",
"keywords": [
"Augmented Reality",
"Calibration",
"Cameras",
"Helmet Mounted Displays",
"Augmented Reality",
"Google Glass",
"Head Worn Displays",
"OST HMD",
"Augmented Virtual Image",
"Calibration Approach",
"OST HWD Devices",
"Calibration Rig",
"Calibration Process",
"User Friendly Calibration",
"Optical Stereo",
"Calibration",
"Augmented Reality",
"Cameras",
"Head",
"Optical Imaging",
"Hardware",
"Iris",
"Augmented Reality",
"Calibration",
"Optical Stereo See Through Head Worn Displays"
],
"authors": [
{
"affiliation": null,
"fullName": "Felix Bernard",
"givenName": "Felix",
"surname": "Bernard",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Timo Engelke",
"givenName": "Timo",
"surname": "Engelke",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Arjan Kuijper",
"givenName": "Arjan",
"surname": "Kuijper",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2017-09-01T00:00:00",
"pubType": "proceedings",
"pages": "33-40",
"year": "2017",
"issn": null,
"isbn": "978-1-5386-2089-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "2089a025",
"articleId": "12OmNA0MYZo",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "2089a041",
"articleId": "12OmNB7tUpR",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2016/0836/0/07504693",
"title": "A calibration method for optical see-through head-mounted displays with a depth camera",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2016/07504693/12OmNAnMuMd",
"parentPublication": {
"id": "proceedings/vr/2016/0836/0",
"title": "2016 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2014/6184/0/06948513",
"title": "Google glass, The META and Co. How to calibrate optical see-through head mounted displays",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2014/06948513/12OmNB8TUim",
"parentPublication": {
"id": "proceedings/ismar/2014/6184/0",
"title": "2014 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2017/6327/0/6327a202",
"title": "[POSTER] BrightView: Increasing Perceived Brightness in Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2017/6327a202/12OmNqI04YU",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2017/6327/0",
"title": "2017 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2014/2871/0/06802089",
"title": "Quantification of error from system and environmental sources in Optical See-Through head mounted display calibration methods",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2014/06802089/12OmNxwncbX",
"parentPublication": {
"id": "proceedings/vr/2014/2871/0",
"title": "2014 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2015/1727/0/07223385",
"title": "Continuous automatic calibration for optical see-through displays",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2015/07223385/12OmNynJMQZ",
"parentPublication": {
"id": "proceedings/vr/2015/1727/0",
"title": "2015 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446429",
"title": "Impact of Alignment Point Distance Distribution on SPAAM Calibration of Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446429/13bd1gCd7Sz",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/09/08052554",
"title": "A Survey of Calibration Methods for Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/journal/tg/2018/09/08052554/13rRUILtJqY",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2015/04/07021939",
"title": "Subjective Evaluation of a Semi-Automatic Optical See-Through Head-Mounted Display Calibration Technique",
"doi": null,
"abstractUrl": "/journal/tg/2015/04/07021939/13rRUwInvyB",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2015/04/07012105",
"title": "Corneal-Imaging Calibration for Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/journal/tg/2015/04/07012105/13rRUxE04tC",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a389",
"title": "Objective Measurements of Background Color Shifts Caused by Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a389/1J7WuL68jAY",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNAYXWAF",
"title": "2016 IEEE Virtual Reality (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2016",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNBBhN9g",
"doi": "10.1109/VR.2016.7504749",
"title": "SharpView: Improved clarity of defocussed content on optical see-through head-mounted displays",
"normalizedTitle": "SharpView: Improved clarity of defocussed content on optical see-through head-mounted displays",
"abstract": "A common factor among current generation optical see-through augmented reality systems is fixed focal distance to virtual content. In this work, we investigate the issue of focus blur, in particular, the blurring caused by simultaneously viewing virtual content and physical objects in the environment at differing focal distances. We examine the application of dynamic sharpening filters as a straight forward, system independent, means for mitigating this effect improving the clarity of defocused AR content. We assess the utility of this method, termed SharpView, by employing an adjustment experiment in which users actively apply varying amounts of sharpening to reduce the perception of blur in AR content. Our experimental results validate the ability of our SharpView model to improve the visual clarity of focus blurred content, with optimal performance at focal differences well suited for near field AR applications.",
"abstracts": [
{
"abstractType": "Regular",
"content": "A common factor among current generation optical see-through augmented reality systems is fixed focal distance to virtual content. In this work, we investigate the issue of focus blur, in particular, the blurring caused by simultaneously viewing virtual content and physical objects in the environment at differing focal distances. We examine the application of dynamic sharpening filters as a straight forward, system independent, means for mitigating this effect improving the clarity of defocused AR content. We assess the utility of this method, termed SharpView, by employing an adjustment experiment in which users actively apply varying amounts of sharpening to reduce the perception of blur in AR content. Our experimental results validate the ability of our SharpView model to improve the visual clarity of focus blurred content, with optimal performance at focal differences well suited for near field AR applications.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "A common factor among current generation optical see-through augmented reality systems is fixed focal distance to virtual content. In this work, we investigate the issue of focus blur, in particular, the blurring caused by simultaneously viewing virtual content and physical objects in the environment at differing focal distances. We examine the application of dynamic sharpening filters as a straight forward, system independent, means for mitigating this effect improving the clarity of defocused AR content. We assess the utility of this method, termed SharpView, by employing an adjustment experiment in which users actively apply varying amounts of sharpening to reduce the perception of blur in AR content. Our experimental results validate the ability of our SharpView model to improve the visual clarity of focus blurred content, with optimal performance at focal differences well suited for near field AR applications.",
"fno": "07504749",
"keywords": [
"Optical Imaging",
"Adaptive Optics",
"Lenses",
"Augmented Reality",
"Optical Distortion",
"Retina",
"Deconvolution",
"I 4 4 Image Processing And Computer Vision Restoration Wiener Filtering",
"H 5 1 Information Interfaces And Presentation Multimedia Information Systems Artificial Augmented And Virtual Realities"
],
"authors": [
{
"affiliation": "Interactive Media Design Laboratory, Nara Institute of Science and Technology",
"fullName": "Kohei Oshima",
"givenName": "Kohei",
"surname": "Oshima",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Computer Science & Engineering, Mississippi State University",
"fullName": "Kenneth R Moser",
"givenName": "Kenneth R",
"surname": "Moser",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Interactive Media Design Laboratory, Nara Institute of Science and Technology",
"fullName": "Damien Constantine Rompapas",
"givenName": "Damien Constantine",
"surname": "Rompapas",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Computer Science & Engineering, Mississippi State University",
"fullName": "J Edward Swan",
"givenName": "J Edward",
"surname": "Swan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Mobile Computing Laboratory, Ritsumeikan University",
"fullName": "Sei Ikeda",
"givenName": "Sei",
"surname": "Ikeda",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Interactive Media Design Laboratory, Nara Institute of Science and Technology",
"fullName": "Goshiro Yamamoto",
"givenName": "Goshiro",
"surname": "Yamamoto",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Interactive Media Design Laboratory, Nara Institute of Science and Technology",
"fullName": "Takafumi Taketomi",
"givenName": "Takafumi",
"surname": "Taketomi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Interactive Media Design Laboratory, Nara Institute of Science and Technology",
"fullName": "Christian Sandor",
"givenName": "Christian",
"surname": "Sandor",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Interactive Media Design Laboratory, Nara Institute of Science and Technology",
"fullName": "Hirokazu Kato",
"givenName": "Hirokazu",
"surname": "Kato",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2016-03-01T00:00:00",
"pubType": "proceedings",
"pages": "253-254",
"year": "2016",
"issn": "2375-5334",
"isbn": "978-1-5090-0836-0",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07504748",
"articleId": "12OmNzXWZF5",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07504750",
"articleId": "12OmNzYeAQj",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icme/2016/7258/0/07552965",
"title": "Content-adaptive focus configuration for near-eye multi-focal displays",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2016/07552965/12OmNAlNixT",
"parentPublication": {
"id": "proceedings/icme/2016/7258/0",
"title": "2016 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2016/0842/0/07460049",
"title": "SharpView: Improved clarity of defocused content on optical see-through head-mounted displays",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2016/07460049/12OmNBWzHQi",
"parentPublication": {
"id": "proceedings/3dui/2016/0842/0",
"title": "2016 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2016/0836/0/07504755",
"title": "Spatial consistency perception in optical and video see-through head-mounted augmentations",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2016/07504755/12OmNqNXEli",
"parentPublication": {
"id": "proceedings/vr/2016/0836/0",
"title": "2016 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iwar/1999/0359/0/03590075",
"title": "A Method for Calibrating See-Through Head-Mounted Displays for AR",
"doi": null,
"abstractUrl": "/proceedings-article/iwar/1999/03590075/12OmNxTVU20",
"parentPublication": {
"id": "proceedings/iwar/1999/0359/0",
"title": "Augmented Reality, International Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446441",
"title": "BrightView: Increasing Perceived Brightness of Optical See-Through Head-Mounted Displays Through Unnoticeable Incident Light Reduction",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446441/13bd1sv5NxY",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2015/04/07064856",
"title": "Light-Field Correction for Spatial Calibration of Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/journal/tg/2015/04/07064856/13rRUwjGoG5",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/07/07226865",
"title": "Resolving the Vergence-Accommodation Conflict in Head-Mounted Displays",
"doi": null,
"abstractUrl": "/journal/tg/2016/07/07226865/13rRUxASuhD",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/05/08676155",
"title": "Varifocal Occlusion for Optical See-Through Head-Mounted Displays using a Slide Occlusion Mask",
"doi": null,
"abstractUrl": "/journal/tg/2019/05/08676155/18LFfGhc49i",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/05/10050791",
"title": "Add-on Occlusion: Turning Off-the-Shelf Optical See-through Head-mounted Displays Occlusion-capable",
"doi": null,
"abstractUrl": "/journal/tg/2023/05/10050791/1L039oS5wDm",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2020/8508/0/850800a301",
"title": "Super Wide-view Optical See-through Head Mounted Displays with Per-pixel Occlusion Capability",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2020/850800a301/1pysxIK95Yc",
"parentPublication": {
"id": "proceedings/ismar/2020/8508/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNyqRnea",
"title": "Third IEEE and ACM International Symposium on Mixed and Augmented Reality",
"acronym": "ismar",
"groupId": "1000465",
"volume": "0",
"displayVolume": "0",
"year": "2004",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNqFrGwu",
"doi": "10.1109/ISMAR.2004.2",
"title": "A Compact Optical See-Through Head-Worn Display with Occlusion Support",
"normalizedTitle": "A Compact Optical See-Through Head-Worn Display with Occlusion Support",
"abstract": "We are proposing a novel optical see-through head- worn display that is capable of mutual occlusions. Mutual occlusion is an attribute of an augmented reality display where real objects can occlude virtual objects and virtual objects can occlude real objects. For a user to achieve the perception of indifference between the real and the virtual images superimposed on the real environment, mutual occlusion is a strongly desired attribute for certain applications. This paper presents a breakthrough in display hardware from a mobility (i.e. compactness), resolution, and a switching speed based criteria. Specifically, we focus on the research that is related to virtual objects being able to occlude real objects. The core of the system is a spatial light modulator (SLM) and polarization-based optics which allow us to block or pass certain parts of a scene which is viewed through the head-worn display. An objective lens images the scene onto the SLM and the modulated image is mapped back to the original scene via an eyepiece. We are combining computer generated imagery with the modulated version of the scene to form the final image a user would see.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We are proposing a novel optical see-through head- worn display that is capable of mutual occlusions. Mutual occlusion is an attribute of an augmented reality display where real objects can occlude virtual objects and virtual objects can occlude real objects. For a user to achieve the perception of indifference between the real and the virtual images superimposed on the real environment, mutual occlusion is a strongly desired attribute for certain applications. This paper presents a breakthrough in display hardware from a mobility (i.e. compactness), resolution, and a switching speed based criteria. Specifically, we focus on the research that is related to virtual objects being able to occlude real objects. The core of the system is a spatial light modulator (SLM) and polarization-based optics which allow us to block or pass certain parts of a scene which is viewed through the head-worn display. An objective lens images the scene onto the SLM and the modulated image is mapped back to the original scene via an eyepiece. We are combining computer generated imagery with the modulated version of the scene to form the final image a user would see.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We are proposing a novel optical see-through head- worn display that is capable of mutual occlusions. Mutual occlusion is an attribute of an augmented reality display where real objects can occlude virtual objects and virtual objects can occlude real objects. For a user to achieve the perception of indifference between the real and the virtual images superimposed on the real environment, mutual occlusion is a strongly desired attribute for certain applications. This paper presents a breakthrough in display hardware from a mobility (i.e. compactness), resolution, and a switching speed based criteria. Specifically, we focus on the research that is related to virtual objects being able to occlude real objects. The core of the system is a spatial light modulator (SLM) and polarization-based optics which allow us to block or pass certain parts of a scene which is viewed through the head-worn display. An objective lens images the scene onto the SLM and the modulated image is mapped back to the original scene via an eyepiece. We are combining computer generated imagery with the modulated version of the scene to form the final image a user would see.",
"fno": "21910016",
"keywords": [
"Display Hardware",
"Occlusion",
"Augmented Reality",
"Optical System Design",
"Head Mounted Display",
"Spatial Light Modulator"
],
"authors": [
{
"affiliation": "University of Central Florida",
"fullName": "Ozan Cakmakci",
"givenName": "Ozan",
"surname": "Cakmakci",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Central Florida",
"fullName": "Yonggang Ha",
"givenName": "Yonggang",
"surname": "Ha",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Central Florida",
"fullName": "Jannick P. Rolland",
"givenName": "Jannick P.",
"surname": "Rolland",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2004-11-01T00:00:00",
"pubType": "proceedings",
"pages": "16-25",
"year": "2004",
"issn": null,
"isbn": "0-7695-2191-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "21910006",
"articleId": "12OmNC4eSz8",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "21910026",
"articleId": "12OmNrIaebq",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2005/8929/0/01492775",
"title": "Realistic occlusion effects in mirror-based co-located augmented reality systems",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2005/01492775/12OmNAYoKpz",
"parentPublication": {
"id": "proceedings/vr/2005/8929/0",
"title": "IEEE Virtual Reality 2005",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2012/4660/0/06402574",
"title": "Occlusion capable optical see-through head-mounted display using freeform optics",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2012/06402574/12OmNBEpnEt",
"parentPublication": {
"id": "proceedings/ismar/2012/4660/0",
"title": "2012 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmcs/1999/0253/1/02539213",
"title": "Real-Time Composition of Stereo Images for Video See-Through Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/icmcs/1999/02539213/12OmNC3Xhk4",
"parentPublication": {
"id": "proceedings/icmcs/1999/0253/1",
"title": "Multimedia Computing and Systems, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/music/2012/1956/0/4727a001",
"title": "A Mobile Head-Mounted Display for Action Sports",
"doi": null,
"abstractUrl": "/proceedings-article/music/2012/4727a001/12OmNrMZpuh",
"parentPublication": {
"id": "proceedings/music/2012/1956/0",
"title": "2012 Third FTRA International Conference on Mobile, Ubiquitous, and Intelligent Computing (MUSIC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2000/0478/0/04780233",
"title": "Visuo-Haptic Display Using Head-Mounted Projector",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2000/04780233/12OmNwHz00K",
"parentPublication": {
"id": "proceedings/vr/2000/0478/0",
"title": "Virtual Reality Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2002/1492/0/14920081",
"title": "A Testbed for Precise Registration, Natural Occlusion and Interaction in an Augmented Environment Using a Head-Mounted Projective Display (HMPD)",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2002/14920081/12OmNylboC4",
"parentPublication": {
"id": "proceedings/vr/2002/1492/0",
"title": "Proceedings IEEE Virtual Reality 2002",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iswc/1997/8192/0/81920048",
"title": "Eyeglass-Based Systems For Wearable Computing",
"doi": null,
"abstractUrl": "/proceedings-article/iswc/1997/81920048/12OmNzwZ6id",
"parentPublication": {
"id": "proceedings/iswc/1997/8192/0",
"title": "Digest of Papers. First International Symposium on Wearable Computers",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/11/08007218",
"title": "Occlusion Leak Compensation for Optical See-Through Displays Using a Single-Layer Transmissive Spatial Light Modulator",
"doi": null,
"abstractUrl": "/journal/tg/2017/11/08007218/13rRUxcbnHi",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/05/08676155",
"title": "Varifocal Occlusion for Optical See-Through Head-Mounted Displays using a Slide Occlusion Mask",
"doi": null,
"abstractUrl": "/journal/tg/2019/05/08676155/18LFfGhc49i",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2020/5608/0/09089433",
"title": "Glanceable AR: Evaluating Information Access Methods for Head-Worn Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2020/09089433/1jIxf3ZEs0w",
"parentPublication": {
"id": "proceedings/vr/2020/5608/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNvRU0cM",
"title": "2017 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"acronym": "ismar-adjunct",
"groupId": "1810084",
"volume": "0",
"displayVolume": "0",
"year": "2017",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNqI04YU",
"doi": "10.1109/ISMAR-Adjunct.2017.66",
"title": "[POSTER] BrightView: Increasing Perceived Brightness in Optical See-Through Head-Mounted Displays",
"normalizedTitle": "[POSTER] BrightView: Increasing Perceived Brightness in Optical See-Through Head-Mounted Displays",
"abstract": "Virtual content on optical see-through head-mounted displays (OST-HMDs) appears dim in bright environments. In this paper, we demonstrate how a liquid crystal (LC) filter can be used to dynamically increase the perceived brightness of the virtual content. Continuously adjusting the LC filter opacity attenuates the real scene and increases the perceived brightness without being noticed by the user. The results of our psychophysical experiment with 16 participants validate our prototype OST-HMD. Our design could be combined with existing and future OST-HMDs to improve the visibility of the virtual content in augmented reality.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Virtual content on optical see-through head-mounted displays (OST-HMDs) appears dim in bright environments. In this paper, we demonstrate how a liquid crystal (LC) filter can be used to dynamically increase the perceived brightness of the virtual content. Continuously adjusting the LC filter opacity attenuates the real scene and increases the perceived brightness without being noticed by the user. The results of our psychophysical experiment with 16 participants validate our prototype OST-HMD. Our design could be combined with existing and future OST-HMDs to improve the visibility of the virtual content in augmented reality.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Virtual content on optical see-through head-mounted displays (OST-HMDs) appears dim in bright environments. In this paper, we demonstrate how a liquid crystal (LC) filter can be used to dynamically increase the perceived brightness of the virtual content. Continuously adjusting the LC filter opacity attenuates the real scene and increases the perceived brightness without being noticed by the user. The results of our psychophysical experiment with 16 participants validate our prototype OST-HMD. Our design could be combined with existing and future OST-HMDs to improve the visibility of the virtual content in augmented reality.",
"fno": "6327a202",
"keywords": [
"Brightness",
"Lighting",
"Optical Attenuators",
"Optical Variables Control",
"Prototypes",
"Augmented Reality",
"Electronic Mail",
"OST HMD",
"Optical See Through Displays",
"Illumination Shedding",
"Liquid Crystal Visor",
"Brightness Adaptation"
],
"authors": [
{
"affiliation": null,
"fullName": "Shohei Mori",
"givenName": "Shohei",
"surname": "Mori",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Sei Ikeda",
"givenName": "Sei",
"surname": "Ikeda",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Christian Sandor",
"givenName": "Christian",
"surname": "Sandor",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Alexander Plopski",
"givenName": "Alexander",
"surname": "Plopski",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar-adjunct",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2017-10-01T00:00:00",
"pubType": "proceedings",
"pages": "202-203",
"year": "2017",
"issn": null,
"isbn": "978-0-7695-6327-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "6327a200",
"articleId": "12OmNylKAVM",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "6327a204",
"articleId": "12OmNzUxOf1",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2018/3365/0/08446441",
"title": "BrightView: Increasing Perceived Brightness of Optical See-Through Head-Mounted Displays Through Unnoticeable Incident Light Reduction",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446441/13bd1sv5NxY",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2015/11/07165643",
"title": "Semi-Parametric Color Reproduction Method for Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/journal/tg/2015/11/07165643/13rRUILtJzB",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2015/04/07064856",
"title": "Light-Field Correction for Spatial Calibration of Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/journal/tg/2015/04/07064856/13rRUwjGoG5",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/11/07523375",
"title": "Gaussian Light Field: Estimation of Viewpoint-Dependent Blur for Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/journal/tg/2016/11/07523375/13rRUxYINfi",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/11/08456571",
"title": "Restoring the Awareness in the Occluded Visual Field for Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/journal/tg/2018/11/08456571/14M3DYLGFgs",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/05/08676155",
"title": "Varifocal Occlusion for Optical See-Through Head-Mounted Displays using a Slide Occlusion Mask",
"doi": null,
"abstractUrl": "/journal/tg/2019/05/08676155/18LFfGhc49i",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a409",
"title": "Adapting Michelson Contrast for use with Optical See-Through Displays",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a409/1J7WpecpAwU",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a389",
"title": "Objective Measurements of Background Color Shifts Caused by Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a389/1J7WuL68jAY",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/12/09429918",
"title": "The Impact of Focus and Context Visualization Techniques on Depth Perception in Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/journal/tg/2022/12/09429918/1txPs5wi56E",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/12/09463728",
"title": "Color Contrast Enhanced Rendering for Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/journal/tg/2022/12/09463728/1uFxo1ImlpK",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNvRU0cM",
"title": "2017 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"acronym": "ismar-adjunct",
"groupId": "1810084",
"volume": "0",
"displayVolume": "0",
"year": "2017",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNvA1hoC",
"doi": "10.1109/ISMAR-Adjunct.2017.47",
"title": "[POSTER] Two-Step Gamut Mapping for Optical See-Through Displays",
"normalizedTitle": "[POSTER] Two-Step Gamut Mapping for Optical See-Through Displays",
"abstract": "In Optical See-Through (OST) displays, the chroma component in color space is often distorted. OST displays appear to have a uniquely different gamut feature compared with conventional color devices. In this paper, we proposed a novel two-step gamut mapping method for OST displays. The conventional CARISMA (Color Appearance Research for Interactive System Management and Application) algorithm is extended straightforwardly, and its process is divided into two separate steps (lightness mapping and chroma compression) by reflecting the characteristics of the newtype OST. We confirmed experimentally that the proposed gamut mapping method can reduce color distortion better than the existing CARISMA.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In Optical See-Through (OST) displays, the chroma component in color space is often distorted. OST displays appear to have a uniquely different gamut feature compared with conventional color devices. In this paper, we proposed a novel two-step gamut mapping method for OST displays. The conventional CARISMA (Color Appearance Research for Interactive System Management and Application) algorithm is extended straightforwardly, and its process is divided into two separate steps (lightness mapping and chroma compression) by reflecting the characteristics of the newtype OST. We confirmed experimentally that the proposed gamut mapping method can reduce color distortion better than the existing CARISMA.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In Optical See-Through (OST) displays, the chroma component in color space is often distorted. OST displays appear to have a uniquely different gamut feature compared with conventional color devices. In this paper, we proposed a novel two-step gamut mapping method for OST displays. The conventional CARISMA (Color Appearance Research for Interactive System Management and Application) algorithm is extended straightforwardly, and its process is divided into two separate steps (lightness mapping and chroma compression) by reflecting the characteristics of the new-type OST. We confirmed experimentally that the proposed gamut mapping method can reduce color distortion better than the existing CARISMA.",
"fno": "6327a130",
"keywords": [
"Augmented Reality",
"Gamut Mapping",
"OST Display",
"Color Distortion",
"Chroma Reproduction",
"Lightness Mapping"
],
"authors": [
{
"affiliation": null,
"fullName": "Kang-Kyu Lee",
"givenName": "Kang-Kyu",
"surname": "Lee",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Jae-Woo Kim",
"givenName": "Jae-Woo",
"surname": "Kim",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Je-Ho Ryu",
"givenName": "Je-Ho",
"surname": "Ryu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Jong-Ok Kim",
"givenName": "Jong-Ok",
"surname": "Kim",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar-adjunct",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2017-10-01T00:00:00",
"pubType": "proceedings",
"pages": "130-131",
"year": "2017",
"issn": null,
"isbn": "978-0-7695-6327-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "6327a124",
"articleId": "12OmNqC2uYn",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "6327a132",
"articleId": "12OmNAjO6Ey",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ismar/2014/6184/0/06948513",
"title": "Google glass, The META and Co. How to calibrate optical see-through head mounted displays",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2014/06948513/12OmNB8TUim",
"parentPublication": {
"id": "proceedings/ismar/2014/6184/0",
"title": "2014 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2017/6327/0/6327a202",
"title": "[POSTER] BrightView: Increasing Perceived Brightness in Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2017/6327a202/12OmNqI04YU",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2017/6327/0",
"title": "2017 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismarw/2016/3740/0/07836482",
"title": "The RealityMashers: Augmented Reality Wide Field-of-View Optical See-Through Head Mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/ismarw/2016/07836482/12OmNqzcvPm",
"parentPublication": {
"id": "proceedings/ismarw/2016/3740/0",
"title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismarw/2016/3740/0/07836485",
"title": "Chromaticity Based Local Linear Regression for Color Distortion Estimation of Optical See-Through Displays",
"doi": null,
"abstractUrl": "/proceedings-article/ismarw/2016/07836485/12OmNxvO02f",
"parentPublication": {
"id": "proceedings/ismarw/2016/3740/0",
"title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2015/1727/0/07223385",
"title": "Continuous automatic calibration for optical see-through displays",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2015/07223385/12OmNynJMQZ",
"parentPublication": {
"id": "proceedings/vr/2015/1727/0",
"title": "2015 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2010/7029/0/05543467",
"title": "Display gamut reshaping for color emulation and balancing",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2010/05543467/12OmNyr8Ynx",
"parentPublication": {
"id": "proceedings/cvprw/2010/7029/0",
"title": "2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition - Workshops",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2015/11/07165643",
"title": "Semi-Parametric Color Reproduction Method for Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/journal/tg/2015/11/07165643/13rRUILtJzB",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2009/06/ttg2009061317",
"title": "Color Seamlessness in Multi-Projector Displays Using Constrained Gamut Morphing",
"doi": null,
"abstractUrl": "/journal/tg/2009/06/ttg2009061317/13rRUwgQpqH",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a389",
"title": "Objective Measurements of Background Color Shifts Caused by Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a389/1J7WuL68jAY",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/12/09463728",
"title": "Color Contrast Enhanced Rendering for Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/journal/tg/2022/12/09463728/1uFxo1ImlpK",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNqJ8taA",
"title": "Augmented Reality, International Workshop on",
"acronym": "iwar",
"groupId": "1000063",
"volume": "0",
"displayVolume": "0",
"year": "1999",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNxTVU20",
"doi": "10.1109/IWAR.1999.803808",
"title": "A Method for Calibrating See-Through Head-Mounted Displays for AR",
"normalizedTitle": "A Method for Calibrating See-Through Head-Mounted Displays for AR",
"abstract": "In order to have a working AR system, the see-through system must be calibrated such that the internal models of objects match their physical counterparts. By match, we mean they should have the same position, orientation, and size information as well as any intrinsic parameters (such as focal lengths in the case of cameras) that their physical counterparts have. To this end, a procedure must be developed which estimates the parameters of these internal models. This calibration method must be both accurate and simple to use. This paper reports on our efforts to implement a calibration method for a see-through head-mounted display. We use a dynamic system in which a user interactively modifies the camera parameters until the image of a calibration object matches the image of a corresponding physical object. The calibration method is dynamic in the sense that we do not require the user's head to be immobilized.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In order to have a working AR system, the see-through system must be calibrated such that the internal models of objects match their physical counterparts. By match, we mean they should have the same position, orientation, and size information as well as any intrinsic parameters (such as focal lengths in the case of cameras) that their physical counterparts have. To this end, a procedure must be developed which estimates the parameters of these internal models. This calibration method must be both accurate and simple to use. This paper reports on our efforts to implement a calibration method for a see-through head-mounted display. We use a dynamic system in which a user interactively modifies the camera parameters until the image of a calibration object matches the image of a corresponding physical object. The calibration method is dynamic in the sense that we do not require the user's head to be immobilized.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In order to have a working AR system, the see-through system must be calibrated such that the internal models of objects match their physical counterparts. By match, we mean they should have the same position, orientation, and size information as well as any intrinsic parameters (such as focal lengths in the case of cameras) that their physical counterparts have. To this end, a procedure must be developed which estimates the parameters of these internal models. This calibration method must be both accurate and simple to use. This paper reports on our efforts to implement a calibration method for a see-through head-mounted display. We use a dynamic system in which a user interactively modifies the camera parameters until the image of a calibration object matches the image of a corresponding physical object. The calibration method is dynamic in the sense that we do not require the user's head to be immobilized.",
"fno": "03590075",
"keywords": [
"Augmented Reality",
"Optical See Through",
"Camera Calibration"
],
"authors": [
{
"affiliation": "Indiana University Purdue University Indianapolis (IUPUI)",
"fullName": "Erin McGarrity",
"givenName": "Erin",
"surname": "McGarrity",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Indiana University Purdue University Indianapolis (IUPUI)",
"fullName": "Mihran Tuceryan",
"givenName": "Mihran",
"surname": "Tuceryan",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iwar",
"isOpenAccess": false,
"showRecommendedArticles": false,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "1999-10-01T00:00:00",
"pubType": "proceedings",
"pages": "75",
"year": "1999",
"issn": null,
"isbn": "0-7695-0359-4",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "03590064",
"articleId": "12OmNzhnadt",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "03590085",
"articleId": "12OmNBcAGLe",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [],
"articleVideos": []
}
|
{
"proceeding": {
"id": "13bd1eJgoia",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2018",
"__typename": "ProceedingType"
},
"article": {
"id": "13bd1ftOBCG",
"doi": "10.1109/VR.2018.8446058",
"title": "User Preference for SharpView-Enhanced Virtual Text During Non-Fixated Viewing",
"normalizedTitle": "User Preference for SharpView-Enhanced Virtual Text During Non-Fixated Viewing",
"abstract": "For optical see-through head-mounted displays, the mismatch between a display's focal length and the real world scene inadvertently prevents users from simultaneously focusing on the presented virtual content and the scene. It has been shown that it is possible to ameliorate the out-of-focus blur for images with a known focus distance, by applying an algorithm called SharpView. However, it remains unclear if SharpView also improves the readability and clarity of text rendered on the display. In this study, we investigate whether users reported increased text clarity when SharpView was applied to a text label, and how the focal demand of the display, the focal distance to real world content, and gaze condition affect the result. Our results indicate that, in non-fixated viewing, there is a significant user preference for SharpView-enhanced text strings.",
"abstracts": [
{
"abstractType": "Regular",
"content": "For optical see-through head-mounted displays, the mismatch between a display's focal length and the real world scene inadvertently prevents users from simultaneously focusing on the presented virtual content and the scene. It has been shown that it is possible to ameliorate the out-of-focus blur for images with a known focus distance, by applying an algorithm called SharpView. However, it remains unclear if SharpView also improves the readability and clarity of text rendered on the display. In this study, we investigate whether users reported increased text clarity when SharpView was applied to a text label, and how the focal demand of the display, the focal distance to real world content, and gaze condition affect the result. Our results indicate that, in non-fixated viewing, there is a significant user preference for SharpView-enhanced text strings.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "For optical see-through head-mounted displays, the mismatch between a display's focal length and the real world scene inadvertently prevents users from simultaneously focusing on the presented virtual content and the scene. It has been shown that it is possible to ameliorate the out-of-focus blur for images with a known focus distance, by applying an algorithm called SharpView. However, it remains unclear if SharpView also improves the readability and clarity of text rendered on the display. In this study, we investigate whether users reported increased text clarity when SharpView was applied to a text label, and how the focal demand of the display, the focal distance to real world content, and gaze condition affect the result. Our results indicate that, in non-fixated viewing, there is a significant user preference for SharpView-enhanced text strings.",
"fno": "08446058",
"keywords": [
"Helmet Mounted Displays",
"Image Texture",
"Rendering Computer Graphics",
"Text Analysis",
"Virtual Reality",
"Nonfixated Viewing",
"Focal Length",
"User Preference",
"Virtual Content",
"Focus Distance",
"Sharpview Enhanced Virtual Text",
"Optical See Through Head Mounted Displays",
"Sharp View Enhanced Text Strings",
"Text Clarity",
"Observers",
"Lenses",
"Switches",
"Optical Imaging",
"Electronic Mail",
"Visualization",
"Augmented Reality"
],
"authors": [
{
"affiliation": "Mississippi State University",
"fullName": "J. Edward Swan",
"givenName": "J.",
"surname": "Edward Swan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Green Mountain Technology",
"fullName": "Trey Cook",
"givenName": "Trey",
"surname": "Cook",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Mississippi State University",
"fullName": "Nate Phillips",
"givenName": "Nate",
"surname": "Phillips",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Green Mountain Technology",
"fullName": "Kristen Massey",
"givenName": "Kristen",
"surname": "Massey",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Green Mountain Technology",
"fullName": "Alexander Plopski",
"givenName": "Alexander",
"surname": "Plopski",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Green Mountain Technology",
"fullName": "Christian Sandor",
"givenName": "Christian",
"surname": "Sandor",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2018-03-01T00:00:00",
"pubType": "proceedings",
"pages": "1-400",
"year": "2018",
"issn": null,
"isbn": "978-1-5386-3365-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08447561",
"articleId": "13bd1fKQxs5",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08446391",
"articleId": "13bd1eSlyst",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2016/0836/0/07504749",
"title": "SharpView: Improved clarity of defocussed content on optical see-through head-mounted displays",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2016/07504749/12OmNBBhN9g",
"parentPublication": {
"id": "proceedings/vr/2016/0836/0",
"title": "2016 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2016/8851/0/8851d290",
"title": "Single Image Camera Calibration with Lenticular Arrays for Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2016/8851d290/12OmNx0A7Bo",
"parentPublication": {
"id": "proceedings/cvpr/2016/8851/0",
"title": "2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/das/2016/1792/0/1792a329",
"title": "Preserving Text Content from Historical Handwritten Documents",
"doi": null,
"abstractUrl": "/proceedings-article/das/2016/1792a329/12OmNxG1yVu",
"parentPublication": {
"id": "proceedings/das/2016/1792/0",
"title": "2016 12th IAPR Workshop on Document Analysis Systems (DAS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2013/4795/0/06549411",
"title": "Early steps towards understanding text legibility in handheld augmented reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2013/06549411/12OmNy6HQV1",
"parentPublication": {
"id": "proceedings/vr/2013/4795/0",
"title": "2013 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/02/08462792",
"title": "The Effect of Focal Distance, Age, and Brightness on Near-Field Augmented Reality Depth Matching",
"doi": null,
"abstractUrl": "/journal/tg/2020/02/08462792/13w3loWnQPK",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2018/7592/0/08699306",
"title": "HiKeyb: High-Efficiency Mixed Reality System for Text Entry",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699306/19F1UXTzDos",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2018/7592/0",
"title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/05/09714039",
"title": "The Effect of Context Switching, Focal Switching Distance, Binocular and Monocular Viewing, and Transient Focal Blur on Human Performance in Optical See-Through Augmented Reality",
"doi": null,
"abstractUrl": "/journal/tg/2022/05/09714039/1B0Y24wmlm8",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a640",
"title": "An Evaluation of Caret Navigation Methods for Text Editing in Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a640/1J7W8cdLJeg",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798065",
"title": "PeriText: Utilizing Peripheral Vision for Reading Text on Augmented Reality Smart Glasses",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798065/1cI6anBuT8Q",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2021/1298/0/129800a096",
"title": "Effects of a Distracting Background and Focal Switching Distance in an Augmented Reality System",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2021/129800a096/1yeQC2Aw0De",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2021/1298/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1pystLSz19C",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"acronym": "ismar",
"groupId": "1000465",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1pysvKFdazS",
"doi": "10.1109/ISMAR50242.2020.00093",
"title": "Comparing World and Screen Coordinate Systems in Optical See-Through Head-Mounted Displays for Text Readability while Walking",
"normalizedTitle": "Comparing World and Screen Coordinate Systems in Optical See-Through Head-Mounted Displays for Text Readability while Walking",
"abstract": "Augmented reality (AR) optical-see-through (OST) head-mounted displays (HMD) have developed to a point where browsing information on the go is possible. In this paper, we investigate the readability of text on an AR HMD while the user is walking. There are two common methods of displaying text on a HMD: anchoring the text on the screen coordinate system or the world coordinate system. We report on the results of two laboratory experiments comparing text readability when the text is displayed in these two coordinate systems, and while the participants walked on a treadmill. In the first experiment, the participants read letter strings comprising Sloane letters, whereas the second experiment used English words. In addition to evaluating the text readability and workload experienced by participants, we employed IMU sensors to compare the effects of the text display method on the participants' head movement and gait. In both experiments, the reading speed and head movement were significantly higher and mental workload significantly lower for the world coordinate system than for the screen coordinate system. These results suggest that text readability while walking is better on the world coordinate system, and displaying text with the screen coordinate system results in an unnatural gait owing to the user trying to keep their head still in an effort to stabilize the HMD screen.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Augmented reality (AR) optical-see-through (OST) head-mounted displays (HMD) have developed to a point where browsing information on the go is possible. In this paper, we investigate the readability of text on an AR HMD while the user is walking. There are two common methods of displaying text on a HMD: anchoring the text on the screen coordinate system or the world coordinate system. We report on the results of two laboratory experiments comparing text readability when the text is displayed in these two coordinate systems, and while the participants walked on a treadmill. In the first experiment, the participants read letter strings comprising Sloane letters, whereas the second experiment used English words. In addition to evaluating the text readability and workload experienced by participants, we employed IMU sensors to compare the effects of the text display method on the participants' head movement and gait. In both experiments, the reading speed and head movement were significantly higher and mental workload significantly lower for the world coordinate system than for the screen coordinate system. These results suggest that text readability while walking is better on the world coordinate system, and displaying text with the screen coordinate system results in an unnatural gait owing to the user trying to keep their head still in an effort to stabilize the HMD screen.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Augmented reality (AR) optical-see-through (OST) head-mounted displays (HMD) have developed to a point where browsing information on the go is possible. In this paper, we investigate the readability of text on an AR HMD while the user is walking. There are two common methods of displaying text on a HMD: anchoring the text on the screen coordinate system or the world coordinate system. We report on the results of two laboratory experiments comparing text readability when the text is displayed in these two coordinate systems, and while the participants walked on a treadmill. In the first experiment, the participants read letter strings comprising Sloane letters, whereas the second experiment used English words. In addition to evaluating the text readability and workload experienced by participants, we employed IMU sensors to compare the effects of the text display method on the participants' head movement and gait. In both experiments, the reading speed and head movement were significantly higher and mental workload significantly lower for the world coordinate system than for the screen coordinate system. These results suggest that text readability while walking is better on the world coordinate system, and displaying text with the screen coordinate system results in an unnatural gait owing to the user trying to keep their head still in an effort to stabilize the HMD screen.",
"fno": "850800a649",
"keywords": [
"Augmented Reality",
"Helmet Mounted Displays",
"Sensors",
"Text Analysis",
"Screen Coordinate Systems",
"Text Readability",
"Displaying Text",
"Text Display Method",
"HMD Screen",
"Augmented Reality",
"Optical See Through Head Mounted Displays",
"Legged Locomotion",
"Head",
"Head Mounted Displays",
"Resists",
"Optical Imaging",
"Optical Sensors",
"Augmented Reality",
"Human Centered Computing",
"Human Computer Interaction HCI",
"Interaction Paradigms",
"Mixed Augmented Reality",
"Human Centered Computing",
"Human Computer Interaction HCI",
"Interaction Devices",
"Displays And Imagers"
],
"authors": [
{
"affiliation": "The University of Tokyo,Japan Science and Technology Agency",
"fullName": "Shogo Fukushima",
"givenName": "Shogo",
"surname": "Fukushima",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "The University of Tokyo",
"fullName": "Takeo Hamada",
"givenName": "Takeo",
"surname": "Hamada",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "The University of Tokyo",
"fullName": "Ari Hautasaari",
"givenName": "Ari",
"surname": "Hautasaari",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-11-01T00:00:00",
"pubType": "proceedings",
"pages": "649-658",
"year": "2020",
"issn": "1554-7868",
"isbn": "978-1-7281-8508-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "850800a639",
"articleId": "1pysvxeFG4E",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "850800a659",
"articleId": "1pysuKaUadi",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ismar/2012/4660/0/06402574",
"title": "Occlusion capable optical see-through head-mounted display using freeform optics",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2012/06402574/12OmNBEpnEt",
"parentPublication": {
"id": "proceedings/ismar/2012/4660/0",
"title": "2012 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446345",
"title": "Investigating a Sparse Peripheral Display in a Head-Mounted Display for VR Locomotion",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446345/13bd1fZBGbI",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a640",
"title": "Towards Eye-Perspective Rendering for Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a640/1CJewzlI3CM",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09850416",
"title": "Distance Perception in Virtual Reality: A Meta-Analysis of the Effect of Head-Mounted Display Characteristics",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09850416/1Fz4SPLVTMY",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a686",
"title": "Exploring Augmented Reality Notification Placement while Communicating with Virtual Avatar",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a686/1J7WgWfFoOs",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a389",
"title": "Objective Measurements of Background Color Shifts Caused by Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a389/1J7WuL68jAY",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2022/5325/0/532500a470",
"title": "Perceptibility of Jitter in Augmented Reality Head-Mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2022/532500a470/1JrQZ2SKCuQ",
"parentPublication": {
"id": "proceedings/ismar/2022/5325/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090625",
"title": "Automatic Calibration of Commercial Optical See-Through Head-Mounted Displays for Medical Applications",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090625/1jIxwp2g0VO",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2021/0158/0/015800a001",
"title": "Perception-Driven Hybrid Foveated Depth of Field Rendering for Head-Mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2021/015800a001/1yeCURkWXpS",
"parentPublication": {
"id": "proceedings/ismar/2021/0158/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2021/0158/0/015800a413",
"title": "Selective Foveated Ray Tracing for Head-Mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2021/015800a413/1yeD8bFOZos",
"parentPublication": {
"id": "proceedings/ismar/2021/0158/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1lgopdHelEs",
"title": "Environmental Science and Information Application Technology, International Conference on",
"acronym": "esiat",
"groupId": "1002836",
"volume": "2",
"displayVolume": "2",
"year": "2009",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNx7G5VW",
"doi": "10.1109/ESIAT.2009.147",
"title": "Rapid Texture-based Volume Rendering",
"normalizedTitle": "Rapid Texture-based Volume Rendering",
"abstract": "Nowadays, man can get a great number of 3D data sets from different sources is common in medical diagnosis but how to explore the information contents of these data sets is still a problem. One effective method is with computer aid rendering the volume. As the 3D datasets are usually in large scalar, the capability of a single CPU to rendering is not sufficient to achieve interactivity. Direct volume rendering via 3D textures has positioned itself as an efficient tool for the display and visual analysis of volumetric scalar fields. In this paper a rapid texture based volume visualization method is proposed. The method exploits hardware-assisted texture mapping, re-samples volume data, represented as a stack of 3D texture, onto a sampling surface or so called proxy geometry. For each Texel of a slice it perform a fetch to the 3D texture and performs fusion and shading using a fragment shadier. In order to speed up the rendering, the integration of acceleration techniques to reduce per-fragment operations for texture based volume rendering also will be addressed. At last we demonstrate the effectiveness of our method with several data sets on the GeForce 8800 GTX graphics card. It proved that the proposed method can generate high-quality visual representations of the 3D data sets at interactive rates.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Nowadays, man can get a great number of 3D data sets from different sources is common in medical diagnosis but how to explore the information contents of these data sets is still a problem. One effective method is with computer aid rendering the volume. As the 3D datasets are usually in large scalar, the capability of a single CPU to rendering is not sufficient to achieve interactivity. Direct volume rendering via 3D textures has positioned itself as an efficient tool for the display and visual analysis of volumetric scalar fields. In this paper a rapid texture based volume visualization method is proposed. The method exploits hardware-assisted texture mapping, re-samples volume data, represented as a stack of 3D texture, onto a sampling surface or so called proxy geometry. For each Texel of a slice it perform a fetch to the 3D texture and performs fusion and shading using a fragment shadier. In order to speed up the rendering, the integration of acceleration techniques to reduce per-fragment operations for texture based volume rendering also will be addressed. At last we demonstrate the effectiveness of our method with several data sets on the GeForce 8800 GTX graphics card. It proved that the proposed method can generate high-quality visual representations of the 3D data sets at interactive rates.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Nowadays, man can get a great number of 3D data sets from different sources is common in medical diagnosis but how to explore the information contents of these data sets is still a problem. One effective method is with computer aid rendering the volume. As the 3D datasets are usually in large scalar, the capability of a single CPU to rendering is not sufficient to achieve interactivity. Direct volume rendering via 3D textures has positioned itself as an efficient tool for the display and visual analysis of volumetric scalar fields. In this paper a rapid texture based volume visualization method is proposed. The method exploits hardware-assisted texture mapping, re-samples volume data, represented as a stack of 3D texture, onto a sampling surface or so called proxy geometry. For each Texel of a slice it perform a fetch to the 3D texture and performs fusion and shading using a fragment shadier. In order to speed up the rendering, the integration of acceleration techniques to reduce per-fragment operations for texture based volume rendering also will be addressed. At last we demonstrate the effectiveness of our method with several data sets on the GeForce 8800 GTX graphics card. It proved that the proposed method can generate high-quality visual representations of the 3D data sets at interactive rates.",
"fno": "3682b575",
"keywords": [
"Volume Rendering",
"Graphics Processing Units GPU",
"3 D Textures",
"Ray Casting"
],
"authors": [
{
"affiliation": null,
"fullName": "Chen Shihao",
"givenName": "Chen",
"surname": "Shihao",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "He Guiqing",
"givenName": "He",
"surname": "Guiqing",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Hao Chongyang",
"givenName": "Hao",
"surname": "Chongyang",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "esiat",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2009-07-01T00:00:00",
"pubType": "proceedings",
"pages": "575-578",
"year": "2009",
"issn": null,
"isbn": "978-0-7695-3682-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "3682b568",
"articleId": "12OmNyaXPNK",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "3682b579",
"articleId": "12OmNxy4MYa",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vv/2002/7641/0/76410115",
"title": "Accelerating Volume Rendering with Texture Hulls",
"doi": null,
"abstractUrl": "/proceedings-article/vv/2002/76410115/12OmNB6D70H",
"parentPublication": {
"id": "proceedings/vv/2002/7641/0",
"title": "Volume Visualization and Graphics, IEEE Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-vis/2003/2030/0/20300038",
"title": "Acceleration Techniques for GPU-based Volume Rendering",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/2003/20300038/12OmNC2xhD8",
"parentPublication": {
"id": "proceedings/ieee-vis/2003/2030/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iscid/2008/3311/2/3311b030",
"title": "Real-time Medical Image Volume Rendering Based on GPU Accelerated Method",
"doi": null,
"abstractUrl": "/proceedings-article/iscid/2008/3311b030/12OmNvjgWRZ",
"parentPublication": {
"id": "proceedings/iscid/2008/3311/2",
"title": "2008 International Symposium on Computational Intelligence and Design",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvrv/2011/4602/0/4602a158",
"title": "An Adaptive Sampling Based Parallel Volume Rendering Algorithm",
"doi": null,
"abstractUrl": "/proceedings-article/icvrv/2011/4602a158/12OmNxE2mHp",
"parentPublication": {
"id": "proceedings/icvrv/2011/4602/0",
"title": "2011 International Conference on Virtual Reality and Visualization",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hpcc-icess/2012/4749/0/4749a381",
"title": "High-Performance Volume Rendering on the Ubiquitous WebGL Platform",
"doi": null,
"abstractUrl": "/proceedings-article/hpcc-icess/2012/4749a381/12OmNyKJiyE",
"parentPublication": {
"id": "proceedings/hpcc-icess/2012/4749/0",
"title": "High Performance Computing and Communication & IEEE International Conference on Embedded Software and Systems, IEEE International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vv/1998/9180/0/91800063",
"title": "Edge Preservation in Volume Rendering Using Splatting",
"doi": null,
"abstractUrl": "/proceedings-article/vv/1998/91800063/12OmNzJbQVc",
"parentPublication": {
"id": "proceedings/vv/1998/9180/0",
"title": "Volume Visualization and Graphics, IEEE Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/alpit/2007/2930/0/2930a282",
"title": "Self-Adaptive Slices of 3D Texture Real-Time Volume Rendering",
"doi": null,
"abstractUrl": "/proceedings-article/alpit/2007/2930a282/12OmNzd7c2j",
"parentPublication": {
"id": "proceedings/alpit/2007/2930/0",
"title": "Advanced Language Processing and Web Information Technology, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vv/2002/7641/0/76410045",
"title": "OpenGL Volumizer: A Toolkit for High Quality Volume Rendering of Large Data Sets",
"doi": null,
"abstractUrl": "/proceedings-article/vv/2002/76410045/12OmNzvhvCC",
"parentPublication": {
"id": "proceedings/vv/2002/7641/0",
"title": "Volume Visualization and Graphics, IEEE Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2010/06/ttg2010061533",
"title": "Efficient High-Quality Volume Rendering of SPH Data",
"doi": null,
"abstractUrl": "/journal/tg/2010/06/ttg2010061533/13rRUwInvJc",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2011/12/ttg2011122125",
"title": "Image Plane Sweep Volume Illumination",
"doi": null,
"abstractUrl": "/journal/tg/2011/12/ttg2011122125/13rRUxjQyve",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNxWuisc",
"title": "2015 28th SIBGRAPI Conference on Graphics, Patterns and Images (SIBGRAPI)",
"acronym": "sibgrapi",
"groupId": "1000131",
"volume": "0",
"displayVolume": "0",
"year": "2015",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNxaNGjy",
"doi": "10.1109/SIBGRAPI.2015.27",
"title": "Accurate Volume Rendering Based on Adaptive Numerical Integration",
"normalizedTitle": "Accurate Volume Rendering Based on Adaptive Numerical Integration",
"abstract": "We present an adaptive integration strategy to evaluate the volume rendering integral for regular volumes. We discuss different strategies to control the step size for both the inner and the outer integrals in the volume rendering equation. We report a set of computational experiments that compare both accuracy and efficiency of our proposal against Riemann summation with uniform step size. The comparisons are made for both CPU and GPU implementations and show that our method delivers both accuracy control and competitive performance.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We present an adaptive integration strategy to evaluate the volume rendering integral for regular volumes. We discuss different strategies to control the step size for both the inner and the outer integrals in the volume rendering equation. We report a set of computational experiments that compare both accuracy and efficiency of our proposal against Riemann summation with uniform step size. The comparisons are made for both CPU and GPU implementations and show that our method delivers both accuracy control and competitive performance.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We present an adaptive integration strategy to evaluate the volume rendering integral for regular volumes. We discuss different strategies to control the step size for both the inner and the outer integrals in the volume rendering equation. We report a set of computational experiments that compare both accuracy and efficiency of our proposal against Riemann summation with uniform step size. The comparisons are made for both CPU and GPU implementations and show that our method delivers both accuracy control and competitive performance.",
"fno": "7962a017",
"keywords": [
"Rendering Computer Graphics",
"Accuracy",
"Iterative Methods",
"Transfer Functions",
"Graphics Processing Units",
"Integral Equations",
"Computational Modeling",
"Simpsons Rule",
"Volume Rendering",
"Adaptive Integration",
"Error Control"
],
"authors": [
{
"affiliation": null,
"fullName": "Leonardo Quatrin Campagnolo",
"givenName": "Leonardo Quatrin",
"surname": "Campagnolo",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Waldemar Celes",
"givenName": "Waldemar",
"surname": "Celes",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Luiz Henrique de Figueiredo",
"givenName": "Luiz Henrique",
"surname": "de Figueiredo",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "sibgrapi",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2015-08-01T00:00:00",
"pubType": "proceedings",
"pages": "17-24",
"year": "2015",
"issn": "1530-1834",
"isbn": "978-1-4673-7962-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "7962a009",
"articleId": "12OmNBCqbA2",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "7962a025",
"articleId": "12OmNvAAtKy",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/sibgrapi/2011/4548/0/4548a093",
"title": "Accurate Volume Rendering of Unstructured Hexahedral Meshes",
"doi": null,
"abstractUrl": "/proceedings-article/sibgrapi/2011/4548a093/12OmNCcbE5T",
"parentPublication": {
"id": "proceedings/sibgrapi/2011/4548/0",
"title": "2011 24th SIBGRAPI Conference on Graphics, Patterns and Images",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cgi/2003/1946/0/19460002",
"title": "Hardware Assisted Multichannel Volume Rendering",
"doi": null,
"abstractUrl": "/proceedings-article/cgi/2003/19460002/12OmNCdk2xM",
"parentPublication": {
"id": "proceedings/cgi/2003/1946/0",
"title": "Computer Graphics International Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pacificvis/2008/1966/0/04475453",
"title": "Second Order Pre-Integrated Volume Rendering",
"doi": null,
"abstractUrl": "/proceedings-article/pacificvis/2008/04475453/12OmNwlqhPE",
"parentPublication": {
"id": "proceedings/pacificvis/2008/1966/0",
"title": "IEEE Pacific Visualization Symposium 2008",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-vis/2005/2766/0/01532808",
"title": "Scale-invariant volume rendering",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/2005/01532808/12OmNyoAA5X",
"parentPublication": {
"id": "proceedings/ieee-vis/2005/2766/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sibgrapi/2010/8420/0/05720357",
"title": "Importance-Aware Composition for Illustrative Volume Rendering",
"doi": null,
"abstractUrl": "/proceedings-article/sibgrapi/2010/05720357/12OmNz5apMR",
"parentPublication": {
"id": "proceedings/sibgrapi/2010/8420/0",
"title": "2010 23rd SIBGRAPI Conference on Graphics, Patterns and Images",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/01/ttg2014010140",
"title": "Verifying Volume Rendering Using Discretization Error Analysis",
"doi": null,
"abstractUrl": "/journal/tg/2014/01/ttg2014010140/13rRUwInvB6",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2001/03/v0253",
"title": "Volume Illustration: Nonphotorealistic Rendering of Volume Models",
"doi": null,
"abstractUrl": "/journal/tg/2001/03/v0253/13rRUxbTMyH",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2007/02/04069241",
"title": "Topology-Controlled Volume Rendering",
"doi": null,
"abstractUrl": "/journal/tg/2007/02/04069241/13rRUytF41s",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/02/09222562",
"title": "Homomorphic-Encrypted Volume Rendering",
"doi": null,
"abstractUrl": "/journal/tg/2021/02/09222562/1nTqvh6tnr2",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/01/09552224",
"title": "Differentiable Direct Volume Rendering",
"doi": null,
"abstractUrl": "/journal/tg/2022/01/09552224/1xibZvRmYzm",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNCdk2Yv",
"title": "Visualization Conference, IEEE",
"acronym": "ieee-vis",
"groupId": "1000796",
"volume": "0",
"displayVolume": "0",
"year": "2000",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNxzMnWP",
"doi": "10.1109/VISUAL.2000.885697",
"title": "Two-Level Volume Rendering-Fusing MIP and DVR",
"normalizedTitle": "Two-Level Volume Rendering-Fusing MIP and DVR",
"abstract": "In this paper we present a two-level approach for fusing direct volume rendering (DVR) and maximum-intensity projection (MIP) within a joint rendering method. Different structures within the data-set are rendered locally by either MIP or DVR on an object-by- object basis. Globally all the results of subsequent object renderings are combined in a merging step (usually compositing in our case). This allows to selectively choose the most suitable technique for depicting each object within the data, while keeping the amount of information contained in the image at a reasonable level. This is especially useful when inner structures should be visualized together with semi-transparent outer parts, similar to the focus-and-context approach known from information visualization. We also present an implementation of our approach, which allows to explore volumetric data using two-level rendering at interactive frame rates.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this paper we present a two-level approach for fusing direct volume rendering (DVR) and maximum-intensity projection (MIP) within a joint rendering method. Different structures within the data-set are rendered locally by either MIP or DVR on an object-by- object basis. Globally all the results of subsequent object renderings are combined in a merging step (usually compositing in our case). This allows to selectively choose the most suitable technique for depicting each object within the data, while keeping the amount of information contained in the image at a reasonable level. This is especially useful when inner structures should be visualized together with semi-transparent outer parts, similar to the focus-and-context approach known from information visualization. We also present an implementation of our approach, which allows to explore volumetric data using two-level rendering at interactive frame rates.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this paper we present a two-level approach for fusing direct volume rendering (DVR) and maximum-intensity projection (MIP) within a joint rendering method. Different structures within the data-set are rendered locally by either MIP or DVR on an object-by- object basis. Globally all the results of subsequent object renderings are combined in a merging step (usually compositing in our case). This allows to selectively choose the most suitable technique for depicting each object within the data, while keeping the amount of information contained in the image at a reasonable level. This is especially useful when inner structures should be visualized together with semi-transparent outer parts, similar to the focus-and-context approach known from information visualization. We also present an implementation of our approach, which allows to explore volumetric data using two-level rendering at interactive frame rates.",
"fno": "64780039",
"keywords": [
"Visualization",
"Volume Rendering",
"Dynamical Systems",
"Medical Applications"
],
"authors": [
{
"affiliation": "Vienna University of Technology",
"fullName": "Helwig Hauser",
"givenName": "Helwig",
"surname": "Hauser",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Vienna University of Technology",
"fullName": "Lukas Mroz",
"givenName": "Lukas",
"surname": "Mroz",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Urbino",
"fullName": "Gian-Italo Bischi",
"givenName": "Gian-Italo",
"surname": "Bischi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Vienna University of Technology",
"fullName": "Eduard Gröller",
"givenName": "Eduard",
"surname": "Gröller",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ieee-vis",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2000-10-01T00:00:00",
"pubType": "proceedings",
"pages": "39",
"year": "2000",
"issn": "1070-2385",
"isbn": "0-7803-6478-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "64780038",
"articleId": "12OmNzcxZjW",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "00885727",
"articleId": "12OmNx3HIby",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icvrv/2011/4602/0/4602a275",
"title": "Direct Volume Rendering and Clipping Technology of Radar Beams",
"doi": null,
"abstractUrl": "/proceedings-article/icvrv/2011/4602a275/12OmNApLGRp",
"parentPublication": {
"id": "proceedings/icvrv/2011/4602/0",
"title": "2011 International Conference on Virtual Reality and Visualization",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-vis/2003/2030/0/20300040",
"title": "High-Quality Two-Level Volume Rendering of Segmented Data Sets on Consumer Graphics Hardware",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/2003/20300040/12OmNxEjY0A",
"parentPublication": {
"id": "proceedings/ieee-vis/2003/2030/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bmei/2008/3118/2/3118b102",
"title": "IRVR Algorithm: A New Volume Rendering Accelerating Method Based on Image Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/bmei/2008/3118b102/12OmNyLiux3",
"parentPublication": {
"id": "proceedings/bmei/2008/3118/2",
"title": "BioMedical Engineering and Informatics, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/td/2007/01/l0003",
"title": "Hypergraph-Partitioning-Based Remapping Models for Image-Space-Parallel Direct Volume Rendering of Unstructured Grids",
"doi": null,
"abstractUrl": "/journal/td/2007/01/l0003/13rRUIJcWl5",
"parentPublication": {
"id": "trans/td",
"title": "IEEE Transactions on Parallel & Distributed Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2011/12/ttg2011122144",
"title": "An Efficient Direct Volume Rendering Approach for Dichromats",
"doi": null,
"abstractUrl": "/journal/tg/2011/12/ttg2011122144/13rRUNvgz9H",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/01/ttg2014010140",
"title": "Verifying Volume Rendering Using Discretization Error Analysis",
"doi": null,
"abstractUrl": "/journal/tg/2014/01/ttg2014010140/13rRUwInvB6",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2001/03/v0242",
"title": "Two-Level Volume Rendering",
"doi": null,
"abstractUrl": "/journal/tg/2001/03/v0242/13rRUxC0SOO",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/12/ttg2012122364",
"title": "Historygrams: Enabling Interactive Global Illumination in Direct Volume Rendering using Photon Mapping",
"doi": null,
"abstractUrl": "/journal/tg/2012/12/ttg2012122364/13rRUyYjK5h",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/02/09224194",
"title": "Direct Volume Rendering with Nonparametric Models of Uncertainty",
"doi": null,
"abstractUrl": "/journal/tg/2021/02/09224194/1nV71j9G3yo",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vis/2020/8014/0/801400a106",
"title": "FAVR - Accelerating Direct Volume Rendering for Virtual RealitySystems",
"doi": null,
"abstractUrl": "/proceedings-article/vis/2020/801400a106/1qRNBEWTyEw",
"parentPublication": {
"id": "proceedings/vis/2020/8014/0",
"title": "2020 IEEE Visualization Conference (VIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNCbCrVD",
"title": "Visualization Conference, IEEE",
"acronym": "ieee-vis",
"groupId": "1000796",
"volume": "0",
"displayVolume": "0",
"year": "1995",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNzZmZv2",
"doi": "10.1109/VISUAL.1995.480790",
"title": "Interactive Maximum Projection Volume Rendering",
"normalizedTitle": "Interactive Maximum Projection Volume Rendering",
"abstract": "Maximum projection is a volume rendering technique that, for each pixel, finds the maximum intensity along a projector. For certain important classes of data, this is an approximation to summation rendering which produces superior visualizations.In this paper we will show how maximum projection rendering with additional depth cues can be implemented using simple affine transformations in object space. This technique can be used together with 3D graphics libraries and standard graphics hardware,thus allowing interactive manipulations of the volume data. The algorithm presented in this paper allows for a wide range of tradeoffs between interactivity and image quality.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Maximum projection is a volume rendering technique that, for each pixel, finds the maximum intensity along a projector. For certain important classes of data, this is an approximation to summation rendering which produces superior visualizations.In this paper we will show how maximum projection rendering with additional depth cues can be implemented using simple affine transformations in object space. This technique can be used together with 3D graphics libraries and standard graphics hardware,thus allowing interactive manipulations of the volume data. The algorithm presented in this paper allows for a wide range of tradeoffs between interactivity and image quality.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Maximum projection is a volume rendering technique that, for each pixel, finds the maximum intensity along a projector. For certain important classes of data, this is an approximation to summation rendering which produces superior visualizations.In this paper we will show how maximum projection rendering with additional depth cues can be implemented using simple affine transformations in object space. This technique can be used together with 3D graphics libraries and standard graphics hardware,thus allowing interactive manipulations of the volume data. The algorithm presented in this paper allows for a wide range of tradeoffs between interactivity and image quality.",
"fno": "71870011",
"keywords": [
"Maximum Rendering",
"Summation Rendering",
"Volume Visualization",
"Interactive Computer Graphics",
"Geometric Transformation",
"Hardware Accelerated Rendering"
],
"authors": [
{
"affiliation": null,
"fullName": "Wolfgang Heidrich",
"givenName": "Wolfgang",
"surname": "Heidrich",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Michael McCool",
"givenName": "Michael",
"surname": "McCool",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "John Stevens",
"givenName": "John",
"surname": "Stevens",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ieee-vis",
"isOpenAccess": false,
"showRecommendedArticles": false,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "1995-10-01T00:00:00",
"pubType": "proceedings",
"pages": "11",
"year": "1995",
"issn": "1070-2385",
"isbn": "0-8186-7187-4",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "71870003",
"articleId": "12OmNs5rl20",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "71870019",
"articleId": "12OmNAsTgR0",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNzl3WX4",
"title": "2014 IEEE VR Workshop: Sonic Interaction in Virtual Environments (SIVE)",
"acronym": "sive",
"groupId": "1805064",
"volume": "0",
"displayVolume": "0",
"year": "2014",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNAtaS0G",
"doi": "10.1109/SIVE.2014.7006288",
"title": "Reproducible sonification for virtual navigation",
"normalizedTitle": "Reproducible sonification for virtual navigation",
"abstract": "The use of sonification for navigation, localization and obstacle avoidance is considered to be one of the most important tasks in auditory display research for its potential application to navigation systems in vehicles and smartphones, assistive technology and other eyes-free applications. The aim of this technology is to deliver location-based information to support navigation through sound. In this paper a comparison of two sonification methods for navigation and obstacle avoidance is presented. These methods were initially developed during a sonification hack day that was ran during the Interactive Sonification (ISon) workshop 2013. In order to allow the formal comparison of methods, we followed a reproducible sonification approach using a set of guidelines provided by SonEX (Sonification Evaluation eXchange). SonEX is a community-based environment that enables the definition and evaluation of standardized tasks, supporting open science standards and reproducible research. In order to allow for reproducible research, the system has been made publicly available.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The use of sonification for navigation, localization and obstacle avoidance is considered to be one of the most important tasks in auditory display research for its potential application to navigation systems in vehicles and smartphones, assistive technology and other eyes-free applications. The aim of this technology is to deliver location-based information to support navigation through sound. In this paper a comparison of two sonification methods for navigation and obstacle avoidance is presented. These methods were initially developed during a sonification hack day that was ran during the Interactive Sonification (ISon) workshop 2013. In order to allow the formal comparison of methods, we followed a reproducible sonification approach using a set of guidelines provided by SonEX (Sonification Evaluation eXchange). SonEX is a community-based environment that enables the definition and evaluation of standardized tasks, supporting open science standards and reproducible research. In order to allow for reproducible research, the system has been made publicly available.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The use of sonification for navigation, localization and obstacle avoidance is considered to be one of the most important tasks in auditory display research for its potential application to navigation systems in vehicles and smartphones, assistive technology and other eyes-free applications. The aim of this technology is to deliver location-based information to support navigation through sound. In this paper a comparison of two sonification methods for navigation and obstacle avoidance is presented. These methods were initially developed during a sonification hack day that was ran during the Interactive Sonification (ISon) workshop 2013. In order to allow the formal comparison of methods, we followed a reproducible sonification approach using a set of guidelines provided by SonEX (Sonification Evaluation eXchange). SonEX is a community-based environment that enables the definition and evaluation of standardized tasks, supporting open science standards and reproducible research. In order to allow for reproducible research, the system has been made publicly available.",
"fno": "07006288",
"keywords": [
"Navigation",
"Games",
"Avatars",
"Educational Institutions",
"Auditory Displays",
"Virtual Environments",
"Collision Avoidance",
"Reproducibility",
"Interactive Sonification",
"Auditory Displays",
"Spatial Auditorydisplays",
"Blind Navigation"
],
"authors": [
{
"affiliation": "Audio Department,Fraunhofer IIS",
"fullName": "Norberto Degara",
"givenName": "Norberto",
"surname": "Degara",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Audio Department, Fraunhofer IIS",
"fullName": "Thimmaiah Kuppanda",
"givenName": "Thimmaiah",
"surname": "Kuppanda",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "FIT Lab, Swansea University",
"fullName": "Timothy Neate",
"givenName": "Timothy",
"surname": "Neate",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Audio Lab, University of York",
"fullName": "Jiajun Yang",
"givenName": "Jiajun",
"surname": "Yang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Interaction Design, Zurich University of the Arts",
"fullName": "Andres Torres",
"givenName": "Andres",
"surname": "Torres",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "sive",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2014-03-01T00:00:00",
"pubType": "proceedings",
"pages": "35-40",
"year": "2014",
"issn": null,
"isbn": "978-1-4799-5781-1",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07006287",
"articleId": "12OmNzBwGu7",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07006289",
"articleId": "12OmNAXglTR",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ictai/2017/3876/0/387601a574",
"title": "Optical Flow Based Obstacle Avoidance for Multi-rotor Aerial Vehicles",
"doi": null,
"abstractUrl": "/proceedings-article/ictai/2017/387601a574/12OmNAtK4n2",
"parentPublication": {
"id": "proceedings/ictai/2017/3876/0",
"title": "2017 IEEE 29th International Conference on Tools with Artificial Intelligence (ICTAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdma/2011/4455/0/4455b063",
"title": "Robot Navigation Based on Multi-sensor Data Fusion",
"doi": null,
"abstractUrl": "/proceedings-article/icdma/2011/4455b063/12OmNCgrD8M",
"parentPublication": {
"id": "proceedings/icdma/2011/4455/0",
"title": "2011 Second International Conference on Digital Manufacturing & Automation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sbr-lars/2012/4906/0/4906a156",
"title": "Autonomous Navigation with Obstacle Avoidance for a Car-Like Robot",
"doi": null,
"abstractUrl": "/proceedings-article/sbr-lars/2012/4906a156/12OmNwMob5N",
"parentPublication": {
"id": "proceedings/sbr-lars/2012/4906/0",
"title": "Brazilian Robotics Symposium and Latin American Robotics Symposium (SBR-LARS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cine/2017/2529/0/2529a088",
"title": "Application of Deep Q-Learning for Wheel Mobile Robot Navigation",
"doi": null,
"abstractUrl": "/proceedings-article/cine/2017/2529a088/12OmNxAlA3c",
"parentPublication": {
"id": "proceedings/cine/2017/2529/0",
"title": "2017 3rd International Conference on Computational Intelligence and Networks (CINE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icat/2015/8146/0/07340503",
"title": "A Haptic-Assisted Guidance System for working machines based on virtual force fields",
"doi": null,
"abstractUrl": "/proceedings-article/icat/2015/07340503/12OmNy5zskG",
"parentPublication": {
"id": "proceedings/icat/2015/8146/0",
"title": "2015 XXV International Conference on Information, Communication and Automation Technologies (ICAT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cw/2013/2246/0/2246a064",
"title": "Navigation Maps for Virtual Travelers",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2013/2246a064/12OmNzcxZkp",
"parentPublication": {
"id": "proceedings/cw/2013/2246/0",
"title": "2013 International Conference on Cyberworlds (CW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icece/2010/4031/0/4031b725",
"title": "Research on ODMM Obstacle Avoidance Fuzzy Navigation Based on Ultrasonic-Absolute-Positioning",
"doi": null,
"abstractUrl": "/proceedings-article/icece/2010/4031b725/12OmNzvQHUn",
"parentPublication": {
"id": "proceedings/icece/2010/4031/0",
"title": "Electrical and Control Engineering, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aipr/2017/1235/0/08457966",
"title": "ROS Navigation Stack for Smart Indoor Agents",
"doi": null,
"abstractUrl": "/proceedings-article/aipr/2017/08457966/13xI8AOXccM",
"parentPublication": {
"id": "proceedings/aipr/2017/1235/0",
"title": "2017 IEEE Applied Imagery Pattern Recognition Workshop (AIPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tm/2019/03/08370712",
"title": "Vision-Based Mobile Indoor Assistive Navigation Aid for Blind People",
"doi": null,
"abstractUrl": "/journal/tm/2019/03/08370712/17D45WXIkHr",
"parentPublication": {
"id": "trans/tm",
"title": "IEEE Transactions on Mobile Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/percom-workshops/2019/9151/0/08730764",
"title": "Simulation in Real Conditions of Navigation and Obstacle Avoidance with PX4/Gazebo Platform",
"doi": null,
"abstractUrl": "/proceedings-article/percom-workshops/2019/08730764/1aDSK7HQgxi",
"parentPublication": {
"id": "proceedings/percom-workshops/2019/9151/0",
"title": "2019 IEEE International Conference on Pervasive Computing and Communications Workshops (PerCom Workshops)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNynJMVF",
"title": "2009 IEEE 17th International Conference on Program Comprehension (ICPC 2009)",
"acronym": "icpc",
"groupId": "1003168",
"volume": "0",
"displayVolume": "0",
"year": "2009",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNCcbE84",
"doi": "10.1109/ICPC.2009.5090035",
"title": "Sonification design guidelines to enhance program comprehension",
"normalizedTitle": "Sonification design guidelines to enhance program comprehension",
"abstract": "Faced with the challenges of understanding the source code of a program, software developers are assisted by a wealth of software visualization research. This work explores how visualization can be supplemented by sonification as a cognitive tool for code comprehension. By engaging the programmer's auditory senses, sonification can improve the utility of program comprehension tools. This paper reports on our experiences of creating and evaluating a program comprehension prototype tool that employs sonification to assist program understanding by rendering sonic cues. Our empirical evaluation of the efficacy of information sonification indicates that this cognitive aid can effectively complement visualization when trying to understand an unfamiliar code base. Based on our experiences, we then propose a set of guidelines for the design of a new generation of tools that increase their information utility by combining visualization and sonification.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Faced with the challenges of understanding the source code of a program, software developers are assisted by a wealth of software visualization research. This work explores how visualization can be supplemented by sonification as a cognitive tool for code comprehension. By engaging the programmer's auditory senses, sonification can improve the utility of program comprehension tools. This paper reports on our experiences of creating and evaluating a program comprehension prototype tool that employs sonification to assist program understanding by rendering sonic cues. Our empirical evaluation of the efficacy of information sonification indicates that this cognitive aid can effectively complement visualization when trying to understand an unfamiliar code base. Based on our experiences, we then propose a set of guidelines for the design of a new generation of tools that increase their information utility by combining visualization and sonification.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Faced with the challenges of understanding the source code of a program, software developers are assisted by a wealth of software visualization research. This work explores how visualization can be supplemented by sonification as a cognitive tool for code comprehension. By engaging the programmer's auditory senses, sonification can improve the utility of program comprehension tools. This paper reports on our experiences of creating and evaluating a program comprehension prototype tool that employs sonification to assist program understanding by rendering sonic cues. Our empirical evaluation of the efficacy of information sonification indicates that this cognitive aid can effectively complement visualization when trying to understand an unfamiliar code base. Based on our experiences, we then propose a set of guidelines for the design of a new generation of tools that increase their information utility by combining visualization and sonification.",
"fno": "05090035",
"keywords": [
"Program Visualisation",
"Source Coding",
"Sonification Design Guidelines",
"Program Source Code",
"Software Developers",
"Software Visualization",
"Cognitive Tool",
"Code Comprehension",
"Auditory Senses",
"Program Comprehension Tools",
"Guidelines",
"Visualization",
"Software Maintenance",
"Software Systems",
"Programming",
"Auditory Displays",
"Computer Science",
"Educational Institutions",
"Software Prototyping",
"Prototypes"
],
"authors": [
{
"affiliation": "Department of Computer Science, Virginia Tech, Blacksburg, 24061, USA",
"fullName": "Khaled Hussein",
"givenName": "Khaled",
"surname": "Hussein",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Department of Computer Science, Virginia Tech, Blacksburg, 24061, USA",
"fullName": "Eli Tilevich",
"givenName": "Eli",
"surname": "Tilevich",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Department of Music, Virginia Tech, Blacksburg, 24061, USA",
"fullName": "Ivica Ico Bukvic",
"givenName": "Ivica Ico",
"surname": "Bukvic",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Wellesley College, MA 02481, USA",
"fullName": "SooBeen Kim",
"givenName": null,
"surname": "SooBeen Kim",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icpc",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2009-05-01T00:00:00",
"pubType": "proceedings",
"pages": "",
"year": "2009",
"issn": "1092-8138",
"isbn": "978-1-4244-3998-0",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "05090034",
"articleId": "12OmNwp74ta",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "05090036",
"articleId": "12OmNviZllQ",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icpc/2016/1428/0/07503710",
"title": "A case study of program comprehension effort and technical debt estimations",
"doi": null,
"abstractUrl": "/proceedings-article/icpc/2016/07503710/12OmNAnMuFm",
"parentPublication": {
"id": "proceedings/icpc/2016/1428/0",
"title": "2016 IEEE 24th International Conference on Program Comprehension (ICPC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iwpc/2004/2149/0/21490151",
"title": "Program Comprehension for Web Services",
"doi": null,
"abstractUrl": "/proceedings-article/iwpc/2004/21490151/12OmNBPtJI6",
"parentPublication": {
"id": "proceedings/iwpc/2004/2149/0",
"title": "12th IEEE International Workshop on Program Comprehension (IWPC'04)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccsee/2012/4647/1/4647a601",
"title": "Overview of Program Comprehension",
"doi": null,
"abstractUrl": "/proceedings-article/iccsee/2012/4647a601/12OmNBziBc8",
"parentPublication": {
"id": "proceedings/iccsee/2012/4647/2",
"title": "Computer Science and Electronics Engineering, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpc/2009/3998/0/05090033",
"title": "Trace visualization for program comprehension: A controlled experiment",
"doi": null,
"abstractUrl": "/proceedings-article/icpc/2009/05090033/12OmNCdBDHF",
"parentPublication": {
"id": "proceedings/icpc/2009/3998/0",
"title": "2009 IEEE 17th International Conference on Program Comprehension (ICPC 2009)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icse/2013/3073/0/06606723",
"title": "Normalizing source code vocabulary to support program comprehension and software quality",
"doi": null,
"abstractUrl": "/proceedings-article/icse/2013/06606723/12OmNvDZEXa",
"parentPublication": {
"id": "proceedings/icse/2013/3073/0",
"title": "2013 35th International Conference on Software Engineering (ICSE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/apsec/2017/3681/0/3681a642",
"title": "Facilitating Scenario-Based Program Comprehension with Topic Models",
"doi": null,
"abstractUrl": "/proceedings-article/apsec/2017/3681a642/12OmNwqx4aW",
"parentPublication": {
"id": "proceedings/apsec/2017/3681/0",
"title": "2017 24th Asia-Pacific Software Engineering Conference (APSEC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wpc/1993/4042/0/00263897",
"title": "DOCKET: program comprehension-in-the-large",
"doi": null,
"abstractUrl": "/proceedings-article/wpc/1993/00263897/12OmNyQpgLV",
"parentPublication": {
"id": "proceedings/wpc/1993/4042/0",
"title": "1993 IEEE Second Workshop on Program Comprehension",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icse/2018/5638/0/563801a788",
"title": "[Journal First] A Comparison of Program Comprehension Strategies by Blind and Sighted Programmers",
"doi": null,
"abstractUrl": "/proceedings-article/icse/2018/563801a788/13l5NXDXuaT",
"parentPublication": {
"id": "proceedings/icse/2018/5638/0",
"title": "2018 IEEE/ACM 40th International Conference on Software Engineering (ICSE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ts/2018/10/07997917",
"title": "Measuring Program Comprehension: A Large-Scale Field Study with Professionals",
"doi": null,
"abstractUrl": "/journal/ts/2018/10/07997917/14qdcQ4CFKF",
"parentPublication": {
"id": "trans/ts",
"title": "IEEE Transactions on Software Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icse-companion/2022/9598/0/959800a041",
"title": "COSPEX: A Program Comprehension Tool for Novice Programmers",
"doi": null,
"abstractUrl": "/proceedings-article/icse-companion/2022/959800a041/1EaP68o0IjS",
"parentPublication": {
"id": "proceedings/icse-companion/2022/9598/0",
"title": "2022 IEEE/ACM 44th International Conference on Software Engineering: Companion Proceedings (ICSE-Companion)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNxbW4PP",
"title": "2015 IEEE 1st International Workshop on Virtual and Augmented Reality for Molecular Science (VARMS@IEEEVR)",
"acronym": "varms-ieeevr",
"groupId": "1807845",
"volume": "0",
"displayVolume": "0",
"year": "2015",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNx0A7CV",
"doi": "10.1109/VARMS.2015.7151725",
"title": "Enhancing visualization of molecular simulations using sonification",
"normalizedTitle": "Enhancing visualization of molecular simulations using sonification",
"abstract": "Scientific visualization is an application area for virtual reality environments like stereoscopic displays or CAVEs. Especially interactive molecular visualizations that show the complex three-dimensional structures found in structural biology are often investigated using such environments. In contrast to VR applications like simulators, molecular visualization typically lacks auditory output. Nevertheless, sonification can be used to convey information about the data. In our work, we use sound to highlight events extracted from a molecular dynamics simulation. This not only offloads information from the visual channel, but can also guide the attention of the analyst towards important phenomena even if they are occluded in the visualization. Sound also creates a higher level of immersion, which can be beneficial for educational purposes. In this paper, we detail our application that adds sonification to the visualization of molecular simulations.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Scientific visualization is an application area for virtual reality environments like stereoscopic displays or CAVEs. Especially interactive molecular visualizations that show the complex three-dimensional structures found in structural biology are often investigated using such environments. In contrast to VR applications like simulators, molecular visualization typically lacks auditory output. Nevertheless, sonification can be used to convey information about the data. In our work, we use sound to highlight events extracted from a molecular dynamics simulation. This not only offloads information from the visual channel, but can also guide the attention of the analyst towards important phenomena even if they are occluded in the visualization. Sound also creates a higher level of immersion, which can be beneficial for educational purposes. In this paper, we detail our application that adds sonification to the visualization of molecular simulations.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Scientific visualization is an application area for virtual reality environments like stereoscopic displays or CAVEs. Especially interactive molecular visualizations that show the complex three-dimensional structures found in structural biology are often investigated using such environments. In contrast to VR applications like simulators, molecular visualization typically lacks auditory output. Nevertheless, sonification can be used to convey information about the data. In our work, we use sound to highlight events extracted from a molecular dynamics simulation. This not only offloads information from the visual channel, but can also guide the attention of the analyst towards important phenomena even if they are occluded in the visualization. Sound also creates a higher level of immersion, which can be beneficial for educational purposes. In this paper, we detail our application that adds sonification to the visualization of molecular simulations.",
"fno": "07151725",
"keywords": [
"Bioinformatics",
"Data Visualisation",
"Molecular Dynamics Method",
"Proteins",
"Virtual Reality",
"Molecular Simulation Visualization Enhancement",
"Sonification",
"Scientific Visualization",
"Virtual Reality Environments",
"Interactive Molecular Visualizations",
"Complex Three Dimensional Structures",
"Structural Biology",
"Auditory Output",
"Event Extraction",
"Molecular Dynamics Simulation",
"Visual Channel",
"Immersion Level",
"Proteins",
"Solid Modeling",
"Hydrogen",
"Data Visualization",
"Data Models",
"Biological System Modeling",
"Visualization",
"H 5 2 Information Interfaces And Presentation User Interfaces Auditory Non Speech Feedback",
"I 3 7 Computer Graphics Three Dimensional Graphics And Realism Virtual Reality",
"J 3 Computer Applications Life And Medical Sciences Biology And Genetics"
],
"authors": [
{
"affiliation": "Visualization Research Center (VISUS), University of Stuttgart, Germany",
"fullName": "Benjamin Rau",
"givenName": "Benjamin",
"surname": "Rau",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Visualization Research Center (VISUS), University of Stuttgart, Germany",
"fullName": "Florian Frieß",
"givenName": "Florian",
"surname": "Frieß",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Visualization Research Center (VISUS), University of Stuttgart, Germany",
"fullName": "Michael Krone",
"givenName": "Michael",
"surname": "Krone",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Visualization Research Center (VISUS), University of Stuttgart, Germany",
"fullName": "Christoph Muller",
"givenName": "Christoph",
"surname": "Muller",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Visualization Research Center (VISUS), University of Stuttgart, Germany",
"fullName": "Thomas Ertl",
"givenName": "Thomas",
"surname": "Ertl",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "varms-ieeevr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2015-03-01T00:00:00",
"pubType": "proceedings",
"pages": "25-30",
"year": "2015",
"issn": null,
"isbn": "978-1-4673-6926-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07151724",
"articleId": "12OmNAJDBu9",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07151726",
"articleId": "12OmNy2Jt57",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cbi/2014/5779/2/5779b174",
"title": "Something Doesn't Sound Right: Sonification for Monitoring Business Processes in Manufacturing",
"doi": null,
"abstractUrl": "/proceedings-article/cbi/2014/5779b174/12OmNAFFdGp",
"parentPublication": {
"id": "proceedings/cbi/2014/5779/2",
"title": "2014 IEEE 16th Conference on Business Informatics (CBI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpc/2009/3998/0/05090035",
"title": "Sonification design guidelines to enhance program comprehension",
"doi": null,
"abstractUrl": "/proceedings-article/icpc/2009/05090035/12OmNCcbE84",
"parentPublication": {
"id": "proceedings/icpc/2009/3998/0",
"title": "2009 IEEE 17th International Conference on Program Comprehension (ICPC 2009)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/csse/2008/3336/3/3336e014",
"title": "Study on Application of CAD Sonification",
"doi": null,
"abstractUrl": "/proceedings-article/csse/2008/3336e014/12OmNwK7obY",
"parentPublication": {
"id": "proceedings/csse/2008/3336/3",
"title": "Computer Science and Software Engineering, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/viz/2009/3734/0/3734a003",
"title": "Development, Implementation, and Evaluation of Sonification Tools for Point-and-Surface-Based Data Exploration",
"doi": null,
"abstractUrl": "/proceedings-article/viz/2009/3734a003/12OmNwe2ItL",
"parentPublication": {
"id": "proceedings/viz/2009/3734/0",
"title": "Visualisation, International Conference in",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pacificvis/2015/6879/0/07156364",
"title": "ERICAs: Enabling insights into ab initio Molecular Dynamics simulations",
"doi": null,
"abstractUrl": "/proceedings-article/pacificvis/2015/07156364/12OmNzSh13Q",
"parentPublication": {
"id": "proceedings/pacificvis/2015/6879/0",
"title": "2015 IEEE Pacific Visualization Symposium (PacificVis)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/so/1995/02/s2045",
"title": "Data Sonification: Do You See What I Hear?",
"doi": null,
"abstractUrl": "/magazine/so/1995/02/s2045/13rRUwfI0NU",
"parentPublication": {
"id": "mags/so",
"title": "IEEE Software",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cs/1999/04/c4048",
"title": "Data Sonification and Sound Visualization",
"doi": null,
"abstractUrl": "/magazine/cs/1999/04/c4048/13rRUy08MzR",
"parentPublication": {
"id": "mags/cs",
"title": "Computing in Science & Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sive/2018/5713/0/08577080",
"title": "Quantum: An art-science case study on sonification and sound design in virtual reality",
"doi": null,
"abstractUrl": "/proceedings-article/sive/2018/08577080/17D45We0UEe",
"parentPublication": {
"id": "proceedings/sive/2018/5713/0",
"title": "2018 IEEE 4th VR Workshop on Sonic Interactions for Virtual Environments (SIVE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/chase/2019/4687/0/468700a053",
"title": "Fuzzy C-Means Clustering and Sonification of HRV Features",
"doi": null,
"abstractUrl": "/proceedings-article/chase/2019/468700a053/1febUYfb9Go",
"parentPublication": {
"id": "proceedings/chase/2019/4687/0",
"title": "2019 IEEE/ACM International Conference on Connected Health: Applications, Systems and Engineering Technologies (CHASE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090531",
"title": "Immersive sonification of protein surface",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090531/1jIxzEw3bb2",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNxV4itF",
"title": "2017 IEEE Virtual Reality (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2017",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNxjjEbg",
"doi": "10.1109/VR.2017.7892327",
"title": "Sound design in virtual reality concert experiences using a wave field synthesis approach",
"normalizedTitle": "Sound design in virtual reality concert experiences using a wave field synthesis approach",
"abstract": "In this paper we propose an experiment that evaluates the influence of audience noise on the feeling of presence and the perceived quality in a virtual reality concert experience delivered using Wave Field Synthesis. A 360 degree video of a live rock concert from a local band was recorded. Single sound sources from the stage and the PA system were recorded, as well as the audience noise, and impulse responses of the concert venue. The audience noise was implemented in the production phase. A comparative study compared an experience with and without audience noise. In a between subject experiment with 30 participants we found that audience noise does not have a significant impact on presence. However, qualitative evaluations show that the naturalness of the sonic experience delivered through wavefield synthesis had a positive impact on the participants.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this paper we propose an experiment that evaluates the influence of audience noise on the feeling of presence and the perceived quality in a virtual reality concert experience delivered using Wave Field Synthesis. A 360 degree video of a live rock concert from a local band was recorded. Single sound sources from the stage and the PA system were recorded, as well as the audience noise, and impulse responses of the concert venue. The audience noise was implemented in the production phase. A comparative study compared an experience with and without audience noise. In a between subject experiment with 30 participants we found that audience noise does not have a significant impact on presence. However, qualitative evaluations show that the naturalness of the sonic experience delivered through wavefield synthesis had a positive impact on the participants.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this paper we propose an experiment that evaluates the influence of audience noise on the feeling of presence and the perceived quality in a virtual reality concert experience delivered using Wave Field Synthesis. A 360 degree video of a live rock concert from a local band was recorded. Single sound sources from the stage and the PA system were recorded, as well as the audience noise, and impulse responses of the concert venue. The audience noise was implemented in the production phase. A comparative study compared an experience with and without audience noise. In a between subject experiment with 30 participants we found that audience noise does not have a significant impact on presence. However, qualitative evaluations show that the naturalness of the sonic experience delivered through wavefield synthesis had a positive impact on the participants.",
"fno": "07892327",
"keywords": [
"Visualization",
"Virtual Reality",
"Cameras",
"Auditory Displays",
"Rocks",
"Production",
"Microphones",
"H 1 2 Information Systems User Machine Systems Human Factors",
"I 3 7 Computer Graphics Three Dimensional Graphics And Realism Virtual Reality"
],
"authors": [
{
"affiliation": "Aalborg University Copenhagen, Denmark",
"fullName": "Rasmus B. Lind",
"givenName": "Rasmus B.",
"surname": "Lind",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Aalborg University Copenhagen, Denmark",
"fullName": "Victor Milesen",
"givenName": "Victor",
"surname": "Milesen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Aalborg University Copenhagen, Denmark",
"fullName": "Dina M. Smed",
"givenName": "Dina M.",
"surname": "Smed",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Aalborg University Copenhagen, Denmark",
"fullName": "Simone P. Vinkel",
"givenName": "Simone P.",
"surname": "Vinkel",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Aalborg University Copenhagen, Denmark",
"fullName": "Francesco Grani",
"givenName": "Francesco",
"surname": "Grani",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Aalborg University Copenhagen, Denmark",
"fullName": "Niels C. Nilsson",
"givenName": "Niels C.",
"surname": "Nilsson",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Aalborg University Copenhagen, Denmark",
"fullName": "Lars Reng",
"givenName": "Lars",
"surname": "Reng",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Aalborg University Copenhagen, Denmark",
"fullName": "Rolf Nordahl",
"givenName": "Rolf",
"surname": "Nordahl",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Aalborg University Copenhagen, Denmark",
"fullName": "Stefania Serafin",
"givenName": "Stefania",
"surname": "Serafin",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2017-01-01T00:00:00",
"pubType": "proceedings",
"pages": "363-364",
"year": "2017",
"issn": "2375-5334",
"isbn": "978-1-5090-6647-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07892326",
"articleId": "12OmNwNOaQ4",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07892328",
"articleId": "12OmNym2bPM",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/culture-computing/2013/5047/0/5047a135",
"title": "Interactive Acoustic Sound Field Reproduction with Web System for Gion Festival",
"doi": null,
"abstractUrl": "/proceedings-article/culture-computing/2013/5047a135/12OmNAq3hzj",
"parentPublication": {
"id": "proceedings/culture-computing/2013/5047/0",
"title": "2013 International Conference on Culture and Computing (Culture Computing)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sive/2015/1969/0/07361290",
"title": "Sonic interaction design for virtual and augmented reality environments",
"doi": null,
"abstractUrl": "/proceedings-article/sive/2015/07361290/12OmNButpWE",
"parentPublication": {
"id": "proceedings/sive/2015/1969/0",
"title": "2015 IEEE 2nd VR Workshop on Sonic Interactions for Virtual Environments (SIVE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/snpd/2012/2120/0/06299336",
"title": "Digital Archive for Japanese Intangible Cultural Heritage Based on Reproduction of High-Fidelity Sound Field in Yamahoko Parade of Gion Festival",
"doi": null,
"abstractUrl": "/proceedings-article/snpd/2012/06299336/12OmNy3iFkX",
"parentPublication": {
"id": "proceedings/snpd/2012/2120/0",
"title": "2012 13th ACIS International Conference on Software Engineering, Artificial Intelligence, Networking and Parallel & Distributed Computing (SNPD 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/11/08007327",
"title": "SonifEye: Sonification of Visual Information Using Physical Modeling Sound Synthesis",
"doi": null,
"abstractUrl": "/journal/tg/2017/11/08007327/13rRUyft7D7",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmew/2022/7218/0/09859362",
"title": "Exploring Multisensory Feedback for Virtual Reality Relaxation",
"doi": null,
"abstractUrl": "/proceedings-article/icmew/2022/09859362/1G4F5nOkBNK",
"parentPublication": {
"id": "proceedings/icmew/2022/7218/0",
"title": "2022 IEEE International Conference on Multimedia and Expo Workshops (ICMEW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmct/2022/7362/0/736200a001",
"title": "Layered-XR: A Utility Virtual-Real Fusion Based on Layer Sets",
"doi": null,
"abstractUrl": "/proceedings-article/icmct/2022/736200a001/1Ml2hws1aRa",
"parentPublication": {
"id": "proceedings/icmct/2022/7362/0",
"title": "2022 7th International Conference on Multimedia Communication Technologies (ICMCT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvris/2019/5050/0/505000a059",
"title": "Research on the Artistic Characteristics of VR Films",
"doi": null,
"abstractUrl": "/proceedings-article/icvris/2019/505000a059/1fHkc7eWgU0",
"parentPublication": {
"id": "proceedings/icvris/2019/5050/0",
"title": "2019 International Conference on Virtual Reality and Intelligent Systems (ICVRIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a090",
"title": "First Steps Towards Augmented Reality Interactive Electronic Music Production",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a090/1tnWYWjfAFa",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2021/1838/0/255600a538",
"title": "Disturbance and Plausibility in a Virtual Rock Concert: A Pilot Study",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2021/255600a538/1tuAjEOnM8E",
"parentPublication": {
"id": "proceedings/vr/2021/1838/0",
"title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvrv/2020/0497/0/049700a324",
"title": "Room03—the Echo of Poetry",
"doi": null,
"abstractUrl": "/proceedings-article/icvrv/2020/049700a324/1vg7H2tUU8w",
"parentPublication": {
"id": "proceedings/icvrv/2020/0497/0",
"title": "2020 International Conference on Virtual Reality and Visualization (ICVRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "17D45VtKirz",
"title": "2018 IEEE 4th VR Workshop on Sonic Interactions for Virtual Environments (SIVE)",
"acronym": "sive",
"groupId": "1805064",
"volume": "0",
"displayVolume": "0",
"year": "2018",
"__typename": "ProceedingType"
},
"article": {
"id": "17D45We0UEe",
"doi": "10.1109/SIVE.2018.8577080",
"title": "Quantum: An art-science case study on sonification and sound design in virtual reality",
"normalizedTitle": "Quantum: An art-science case study on sonification and sound design in virtual reality",
"abstract": "Molecular sonification is the transformation of chemical data into sound and has been used to gain insight into chemical systems and for the creation of contemporary music compositions. The combination of sonification with a virtual reality environment offers potential benefits such as providing a visual frame of reference, an increased sense of immersion, nuanced spatial information through binaural audio cues and ease of interactivity. To explore how strategies developed in sonification research and contemporary electroacoustic music composition can be adapted to virtual reality, the art-science installation ’Quantum’ was created. The multi-media work consists of computer-generated molecules in a virtual space producing sound created via the sonification of nuclear magnetic resonance data. Upon user interaction with different molecules, the overall composition and complexity of the sound world develop. The binaural sound material can migrate back and forth from the molecules to the non-binaural background composition and, depending on user input, develop in terms of timbre, spectral complexity, and gestural content. ‘Quantum’ is an exploration of the combination of sonification and virtual reality and offers first points of discussion that can be elaborated upon in future artworks, games or educational content.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Molecular sonification is the transformation of chemical data into sound and has been used to gain insight into chemical systems and for the creation of contemporary music compositions. The combination of sonification with a virtual reality environment offers potential benefits such as providing a visual frame of reference, an increased sense of immersion, nuanced spatial information through binaural audio cues and ease of interactivity. To explore how strategies developed in sonification research and contemporary electroacoustic music composition can be adapted to virtual reality, the art-science installation ’Quantum’ was created. The multi-media work consists of computer-generated molecules in a virtual space producing sound created via the sonification of nuclear magnetic resonance data. Upon user interaction with different molecules, the overall composition and complexity of the sound world develop. The binaural sound material can migrate back and forth from the molecules to the non-binaural background composition and, depending on user input, develop in terms of timbre, spectral complexity, and gestural content. ‘Quantum’ is an exploration of the combination of sonification and virtual reality and offers first points of discussion that can be elaborated upon in future artworks, games or educational content.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Molecular sonification is the transformation of chemical data into sound and has been used to gain insight into chemical systems and for the creation of contemporary music compositions. The combination of sonification with a virtual reality environment offers potential benefits such as providing a visual frame of reference, an increased sense of immersion, nuanced spatial information through binaural audio cues and ease of interactivity. To explore how strategies developed in sonification research and contemporary electroacoustic music composition can be adapted to virtual reality, the art-science installation ’Quantum’ was created. The multi-media work consists of computer-generated molecules in a virtual space producing sound created via the sonification of nuclear magnetic resonance data. Upon user interaction with different molecules, the overall composition and complexity of the sound world develop. The binaural sound material can migrate back and forth from the molecules to the non-binaural background composition and, depending on user input, develop in terms of timbre, spectral complexity, and gestural content. ‘Quantum’ is an exploration of the combination of sonification and virtual reality and offers first points of discussion that can be elaborated upon in future artworks, games or educational content.",
"fno": "08577080",
"keywords": [
"Sonification",
"Energy States",
"Visualization",
"Nuclear Magnetic Resonance",
"Virtual Reality",
"Chemicals",
"Sonification",
"Molecular Sonification",
"Virtual Reality"
],
"authors": [
{
"affiliation": "University of Manchester",
"fullName": "Falk Morawitz",
"givenName": "Falk",
"surname": "Morawitz",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "sive",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2018-03-01T00:00:00",
"pubType": "proceedings",
"pages": "1-5",
"year": "2018",
"issn": null,
"isbn": "978-1-5386-5713-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08577131",
"articleId": "17D45XoXP3v",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08577121",
"articleId": "17D45Xtvpaj",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icmens/2009/3938/0/3938a203",
"title": "Reliability of Usability Inspection for Sonification Applications",
"doi": null,
"abstractUrl": "/proceedings-article/icmens/2009/3938a203/12OmNAKcNNL",
"parentPublication": {
"id": "proceedings/icmens/2009/3938/0",
"title": "MEMS, NANO, and Smart Systems, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/csse/2008/3336/3/3336e014",
"title": "Study on Application of CAD Sonification",
"doi": null,
"abstractUrl": "/proceedings-article/csse/2008/3336e014/12OmNwK7obY",
"parentPublication": {
"id": "proceedings/csse/2008/3336/3",
"title": "Computer Science and Software Engineering, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2004/2177/0/21770865",
"title": "A Path Based Model for Sonification",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2004/21770865/12OmNyv7m5V",
"parentPublication": {
"id": "proceedings/iv/2004/2177/0",
"title": "Proceedings. Eighth International Conference on Information Visualisation, 2004. IV 2004.",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/mu/2015/01/mmu2015010041",
"title": "Designing an Interactive Audio Interface for Climate Science",
"doi": null,
"abstractUrl": "/magazine/mu/2015/01/mmu2015010041/13rRUwdrdMQ",
"parentPublication": {
"id": "mags/mu",
"title": "IEEE MultiMedia",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/so/1995/02/s2045",
"title": "Data Sonification: Do You See What I Hear?",
"doi": null,
"abstractUrl": "/magazine/so/1995/02/s2045/13rRUwfI0NU",
"parentPublication": {
"id": "mags/so",
"title": "IEEE Software",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cs/1999/04/c4048",
"title": "Data Sonification and Sound Visualization",
"doi": null,
"abstractUrl": "/magazine/cs/1999/04/c4048/13rRUy08MzR",
"parentPublication": {
"id": "mags/cs",
"title": "Computing in Science & Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2018/02/mcg2018020031",
"title": "Sonic Interactions in Virtual Reality: State of the Art, Current Challenges, and Future Directions",
"doi": null,
"abstractUrl": "/magazine/cg/2018/02/mcg2018020031/13rRUy3xYd8",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/mu/2005/02/u2053",
"title": "Movement Sonification: Effects on Perception and Action",
"doi": null,
"abstractUrl": "/magazine/mu/2005/02/u2053/13rRUytF46p",
"parentPublication": {
"id": "mags/mu",
"title": "IEEE MultiMedia",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cw/2022/6814/0/681400a159",
"title": "Feasibility Study on Interactive Geometry Sonification",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2022/681400a159/1I6ROHRhemc",
"parentPublication": {
"id": "proceedings/cw/2022/6814/0",
"title": "2022 International Conference on Cyberworlds (CW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/visap/2020/8553/0/855300a045",
"title": "Leander: Navigating Musical Possibility Space Through Color Data Sonification",
"doi": null,
"abstractUrl": "/proceedings-article/visap/2020/855300a045/1q7jxXtqA8w",
"parentPublication": {
"id": "proceedings/visap/2020/8553/0",
"title": "2020 IEEE VIS Arts Program (VISAP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNxE2mWf",
"title": "2012 14th Symposium on Virtual and Augmented Reality",
"acronym": "svr",
"groupId": "1800426",
"volume": "0",
"displayVolume": "0",
"year": "2012",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNAYoKsE",
"doi": "10.1109/SVR.2012.18",
"title": "From VR to AR: Adding AR Functionality to an Existing VR Software Framework",
"normalizedTitle": "From VR to AR: Adding AR Functionality to an Existing VR Software Framework",
"abstract": "We present our approach to extend a Virtual Reality software framework towards the use for Augmented Reality applications. Although VR and AR applications have very similar requirements in terms of abstract components (like 6DOF input, stereoscopic output, simulation engines), the requirements in terms of hardware and software vary considerably. In this article we would like to share the experience gained from adapting our VR software framework for AR applications. We will address design issues for this task. The result is a VR/AR basic software that allows us to implement interactive applications without fixing their type (VR or AR) beforehand. Switching from VR to AR is a matter of changing the configuration file of the application.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We present our approach to extend a Virtual Reality software framework towards the use for Augmented Reality applications. Although VR and AR applications have very similar requirements in terms of abstract components (like 6DOF input, stereoscopic output, simulation engines), the requirements in terms of hardware and software vary considerably. In this article we would like to share the experience gained from adapting our VR software framework for AR applications. We will address design issues for this task. The result is a VR/AR basic software that allows us to implement interactive applications without fixing their type (VR or AR) beforehand. Switching from VR to AR is a matter of changing the configuration file of the application.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We present our approach to extend a Virtual Reality software framework towards the use for Augmented Reality applications. Although VR and AR applications have very similar requirements in terms of abstract components (like 6DOF input, stereoscopic output, simulation engines), the requirements in terms of hardware and software vary considerably. In this article we would like to share the experience gained from adapting our VR software framework for AR applications. We will address design issues for this task. The result is a VR/AR basic software that allows us to implement interactive applications without fixing their type (VR or AR) beforehand. Switching from VR to AR is a matter of changing the configuration file of the application.",
"fno": "4725a116",
"keywords": [
"Software Architecture",
"Augmented Reality",
"Virtual Reality",
"Software Framework"
],
"authors": [],
"idPrefix": "svr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2012-05-01T00:00:00",
"pubType": "proceedings",
"pages": "116-124",
"year": "2012",
"issn": null,
"isbn": "978-1-4673-1929-4",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "4725a108",
"articleId": "12OmNyuPL1J",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "4725a125",
"articleId": "12OmNxzuMBG",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/svr/2012/4725/0/4725a010",
"title": "A VR Framework for Desktop Applications",
"doi": null,
"abstractUrl": "/proceedings-article/svr/2012/4725a010/12OmNBrlPwO",
"parentPublication": {
"id": "proceedings/svr/2012/4725/0",
"title": "2012 14th Symposium on Virtual and Augmented Reality",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2002/1492/0/14920287",
"title": "Tinmith-Hand: Unified User Interface Technology for Mobile Outdoor Augmented Reality and Indoor Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2002/14920287/12OmNqH9htu",
"parentPublication": {
"id": "proceedings/vr/2002/1492/0",
"title": "Proceedings IEEE Virtual Reality 2002",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2017/6327/0/6327a253",
"title": "Workshop on VR and AR meet creative industries",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2017/6327a253/12OmNylKASp",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2017/6327/0",
"title": "2017 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2008/1971/0/04480774",
"title": "VARU Framework: Enabling Rapid Prototyping of VR, AR and Ubiquitous Applications",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2008/04480774/12OmNzC5Tgt",
"parentPublication": {
"id": "proceedings/vr/2008/1971/0",
"title": "IEEE Virtual Reality 2008",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/11/08007246",
"title": "AR Feels “Softer” than VR: Haptic Perception of Stiffness in Augmented versus Virtual Reality",
"doi": null,
"abstractUrl": "/journal/tg/2017/11/08007246/13rRUwh80Hj",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2018/7592/0/08699200",
"title": "Effective Free Field of View Scene Exploration in VR and AR",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699200/19F1SrRS4vK",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2018/7592/0",
"title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798148",
"title": "CAVE-AR: A VR Authoring System to Interactively Design, Simulate, and Debug Multi-user AR Experiences",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798148/1cJ0FRS6rjG",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cs/2020/03/08978600",
"title": "Virtual and Augmented Reality Applications to Support Data Analysis and Assessment of Science and Engineering",
"doi": null,
"abstractUrl": "/magazine/cs/2020/03/08978600/1haUwHHeDew",
"parentPublication": {
"id": "mags/cs",
"title": "Computing in Science & Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/svr/2020/9231/0/923100a057",
"title": "Understanding VR Software Testing Needs from Stakeholders’ Points of View",
"doi": null,
"abstractUrl": "/proceedings-article/svr/2020/923100a057/1oZBAurDhv2",
"parentPublication": {
"id": "proceedings/svr/2020/9231/0",
"title": "2020 22nd Symposium on Virtual and Augmented Reality (SVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/12/09490310",
"title": "Shedding Light on Cast Shadows: An Investigation of Perceived Ground Contact in AR and VR",
"doi": null,
"abstractUrl": "/journal/tg/2022/12/09490310/1vmGThNh9jq",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNBqMDBD",
"title": "Proceedings IEEE Virtual Reality 2002",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2002",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNqH9htu",
"doi": "10.1109/VR.2002.996542",
"title": "Tinmith-Hand: Unified User Interface Technology for Mobile Outdoor Augmented Reality and Indoor Virtual Reality",
"normalizedTitle": "Tinmith-Hand: Unified User Interface Technology for Mobile Outdoor Augmented Reality and Indoor Virtual Reality",
"abstract": "This paper presents a unified user interface technology, using 3D interaction techniques, constructive solid geometry, and a glove based menuing system, known as Tinmith-Hand, to support mobile outdoor augmented reality applications and indoor virtual reality applications. Tinmith-Hand uses similar concepts for both domains so that AR and VR applications are consistent and simple to use. The future goal of this user interface is to allow collaboration between outdoor AR and indoor VR systems.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper presents a unified user interface technology, using 3D interaction techniques, constructive solid geometry, and a glove based menuing system, known as Tinmith-Hand, to support mobile outdoor augmented reality applications and indoor virtual reality applications. Tinmith-Hand uses similar concepts for both domains so that AR and VR applications are consistent and simple to use. The future goal of this user interface is to allow collaboration between outdoor AR and indoor VR systems.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper presents a unified user interface technology, using 3D interaction techniques, constructive solid geometry, and a glove based menuing system, known as Tinmith-Hand, to support mobile outdoor augmented reality applications and indoor virtual reality applications. Tinmith-Hand uses similar concepts for both domains so that AR and VR applications are consistent and simple to use. The future goal of this user interface is to allow collaboration between outdoor AR and indoor VR systems.",
"fno": "14920287",
"keywords": [
"Augmented Reality",
"User Interface",
"Virtual Reality"
],
"authors": [
{
"affiliation": "University of South Australia",
"fullName": "Wayne Piekarski",
"givenName": "Wayne",
"surname": "Piekarski",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of South Australia",
"fullName": "Bruce H. Thomas",
"givenName": "Bruce H.",
"surname": "Thomas",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2002-03-01T00:00:00",
"pubType": "proceedings",
"pages": "287",
"year": "2002",
"issn": "1087-8270",
"isbn": "0-7695-1492-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "14920285",
"articleId": "12OmNAKLZZY",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "14920289",
"articleId": "12OmNCw3z9W",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iv/1999/0210/0/02100032",
"title": "Virtual Reality and Augmented Reality as a Training Tool for Assembly Tasks",
"doi": null,
"abstractUrl": "/proceedings-article/iv/1999/02100032/12OmNAObbyR",
"parentPublication": {
"id": "proceedings/iv/1999/0210/0",
"title": "1999 IEEE International Conference on Information Visualization (Cat. No. PR00210)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wsc/2006/0500/0/04117851",
"title": "Structure of an Extensible Augmented Reality Framework for Visualization of Simulated Construction Processes",
"doi": null,
"abstractUrl": "/proceedings-article/wsc/2006/04117851/12OmNBOll2O",
"parentPublication": {
"id": "proceedings/wsc/2006/0500/0",
"title": "2006 Winter Simulation Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iscc/2012/2712/0/IS225",
"title": "Vision-based user tracking for outdoor augmented reality",
"doi": null,
"abstractUrl": "/proceedings-article/iscc/2012/IS225/12OmNrH1PAZ",
"parentPublication": {
"id": "proceedings/iscc/2012/2712/0",
"title": "2012 IEEE Symposium on Computers and Communications (ISCC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iswc/2001/1318/0/13180031",
"title": "Tinmith-Metro: New Outdoor Techniques for Creating City Models with an Augmented Reality Wearable Computer",
"doi": null,
"abstractUrl": "/proceedings-article/iswc/2001/13180031/12OmNvlPkGK",
"parentPublication": {
"id": "proceedings/iswc/2001/1318/0",
"title": "Proceedings Fifth International Symposium on Wearable Computers",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iswc/2000/0795/0/07950139",
"title": "ARQuake: An Outdoor/Indoor Augmented Reality First Person Application",
"doi": null,
"abstractUrl": "/proceedings-article/iswc/2000/07950139/12OmNxdDFAr",
"parentPublication": {
"id": "proceedings/iswc/2000/0795/0",
"title": "Digest of Papers. Fourth International Symposium on Wearable Computers",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2004/8415/0/84150011",
"title": "Integrated Head and Hand Tracking for Indoor and Outdoor Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2004/84150011/12OmNzBOi5o",
"parentPublication": {
"id": "proceedings/vr/2004/8415/0",
"title": "Virtual Reality Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2005/8929/0/01492748",
"title": "An empirical user-based study of text drawing styles and outdoor background textures for augmented reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2005/01492748/12OmNzZmZwi",
"parentPublication": {
"id": "proceedings/vr/2005/8929/0",
"title": "IEEE Virtual Reality 2005",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2009/3943/0/04810999",
"title": "Indoor vs. Outdoor Depth Perception for Mobile Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2009/04810999/12OmNzvhvIR",
"parentPublication": {
"id": "proceedings/vr/2009/3943/0",
"title": "2009 IEEE Virtual Reality Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2006/01/mcg2006010014",
"title": "3D Modeling with the Tinmith Mobile Outdoor Augmented Reality System",
"doi": null,
"abstractUrl": "/magazine/cg/2006/01/mcg2006010014/13rRUB6Sq2O",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cs/2020/03/09080683",
"title": "Virtual and Augmented Reality Applications in Science and Engineering",
"doi": null,
"abstractUrl": "/magazine/cs/2020/03/09080683/1joA2jpzdSw",
"parentPublication": {
"id": "mags/cs",
"title": "Computing in Science & Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNx8Ounz",
"title": "2010 IEEE Haptics Symposium (Formerly known as Symposium on Haptic Interfaces for Virtual Environment and Teleoperator Systems)",
"acronym": "haptics",
"groupId": "1000312",
"volume": "0",
"displayVolume": "0",
"year": "2010",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNwGZNQB",
"doi": "10.1109/HAPTIC.2010.5444645",
"title": "Stiffness modulation for Haptic Augmented Reality: Extension to 3D interaction",
"normalizedTitle": "Stiffness modulation for Haptic Augmented Reality: Extension to 3D interaction",
"abstract": "Haptic Augmented Reality (AR) allows a user to touch a real environment augmented with synthetic haptic stimuli. For example, medical students can palpate a virtual tumor inside a real mannequin using a haptic AR system to practice cancer detection. To realize such functionality, we need to alter the haptic attributes of a real object by means of virtual haptic feedback. Previously, we presented a haptic AR system with stiffness as a goal modulation property, and demonstrated its competent physical and perceptual performances for 1D interaction. In this paper, we extend the system so that a user can interact with a real object in any 3D exploratory pattern while perceiving its augmented stiffness. A series of algorithms are developed for contact detection, deformation estimation, force rendering, and force control. Their performances are thoroughly evaluated with real samples. A particular focus has been on minimizing the amount of preprocessing such as geometry modeling. Our haptic AR system can provide convincing stiffness modulation for real objects of relatively homogeneous deformation properties. The limitations of our AR system are also discussed along with a plan for future work.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Haptic Augmented Reality (AR) allows a user to touch a real environment augmented with synthetic haptic stimuli. For example, medical students can palpate a virtual tumor inside a real mannequin using a haptic AR system to practice cancer detection. To realize such functionality, we need to alter the haptic attributes of a real object by means of virtual haptic feedback. Previously, we presented a haptic AR system with stiffness as a goal modulation property, and demonstrated its competent physical and perceptual performances for 1D interaction. In this paper, we extend the system so that a user can interact with a real object in any 3D exploratory pattern while perceiving its augmented stiffness. A series of algorithms are developed for contact detection, deformation estimation, force rendering, and force control. Their performances are thoroughly evaluated with real samples. A particular focus has been on minimizing the amount of preprocessing such as geometry modeling. Our haptic AR system can provide convincing stiffness modulation for real objects of relatively homogeneous deformation properties. The limitations of our AR system are also discussed along with a plan for future work.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Haptic Augmented Reality (AR) allows a user to touch a real environment augmented with synthetic haptic stimuli. For example, medical students can palpate a virtual tumor inside a real mannequin using a haptic AR system to practice cancer detection. To realize such functionality, we need to alter the haptic attributes of a real object by means of virtual haptic feedback. Previously, we presented a haptic AR system with stiffness as a goal modulation property, and demonstrated its competent physical and perceptual performances for 1D interaction. In this paper, we extend the system so that a user can interact with a real object in any 3D exploratory pattern while perceiving its augmented stiffness. A series of algorithms are developed for contact detection, deformation estimation, force rendering, and force control. Their performances are thoroughly evaluated with real samples. A particular focus has been on minimizing the amount of preprocessing such as geometry modeling. Our haptic AR system can provide convincing stiffness modulation for real objects of relatively homogeneous deformation properties. The limitations of our AR system are also discussed along with a plan for future work.",
"fno": "05444645",
"keywords": [
"Augmented Reality",
"Deformation",
"Geometry",
"Haptic Interfaces",
"Stiffness Modulation",
"Haptic Augmented Reality",
"3 D Interaction",
"Synthetic Haptic Stimuli",
"Virtual Tumor",
"Haptic AR System",
"Cancer Detection",
"Virtual Haptic Feedback",
"3 D Exploratory Pattern",
"Augmented Stiffness",
"Contact Detection",
"Deformation Estimation",
"Force Rendering",
"Force Control",
"Geometry Modeling",
"Homogeneous Deformation Properties",
"Haptic Interfaces",
"Augmented Reality",
"Neoplasms",
"Virtual Reality",
"Performance Evaluation",
"Space Technology",
"Force Control",
"Industrial Training",
"Computer Science",
"Biomedical Engineering",
"H 5 2 Information Interfaces And Presentation User Interfaces Haptic I O",
"H 5 1 Information Interfaces And Presentation Multimedia Information Systems Artificial Augmented And Virtual Realities"
],
"authors": [
{
"affiliation": "Haptics and Virtual Reality Laboratory, Department of Computer Science and Engineering, Pohang University of Science and Technology (POSTECH), Republic of Korea",
"fullName": "Seokhee Jeon",
"givenName": "Seokhee",
"surname": "Jeon",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Haptics and Virtual Reality Laboratory, Department of Computer Science and Engineering, Pohang University of Science and Technology (POSTECH), Republic of Korea",
"fullName": "Seungmoon Choi",
"givenName": "Seungmoon",
"surname": "Choi",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "haptics",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2010-03-01T00:00:00",
"pubType": "proceedings",
"pages": "",
"year": "2010",
"issn": "2324-7347",
"isbn": "978-1-4244-6821-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "05444644",
"articleId": "12OmNAS9zDn",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "05444642",
"articleId": "12OmNwoPtoK",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/whc/2009/3858/0/04810907",
"title": "Haptic augmented reality: Modulation of real object stiffness",
"doi": null,
"abstractUrl": "/proceedings-article/whc/2009/04810907/12OmNrJROTF",
"parentPublication": {
"id": "proceedings/whc/2009/3858/0",
"title": "World Haptics Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2010/9343/0/05643617",
"title": "Breast cancer palpation system using haptic augmented reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2010/05643617/12OmNwF0C53",
"parentPublication": {
"id": "proceedings/ismar/2010/9343/0",
"title": "2010 IEEE International Symposium on Mixed and Augmented Reality",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2010/9343/0/05643585",
"title": "Haptic simulation of breast cancer palpation: A case study of haptic augmented reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2010/05643585/12OmNwtn3ui",
"parentPublication": {
"id": "proceedings/ismar/2010/9343/0",
"title": "2010 IEEE International Symposium on Mixed and Augmented Reality",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmcs/1999/0253/1/02539195",
"title": "Haptics in Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/icmcs/1999/02539195/12OmNyQ7G3s",
"parentPublication": {
"id": "proceedings/icmcs/1999/0253/1",
"title": "Multimedia Computing and Systems, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2013/4795/0/06549404",
"title": "HARP: A framework for visuo-haptic augmented reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2013/06549404/12OmNzBwGx8",
"parentPublication": {
"id": "proceedings/vr/2013/4795/0",
"title": "2013 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2009/5390/0/05336501",
"title": "Influence of visual and haptic delays on stiffness perception in augmented reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2009/05336501/12OmNzwpU4X",
"parentPublication": {
"id": "proceedings/ismar/2009/5390/0",
"title": "2009 8th IEEE International Symposium on Mixed and Augmented Reality",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446280",
"title": "Enhancing the Stiffness Perception of Tangible Objects in Mixed Reality Using Wearable Haptics",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446280/13bd1AIBM2a",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/th/2017/04/07892978",
"title": "Evaluation of Wearable Haptic Systems for the Fingers in Augmented Reality Applications",
"doi": null,
"abstractUrl": "/journal/th/2017/04/07892978/13rRUwInv4D",
"parentPublication": {
"id": "trans/th",
"title": "IEEE Transactions on Haptics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/th/2012/01/tth2012010077",
"title": "Rendering Virtual Tumors in Real Tissue Mock-Ups Using Haptic Augmented Reality",
"doi": null,
"abstractUrl": "/journal/th/2012/01/tth2012010077/13rRUwInvt1",
"parentPublication": {
"id": "trans/th",
"title": "IEEE Transactions on Haptics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/11/08007246",
"title": "AR Feels “Softer” than VR: Haptic Perception of Stiffness in Augmented versus Virtual Reality",
"doi": null,
"abstractUrl": "/journal/tg/2017/11/08007246/13rRUwh80Hj",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNvRU0cM",
"title": "2017 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"acronym": "ismar-adjunct",
"groupId": "1810084",
"volume": "0",
"displayVolume": "0",
"year": "2017",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNylKASp",
"doi": "10.1109/ISMAR-Adjunct.2017.81",
"title": "Workshop on VR and AR meet creative industries",
"normalizedTitle": "Workshop on VR and AR meet creative industries",
"abstract": "Summary form only given, as follows. A complete record of the workshop was not made available for publication as part of the conference proceedings. The production of new virtual reality (VR) and augmented reality (AR) experiences tackle both technical, human and creative aspects. In this workshop, we would like to invite contributions mixing creative and technological viewpoints in order to share common understandings and lessons to provide better experiences for the final users. In this context, the workshop aims to foster participation of artists and designers as humanities scientists (philosophy, literature, etc.) to meet up with usual ISMAR attendance. Mainly we are interested with (but not limited to) the following themes and topics of interest: Innovative interaction design with consumer grade multimedia VR/AR systems; User feedback and Quality of Experience assessment for VR/AR content creation; Quality of Experience as an artistic intention in VR/AR; Usage of VR/AR technologies in art performances and design; Narrative studies/Storytelling in VR/AR; and Create in/with VR/AR, VR/AR platforms/tools to support design and art creation.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Summary form only given, as follows. A complete record of the workshop was not made available for publication as part of the conference proceedings. The production of new virtual reality (VR) and augmented reality (AR) experiences tackle both technical, human and creative aspects. In this workshop, we would like to invite contributions mixing creative and technological viewpoints in order to share common understandings and lessons to provide better experiences for the final users. In this context, the workshop aims to foster participation of artists and designers as humanities scientists (philosophy, literature, etc.) to meet up with usual ISMAR attendance. Mainly we are interested with (but not limited to) the following themes and topics of interest: Innovative interaction design with consumer grade multimedia VR/AR systems; User feedback and Quality of Experience assessment for VR/AR content creation; Quality of Experience as an artistic intention in VR/AR; Usage of VR/AR technologies in art performances and design; Narrative studies/Storytelling in VR/AR; and Create in/with VR/AR, VR/AR platforms/tools to support design and art creation.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Summary form only given, as follows. A complete record of the workshop was not made available for publication as part of the conference proceedings. The production of new virtual reality (VR) and augmented reality (AR) experiences tackle both technical, human and creative aspects. In this workshop, we would like to invite contributions mixing creative and technological viewpoints in order to share common understandings and lessons to provide better experiences for the final users. In this context, the workshop aims to foster participation of artists and designers as humanities scientists (philosophy, literature, etc.) to meet up with usual ISMAR attendance. Mainly we are interested with (but not limited to) the following themes and topics of interest: Innovative interaction design with consumer grade multimedia VR/AR systems; User feedback and Quality of Experience assessment for VR/AR content creation; Quality of Experience as an artistic intention in VR/AR; Usage of VR/AR technologies in art performances and design; Narrative studies/Storytelling in VR/AR; and Create in/with VR/AR, VR/AR platforms/tools to support design and art creation.",
"fno": "6327a253",
"keywords": [
"Augmented Reality",
"Virtual Reality",
"Art"
],
"authors": [
{
"affiliation": null,
"fullName": "Toinon Vigier",
"givenName": "Toinon",
"surname": "Vigier",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Carola Moujan",
"givenName": "Carola",
"surname": "Moujan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Jacques Gilbert",
"givenName": "Jacques",
"surname": "Gilbert",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar-adjunct",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2017-10-01T00:00:00",
"pubType": "proceedings",
"pages": "253-253",
"year": "2017",
"issn": null,
"isbn": "978-0-7695-6327-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "6327a252",
"articleId": "12OmNwnYG1O",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "6327a254",
"articleId": "12OmNxzuMIV",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/svr/2012/4725/0/4725a116",
"title": "From VR to AR: Adding AR Functionality to an Existing VR Software Framework",
"doi": null,
"abstractUrl": "/proceedings-article/svr/2012/4725a116/12OmNAYoKsE",
"parentPublication": {
"id": "proceedings/svr/2012/4725/0",
"title": "2012 14th Symposium on Virtual and Augmented Reality",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismarw/2016/3740/0/07836515",
"title": "Empower VR Art and AR Book with Spatial Interaction",
"doi": null,
"abstractUrl": "/proceedings-article/ismarw/2016/07836515/12OmNB9t6u0",
"parentPublication": {
"id": "proceedings/ismarw/2016/3740/0",
"title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2017/6647/0/07892360",
"title": "Diving into the multiplicity: Liberating your design process from a convention-centered approach",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892360/12OmNCwlama",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2017/6327/0/6327a251",
"title": "Workshop on enterprise AR adoption obstacles",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2017/6327a251/12OmNqI04Zv",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2017/6327/0",
"title": "2017 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2008/1971/0/04480774",
"title": "VARU Framework: Enabling Rapid Prototyping of VR, AR and Ubiquitous Applications",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2008/04480774/12OmNzC5Tgt",
"parentPublication": {
"id": "proceedings/vr/2008/1971/0",
"title": "IEEE Virtual Reality 2008",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/11/08007246",
"title": "AR Feels “Softer” than VR: Haptic Perception of Stiffness in Augmented versus Virtual Reality",
"doi": null,
"abstractUrl": "/journal/tg/2017/11/08007246/13rRUwh80Hj",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/var4good/2018/5977/0/08576885",
"title": "Degraded Reality: Using VR/AR to simulate visual impairments",
"doi": null,
"abstractUrl": "/proceedings-article/var4good/2018/08576885/17D45Xtvpep",
"parentPublication": {
"id": "proceedings/var4good/2018/5977/0",
"title": "2018 IEEE Workshop on Augmented and Virtual Realities for Good (VAR4Good)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a222",
"title": "Design and User Research in AR/VR/MR",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a222/1J7WgPd6kLe",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2020/5608/0/09089588",
"title": "Manipulating Puppets in VR",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2020/09089588/1jIxbTl2uRi",
"parentPublication": {
"id": "proceedings/vr/2020/5608/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a599",
"title": "CDVVAR: VR/AR Collaborative Data Visualization Tool",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a599/1tnXiU5GF9K",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "13bd1eJgoia",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2018",
"__typename": "ProceedingType"
},
"article": {
"id": "13bd1AIBM2a",
"doi": "10.1109/VR.2018.8446280",
"title": "Enhancing the Stiffness Perception of Tangible Objects in Mixed Reality Using Wearable Haptics",
"normalizedTitle": "Enhancing the Stiffness Perception of Tangible Objects in Mixed Reality Using Wearable Haptics",
"abstract": "This paper studies the combination of tangible objects and wearable haptics for improving the display of stiffness sensations in virtual environments. Tangible objects enable to feel the general shape of objects, but they are often passive or unable to simulate several varying mechanical properties. Wearable haptic devices are portable and unobtrusive interfaces able to generate varying tactile sensations, but they often fail at providing convincing stiff contacts and distributed shape sensations. We propose to combine these two approaches in virtual and augmented reality (VR/AR), becoming able of arbitrarily augmenting the perceived stiffness of real/tangible objects by providing timely tactile stimuli at the fingers. We developed a proof-of-concept enabling to simulate varying elasticity/stiffness sensations when interacting with tangible objects by using wearable tactile modules at the fingertips. We carried out a user study showing that wearable haptic stimulation can well alter the perceived stiffness of real objects, even when the tactile stimuli are not delivered at the contact point. We illustrated our approach both in VR and AR, within several use cases and different tangible settings, such as when touching surfaces, pressing buttons and pistons, or holding an object. Taken together, our results pave the way for novel haptic sensations in VR/AR by better exploiting the multiple ways of providing simple, unobtrusive, and low-cost haptic displays.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper studies the combination of tangible objects and wearable haptics for improving the display of stiffness sensations in virtual environments. Tangible objects enable to feel the general shape of objects, but they are often passive or unable to simulate several varying mechanical properties. Wearable haptic devices are portable and unobtrusive interfaces able to generate varying tactile sensations, but they often fail at providing convincing stiff contacts and distributed shape sensations. We propose to combine these two approaches in virtual and augmented reality (VR/AR), becoming able of arbitrarily augmenting the perceived stiffness of real/tangible objects by providing timely tactile stimuli at the fingers. We developed a proof-of-concept enabling to simulate varying elasticity/stiffness sensations when interacting with tangible objects by using wearable tactile modules at the fingertips. We carried out a user study showing that wearable haptic stimulation can well alter the perceived stiffness of real objects, even when the tactile stimuli are not delivered at the contact point. We illustrated our approach both in VR and AR, within several use cases and different tangible settings, such as when touching surfaces, pressing buttons and pistons, or holding an object. Taken together, our results pave the way for novel haptic sensations in VR/AR by better exploiting the multiple ways of providing simple, unobtrusive, and low-cost haptic displays.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper studies the combination of tangible objects and wearable haptics for improving the display of stiffness sensations in virtual environments. Tangible objects enable to feel the general shape of objects, but they are often passive or unable to simulate several varying mechanical properties. Wearable haptic devices are portable and unobtrusive interfaces able to generate varying tactile sensations, but they often fail at providing convincing stiff contacts and distributed shape sensations. We propose to combine these two approaches in virtual and augmented reality (VR/AR), becoming able of arbitrarily augmenting the perceived stiffness of real/tangible objects by providing timely tactile stimuli at the fingers. We developed a proof-of-concept enabling to simulate varying elasticity/stiffness sensations when interacting with tangible objects by using wearable tactile modules at the fingertips. We carried out a user study showing that wearable haptic stimulation can well alter the perceived stiffness of real objects, even when the tactile stimuli are not delivered at the contact point. We illustrated our approach both in VR and AR, within several use cases and different tangible settings, such as when touching surfaces, pressing buttons and pistons, or holding an object. Taken together, our results pave the way for novel haptic sensations in VR/AR by better exploiting the multiple ways of providing simple, unobtrusive, and low-cost haptic displays.",
"fno": "08446280",
"keywords": [
"Augmented Reality",
"Elasticity",
"Haptic Interfaces",
"Virtual Reality",
"Augmented Reality",
"Elasticity Sensations",
"Stiff Contacts",
"Mixed Reality",
"Distributed Shape Sensations",
"Tactile Sensations",
"Wearable Haptic Devices",
"Stiffness Perception",
"Low Cost Haptic Displays",
"Wearable Haptic Stimulation",
"Wearable Tactile Modules",
"Haptic Interfaces",
"Skin",
"Shape",
"Belts",
"Virtual Environments",
"Mechanical Factors",
"Augmented Reality",
"Human Centered Computing Human Computer Interaction Interaction Devices Haptic Devices"
],
"authors": [
{
"affiliation": "Inria CNRS IRISA, Univ Rennes INSA, Rennes, France",
"fullName": "Xavier de Tinguy",
"givenName": "Xavier",
"surname": "de Tinguy",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Inria CNRS IRISA, Univ Rennes INSA, Rennes, France",
"fullName": "Claudio Pacchierotti",
"givenName": "Claudio",
"surname": "Pacchierotti",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Inria CNRS IRISA, Univ Rennes INSA, Rennes, France",
"fullName": "Maud Marchal",
"givenName": "Maud",
"surname": "Marchal",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Inria CNRS IRISA, Univ Rennes INSA, Rennes, France",
"fullName": "Anatole Lécuyer",
"givenName": "Anatole",
"surname": "Lécuyer",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2018-03-01T00:00:00",
"pubType": "proceedings",
"pages": "81-90",
"year": "2018",
"issn": null,
"isbn": "978-1-5386-3365-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08446153",
"articleId": "13bd1AIBM27",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08446403",
"articleId": "13bd1eSlytA",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/haptics/2008/2005/0/04479918",
"title": "Haptic Identification of Stiffness and Force Magnitude",
"doi": null,
"abstractUrl": "/proceedings-article/haptics/2008/04479918/12OmNApLGq1",
"parentPublication": {
"id": "proceedings/haptics/2008/2005/0",
"title": "IEEE Haptics Symposium 2008",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/nbis/2011/4458/0/4458a349",
"title": "A Tangible 3D Desktop Environment with Force Feedback",
"doi": null,
"abstractUrl": "/proceedings-article/nbis/2011/4458a349/12OmNqIzgU4",
"parentPublication": {
"id": "proceedings/nbis/2011/4458/0",
"title": "2011 14th International Conference on Network-Based Information Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/haptics/2010/6821/0/05444645",
"title": "Stiffness modulation for Haptic Augmented Reality: Extension to 3D interaction",
"doi": null,
"abstractUrl": "/proceedings-article/haptics/2010/05444645/12OmNwGZNQB",
"parentPublication": {
"id": "proceedings/haptics/2010/6821/0",
"title": "2010 IEEE Haptics Symposium (Formerly known as Symposium on Haptic Interfaces for Virtual Environment and Teleoperator Systems)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icat/2013/11/0/06728921",
"title": "Haptic media construction and utilization of human-harmonized “tangible” information environment",
"doi": null,
"abstractUrl": "/proceedings-article/icat/2013/06728921/12OmNxFsmnC",
"parentPublication": {
"id": "proceedings/icat/2013/11/0",
"title": "2013 23rd International Conference on Artificial Reality and Telexistence (ICAT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/haptic/2006/0226/0/01627067",
"title": "Harness Design and Coupling Stiffness for Two-Axis Torso Haptics",
"doi": null,
"abstractUrl": "/proceedings-article/haptic/2006/01627067/12OmNyuyacp",
"parentPublication": {
"id": "proceedings/haptic/2006/0226/0",
"title": "Haptic Interfaces for Virtual Environment and Teleoperator Systems, International Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isvri/2011/0054/0/05759662",
"title": "Pseudo-haptic feedback augmented with visual and tactile vibrations",
"doi": null,
"abstractUrl": "/proceedings-article/isvri/2011/05759662/12OmNzvz6OE",
"parentPublication": {
"id": "proceedings/isvri/2011/0054/0",
"title": "2011 IEEE International Symposium on VR Innovation (ISVRI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/11/08007246",
"title": "AR Feels “Softer” than VR: Haptic Perception of Stiffness in Augmented versus Virtual Reality",
"doi": null,
"abstractUrl": "/journal/tg/2017/11/08007246/13rRUwh80Hj",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/th/2017/02/07604131",
"title": "Optimization-Based Wearable Tactile Rendering",
"doi": null,
"abstractUrl": "/journal/th/2017/02/07604131/13rRUxcbnCC",
"parentPublication": {
"id": "trans/th",
"title": "IEEE Transactions on Haptics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/12/08103791",
"title": "Magnetic Levitation Haptic Augmentation for Virtual Tissue Stiffness Perception",
"doi": null,
"abstractUrl": "/journal/tg/2018/12/08103791/14H4WM6Ory8",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798205",
"title": "Toward Universal Tangible Objects: Optimizing Haptic Pinching Sensations in 3D Interaction",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798205/1cJ1bY8RJIc",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "13bd1eJgoia",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2018",
"__typename": "ProceedingType"
},
"article": {
"id": "13bd1tl2omt",
"doi": "10.1109/VR.2018.8446053",
"title": "High-Fidelity Interaction for Virtual and Augmented Reality",
"normalizedTitle": "High-Fidelity Interaction for Virtual and Augmented Reality",
"abstract": "Expressive interaction with wearable head-mounted displays for virtual (VR) and augmented reality (AR) systems is essential for practical adoption. These systems pose new challenges and have higher performance standards compared to other computing paradigms. In this position paper, I argue that interactive devices for VR and AR systems can leverage high-precision tracking and haptics to achieve a robust set of interaction techniques and a rich sense of presence. I describe my past and proposed future research in designing interactive devices that innovate in the domains of eye tracking, wearable finger input, and handheld controllers.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Expressive interaction with wearable head-mounted displays for virtual (VR) and augmented reality (AR) systems is essential for practical adoption. These systems pose new challenges and have higher performance standards compared to other computing paradigms. In this position paper, I argue that interactive devices for VR and AR systems can leverage high-precision tracking and haptics to achieve a robust set of interaction techniques and a rich sense of presence. I describe my past and proposed future research in designing interactive devices that innovate in the domains of eye tracking, wearable finger input, and handheld controllers.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Expressive interaction with wearable head-mounted displays for virtual (VR) and augmented reality (AR) systems is essential for practical adoption. These systems pose new challenges and have higher performance standards compared to other computing paradigms. In this position paper, I argue that interactive devices for VR and AR systems can leverage high-precision tracking and haptics to achieve a robust set of interaction techniques and a rich sense of presence. I describe my past and proposed future research in designing interactive devices that innovate in the domains of eye tracking, wearable finger input, and handheld controllers.",
"fno": "08446053",
"keywords": [
"Augmented Reality",
"Haptic Interfaces",
"Helmet Mounted Displays",
"Human Computer Interaction",
"Interactive Devices",
"High Precision Tracking",
"Interaction Techniques",
"Interactive Devices",
"Wearable Finger Input",
"High Fidelity Interaction",
"Wearable Head Mounted Displays",
"VR",
"Computing Paradigms",
"Virtual Reality",
"Augmented Reality",
"Haptic Interfaces",
"Gaze Tracking",
"Rendering Computer Graphics",
"Virtual Reality",
"Input Devices",
"Thumb",
"Sensors",
"Human Computer Interaction",
"Input Devices"
],
"authors": [
{
"affiliation": "University of Washington, Paul G. Allen School of Computer Science & Engineering",
"fullName": "Eric Whitmire",
"givenName": "Eric",
"surname": "Whitmire",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2018-03-01T00:00:00",
"pubType": "proceedings",
"pages": "796-798",
"year": "2018",
"issn": null,
"isbn": "978-1-5386-3365-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08446400",
"articleId": "13bd1gFCjrS",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08446138",
"articleId": "13bd1fWcuDq",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iwar/1999/0359/0/03590045",
"title": "Integrating Virtual and Augmented Realities in an Outdoor Application",
"doi": null,
"abstractUrl": "/proceedings-article/iwar/1999/03590045/12OmNBqMDEr",
"parentPublication": {
"id": "proceedings/iwar/1999/0359/0",
"title": "Augmented Reality, International Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/02/07833028",
"title": "Augmented Reality versus Virtual Reality for 3D Object Manipulation",
"doi": null,
"abstractUrl": "/journal/tg/2018/02/07833028/13rRUwInvsX",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2018/02/mcg2018020028",
"title": "Virtual and Augmented Reality",
"doi": null,
"abstractUrl": "/magazine/cg/2018/02/mcg2018020028/13rRUzpzeHL",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/rtcsa/2018/7759/0/775900a209",
"title": "Exploring Augmented Reality Interaction for Everyday Multipurpose Wearable Robots",
"doi": null,
"abstractUrl": "/proceedings-article/rtcsa/2018/775900a209/17D45WaTkd9",
"parentPublication": {
"id": "proceedings/rtcsa/2018/7759/0",
"title": "2018 IEEE 24th International Conference on Embedded and Real-Time Computing Systems and Applications (RTCSA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2018/7592/0/08699302",
"title": "Cognitive Aspects of Interaction in Virtual and Augmented Reality Systems (CAIVARS)",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699302/19F1TJ8MxIQ",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2018/7592/0",
"title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2018/7592/0/08699236",
"title": "The Trouble with Augmented Reality/Virtual Reality Authoring Tools",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699236/19F1TNjWjtK",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2018/7592/0",
"title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sp/2022/1316/0/131600b552",
"title": "SoK: Authentication in Augmented and Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/sp/2022/131600b552/1FlQIjcP4FW",
"parentPublication": {
"id": "proceedings/sp/2022/1316/0/",
"title": "2022 IEEE Symposium on Security and Privacy (SP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798228",
"title": "On Sharing Physical Geometric Space between Augmented and Virtual Reality Environments",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798228/1cJ0LLaIrPq",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2020/5608/0/09089504",
"title": "Touch the Wall: Comparison of Virtual and Augmented Reality with Conventional 2D Screen Eye-Hand Coordination Training Systems",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2020/09089504/1jIxfvWzz6o",
"parentPublication": {
"id": "proceedings/vr/2020/5608/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2021/0158/0/015800z018",
"title": "Keynote Speaker: Wearable Haptics for Virtual and Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2021/015800z018/1yeD29pZAsw",
"parentPublication": {
"id": "proceedings/ismar/2021/0158/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNyKJixw",
"title": "2015 IEEE International Symposium on Mixed and Augmented Reality - Media, Art, Social Science, Humanities and Design (ISMAR-MASH'D)",
"acronym": "Ismar-mashd",
"groupId": "1002953",
"volume": "0",
"displayVolume": "0",
"year": "2015",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNC3Xhjl",
"doi": "10.1109/ISMAR-MASHD.2015.18",
"title": "The Effect of Tangible User Interfaces on Cognitive Load in the Creative Design Process",
"normalizedTitle": "The Effect of Tangible User Interfaces on Cognitive Load in the Creative Design Process",
"abstract": "The aim of the study is to investigate how Graphical User Interfaces (GUI) and Tangible User Interfaces (TUI) affect the creative design process in design education through cognitive load. A simple design problem was introduced to 30 design students in two groups. One group was provided with a TUI that was operationalized through a Desktop Augmented Reality Environment (AR) the other group was provided with a GUI that was operationalized through a Desktop Virtual Reality Environment (VR). After using the two systems the cognitive load of each interface was measure through the NASA TLX tool. Theories from cognitive psychology, information sciences, and design cognition were combined to provide an explanatory mechanism of how these media types affect the design process. The results indicate that epistemic action in TUI's such as AR interfaces reduces cognitive load thereby reducing fixation in the design process and enhancing the creative design process.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The aim of the study is to investigate how Graphical User Interfaces (GUI) and Tangible User Interfaces (TUI) affect the creative design process in design education through cognitive load. A simple design problem was introduced to 30 design students in two groups. One group was provided with a TUI that was operationalized through a Desktop Augmented Reality Environment (AR) the other group was provided with a GUI that was operationalized through a Desktop Virtual Reality Environment (VR). After using the two systems the cognitive load of each interface was measure through the NASA TLX tool. Theories from cognitive psychology, information sciences, and design cognition were combined to provide an explanatory mechanism of how these media types affect the design process. The results indicate that epistemic action in TUI's such as AR interfaces reduces cognitive load thereby reducing fixation in the design process and enhancing the creative design process.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The aim of the study is to investigate how Graphical User Interfaces (GUI) and Tangible User Interfaces (TUI) affect the creative design process in design education through cognitive load. A simple design problem was introduced to 30 design students in two groups. One group was provided with a TUI that was operationalized through a Desktop Augmented Reality Environment (AR) the other group was provided with a GUI that was operationalized through a Desktop Virtual Reality Environment (VR). After using the two systems the cognitive load of each interface was measure through the NASA TLX tool. Theories from cognitive psychology, information sciences, and design cognition were combined to provide an explanatory mechanism of how these media types affect the design process. The results indicate that epistemic action in TUI's such as AR interfaces reduces cognitive load thereby reducing fixation in the design process and enhancing the creative design process.",
"fno": "9628a006",
"keywords": [
"Education",
"Graphical User Interfaces",
"Augmented Reality",
"Visualization",
"NASA",
"Cognition",
"Cognitive Load",
"Augmented Reality",
"Design"
],
"authors": [
{
"affiliation": null,
"fullName": "Tilanka Chandrasekera",
"givenName": "Tilanka",
"surname": "Chandrasekera",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "So-Yeon Yoon",
"givenName": "So-Yeon",
"surname": "Yoon",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "Ismar-mashd",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2015-09-01T00:00:00",
"pubType": "proceedings",
"pages": "6-8",
"year": "2015",
"issn": null,
"isbn": "978-1-4673-9628-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "9628a001",
"articleId": "12OmNwc3wu8",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "9628a009",
"articleId": "12OmNvkYxa6",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ismar/2002/1781/0/17810157",
"title": "Alternative Tools for Tangible Interaction: A Usability Evaluation",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2002/17810157/12OmNBSSVaM",
"parentPublication": {
"id": "proceedings/ismar/2002/1781/0",
"title": "Proceedings. International Symposium on Mixed and Augmented Reality",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isuvr/2010/4124/0/4124a036",
"title": "Evaluation of Tangible User Interfaces for Desktop AR",
"doi": null,
"abstractUrl": "/proceedings-article/isuvr/2010/4124a036/12OmNqGiu0p",
"parentPublication": {
"id": "proceedings/isuvr/2010/4124/0",
"title": "International Symposium on Ubiquitous Virtual Reality",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fie/2016/1790/0/07757573",
"title": "The aptness of Tangible User Interfaces for explaining abstract computer network principles",
"doi": null,
"abstractUrl": "/proceedings-article/fie/2016/07757573/12OmNwErpzN",
"parentPublication": {
"id": "proceedings/fie/2016/1790/0",
"title": "2016 IEEE Frontiers in Education Conference (FIE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ainaw/2007/2847/2/284720852",
"title": "Tangible User Interfaces for Cognitive Assistance",
"doi": null,
"abstractUrl": "/proceedings-article/ainaw/2007/284720852/12OmNweBUIC",
"parentPublication": {
"id": "proceedings/ainaw/2007/2847/2",
"title": "Advanced Information Networking and Applications Workshops, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2017/6327/0/6327a253",
"title": "Workshop on VR and AR meet creative industries",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2017/6327a253/12OmNylKASp",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2017/6327/0",
"title": "2017 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2008/2047/0/04476585",
"title": "Tangible User Interfaces Compensate for Low Spatial Cognition",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2008/04476585/12OmNzwZ6uu",
"parentPublication": {
"id": "proceedings/3dui/2008/2047/0",
"title": "2008 IEEE Symposium on 3D User Interfaces",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/11/08007333",
"title": "Cognitive Cost of Using Augmented Reality Displays",
"doi": null,
"abstractUrl": "/journal/tg/2017/11/08007333/13rRUygT7fg",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2019/4765/0/476500a443",
"title": "Accessibility of Immersive Serious Games for Persons with Cognitive Disabilities",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2019/476500a443/1gyslwNaSd2",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2019/4765/0",
"title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2021/1838/0/255600a778",
"title": "Evaluating Object Manipulation Interaction Techniques in Mixed Reality: Tangible User Interfaces and Gesture",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2021/255600a778/1tuBngWRAC4",
"parentPublication": {
"id": "proceedings/vr/2021/1838/0",
"title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2021/1298/0/129800a018",
"title": "Accessible Tangible User Interfaces in eXtended Reality Experiences for Cultural Heritage",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2021/129800a018/1yeQGLRgXHW",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2021/1298/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNyqRn7h",
"title": "Proceedings. International Symposium on Mixed and Augmented Reality",
"acronym": "ismar",
"groupId": "1000465",
"volume": "0",
"displayVolume": "0",
"year": "2002",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNrMZpxI",
"doi": "10.1109/ISMAR.2002.1115091",
"title": "Augmented-Reality Visualizations Guided by Cognition:Perceptual Heuristics for Combining Visible and Obscured Information",
"normalizedTitle": "Augmented-Reality Visualizations Guided by Cognition:Perceptual Heuristics for Combining Visible and Obscured Information",
"abstract": "One of the unique applications of Mixed and Augmented Reality (MR/AR) systems is that hidden and occluded objects can be readily visualized. We call this specialized use of MR/AR, Obscured Information Visualization (OIV). In this paper, we describe the beginning of a research program designed to develop such visualizations through the use of principles derived from perceptual psychology and cognitive science. In this paper we surveyed the cognitive science literature as it applies to such visualization tasks, described experimental questions derived from these cognitive principles, and generated general guidelines that can be used in designing future OIV systems (as well improving AR displays more generally). Here we also report the results from an experiment that utilized a functioning AR-OIV system: we found that in a relative depth judgment, subjects reported rendered objects as being in front of real-world objects, except when additional occlusion and motion cues were presented together.",
"abstracts": [
{
"abstractType": "Regular",
"content": "One of the unique applications of Mixed and Augmented Reality (MR/AR) systems is that hidden and occluded objects can be readily visualized. We call this specialized use of MR/AR, Obscured Information Visualization (OIV). In this paper, we describe the beginning of a research program designed to develop such visualizations through the use of principles derived from perceptual psychology and cognitive science. In this paper we surveyed the cognitive science literature as it applies to such visualization tasks, described experimental questions derived from these cognitive principles, and generated general guidelines that can be used in designing future OIV systems (as well improving AR displays more generally). Here we also report the results from an experiment that utilized a functioning AR-OIV system: we found that in a relative depth judgment, subjects reported rendered objects as being in front of real-world objects, except when additional occlusion and motion cues were presented together.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "One of the unique applications of Mixed and Augmented Reality (MR/AR) systems is that hidden and occluded objects can be readily visualized. We call this specialized use of MR/AR, Obscured Information Visualization (OIV). In this paper, we describe the beginning of a research program designed to develop such visualizations through the use of principles derived from perceptual psychology and cognitive science. In this paper we surveyed the cognitive science literature as it applies to such visualization tasks, described experimental questions derived from these cognitive principles, and generated general guidelines that can be used in designing future OIV systems (as well improving AR displays more generally). Here we also report the results from an experiment that utilized a functioning AR-OIV system: we found that in a relative depth judgment, subjects reported rendered objects as being in front of real-world objects, except when additional occlusion and motion cues were presented together.",
"fno": "17810215",
"keywords": [
"Augmented And Mixed Reality",
"Cognition",
"Human Computer Interaction",
"Motion",
"Perception",
"Occlusion"
],
"authors": [
{
"affiliation": "HRL Laboratories,LLC",
"fullName": "Chris Furmanski",
"givenName": "Chris",
"surname": "Furmanski",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "HRL Laboratories,LLC",
"fullName": "Ronald Azuma",
"givenName": "Ronald",
"surname": "Azuma",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "HRL Laboratories,LLC",
"fullName": "Mike Daily",
"givenName": "Mike",
"surname": "Daily",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2002-09-01T00:00:00",
"pubType": "proceedings",
"pages": "215",
"year": "2002",
"issn": null,
"isbn": "0-7695-1781-1",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "17810203",
"articleId": "12OmNzmcluM",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "17810225",
"articleId": "12OmNzd7bq2",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cgiv/2009/3789/0/3789a153",
"title": "An Implementation Review of Occlusion-Based Interaction in Augmented Reality Environment",
"doi": null,
"abstractUrl": "/proceedings-article/cgiv/2009/3789a153/12OmNB7cjly",
"parentPublication": {
"id": "proceedings/cgiv/2009/3789/0",
"title": "2009 Sixth International Conference on Computer Graphics, Imaging and Visualization",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccms/2010/3941/1/3941a133",
"title": "Key Technique of Assembly System in an Augmented Reality Environment",
"doi": null,
"abstractUrl": "/proceedings-article/iccms/2010/3941a133/12OmNqC2uYI",
"parentPublication": {
"id": "proceedings/iccms/2010/3941/3",
"title": "Computer Modeling and Simulation, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2012/4660/0/06402561",
"title": "Using children's developmental psychology to guide augmented-reality design and usability",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2012/06402561/12OmNrIrPhx",
"parentPublication": {
"id": "proceedings/ismar/2012/4660/0",
"title": "2012 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2010/6846/0/05444706",
"title": "Evaluating depth perception of photorealistic mixed reality visualizations for occluded objects in outdoor environments",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2010/05444706/12OmNsd6vhN",
"parentPublication": {
"id": "proceedings/3dui/2010/6846/0",
"title": "2010 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2010/6237/0/05444792",
"title": "Mixed reality in virtual world teleconferencing",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2010/05444792/12OmNwpoFEM",
"parentPublication": {
"id": "proceedings/vr/2010/6237/0",
"title": "2010 IEEE Virtual Reality Conference (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvvrhc/1998/8283/0/82830078",
"title": "Vision and Graphics in Producing Mixed Reality Worlds",
"doi": null,
"abstractUrl": "/proceedings-article/cvvrhc/1998/82830078/12OmNylbov1",
"parentPublication": {
"id": "proceedings/cvvrhc/1998/8283/0",
"title": "Computer Vision for Virtual Reality Based Human Communications, Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2008/04/mcg2008040040",
"title": "Toward Next-Gen Mobile AR Games",
"doi": null,
"abstractUrl": "/magazine/cg/2008/04/mcg2008040040/13rRUxASujW",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cste/2022/8188/0/818800a082",
"title": "Integrating Inquiry-Based Pedagogy with Mixed Reality: Theories and Practices",
"doi": null,
"abstractUrl": "/proceedings-article/cste/2022/818800a082/1J7VZM9bxDi",
"parentPublication": {
"id": "proceedings/cste/2022/8188/0",
"title": "2022 4th International Conference on Computer Science and Technologies in Education (CSTE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a885",
"title": "The ARgus Designer: Supporting experts while conducting user studies of AR/MR applications",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a885/1J7WssjkGSQ",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2021/1838/0/255600z025",
"title": "B(l)ending Realities",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2021/255600z025/1tuAshVF9jq",
"parentPublication": {
"id": "proceedings/vr/2021/1838/0",
"title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNrAdsuf",
"title": "2015 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"acronym": "ismar",
"groupId": "1000465",
"volume": "0",
"displayVolume": "0",
"year": "2015",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNvAiSE1",
"doi": "10.1109/ISMAR.2015.18",
"title": "The Ventriloquist Effect in Augmented Reality",
"normalizedTitle": "The Ventriloquist Effect in Augmented Reality",
"abstract": "An effective interaction in augmented reality (AR) requires utilization of different modalities. In this study, we investigated orienting the user in bimodal AR. Using auditory perception to support visual perception provides a useful approach for orienting the user to directions that are outside of the visual field-of-view (FOV). In particular, this is important in path-finding, where points-of-interest (POIs) can be all around the user. However, the ability to perceive the audio POIs is affected by the ventriloquism effect (VE), which means that audio POIs are captured by visual POIs. We measured the spatial limits for the VE in AR using a video see-through head-worn display. The results showed that the amount of the VE in AR was approx. 5°–15° higher than in a real environment. In AR, spatial disparity between an audio and visual POI should be at least 30° of azimuth angle, in order to perceive the audio and visual POIs as separate. The limit was affected by azimuth angle of visual POI and magnitude of head rotations. These results provide guidelines for designing bimodal AR systems.",
"abstracts": [
{
"abstractType": "Regular",
"content": "An effective interaction in augmented reality (AR) requires utilization of different modalities. In this study, we investigated orienting the user in bimodal AR. Using auditory perception to support visual perception provides a useful approach for orienting the user to directions that are outside of the visual field-of-view (FOV). In particular, this is important in path-finding, where points-of-interest (POIs) can be all around the user. However, the ability to perceive the audio POIs is affected by the ventriloquism effect (VE), which means that audio POIs are captured by visual POIs. We measured the spatial limits for the VE in AR using a video see-through head-worn display. The results showed that the amount of the VE in AR was approx. 5°–15° higher than in a real environment. In AR, spatial disparity between an audio and visual POI should be at least 30° of azimuth angle, in order to perceive the audio and visual POIs as separate. The limit was affected by azimuth angle of visual POI and magnitude of head rotations. These results provide guidelines for designing bimodal AR systems.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "An effective interaction in augmented reality (AR) requires utilization of different modalities. In this study, we investigated orienting the user in bimodal AR. Using auditory perception to support visual perception provides a useful approach for orienting the user to directions that are outside of the visual field-of-view (FOV). In particular, this is important in path-finding, where points-of-interest (POIs) can be all around the user. However, the ability to perceive the audio POIs is affected by the ventriloquism effect (VE), which means that audio POIs are captured by visual POIs. We measured the spatial limits for the VE in AR using a video see-through head-worn display. The results showed that the amount of the VE in AR was approx. 5°–15° higher than in a real environment. In AR, spatial disparity between an audio and visual POI should be at least 30° of azimuth angle, in order to perceive the audio and visual POIs as separate. The limit was affected by azimuth angle of visual POI and magnitude of head rotations. These results provide guidelines for designing bimodal AR systems.",
"fno": "7660a049",
"keywords": [
"Visualization",
"Azimuth",
"Augmented Reality",
"Speech",
"Visual Perception",
"Navigation",
"Uncertainty"
],
"authors": [
{
"affiliation": null,
"fullName": "Mikko Kyto",
"givenName": "Mikko",
"surname": "Kyto",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Kenta Kusumoto",
"givenName": "Kenta",
"surname": "Kusumoto",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Pirkko Oittinen",
"givenName": "Pirkko",
"surname": "Oittinen",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2015-09-01T00:00:00",
"pubType": "proceedings",
"pages": "49-53",
"year": "2015",
"issn": null,
"isbn": "978-1-4673-7660-0",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "7660a043",
"articleId": "12OmNC1oT64",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "7660a054",
"articleId": "12OmNCwUmB0",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ismarw/2016/3740/0/07836534",
"title": "TeachAR: An Interactive Augmented Reality Tool for Teaching Basic English to Non-native Children",
"doi": null,
"abstractUrl": "/proceedings-article/ismarw/2016/07836534/12OmNB9t6ld",
"parentPublication": {
"id": "proceedings/ismarw/2016/3740/0",
"title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/svr/2012/4725/0/4725a174",
"title": "Combining Augmented Reality and Speech Technologies to Help Deaf and Hard of Hearing People",
"doi": null,
"abstractUrl": "/proceedings-article/svr/2012/4725a174/12OmNBE7Mvc",
"parentPublication": {
"id": "proceedings/svr/2012/4725/0",
"title": "2012 14th Symposium on Virtual and Augmented Reality",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icalt/2016/9041/0/9041a133",
"title": "Game-Based Evacuation Drills Using Simple Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/icalt/2016/9041a133/12OmNrYCXFo",
"parentPublication": {
"id": "proceedings/icalt/2016/9041/0",
"title": "2016 IEEE 16th International Conference on Advanced Learning Technologies (ICALT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2014/6184/0/06948404",
"title": "AR-mentor: Augmented reality based mentoring system",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2014/06948404/12OmNvnOwuE",
"parentPublication": {
"id": "proceedings/ismar/2014/6184/0",
"title": "2014 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2009/3943/0/04811001",
"title": "Explosion Diagrams in Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2009/04811001/12OmNwE9OR0",
"parentPublication": {
"id": "proceedings/vr/2009/3943/0",
"title": "2009 IEEE Virtual Reality Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2004/2177/0/21770761",
"title": "Augmented Reality Interface Toolkit",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2004/21770761/12OmNyUnELp",
"parentPublication": {
"id": "proceedings/iv/2004/2177/0",
"title": "Proceedings. Eighth International Conference on Information Visualisation, 2004. IV 2004.",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2017/6327/0/6327a170",
"title": "[POSTER] Halo3D: A Technique for Visualizing Off-Screen Points of Interest in Mobile Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2017/6327a170/12OmNyXMQmn",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2017/6327/0",
"title": "2017 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sive/2018/5713/0/08577129",
"title": "Towards the Design and Evaluation of Delay-based Modeling of Acoustic Scenes in Mobile Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/sive/2018/08577129/17D45XERmlJ",
"parentPublication": {
"id": "proceedings/sive/2018/5713/0",
"title": "2018 IEEE 4th VR Workshop on Sonic Interactions for Virtual Environments (SIVE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mipr/2019/1198/0/119800a389",
"title": "Implementation of Augmented Reality Globe in Teaching-Learning Environment",
"doi": null,
"abstractUrl": "/proceedings-article/mipr/2019/119800a389/19wB38QGJS8",
"parentPublication": {
"id": "proceedings/mipr/2019/1198/0",
"title": "2019 IEEE Conference on Multimedia Information Processing and Retrieval (MIPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/saner/2022/3786/0/378600a454",
"title": "A Preliminary Study on Accessibility of Augmented Reality Features in Mobile Apps",
"doi": null,
"abstractUrl": "/proceedings-article/saner/2022/378600a454/1FbSROSokxi",
"parentPublication": {
"id": "proceedings/saner/2022/3786/0",
"title": "2022 IEEE International Conference on Software Analysis, Evolution and Reengineering (SANER)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNqGA5ib",
"title": "International Conference on Ubi-Media Computing",
"acronym": "u-media",
"groupId": "1002010",
"volume": "0",
"displayVolume": "0",
"year": "2011",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNvnOwyH",
"doi": "10.1109/U-MEDIA.2011.66",
"title": "The Feasibility of Augmented Reality on Virtual Tourism Website",
"normalizedTitle": "The Feasibility of Augmented Reality on Virtual Tourism Website",
"abstract": "The Taiwan Tourism Bureau is carrying on with the \"Three-year Sprint Program\" of the Executive Yuan's \"Economic Development Vision for 2015.\" Tourism becomes the star industry in Taiwan and it will play a major role in future global economic development. Not only the physical improvements are encouraged, the information service online is also important. Therefore, we apply Augmented Reality technology for implementing the virtual tourism website. The website provided the necessary information for the destination basically, and increased the multimedia effects for the website especially. Some empirical researches show that a website with a high level of interactivity and rich multimedia is more likely than a text-based website to persuade consumers. Therefore, the virtual tourism website of Taichung City is herein provided for studying the feasibility of Augmented Reality on virtual Tourism. In the work, we conclude the technology advantage of Augmented Reality and demonstrate the effects of novel interactive operation with users. The result of the research will be the valuable reference as a company applying the Augmented Reality for virtual websites.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The Taiwan Tourism Bureau is carrying on with the \"Three-year Sprint Program\" of the Executive Yuan's \"Economic Development Vision for 2015.\" Tourism becomes the star industry in Taiwan and it will play a major role in future global economic development. Not only the physical improvements are encouraged, the information service online is also important. Therefore, we apply Augmented Reality technology for implementing the virtual tourism website. The website provided the necessary information for the destination basically, and increased the multimedia effects for the website especially. Some empirical researches show that a website with a high level of interactivity and rich multimedia is more likely than a text-based website to persuade consumers. Therefore, the virtual tourism website of Taichung City is herein provided for studying the feasibility of Augmented Reality on virtual Tourism. In the work, we conclude the technology advantage of Augmented Reality and demonstrate the effects of novel interactive operation with users. The result of the research will be the valuable reference as a company applying the Augmented Reality for virtual websites.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The Taiwan Tourism Bureau is carrying on with the \"Three-year Sprint Program\" of the Executive Yuan's \"Economic Development Vision for 2015.\" Tourism becomes the star industry in Taiwan and it will play a major role in future global economic development. Not only the physical improvements are encouraged, the information service online is also important. Therefore, we apply Augmented Reality technology for implementing the virtual tourism website. The website provided the necessary information for the destination basically, and increased the multimedia effects for the website especially. Some empirical researches show that a website with a high level of interactivity and rich multimedia is more likely than a text-based website to persuade consumers. Therefore, the virtual tourism website of Taichung City is herein provided for studying the feasibility of Augmented Reality on virtual Tourism. In the work, we conclude the technology advantage of Augmented Reality and demonstrate the effects of novel interactive operation with users. The result of the research will be the valuable reference as a company applying the Augmented Reality for virtual websites.",
"fno": "4493a253",
"keywords": [
"Augmented Reality",
"Virtual Tourism",
"Unity",
"User Interaction"
],
"authors": [
{
"affiliation": null,
"fullName": "Chouyin Hsu",
"givenName": "Chouyin",
"surname": "Hsu",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "u-media",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2011-07-01T00:00:00",
"pubType": "proceedings",
"pages": "253-256",
"year": "2011",
"issn": null,
"isbn": "978-0-7695-4493-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "4493a247",
"articleId": "12OmNBigFrP",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "4493a257",
"articleId": "12OmNz2C1ue",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icedeg/2015/8910/0/07114484",
"title": "Augmented reality applied in tourism mobile applications",
"doi": null,
"abstractUrl": "/proceedings-article/icedeg/2015/07114484/12OmNCfAPK9",
"parentPublication": {
"id": "proceedings/icedeg/2015/8910/0",
"title": "2015 Second International Conference on eDemocracy & eGovernment (ICEDEG)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2015/7660/0/7660a108",
"title": "[POSTER] Transforming Your Website to an Augmented Reality View",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2015/7660a108/12OmNrIJqv9",
"parentPublication": {
"id": "proceedings/ismar/2015/7660/0",
"title": "2015 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icoin/2018/2290/0/08343165",
"title": "Mobile augmented reality on web-based for the tourism using HTML5",
"doi": null,
"abstractUrl": "/proceedings-article/icoin/2018/08343165/12OmNx19jUK",
"parentPublication": {
"id": "proceedings/icoin/2018/2290/0",
"title": "2018 International Conference on Information Networking (ICOIN)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmcs/1999/0253/1/02539195",
"title": "Haptics in Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/icmcs/1999/02539195/12OmNyQ7G3s",
"parentPublication": {
"id": "proceedings/icmcs/1999/0253/1",
"title": "Multimedia Computing and Systems, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/co/2012/07/mco2012070026",
"title": "Anywhere Interfaces Using Handheld Augmented Reality",
"doi": null,
"abstractUrl": "/magazine/co/2012/07/mco2012070026/13rRUxYrbPM",
"parentPublication": {
"id": "mags/co",
"title": "Computer",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/co/2012/07/mco2012070032",
"title": "Projection-Based Augmented Reality in Disney Theme Parks",
"doi": null,
"abstractUrl": "/magazine/co/2012/07/mco2012070032/13rRUyoyhJq",
"parentPublication": {
"id": "mags/co",
"title": "Computer",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mdm/2022/5176/0/517600a314",
"title": "EnterCY: A Virtual and Augmented Reality Tourism Platform for Cyprus",
"doi": null,
"abstractUrl": "/proceedings-article/mdm/2022/517600a314/1G89HJzCzII",
"parentPublication": {
"id": "proceedings/mdm/2022/5176/0",
"title": "2022 23rd IEEE International Conference on Mobile Data Management (MDM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/inciscos/2019/5581/0/558100a116",
"title": "ARTOUR: Augmented Reality for Tourism - A Case Study in Riobamba, Ecuador",
"doi": null,
"abstractUrl": "/proceedings-article/inciscos/2019/558100a116/1iHUGrjAlXO",
"parentPublication": {
"id": "proceedings/inciscos/2019/5581/0",
"title": "2019 International Conference on Information Systems and Computer Science (INCISCOS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isctis/2021/1441/0/144100a036",
"title": "Research on Interactive Design of Tourism Brand Based on Augmented Reality Technology",
"doi": null,
"abstractUrl": "/proceedings-article/isctis/2021/144100a036/1yEZCkQrUdi",
"parentPublication": {
"id": "proceedings/isctis/2021/1441/0",
"title": "2021 International Symposium on Computer Technology and Information Science (ISCTIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccst/2021/4254/0/425400a562",
"title": "A Study on the Application of Virtual Reality in the Marketing of Rural Cultural Tourism in Hubei Province",
"doi": null,
"abstractUrl": "/proceedings-article/iccst/2021/425400a562/1ziPkeVNlok",
"parentPublication": {
"id": "proceedings/iccst/2021/4254/0",
"title": "2021 International Conference on Culture-oriented Science & Technology (ICCST)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNvjgWMS",
"title": "Proceedings IEEE and ACM International Symposium on Augmented Reality",
"acronym": "isar",
"groupId": "1000063",
"volume": "0",
"displayVolume": "0",
"year": "2001",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNxFaLwB",
"doi": "10.1109/ISAR.2001.970521",
"title": "Mobile Collaborative Augmented Reality",
"normalizedTitle": "Mobile Collaborative Augmented Reality",
"abstract": "The combination of mobile computing and collaborative Augmented Reality into a single system makes the power of computer enhanced interaction and communication in the real world accessible anytime and everywhere. This paper describes our work to build a mobile collaborative Augmented Reality system that supports true stereoscopic 3D graphics, a pen and pad interface and direct interaction with virtual objects. The system is assembled from off-the-shelf hardware components and serves as a basic test bed for user interface experiments related to computer supported collaborative work in Augmented Reality. A mobile platform implementing the described features and collaboration between mobile and stationary users are demonstrated.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The combination of mobile computing and collaborative Augmented Reality into a single system makes the power of computer enhanced interaction and communication in the real world accessible anytime and everywhere. This paper describes our work to build a mobile collaborative Augmented Reality system that supports true stereoscopic 3D graphics, a pen and pad interface and direct interaction with virtual objects. The system is assembled from off-the-shelf hardware components and serves as a basic test bed for user interface experiments related to computer supported collaborative work in Augmented Reality. A mobile platform implementing the described features and collaboration between mobile and stationary users are demonstrated.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The combination of mobile computing and collaborative Augmented Reality into a single system makes the power of computer enhanced interaction and communication in the real world accessible anytime and everywhere. This paper describes our work to build a mobile collaborative Augmented Reality system that supports true stereoscopic 3D graphics, a pen and pad interface and direct interaction with virtual objects. The system is assembled from off-the-shelf hardware components and serves as a basic test bed for user interface experiments related to computer supported collaborative work in Augmented Reality. A mobile platform implementing the described features and collaboration between mobile and stationary users are demonstrated.",
"fno": "13750114",
"keywords": [
"Augmented Reality",
"Mobile Computing",
"Wearable Computing",
"Computer Supported Collaborative Work",
"3D Interaction",
"Hybrid Tracking"
],
"authors": [
{
"affiliation": "Vienna University of Technology",
"fullName": "Gerhard Reitmayr",
"givenName": "Gerhard",
"surname": "Reitmayr",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Vienna University of Technology",
"fullName": "Dieter Schmalstieg",
"givenName": "Dieter",
"surname": "Schmalstieg",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "isar",
"isOpenAccess": false,
"showRecommendedArticles": false,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2001-10-01T00:00:00",
"pubType": "proceedings",
"pages": "114",
"year": "2001",
"issn": null,
"isbn": "0-7695-1375-1",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "13750107",
"articleId": "12OmNwJybQZ",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "13750124",
"articleId": "12OmNBOllgK",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNyYm2vR",
"title": "Multimedia Computing and Systems, International Conference on",
"acronym": "icmcs",
"groupId": "1000479",
"volume": "1",
"displayVolume": "1",
"year": "1999",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNyQ7G3s",
"doi": "10.1109/MMCS.1999.779146",
"title": "Haptics in Augmented Reality",
"normalizedTitle": "Haptics in Augmented Reality",
"abstract": "An augmented reality system merges synthetic sensory information into a user's perception of a three-dimensional environment. An important performance goal for an augmented reality system is that the user perceives a single seamless environment. In most augmented reality systems the user views a real world augmented only with visual information and is not provided with a means to interact with the virtual objects. In this paper we describe an augmented reality system that, in addition to visual augmentation, merges synthetic haptic input into the user's perception of the real environment. Our system uses a PHANToM haptic interface device to generate the haptic sensory input in real-time. The system allows user interactions such as moving or lifting a virtual object, and demonstrates interactions between virtual and real objects. Methods to provide proper visual occlusion between real and virtual objects are also described",
"abstracts": [
{
"abstractType": "Regular",
"content": "An augmented reality system merges synthetic sensory information into a user's perception of a three-dimensional environment. An important performance goal for an augmented reality system is that the user perceives a single seamless environment. In most augmented reality systems the user views a real world augmented only with visual information and is not provided with a means to interact with the virtual objects. In this paper we describe an augmented reality system that, in addition to visual augmentation, merges synthetic haptic input into the user's perception of the real environment. Our system uses a PHANToM haptic interface device to generate the haptic sensory input in real-time. The system allows user interactions such as moving or lifting a virtual object, and demonstrates interactions between virtual and real objects. Methods to provide proper visual occlusion between real and virtual objects are also described",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "An augmented reality system merges synthetic sensory information into a user's perception of a three-dimensional environment. An important performance goal for an augmented reality system is that the user perceives a single seamless environment. In most augmented reality systems the user views a real world augmented only with visual information and is not provided with a means to interact with the virtual objects. In this paper we describe an augmented reality system that, in addition to visual augmentation, merges synthetic haptic input into the user's perception of the real environment. Our system uses a PHANToM haptic interface device to generate the haptic sensory input in real-time. The system allows user interactions such as moving or lifting a virtual object, and demonstrates interactions between virtual and real objects. Methods to provide proper visual occlusion between real and virtual objects are also described",
"fno": "02539195",
"keywords": [
"Augmented Reality",
"Virtual Reality",
"Multimodal Interaction",
"Haptic Interface"
],
"authors": [
{
"affiliation": "Rochester Institute of Technology",
"fullName": "James Vallino",
"givenName": "James",
"surname": "Vallino",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Rochester",
"fullName": "Christopher Brown",
"givenName": "Christopher",
"surname": "Brown",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icmcs",
"isOpenAccess": false,
"showRecommendedArticles": false,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "1999-06-01T00:00:00",
"pubType": "proceedings",
"pages": "9195",
"year": "1999",
"issn": "1530-2032",
"isbn": "0-7695-0253-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "00779224",
"articleId": "12OmNrIrPiy",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "02539201",
"articleId": "12OmNApcudp",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1CJcAaH6aYg",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"acronym": "vrw",
"groupId": "1836626",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1CJdRqHEpry",
"doi": "10.1109/VRW55335.2022.00074",
"title": "Anthropomorphism of Virtual Agents and Human Cognitive Performance in Augmented Reality",
"normalizedTitle": "Anthropomorphism of Virtual Agents and Human Cognitive Performance in Augmented Reality",
"abstract": "In this preliminary study, we aimed to explore the role of anthropomorphism on cognitive task performance in augmented reality (AR). We implemented five different levels of anthropomorphic representations of a virtual agent, ranging from voice to whole-body. Participants performed anagram tasks in the presence of each representation. The results suggest that, except for the voice condition, social presence and cognitive performance increase as the level of anthropomorphism increases.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this preliminary study, we aimed to explore the role of anthropomorphism on cognitive task performance in augmented reality (AR). We implemented five different levels of anthropomorphic representations of a virtual agent, ranging from voice to whole-body. Participants performed anagram tasks in the presence of each representation. The results suggest that, except for the voice condition, social presence and cognitive performance increase as the level of anthropomorphism increases.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this preliminary study, we aimed to explore the role of anthropomorphism on cognitive task performance in augmented reality (AR). We implemented five different levels of anthropomorphic representations of a virtual agent, ranging from voice to whole-body. Participants performed anagram tasks in the presence of each representation. The results suggest that, except for the voice condition, social presence and cognitive performance increase as the level of anthropomorphism increases.",
"fno": "840200a329",
"keywords": [
"Augmented Reality",
"Cognition",
"Augmented Reality",
"Cognitive Task Performance",
"Anthropomorphic Representations",
"Virtual Agent",
"Anagram Tasks",
"Voice Condition",
"Cognitive Performance",
"Anthropomorphism",
"Social Presence",
"Three Dimensional Displays",
"Conferences",
"User Interfaces",
"Distance Measurement",
"Anthropomorphism",
"Task Analysis",
"Augmented Reality"
],
"authors": [
{
"affiliation": "Universität Hamburg",
"fullName": "Fariba Mostajeran",
"givenName": "Fariba",
"surname": "Mostajeran",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Universität Hamburg",
"fullName": "Nadia Burke",
"givenName": "Nadia",
"surname": "Burke",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Universität Hamburg",
"fullName": "Nazife Ertugrul",
"givenName": "Nazife",
"surname": "Ertugrul",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Universität Hamburg",
"fullName": "Kilian Hildebrandt",
"givenName": "Kilian",
"surname": "Hildebrandt",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Universität Hamburg",
"fullName": "Joshua Matov",
"givenName": "Joshua",
"surname": "Matov",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Universität Hamburg",
"fullName": "Noémie Tapie",
"givenName": "Noémie",
"surname": "Tapie",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Universität Hamburg",
"fullName": "Wilhelm Gottlieb Zittel",
"givenName": "Wilhelm Gottlieb",
"surname": "Zittel",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Universität Hamburg",
"fullName": "Pia Reisewitz",
"givenName": "Pia",
"surname": "Reisewitz",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Universität Hamburg",
"fullName": "Frank Steinicke",
"givenName": "Frank",
"surname": "Steinicke",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vrw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-03-01T00:00:00",
"pubType": "proceedings",
"pages": "329-332",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-8402-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "840200a323",
"articleId": "1CJexcgK4ne",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "840200a333",
"articleId": "1CJdX2rYlfW",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icalt/2008/3167/0/3167a186",
"title": "An Augmented Reality System for Learning the Interior of the Human Body",
"doi": null,
"abstractUrl": "/proceedings-article/icalt/2008/3167a186/12OmNxwWoUq",
"parentPublication": {
"id": "proceedings/icalt/2008/3167/0",
"title": "IEEE International Conference on Advanced Learning Technologies (ICALT 2008)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/ic/2013/06/mic2013060066",
"title": "Augmented Reality Interfaces",
"doi": null,
"abstractUrl": "/magazine/ic/2013/06/mic2013060066/13rRUIJcWhZ",
"parentPublication": {
"id": "mags/ic",
"title": "IEEE Internet Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/co/2012/07/mco2012070024",
"title": "What's Real About Augmented Reality?",
"doi": null,
"abstractUrl": "/magazine/co/2012/07/mco2012070024/13rRUyZaxtP",
"parentPublication": {
"id": "mags/co",
"title": "Computer",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/11/08007333",
"title": "Cognitive Cost of Using Augmented Reality Displays",
"doi": null,
"abstractUrl": "/journal/tg/2017/11/08007333/13rRUygT7fg",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797893",
"title": "Augmented Reality Interfaces for Semi-Autonomous Drones",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797893/1cJ0NJAEGQw",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798010",
"title": "An Augmented Reality Motion Planning Interface for Robotics",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798010/1cJ0O1wN4ti",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798055",
"title": "Embodying an Extra Virtual Body in Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798055/1cJ0Y0o1pO8",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2020/7675/0/767500a067",
"title": "Industrial Augmented Reality: Concepts and User Interface Designs for Augmented Reality Maintenance Worker Support Systems",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2020/767500a067/1pBMhXqBhCM",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2020/7675/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2021/1838/0/255600a645",
"title": "The Effects of Cognitive Load on Engagement in a Virtual Reality Learning Environment",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2021/255600a645/1tuAPxH9Ycw",
"parentPublication": {
"id": "proceedings/vr/2021/1838/0",
"title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1fHkPvjIgxi",
"title": "2019 Eighth International Conference of Educational Innovation through Technology (EITT)",
"acronym": "eitt",
"groupId": "1804904",
"volume": "0",
"displayVolume": "0",
"year": "2019",
"__typename": "ProceedingType"
},
"article": {
"id": "1fHkRE8QKd2",
"doi": "10.1109/EITT.2019.00053",
"title": "Impacts of Different Types of Scaffolding on Academic Performance, Cognitive Load and Satisfaction in Scientific Inquiry Activities Based on Augmented Reality",
"normalizedTitle": "Impacts of Different Types of Scaffolding on Academic Performance, Cognitive Load and Satisfaction in Scientific Inquiry Activities Based on Augmented Reality",
"abstract": "The emergence of augmented reality technology has injected new vitality into teaching, especially when the visualization and interactivity have increased students' interest in scientific inquiry. Although augmented reality technology has brought freshness to students, it is necessary to design suitable teaching support to promote student learning. Therefore, knowing how to design a teaching support system has become an important research problem. The researchers have designed open scaffolding and convergent scaffold based on augmented reality technology as a teaching support. Twenty-one elementary school students were randomly assigned to use either the convergent scaffolding or the open scaffolding for a learning task. Using a mixed research method, this study examined the effects of different scaffoldings on knowledge retention, knowledge transfer, learning satisfaction, and cognitive load. The results revealed that open scaffolding could improve students' knowledge retention, knowledge transfer, and reduce cognitive load.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The emergence of augmented reality technology has injected new vitality into teaching, especially when the visualization and interactivity have increased students' interest in scientific inquiry. Although augmented reality technology has brought freshness to students, it is necessary to design suitable teaching support to promote student learning. Therefore, knowing how to design a teaching support system has become an important research problem. The researchers have designed open scaffolding and convergent scaffold based on augmented reality technology as a teaching support. Twenty-one elementary school students were randomly assigned to use either the convergent scaffolding or the open scaffolding for a learning task. Using a mixed research method, this study examined the effects of different scaffoldings on knowledge retention, knowledge transfer, learning satisfaction, and cognitive load. The results revealed that open scaffolding could improve students' knowledge retention, knowledge transfer, and reduce cognitive load.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The emergence of augmented reality technology has injected new vitality into teaching, especially when the visualization and interactivity have increased students' interest in scientific inquiry. Although augmented reality technology has brought freshness to students, it is necessary to design suitable teaching support to promote student learning. Therefore, knowing how to design a teaching support system has become an important research problem. The researchers have designed open scaffolding and convergent scaffold based on augmented reality technology as a teaching support. Twenty-one elementary school students were randomly assigned to use either the convergent scaffolding or the open scaffolding for a learning task. Using a mixed research method, this study examined the effects of different scaffoldings on knowledge retention, knowledge transfer, learning satisfaction, and cognitive load. The results revealed that open scaffolding could improve students' knowledge retention, knowledge transfer, and reduce cognitive load.",
"fno": "428800a239",
"keywords": [
"Augmented Reality",
"Cognition",
"Computer Aided Instruction",
"Data Visualisation",
"Educational Administrative Data Processing",
"Teaching",
"Cognitive Load",
"Scientific Inquiry Activities",
"Augmented Reality Technology",
"Student Learning",
"Teaching Support System",
"Elementary School Students",
"Academic Performance",
"Visualization",
"Augmented Reality",
"Educational Technology",
"Tools",
"Knowledge Transfer",
"Force",
"Real Time Systems",
"Augmented Reality",
"Scaffolding",
"Scientific Inquiry",
"Academic Performance",
"Cognitive Load"
],
"authors": [
{
"affiliation": "Beijing Normal University",
"fullName": "Bingbing Niu",
"givenName": "Bingbing",
"surname": "Niu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Beijing Normal University",
"fullName": "Changhao Liu",
"givenName": "Changhao",
"surname": "Liu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Beijing Normal University",
"fullName": "Jiaqi Liu",
"givenName": "Jiaqi",
"surname": "Liu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Beijing Normal University",
"fullName": "Yuhong Deng",
"givenName": "Yuhong",
"surname": "Deng",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Beijing Normal University",
"fullName": "Qingqing Wan",
"givenName": "Qingqing",
"surname": "Wan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Beijing Normal University",
"fullName": "Ning Ma",
"givenName": "Ning",
"surname": "Ma",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "eitt",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2019-10-01T00:00:00",
"pubType": "proceedings",
"pages": "239-244",
"year": "2019",
"issn": null,
"isbn": "978-1-7281-4288-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "428800a233",
"articleId": "1fHkRtHnIvS",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "428800a245",
"articleId": "1fHkRfKcm3e",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icalt/2012/4702/0/4702a728",
"title": "School of the Future: Using Augmented Reality for Contextual Information and Navigation in Academic Buildings",
"doi": null,
"abstractUrl": "/proceedings-article/icalt/2012/4702a728/12OmNAkWvcI",
"parentPublication": {
"id": "proceedings/icalt/2012/4702/0",
"title": "Advanced Learning Technologies, IEEE International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icalt/2018/6049/0/604901a390",
"title": "The Effect of Learning English Idioms Using Scaffolding Strategy Through Situated Learning Supported by Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/icalt/2018/604901a390/12OmNApLGIP",
"parentPublication": {
"id": "proceedings/icalt/2018/6049/0",
"title": "2018 IEEE 18th International Conference on Advanced Learning Technologies (ICALT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/Ismar-mashd/2014/6887/0/06935444",
"title": "[Poster] CI-Spy: Using mobile-AR for scaffolding historical inquiry learning",
"doi": null,
"abstractUrl": "/proceedings-article/Ismar-mashd/2014/06935444/12OmNrH1PBP",
"parentPublication": {
"id": "proceedings/Ismar-mashd/2014/6887/0",
"title": "2014 IEEE International Symposium on Mixed and Augmented Reality - Media, Art, Social Science, Humanities and Design (ISMAR-MASH'D)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iiai-aai/2017/0621/0/0621a542",
"title": "Effects of Knowledge-Based Scaffolding Interactive e-Book on Multiplication Concept",
"doi": null,
"abstractUrl": "/proceedings-article/iiai-aai/2017/0621a542/12OmNvSbBMR",
"parentPublication": {
"id": "proceedings/iiai-aai/2017/0621/0",
"title": "2017 6th IIAI International Congress on Advanced Applied Informatics (IIAI-AAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/Ismar-mashd/2015/9628/0/9628a009",
"title": "CI-Spy: Designing A Mobile Augmented Reality System for Scaffolding Historical Inquiry Learning",
"doi": null,
"abstractUrl": "/proceedings-article/Ismar-mashd/2015/9628a009/12OmNvkYxa6",
"parentPublication": {
"id": "proceedings/Ismar-mashd/2015/9628/0",
"title": "2015 IEEE International Symposium on Mixed and Augmented Reality - Media, Art, Social Science, Humanities and Design (ISMAR-MASH'D)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iiai-aai/2015/9957/0/07373930",
"title": "Using the Augmented Reality Technique to Develop Visualization Mindtools for Chemical Inquiry-Based Activities",
"doi": null,
"abstractUrl": "/proceedings-article/iiai-aai/2015/07373930/12OmNxFsmEq",
"parentPublication": {
"id": "proceedings/iiai-aai/2015/9957/0",
"title": "2015 IIAI 4th International Congress on Advanced Applied Informatics (IIAI-AAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iiaiaai/2014/4174/0/06913314",
"title": "A Mobile Augmented Reality Based Scaffolding Platform for Outdoor Fieldtrip Learning",
"doi": null,
"abstractUrl": "/proceedings-article/iiaiaai/2014/06913314/12OmNyNQSAi",
"parentPublication": {
"id": "proceedings/iiaiaai/2014/4174/0",
"title": "2014 IIAI 3rd International Conference on Advanced Applied Informatics (IIAIAAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/eitt/2021/2757/0/275700a182",
"title": "Design of metacognitive scaffolding for k-12 programming education and its effects on students' problem solving ability and metacognition",
"doi": null,
"abstractUrl": "/proceedings-article/eitt/2021/275700a182/1AFsrLsRcAw",
"parentPublication": {
"id": "proceedings/eitt/2021/2757/0",
"title": "2021 Tenth International Conference of Educational Innovation through Technology (EITT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iiai-aai/2021/2420/0/242000a926",
"title": "Designing an Augmented Reality Educational Board Game Learning Activity with Dual-Scaffolding Teaching Strategy to Enhance EFL Reading Comprehension",
"doi": null,
"abstractUrl": "/proceedings-article/iiai-aai/2021/242000a926/1Eb2FXYhdE4",
"parentPublication": {
"id": "proceedings/iiai-aai/2021/2420/0",
"title": "2021 10th International Congress on Advanced Applied Informatics (IIAI-AAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iiai-aai/2021/2420/0/242000a928",
"title": "Designing an Augmented Reality-based Educational Board Game Integrated with Dual-Scaffolding Framework for High school History Course: The Evaluation of Learning Performance and Flow State",
"doi": null,
"abstractUrl": "/proceedings-article/iiai-aai/2021/242000a928/1Eb2Mj6qLS0",
"parentPublication": {
"id": "proceedings/iiai-aai/2021/2420/0",
"title": "2021 10th International Congress on Advanced Applied Informatics (IIAI-AAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNzlUKD1",
"title": "2012 IEEE International Conference on Automation Science and Engineering (CASE 2012)",
"acronym": "case",
"groupId": "1001095",
"volume": "0",
"displayVolume": "0",
"year": "2012",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNqzu6R9",
"doi": "10.1109/CoASE.2012.6386480",
"title": "Fast randomized planner for SLAM automation",
"normalizedTitle": "Fast randomized planner for SLAM automation",
"abstract": "In this paper, we automate the traditional problem of Simultaneous Localization and Mapping (SLAM) by interleaving planning for exploring unknown environments by a mobile robot. We denote such planned SLAM systems as SPLAM (Simultaneous Planning Localization and Mapping). The main aim of SPLAM is to plan paths for the SLAM process such that the robot and map uncertainty upon execution of the path remains minimum and tractable. The planning is interleaved with SLAM and hence the terminology SPLAM. While typical SPLAM routines find paths when the robot traverses amidst known regions of the constructed map, herein we use the SPLAM formulation for an exploration like situation. Exploration is carried out through a frontier based approach where we identify multiple frontiers in the known map. Using Randomized Planning techniques we calculate various possible trajectories to all the known frontiers. We introduce a novel strategy for selecting frontiers which mimics Fast SLAM, selects a trajectory for robot motion that will minimize the map and robot state covariance. By using a Fast SLAM like approach for selecting frontiers we are able to decouple the robot and landmark covariance resulting in a faster selection of next best location, while maintaining the same kind of robustness of an EKF based SPLAM framework. We then compare our results with Shortest Path Algorithm and EKF based Planning. We show significant reduction in covariance when compared with shortest frontier first approach, while the uncertainties are comparable to EKF-SPLAM albeit at much faster planning times.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this paper, we automate the traditional problem of Simultaneous Localization and Mapping (SLAM) by interleaving planning for exploring unknown environments by a mobile robot. We denote such planned SLAM systems as SPLAM (Simultaneous Planning Localization and Mapping). The main aim of SPLAM is to plan paths for the SLAM process such that the robot and map uncertainty upon execution of the path remains minimum and tractable. The planning is interleaved with SLAM and hence the terminology SPLAM. While typical SPLAM routines find paths when the robot traverses amidst known regions of the constructed map, herein we use the SPLAM formulation for an exploration like situation. Exploration is carried out through a frontier based approach where we identify multiple frontiers in the known map. Using Randomized Planning techniques we calculate various possible trajectories to all the known frontiers. We introduce a novel strategy for selecting frontiers which mimics Fast SLAM, selects a trajectory for robot motion that will minimize the map and robot state covariance. By using a Fast SLAM like approach for selecting frontiers we are able to decouple the robot and landmark covariance resulting in a faster selection of next best location, while maintaining the same kind of robustness of an EKF based SPLAM framework. We then compare our results with Shortest Path Algorithm and EKF based Planning. We show significant reduction in covariance when compared with shortest frontier first approach, while the uncertainties are comparable to EKF-SPLAM albeit at much faster planning times.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this paper, we automate the traditional problem of Simultaneous Localization and Mapping (SLAM) by interleaving planning for exploring unknown environments by a mobile robot. We denote such planned SLAM systems as SPLAM (Simultaneous Planning Localization and Mapping). The main aim of SPLAM is to plan paths for the SLAM process such that the robot and map uncertainty upon execution of the path remains minimum and tractable. The planning is interleaved with SLAM and hence the terminology SPLAM. While typical SPLAM routines find paths when the robot traverses amidst known regions of the constructed map, herein we use the SPLAM formulation for an exploration like situation. Exploration is carried out through a frontier based approach where we identify multiple frontiers in the known map. Using Randomized Planning techniques we calculate various possible trajectories to all the known frontiers. We introduce a novel strategy for selecting frontiers which mimics Fast SLAM, selects a trajectory for robot motion that will minimize the map and robot state covariance. By using a Fast SLAM like approach for selecting frontiers we are able to decouple the robot and landmark covariance resulting in a faster selection of next best location, while maintaining the same kind of robustness of an EKF based SPLAM framework. We then compare our results with Shortest Path Algorithm and EKF based Planning. We show significant reduction in covariance when compared with shortest frontier first approach, while the uncertainties are comparable to EKF-SPLAM albeit at much faster planning times.",
"fno": "06386480",
"keywords": [
"Covariance Analysis",
"Kalman Filters",
"Mobile Robots",
"Path Planning",
"Random Processes",
"SLAM Robots",
"Randomized Planning Technique",
"SLAM Automation",
"Simultaneous Planning Localization And Mapping",
"Interleaving Planning",
"Mobile Robot",
"SPLAM",
"Map Uncertainty",
"Frontier Based Approach",
"Robot Trajectory",
"Robot State Covariance",
"Landmark Covariance",
"EKF",
"Shortest Path Algorithm",
"Path Planning",
"Simultaneous Localization And Mapping",
"Planning",
"Trajectory",
"Uncertainty",
"Covariance Matrix",
"SPLAM",
"Exploration",
"Trajectory Planning",
"Fast SLAM"
],
"authors": [
{
"affiliation": "Robotics Research Center at IIIT Hyderabad, India",
"fullName": "Amey Parulkar",
"givenName": "Amey",
"surname": "Parulkar",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Robotics Research Center at IIIT Hyderabad, India",
"fullName": "Piyush Shukla",
"givenName": "Piyush",
"surname": "Shukla",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Faculty with the Robotics Research Center, IIIT Hyderabad, India",
"fullName": "K Madhava Krishna",
"givenName": "K Madhava",
"surname": "Krishna",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "case",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2012-08-01T00:00:00",
"pubType": "proceedings",
"pages": "765-770",
"year": "2012",
"issn": "2161-8070",
"isbn": "978-1-4673-0430-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "06386479",
"articleId": "12OmNxRnvWr",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "06386481",
"articleId": "12OmNzxgHtA",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icarsc/2016/2255/0/07781977",
"title": "Indoor SLAM for Micro Aerial Vehicles Control Using Monocular Camera and Sensor Fusion",
"doi": null,
"abstractUrl": "/proceedings-article/icarsc/2016/07781977/12OmNCfjesr",
"parentPublication": {
"id": "proceedings/icarsc/2016/2255/0",
"title": "2016 International Conference on Autonomous Robot Systems and Competitions (ICARSC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ifita/2009/3600/2/3600b435",
"title": "A Review on Localization and Mapping Algorithm Based on Extended Kalman Filtering",
"doi": null,
"abstractUrl": "/proceedings-article/ifita/2009/3600b435/12OmNvonIMM",
"parentPublication": {
"id": "proceedings/ifita/2009/3600/2",
"title": "2009 International Forum on Information Technology and Applications (IFITA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ci/2013/3194/0/06855911",
"title": "Multilayer Perceptron Use in a Mapping Task by Cooperating Robots",
"doi": null,
"abstractUrl": "/proceedings-article/ci/2013/06855911/12OmNvy256d",
"parentPublication": {
"id": "proceedings/ci/2013/3194/0",
"title": "2013 BRICS Congress on Computational Intelligence & 11th Brazilian Congress on Computational Intelligence (BRICS-CCI & CBIC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icisa/2013/0602/0/06579440",
"title": "Simultaneous Localization and Mapping for Mobile Robots in Dynamic Environments",
"doi": null,
"abstractUrl": "/proceedings-article/icisa/2013/06579440/12OmNwoPtza",
"parentPublication": {
"id": "proceedings/icisa/2013/0602/0",
"title": "2013 International Conference on Information Science and Applications (ICISA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/visapp/2014/8133/3/07295128",
"title": "Towards robust image registration for underwater visual SLAM",
"doi": null,
"abstractUrl": "/proceedings-article/visapp/2014/07295128/12OmNzcPAsA",
"parentPublication": {
"id": "proceedings/visapp/2014/8133/2",
"title": "2014 International Conference on Computer Vision Theory and Applications (VISAPP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icris/2019/2632/0/263200a035",
"title": "Mobile Robot SLAM Algorithm Based on Improved Firefly Particle Filter",
"doi": null,
"abstractUrl": "/proceedings-article/icris/2019/263200a035/1cI6kIi8EFi",
"parentPublication": {
"id": "proceedings/icris/2019/2632/0",
"title": "2019 International Conference on Robots & Intelligent System (ICRIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2019/3293/0/329300a134",
"title": "BAD SLAM: Bundle Adjusted Direct RGB-D SLAM",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2019/329300a134/1gyr8GIX9E4",
"parentPublication": {
"id": "proceedings/cvpr/2019/3293/0",
"title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cds/2020/7106/0/710600a439",
"title": "The Research of RBPF-SLAM Accuracy under the Influence of Depth Camera Noises",
"doi": null,
"abstractUrl": "/proceedings-article/cds/2020/710600a439/1pqa2yyFXck",
"parentPublication": {
"id": "proceedings/cds/2020/7106/0",
"title": "2020 International Conference on Computing and Data Science (CDS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900c814",
"title": "Differentiable SLAM-net: Learning Particle SLAM for Visual Navigation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900c814/1yeIHPaF1WU",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdata/2021/2901/0/290100a140",
"title": "Hardware Architecture of EKF-SLAM's Prediction Stage and its FPGA Implementation",
"doi": null,
"abstractUrl": "/proceedings-article/icdata/2021/290100a140/1yov0tOOO3e",
"parentPublication": {
"id": "proceedings/icdata/2021/2901/0",
"title": "2021 International Conference on Digital Age & Technological Advances for Sustainable Development (ICDATA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNyjLoRw",
"title": "2014 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"acronym": "ismar",
"groupId": "1000465",
"volume": "0",
"displayVolume": "0",
"year": "2014",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNx7G661",
"doi": "10.1109/ISMAR.2014.6948422",
"title": "Dense planar SLAM",
"normalizedTitle": "Dense planar SLAM",
"abstract": "Using higher-level entities during mapping has the potential to improve camera localisation performance and give substantial perception capabilities to real-time 3D SLAM systems. We present an efficient new real-time approach which densely maps an environment using bounded planes and surfels extracted from depth images (like those produced by RGB-D sensors or dense multi-view stereo reconstruction). Our method offers the every-pixel descriptive power of the latest dense SLAM approaches, but takes advantage directly of the planarity of many parts of real-world scenes via a data-driven process to directly regularize planar regions and represent their accurate extent efficiently using an occupancy approach with on-line compression. Large areas can be mapped efficiently and with useful semantic planar structure which enables intuitive and useful AR applications such as using any wall or other planar surface in a scene to display a user's content.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Using higher-level entities during mapping has the potential to improve camera localisation performance and give substantial perception capabilities to real-time 3D SLAM systems. We present an efficient new real-time approach which densely maps an environment using bounded planes and surfels extracted from depth images (like those produced by RGB-D sensors or dense multi-view stereo reconstruction). Our method offers the every-pixel descriptive power of the latest dense SLAM approaches, but takes advantage directly of the planarity of many parts of real-world scenes via a data-driven process to directly regularize planar regions and represent their accurate extent efficiently using an occupancy approach with on-line compression. Large areas can be mapped efficiently and with useful semantic planar structure which enables intuitive and useful AR applications such as using any wall or other planar surface in a scene to display a user's content.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Using higher-level entities during mapping has the potential to improve camera localisation performance and give substantial perception capabilities to real-time 3D SLAM systems. We present an efficient new real-time approach which densely maps an environment using bounded planes and surfels extracted from depth images (like those produced by RGB-D sensors or dense multi-view stereo reconstruction). Our method offers the every-pixel descriptive power of the latest dense SLAM approaches, but takes advantage directly of the planarity of many parts of real-world scenes via a data-driven process to directly regularize planar regions and represent their accurate extent efficiently using an occupancy approach with on-line compression. Large areas can be mapped efficiently and with useful semantic planar structure which enables intuitive and useful AR applications such as using any wall or other planar surface in a scene to display a user's content.",
"fno": "06948422",
"keywords": [
"Simultaneous Localization And Mapping",
"Cameras",
"Real Time Systems",
"Three Dimensional Displays",
"Noise",
"Indexes",
"Virtual Realities",
"Computing Methodologies Scene Understanding",
"Computing Methodologies Reconstruction Computing Methodologies Image Processing And Computer Vision Segmentation Information Systems Information Interfaces And Presentation",
"Artificial",
"Augmented"
],
"authors": [
{
"affiliation": "Imperial College London",
"fullName": "Renato F. Salas-Moreno",
"givenName": "Renato F.",
"surname": "Salas-Moreno",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Imperial College London",
"fullName": "Ben Glocken",
"givenName": "Ben",
"surname": "Glocken",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Imperial College London",
"fullName": "Paul H. J. Kelly",
"givenName": "Paul H. J.",
"surname": "Kelly",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Imperial College London",
"fullName": "Andrew J. Davison",
"givenName": "Andrew J.",
"surname": "Davison",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2014-09-01T00:00:00",
"pubType": "proceedings",
"pages": "157-164",
"year": "2014",
"issn": null,
"isbn": "978-1-4799-6184-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "06948421",
"articleId": "12OmNwkzupV",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "06948423",
"articleId": "12OmNwe2InV",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ismar/2014/6184/0/06948492",
"title": "[DEMO] Dense planar SLAM",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2014/06948492/12OmNxvNZZT",
"parentPublication": {
"id": "proceedings/ismar/2014/6184/0",
"title": "2014 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sbr-lars-r/2016/3656/0/07783496",
"title": "Object Subtraction Planar RGB-D SLAM",
"doi": null,
"abstractUrl": "/proceedings-article/sbr-lars-r/2016/07783496/12OmNxwENic",
"parentPublication": {
"id": "proceedings/sbr-lars-r/2016/3656/0",
"title": "2016 XIII Latin-American Robotics Symposium and IV Brazilian Robotics Symposium (LARS/SBR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2017/0457/0/0457g565",
"title": "CNN-SLAM: Real-Time Dense Monocular SLAM with Learned Depth Prediction",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2017/0457g565/12OmNzTH0Qa",
"parentPublication": {
"id": "proceedings/cvpr/2017/0457/0",
"title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/11/08007295",
"title": "Dense Visual SLAM with Probabilistic Surfel Map",
"doi": null,
"abstractUrl": "/journal/tg/2017/11/08007295/13rRUNvgz4n",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600m2776",
"title": "NICE-SLAM: Neural Implicit Scalable Encoding for SLAM",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600m2776/1H1jhZSaE0M",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2023/9346/0/934600d096",
"title": "Probabilistic Volumetric Fusion for Dense Monocular SLAM",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2023/934600d096/1L8qEHGGTlu",
"parentPublication": {
"id": "proceedings/wacv/2023/9346/0",
"title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/asap/2019/1601/0/160100a083",
"title": "FPGA Architectures for Real-time Dense SLAM",
"doi": null,
"abstractUrl": "/proceedings-article/asap/2019/160100a083/1d5kDsB2fdu",
"parentPublication": {
"id": "proceedings/asap/2019/1601/2160-052X",
"title": "2019 IEEE 30th International Conference on Application-specific Systems, Architectures and Processors (ASAP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pact/2019/3613/0/361300a296",
"title": "SLAMBooster: An Application-Aware Online Controller for Approximation in Dense SLAM",
"doi": null,
"abstractUrl": "/proceedings-article/pact/2019/361300a296/1eLy3QnWKuA",
"parentPublication": {
"id": "proceedings/pact/2019/3613/0",
"title": "2019 28th International Conference on Parallel Architectures and Compilation Techniques (PACT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2019/3293/0/329300a134",
"title": "BAD SLAM: Bundle Adjusted Direct RGB-D SLAM",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2019/329300a134/1gyr8GIX9E4",
"parentPublication": {
"id": "proceedings/cvpr/2019/3293/0",
"title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/12/09496211",
"title": "PlaneFusion: Real-Time Indoor Scene Reconstruction With Planar Prior",
"doi": null,
"abstractUrl": "/journal/tg/2022/12/09496211/1vyjumhb4ZO",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNyoiYVr",
"title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"acronym": "cvpr",
"groupId": "1000147",
"volume": "0",
"displayVolume": "0",
"year": "2017",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNzTH0Qa",
"doi": "10.1109/CVPR.2017.695",
"title": "CNN-SLAM: Real-Time Dense Monocular SLAM with Learned Depth Prediction",
"normalizedTitle": "CNN-SLAM: Real-Time Dense Monocular SLAM with Learned Depth Prediction",
"abstract": "Given the recent advances in depth prediction from Convolutional Neural Networks (CNNs), this paper investigates how predicted depth maps from a deep neural network can be deployed for the goal of accurate and dense monocular reconstruction. We propose a method where CNN-predicted dense depth maps are naturally fused together with depth measurements obtained from direct monocular SLAM, based on a scheme that privileges depth prediction in image locations where monocular SLAM approaches tend to fail, e.g. along low-textured regions, and vice-versa. We demonstrate the use of depth prediction to estimate the absolute scale of the reconstruction, hence overcoming one of the major limitations of monocular SLAM. Finally, we propose a framework to efficiently fuse semantic labels, obtained from a single frame, with dense SLAM, so to yield semantically coherent scene reconstruction from a single view. Evaluation results on two benchmark datasets show the robustness and accuracy of our approach.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Given the recent advances in depth prediction from Convolutional Neural Networks (CNNs), this paper investigates how predicted depth maps from a deep neural network can be deployed for the goal of accurate and dense monocular reconstruction. We propose a method where CNN-predicted dense depth maps are naturally fused together with depth measurements obtained from direct monocular SLAM, based on a scheme that privileges depth prediction in image locations where monocular SLAM approaches tend to fail, e.g. along low-textured regions, and vice-versa. We demonstrate the use of depth prediction to estimate the absolute scale of the reconstruction, hence overcoming one of the major limitations of monocular SLAM. Finally, we propose a framework to efficiently fuse semantic labels, obtained from a single frame, with dense SLAM, so to yield semantically coherent scene reconstruction from a single view. Evaluation results on two benchmark datasets show the robustness and accuracy of our approach.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Given the recent advances in depth prediction from Convolutional Neural Networks (CNNs), this paper investigates how predicted depth maps from a deep neural network can be deployed for the goal of accurate and dense monocular reconstruction. We propose a method where CNN-predicted dense depth maps are naturally fused together with depth measurements obtained from direct monocular SLAM, based on a scheme that privileges depth prediction in image locations where monocular SLAM approaches tend to fail, e.g. along low-textured regions, and vice-versa. We demonstrate the use of depth prediction to estimate the absolute scale of the reconstruction, hence overcoming one of the major limitations of monocular SLAM. Finally, we propose a framework to efficiently fuse semantic labels, obtained from a single frame, with dense SLAM, so to yield semantically coherent scene reconstruction from a single view. Evaluation results on two benchmark datasets show the robustness and accuracy of our approach.",
"fno": "0457g565",
"keywords": [
"Image Reconstruction",
"Image Texture",
"Learning Artificial Intelligence",
"Mobile Robots",
"Neural Nets",
"Robot Vision",
"SLAM Robots",
"Stereo Image Processing",
"Dense Monocular Reconstruction",
"Depth Measurements",
"Direct Monocular SLAM",
"CNN SLAM",
"Real Time Dense Monocular SLAM",
"Deep Neural Network",
"Convolutional Neural Networks",
"Depth Maps Prediction Learning",
"Simultaneous Localization And Mapping",
"Image Reconstruction",
"Cameras",
"Semantics",
"Pose Estimation",
"Three Dimensional Displays"
],
"authors": [
{
"affiliation": null,
"fullName": "Keisuke Tateno",
"givenName": "Keisuke",
"surname": "Tateno",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Federico Tombari",
"givenName": "Federico",
"surname": "Tombari",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Iro Laina",
"givenName": "Iro",
"surname": "Laina",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Nassir Navab",
"givenName": "Nassir",
"surname": "Navab",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2017-07-01T00:00:00",
"pubType": "proceedings",
"pages": "6565-6574",
"year": "2017",
"issn": "1063-6919",
"isbn": "978-1-5386-0457-1",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "0457g555",
"articleId": "12OmNvzJGch",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "0457g575",
"articleId": "12OmNzTYC7f",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iccvw/2017/1034/0/1034a912",
"title": "3D Scene Mesh from CNN Depth Predictions and Sparse Monocular SLAM",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2017/1034a912/12OmNvD8RuE",
"parentPublication": {
"id": "proceedings/iccvw/2017/1034/0",
"title": "2017 IEEE International Conference on Computer Vision Workshop (ICCVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2018/1737/0/08486548",
"title": "Dense Reconstruction from Monocular Slam with Fusion of Sparse Map-Points and Cnn-Inferred Depth",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2018/08486548/14jQfP7ey4y",
"parentPublication": {
"id": "proceedings/icme/2018/1737/0",
"title": "2018 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2018/6420/0/642000c560",
"title": "CodeSLAM - Learning a Compact, Optimisable Representation for Dense Visual SLAM",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2018/642000c560/17D45VUZMVf",
"parentPublication": {
"id": "proceedings/cvpr/2018/6420/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2018/3788/0/08545173",
"title": "Em-SLAM: a Fast and Robust Monocular SLAM Method for Embedded Systems",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2018/08545173/17D45XdBRQs",
"parentPublication": {
"id": "proceedings/icpr/2018/3788/0",
"title": "2018 24th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2018/7592/0/08699273",
"title": "CNN-MonoFusion: Online Monocular Dense Reconstruction Using Learned Depth from Single View",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699273/19F1QKV77QQ",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2018/7592/0",
"title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2022/9062/0/09956576",
"title": "Joint Self-Supervised Monocular Depth Estimation and SLAM",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2022/09956576/1IHpbIpwRfW",
"parentPublication": {
"id": "proceedings/icpr/2022/9062/0",
"title": "2022 26th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2023/9346/0/934600d096",
"title": "Probabilistic Volumetric Fusion for Dense Monocular SLAM",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2023/934600d096/1L8qEHGGTlu",
"parentPublication": {
"id": "proceedings/wacv/2023/9346/0",
"title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2021/0477/0/047700b760",
"title": "SLAM in the Field: An Evaluation of Monocular Mapping and Localization on Challenging Dynamic Agricultural Environment",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2021/047700b760/1uqGnyhqL6g",
"parentPublication": {
"id": "proceedings/wacv/2021/0477/0",
"title": "2021 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvrv/2020/0497/0/049700a101",
"title": "Improved ORB-SLAM Based 3D Dense Reconstruction for Monocular Endoscopic Image",
"doi": null,
"abstractUrl": "/proceedings-article/icvrv/2020/049700a101/1vg8aYQPZi8",
"parentPublication": {
"id": "proceedings/icvrv/2020/0497/0",
"title": "2020 International Conference on Virtual Reality and Visualization (ICVRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2021/2688/0/268800b362",
"title": "DSP-SLAM: Object Oriented SLAM with Deep Shape Priors",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2021/268800b362/1zWEhxN28YU",
"parentPublication": {
"id": "proceedings/3dv/2021/2688/0",
"title": "2021 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1AFswbWGnQs",
"title": "2021 2nd International Conference on Computer Engineering and Intelligent Control (ICCEIC)",
"acronym": "icceic",
"groupId": "1840484",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1AFsC5XRpMk",
"doi": "10.1109/ICCEIC54227.2021.00024",
"title": "IDMC-VSLAM: Improved dense map construction and visual SLAM in dynamic environments",
"normalizedTitle": "IDMC-VSLAM: Improved dense map construction and visual SLAM in dynamic environments",
"abstract": "In this paper, an improved visual SLAM system is proposed. Firstly, we embed a lightweight convolutional neural networks for semantic segmentation into the visual SLAM to improve the efficiency of detecting moving objects in dynamic environments. Then, the detected moving objects are eliminated to improve the accuracy and robustness of visual SLAM. Subsequently, a dense point cloud map using unsupervised learning is proposed, for robot obstacle avoidance and path planning. Experimental results in the open TUM RGB-D dataset show that our method not only significantly improves the computational efficiency but also ensures the positioning accuracy.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this paper, an improved visual SLAM system is proposed. Firstly, we embed a lightweight convolutional neural networks for semantic segmentation into the visual SLAM to improve the efficiency of detecting moving objects in dynamic environments. Then, the detected moving objects are eliminated to improve the accuracy and robustness of visual SLAM. Subsequently, a dense point cloud map using unsupervised learning is proposed, for robot obstacle avoidance and path planning. Experimental results in the open TUM RGB-D dataset show that our method not only significantly improves the computational efficiency but also ensures the positioning accuracy.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this paper, an improved visual SLAM system is proposed. Firstly, we embed a lightweight convolutional neural networks for semantic segmentation into the visual SLAM to improve the efficiency of detecting moving objects in dynamic environments. Then, the detected moving objects are eliminated to improve the accuracy and robustness of visual SLAM. Subsequently, a dense point cloud map using unsupervised learning is proposed, for robot obstacle avoidance and path planning. Experimental results in the open TUM RGB-D dataset show that our method not only significantly improves the computational efficiency but also ensures the positioning accuracy.",
"fno": "021200a081",
"keywords": [
"Collision Avoidance",
"Image Segmentation",
"Mobile Robots",
"Neural Nets",
"Object Detection",
"Path Planning",
"Robot Vision",
"SLAM Robots",
"Unsupervised Learning",
"Improved Visual SLAM System",
"Lightweight Convolutional Neural Networks",
"Dynamic Environments",
"Detected Moving Objects",
"Dense Point Cloud Map",
"IDMC VSLAM",
"Improved Dense Map Construction",
"Point Cloud Compression",
"Visualization",
"Simultaneous Localization And Mapping",
"Image Color Analysis",
"Semantics",
"Lasers",
"Path Planning",
"Visual SLAM",
"Semantic Segmentation",
"Unsupervised Learning"
],
"authors": [
{
"affiliation": "Heilongjiang University,School of Electronic Engineering,Harbin,China",
"fullName": "Shifeng Jia",
"givenName": "Shifeng",
"surname": "Jia",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Heilongjiang University,School of Electronic Engineering,Harbin,China",
"fullName": "Haihua Yu",
"givenName": "Haihua",
"surname": "Yu",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icceic",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-11-01T00:00:00",
"pubType": "proceedings",
"pages": "81-85",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-0212-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "021200a077",
"articleId": "1AFsyZ4i8q4",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "021200a086",
"articleId": "1AFsz9bTf0I",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "trans/tg/2017/11/08007295",
"title": "Dense Visual SLAM with Probabilistic Surfel Map",
"doi": null,
"abstractUrl": "/journal/tg/2017/11/08007295/13rRUNvgz4n",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2019/1975/0/197500a210",
"title": "A Comparative Analysis of Visual-Inertial SLAM for Assisted Wayfinding of the Visually Impaired",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2019/197500a210/18j8P4rWFdm",
"parentPublication": {
"id": "proceedings/wacv/2019/1975/0",
"title": "2019 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/insai/2021/0859/0/085900a090",
"title": "2D LIDAR SLAM Based On Gauss-Newton",
"doi": null,
"abstractUrl": "/proceedings-article/insai/2021/085900a090/1CHwZ4MyfkY",
"parentPublication": {
"id": "proceedings/insai/2021/0859/0",
"title": "2021 International Conference on Networking Systems of AI (INSAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icica/2022/9011/0/901100a073",
"title": "MN-SLAM: Multi-networks Visual SLAM for Dynamic and Complicated Environments",
"doi": null,
"abstractUrl": "/proceedings-article/icica/2022/901100a073/1LKxbryd1CM",
"parentPublication": {
"id": "proceedings/icica/2022/9011/0",
"title": "2022 11th International Conference on Information Communication and Applications (ICICA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2023/4815/0/481500a509",
"title": "SCP-SLAM: Accelerating DynaSLAM With Static Confidence Propagation",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2023/481500a509/1MNgqqS7YI0",
"parentPublication": {
"id": "proceedings/vr/2023/4815/0",
"title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dasc-picom-cbdcom-cyberscitech/2019/3024/0/302400a924",
"title": "Improved SLAM Merged 2D and 3D Sensors for Mobile Robots",
"doi": null,
"abstractUrl": "/proceedings-article/dasc-picom-cbdcom-cyberscitech/2019/302400a924/1eEUs9V6UN2",
"parentPublication": {
"id": "proceedings/dasc-picom-cbdcom-cyberscitech/2019/3024/0",
"title": "2019 IEEE Intl Conf on Dependable, Autonomic and Secure Computing, Intl Conf on Pervasive Intelligence and Computing, Intl Conf on Cloud and Big Data Computing, Intl Conf on Cyber Science and Technology Congress (DASC/PiCom/CBDCom/CyberSciTech)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2019/3131/0/313100a574",
"title": "Mobile Photometric Stereo with Keypoint-Based SLAM for Dense 3D Reconstruction",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2019/313100a574/1ezREwjZfFe",
"parentPublication": {
"id": "proceedings/3dv/2019/3131/0",
"title": "2019 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2020/8128/0/812800a959",
"title": "FC-vSLAM: Integrating Feature Credibility in Visual SLAM",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2020/812800a959/1qyxjlMBUYw",
"parentPublication": {
"id": "proceedings/3dv/2020/8128/0",
"title": "2020 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2021/8808/0/09412341",
"title": "Learning to Segment Dynamic Objects using SLAM Outliers",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2021/09412341/1tmjm0W44yQ",
"parentPublication": {
"id": "proceedings/icpr/2021/8808/0",
"title": "2020 25th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmcce/2020/2314/0/231400c072",
"title": "VSLAM based on instance segmentation",
"doi": null,
"abstractUrl": "/proceedings-article/icmcce/2020/231400c072/1tzyLKi1FGo",
"parentPublication": {
"id": "proceedings/icmcce/2020/2314/0",
"title": "2020 5th International Conference on Mechanical, Control and Computer Engineering (ICMCCE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1KxUhhFgzlK",
"title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)",
"acronym": "wacv",
"groupId": "1000040",
"volume": "0",
"displayVolume": "0",
"year": "2023",
"__typename": "ProceedingType"
},
"article": {
"id": "1L8qEHGGTlu",
"doi": "10.1109/WACV56688.2023.00311",
"title": "Probabilistic Volumetric Fusion for Dense Monocular SLAM",
"normalizedTitle": "Probabilistic Volumetric Fusion for Dense Monocular SLAM",
"abstract": "We present a novel method to reconstruct 3D scenes from images by leveraging deep dense monocular SLAM and fast uncertainty propagation. The proposed approach is able to 3D reconstruct scenes densely, accurately, and in realtime while being robust to extremely noisy depth estimates coming from dense monocular SLAM. Differently from previous approaches, that either use ad-hoc depth filters, or that estimate the depth uncertainty from RGB-D cameras’ sensor models, our probabilistic depth uncertainty derives directly from the information matrix of the underlying bundle adjustment problem in SLAM. We show that the resulting depth uncertainty provides an excellent signal to weight the depth-maps for volumetric fusion. Without our depth uncertainty, the resulting mesh is noisy and with artifacts, while our approach generates an accurate 3D mesh with significantly fewer artifacts. We provide results on the challenging Euroc dataset, and show that our approach achieves 92% better accuracy than directly fusing depths from monocular SLAM, and up to 90% improvements compared to the best competing approach.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We present a novel method to reconstruct 3D scenes from images by leveraging deep dense monocular SLAM and fast uncertainty propagation. The proposed approach is able to 3D reconstruct scenes densely, accurately, and in realtime while being robust to extremely noisy depth estimates coming from dense monocular SLAM. Differently from previous approaches, that either use ad-hoc depth filters, or that estimate the depth uncertainty from RGB-D cameras’ sensor models, our probabilistic depth uncertainty derives directly from the information matrix of the underlying bundle adjustment problem in SLAM. We show that the resulting depth uncertainty provides an excellent signal to weight the depth-maps for volumetric fusion. Without our depth uncertainty, the resulting mesh is noisy and with artifacts, while our approach generates an accurate 3D mesh with significantly fewer artifacts. We provide results on the challenging Euroc dataset, and show that our approach achieves 92% better accuracy than directly fusing depths from monocular SLAM, and up to 90% improvements compared to the best competing approach.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We present a novel method to reconstruct 3D scenes from images by leveraging deep dense monocular SLAM and fast uncertainty propagation. The proposed approach is able to 3D reconstruct scenes densely, accurately, and in realtime while being robust to extremely noisy depth estimates coming from dense monocular SLAM. Differently from previous approaches, that either use ad-hoc depth filters, or that estimate the depth uncertainty from RGB-D cameras’ sensor models, our probabilistic depth uncertainty derives directly from the information matrix of the underlying bundle adjustment problem in SLAM. We show that the resulting depth uncertainty provides an excellent signal to weight the depth-maps for volumetric fusion. Without our depth uncertainty, the resulting mesh is noisy and with artifacts, while our approach generates an accurate 3D mesh with significantly fewer artifacts. We provide results on the challenging Euroc dataset, and show that our approach achieves 92% better accuracy than directly fusing depths from monocular SLAM, and up to 90% improvements compared to the best competing approach.",
"fno": "934600d096",
"keywords": [
"Image Colour Analysis",
"Image Fusion",
"Image Reconstruction",
"Robot Vision",
"SLAM Robots",
"Accurate 3 D Mesh",
"Ad Hoc Depth",
"Bundle Adjustment Problem",
"Deep Dense Monocular SLAM",
"Depth Uncertainty",
"Depth Maps",
"Fast Uncertainty Propagation",
"Noisy Depth",
"Probabilistic Depth Uncertainty",
"Probabilistic Volumetric Fusion",
"RGB D Cameras",
"Geometry",
"Visualization",
"Uncertainty",
"Three Dimensional Displays",
"Simultaneous Localization And Mapping",
"Semantics",
"Probabilistic Logic",
"Algorithms 3 D Computer Vision",
"Robotics"
],
"authors": [
{
"affiliation": "Massachusetts Institute of Technology",
"fullName": "Antoni Rosinol",
"givenName": "Antoni",
"surname": "Rosinol",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Massachusetts Institute of Technology",
"fullName": "John J. Leonard",
"givenName": "John J.",
"surname": "Leonard",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Massachusetts Institute of Technology",
"fullName": "Luca Carlone",
"givenName": "Luca",
"surname": "Carlone",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "wacv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2023-01-01T00:00:00",
"pubType": "proceedings",
"pages": "3096-3104",
"year": "2023",
"issn": null,
"isbn": "978-1-6654-9346-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "934600d086",
"articleId": "1KxVIbV91zq",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "934600d105",
"articleId": "1KxVaVLkeLS",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iccvw/2017/1034/0/1034c408",
"title": "Edge SLAM: Edge Points Based Monocular Visual SLAM",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2017/1034c408/12OmNCb3frz",
"parentPublication": {
"id": "proceedings/iccvw/2017/1034/0",
"title": "2017 IEEE International Conference on Computer Vision Workshop (ICCVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2017/1034/0/1034a912",
"title": "3D Scene Mesh from CNN Depth Predictions and Sparse Monocular SLAM",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2017/1034a912/12OmNvD8RuE",
"parentPublication": {
"id": "proceedings/iccvw/2017/1034/0",
"title": "2017 IEEE International Conference on Computer Vision Workshop (ICCVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmtma/2018/5114/0/511401a350",
"title": "Monocular SLAM Algorithm Based on Improved Depth Map Estimation and Keyframe Selection",
"doi": null,
"abstractUrl": "/proceedings-article/icmtma/2018/511401a350/12OmNyeECAZ",
"parentPublication": {
"id": "proceedings/icmtma/2018/5114/0",
"title": "2018 10th International Conference on Measuring Technology and Mechatronics Automation (ICMTMA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2017/0457/0/0457g565",
"title": "CNN-SLAM: Real-Time Dense Monocular SLAM with Learned Depth Prediction",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2017/0457g565/12OmNzTH0Qa",
"parentPublication": {
"id": "proceedings/cvpr/2017/0457/0",
"title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2018/1737/0/08486548",
"title": "Dense Reconstruction from Monocular Slam with Fusion of Sparse Map-Points and Cnn-Inferred Depth",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2018/08486548/14jQfP7ey4y",
"parentPublication": {
"id": "proceedings/icme/2018/1737/0",
"title": "2018 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2018/6420/0/642000c560",
"title": "CodeSLAM - Learning a Compact, Optimisable Representation for Dense Visual SLAM",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2018/642000c560/17D45VUZMVf",
"parentPublication": {
"id": "proceedings/cvpr/2018/6420/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2022/9062/0/09956576",
"title": "Joint Self-Supervised Monocular Depth Estimation and SLAM",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2022/09956576/1IHpbIpwRfW",
"parentPublication": {
"id": "proceedings/icpr/2022/9062/0",
"title": "2022 26th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hdis/2022/9144/0/09991394",
"title": "Pseudo Depth Maps for RGB-D SLAM",
"doi": null,
"abstractUrl": "/proceedings-article/hdis/2022/09991394/1JwQ1uhFF4s",
"parentPublication": {
"id": "proceedings/hdis/2022/9144/0",
"title": "2022 International Conference on High Performance Big Data and Intelligent Systems (HDIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2019/9552/0/955200a103",
"title": "Real-Time Monocular Visual SLAM by Combining Points and Lines",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2019/955200a103/1cdORi5z7fa",
"parentPublication": {
"id": "proceedings/icme/2019/9552/0",
"title": "2019 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvrv/2020/0497/0/049700a101",
"title": "Improved ORB-SLAM Based 3D Dense Reconstruction for Monocular Endoscopic Image",
"doi": null,
"abstractUrl": "/proceedings-article/icvrv/2020/049700a101/1vg8aYQPZi8",
"parentPublication": {
"id": "proceedings/icvrv/2020/0497/0",
"title": "2020 International Conference on Virtual Reality and Visualization (ICVRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1d5kCMKkXdK",
"title": "2019 IEEE 30th International Conference on Application-specific Systems, Architectures and Processors (ASAP)",
"acronym": "asap",
"groupId": "1000037",
"volume": "0",
"displayVolume": "0",
"year": "2019",
"__typename": "ProceedingType"
},
"article": {
"id": "1d5kDsB2fdu",
"doi": "10.1109/ASAP.2019.00-25",
"title": "FPGA Architectures for Real-time Dense SLAM",
"normalizedTitle": "FPGA Architectures for Real-time Dense SLAM",
"abstract": "Simultaneous Localization And Mapping (SLAM) is an important technique used in robotics, computer vision, and virtual/augmented reality. SLAM algorithms have moved past creating sparse maps to making dense 3D reconstruction of the environment. Dense SLAM algorithms have high computational demands that require hardware acceleration to be done efficiently in real-time. FPGAs are an attractive compute platform for SLAM systems as they are low power and high performance. Unfortunately, dense SLAM algorithms are complex and FPGAs are notoriously difficult to program. In this work, we study the best techniques for accelerating 3D reconstruction on FPGA. We analyze a 3D reconstruction system, and implement modular FPGA designs for the main components of this application. We target both an FPGA SoC and a larger FPGA PCIe board, and perform a design space exploration (DSE) of our designs. We analyze the results of our DSE, characterize the design spaces to highlight important features, and we implement the best designs in an open-source and end-to-end dense SLAM system running on a FPGA SoC board. On the SoC board, using the FPGA increases the throughput of the whole application by a factor of two compared to the ARM processor, and individual algorithms are up to 38 times faster on the FPGA.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Simultaneous Localization And Mapping (SLAM) is an important technique used in robotics, computer vision, and virtual/augmented reality. SLAM algorithms have moved past creating sparse maps to making dense 3D reconstruction of the environment. Dense SLAM algorithms have high computational demands that require hardware acceleration to be done efficiently in real-time. FPGAs are an attractive compute platform for SLAM systems as they are low power and high performance. Unfortunately, dense SLAM algorithms are complex and FPGAs are notoriously difficult to program. In this work, we study the best techniques for accelerating 3D reconstruction on FPGA. We analyze a 3D reconstruction system, and implement modular FPGA designs for the main components of this application. We target both an FPGA SoC and a larger FPGA PCIe board, and perform a design space exploration (DSE) of our designs. We analyze the results of our DSE, characterize the design spaces to highlight important features, and we implement the best designs in an open-source and end-to-end dense SLAM system running on a FPGA SoC board. On the SoC board, using the FPGA increases the throughput of the whole application by a factor of two compared to the ARM processor, and individual algorithms are up to 38 times faster on the FPGA.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Simultaneous Localization And Mapping (SLAM) is an important technique used in robotics, computer vision, and virtual/augmented reality. SLAM algorithms have moved past creating sparse maps to making dense 3D reconstruction of the environment. Dense SLAM algorithms have high computational demands that require hardware acceleration to be done efficiently in real-time. FPGAs are an attractive compute platform for SLAM systems as they are low power and high performance. Unfortunately, dense SLAM algorithms are complex and FPGAs are notoriously difficult to program. In this work, we study the best techniques for accelerating 3D reconstruction on FPGA. We analyze a 3D reconstruction system, and implement modular FPGA designs for the main components of this application. We target both an FPGA SoC and a larger FPGA PCIe board, and perform a design space exploration (DSE) of our designs. We analyze the results of our DSE, characterize the design spaces to highlight important features, and we implement the best designs in an open-source and end-to-end dense SLAM system running on a FPGA SoC board. On the SoC board, using the FPGA increases the throughput of the whole application by a factor of two compared to the ARM processor, and individual algorithms are up to 38 times faster on the FPGA.",
"fno": "160100a083",
"keywords": [
"Computer Vision",
"Field Programmable Gate Arrays",
"Image Reconstruction",
"SLAM Robots",
"System On Chip",
"High Computational Demands",
"Attractive Compute Platform",
"SLAM Systems",
"Dense SLAM Algorithms",
"3 D Reconstruction System",
"Modular FPGA Designs",
"Design Space Exploration",
"End To End Dense SLAM System",
"FPGA So C Board",
"FPGA Architectures",
"Real Time Dense SLAM",
"Computer Vision",
"Sparse Maps",
"Dense 3 D Reconstruction",
"FPGA PC Ie Board",
"Field Programmable Gate Arrays",
"Simultaneous Localization And Mapping",
"Three Dimensional Displays",
"Real Time Systems",
"Optimization",
"Casting",
"Solid Modeling",
"FPGA",
"SLAM",
"So C",
"3 D Reconstruction",
"Design Space Exploration"
],
"authors": [
{
"affiliation": "University of California, San Diego, USA",
"fullName": "Quentin Gautier",
"givenName": "Quentin",
"surname": "Gautier",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of California, San Diego, USA",
"fullName": "Alric Althoff",
"givenName": "Alric",
"surname": "Althoff",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of California, San Diego, USA",
"fullName": "Ryan Kastner",
"givenName": "Ryan",
"surname": "Kastner",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "asap",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2019-07-01T00:00:00",
"pubType": "proceedings",
"pages": "83-90",
"year": "2019",
"issn": null,
"isbn": "978-1-7281-1601-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "160100a042",
"articleId": "1d5kG3SSUyk",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "160100a051",
"articleId": "1d5kEZxzEFq",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ismar/2014/6184/0/06948422",
"title": "Dense planar SLAM",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2014/06948422/12OmNx7G661",
"parentPublication": {
"id": "proceedings/ismar/2014/6184/0",
"title": "2014 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2017/0457/0/0457g565",
"title": "CNN-SLAM: Real-Time Dense Monocular SLAM with Learned Depth Prediction",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2017/0457g565/12OmNzTH0Qa",
"parentPublication": {
"id": "proceedings/cvpr/2017/0457/0",
"title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aiccsa/2016/4320/0/07945638",
"title": "High-level synthesis for FPGA design based-SLAM application",
"doi": null,
"abstractUrl": "/proceedings-article/aiccsa/2016/07945638/12OmNzmclNd",
"parentPublication": {
"id": "proceedings/aiccsa/2016/4320/0",
"title": "2016 IEEE/ACS 13th International Conference of Computer Systems and Applications (AICCSA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/11/08007295",
"title": "Dense Visual SLAM with Probabilistic Surfel Map",
"doi": null,
"abstractUrl": "/journal/tg/2017/11/08007295/13rRUNvgz4n",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600m2776",
"title": "NICE-SLAM: Neural Implicit Scalable Encoding for SLAM",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600m2776/1H1jhZSaE0M",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2023/9346/0/934600d096",
"title": "Probabilistic Volumetric Fusion for Dense Monocular SLAM",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2023/934600d096/1L8qEHGGTlu",
"parentPublication": {
"id": "proceedings/wacv/2023/9346/0",
"title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2019/3131/0/313100a574",
"title": "Mobile Photometric Stereo with Keypoint-Based SLAM for Dense 3D Reconstruction",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2019/313100a574/1ezREwjZfFe",
"parentPublication": {
"id": "proceedings/3dv/2019/3131/0",
"title": "2019 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2019/3293/0/329300a134",
"title": "BAD SLAM: Bundle Adjusted Direct RGB-D SLAM",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2019/329300a134/1gyr8GIX9E4",
"parentPublication": {
"id": "proceedings/cvpr/2019/3293/0",
"title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvrv/2020/0497/0/049700a101",
"title": "Improved ORB-SLAM Based 3D Dense Reconstruction for Monocular Endoscopic Image",
"doi": null,
"abstractUrl": "/proceedings-article/icvrv/2020/049700a101/1vg8aYQPZi8",
"parentPublication": {
"id": "proceedings/icvrv/2020/0497/0",
"title": "2020 International Conference on Virtual Reality and Visualization (ICVRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdata/2021/2901/0/290100a140",
"title": "Hardware Architecture of EKF-SLAM's Prediction Stage and its FPGA Implementation",
"doi": null,
"abstractUrl": "/proceedings-article/icdata/2021/290100a140/1yov0tOOO3e",
"parentPublication": {
"id": "proceedings/icdata/2021/2901/0",
"title": "2021 International Conference on Digital Age & Technological Advances for Sustainable Development (ICDATA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1eLxZSVsOLC",
"title": "2019 28th International Conference on Parallel Architectures and Compilation Techniques (PACT)",
"acronym": "pact",
"groupId": "1000535",
"volume": "0",
"displayVolume": "0",
"year": "2019",
"__typename": "ProceedingType"
},
"article": {
"id": "1eLy3QnWKuA",
"doi": "10.1109/PACT.2019.00031",
"title": "SLAMBooster: An Application-Aware Online Controller for Approximation in Dense SLAM",
"normalizedTitle": "SLAMBooster: An Application-Aware Online Controller for Approximation in Dense SLAM",
"abstract": "Simultaneous Localization and Mapping (SLAM) is the problem of constructing a map of a mobile agent's environment while localizing the agent within the map. Dense SLAM algorithms perform reconstruction and localization at pixel granularity. These algorithms require a lot of computational power, which has hindered their use on low-power resource-constrained devices. Approximate computing can be used to speed up SLAM implementations as long as the approximations do not prevent the agent from navigating correctly through the environment. Previous studies of approximation in SLAM have assumed that the entire trajectory of the agent is known before the agent starts, and they have focused on offline controllers that set approximation knobs at the start of the trajectory. In practice, the trajectory is usually not known ahead of time, and allowing knob settings to change dynamically opens up more opportunities for reducing computation time and energy. In this paper, we describe SLAMBooster, an application-aware, online control system for dense SLAM that adaptively controls approximation knobs during the motion of the agent. SLAMBooster is based on a control technique called proportional-integral-derivative (PID) controller but our experiments showed this application-agnostic controller led to an unacceptable reduction in localization accuracy. To address this problem, SLAMBooster also exploits domain knowledge for controlling approximation by performing smooth surface detection and pose correction. We implemented SLAMBooster in the open-source SLAMBench framework and evaluated it on more than a dozen trajectories from both the literature and our own study. Our experiments show that on the average, SLAMBooster reduces the computation time by 72% and energy consumption by 35% on an embedded platform, while maintaining the accuracy of localization within reasonable bounds. These improvements make it feasible to deploy SLAM on a wider range of devices.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Simultaneous Localization and Mapping (SLAM) is the problem of constructing a map of a mobile agent's environment while localizing the agent within the map. Dense SLAM algorithms perform reconstruction and localization at pixel granularity. These algorithms require a lot of computational power, which has hindered their use on low-power resource-constrained devices. Approximate computing can be used to speed up SLAM implementations as long as the approximations do not prevent the agent from navigating correctly through the environment. Previous studies of approximation in SLAM have assumed that the entire trajectory of the agent is known before the agent starts, and they have focused on offline controllers that set approximation knobs at the start of the trajectory. In practice, the trajectory is usually not known ahead of time, and allowing knob settings to change dynamically opens up more opportunities for reducing computation time and energy. In this paper, we describe SLAMBooster, an application-aware, online control system for dense SLAM that adaptively controls approximation knobs during the motion of the agent. SLAMBooster is based on a control technique called proportional-integral-derivative (PID) controller but our experiments showed this application-agnostic controller led to an unacceptable reduction in localization accuracy. To address this problem, SLAMBooster also exploits domain knowledge for controlling approximation by performing smooth surface detection and pose correction. We implemented SLAMBooster in the open-source SLAMBench framework and evaluated it on more than a dozen trajectories from both the literature and our own study. Our experiments show that on the average, SLAMBooster reduces the computation time by 72% and energy consumption by 35% on an embedded platform, while maintaining the accuracy of localization within reasonable bounds. These improvements make it feasible to deploy SLAM on a wider range of devices.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Simultaneous Localization and Mapping (SLAM) is the problem of constructing a map of a mobile agent's environment while localizing the agent within the map. Dense SLAM algorithms perform reconstruction and localization at pixel granularity. These algorithms require a lot of computational power, which has hindered their use on low-power resource-constrained devices. Approximate computing can be used to speed up SLAM implementations as long as the approximations do not prevent the agent from navigating correctly through the environment. Previous studies of approximation in SLAM have assumed that the entire trajectory of the agent is known before the agent starts, and they have focused on offline controllers that set approximation knobs at the start of the trajectory. In practice, the trajectory is usually not known ahead of time, and allowing knob settings to change dynamically opens up more opportunities for reducing computation time and energy. In this paper, we describe SLAMBooster, an application-aware, online control system for dense SLAM that adaptively controls approximation knobs during the motion of the agent. SLAMBooster is based on a control technique called proportional-integral-derivative (PID) controller but our experiments showed this application-agnostic controller led to an unacceptable reduction in localization accuracy. To address this problem, SLAMBooster also exploits domain knowledge for controlling approximation by performing smooth surface detection and pose correction. We implemented SLAMBooster in the open-source SLAMBench framework and evaluated it on more than a dozen trajectories from both the literature and our own study. Our experiments show that on the average, SLAMBooster reduces the computation time by 72% and energy consumption by 35% on an embedded platform, while maintaining the accuracy of localization within reasonable bounds. These improvements make it feasible to deploy SLAM on a wider range of devices.",
"fno": "361300a296",
"keywords": [
"Adaptive Control",
"Control Engineering Computing",
"Image Reconstruction",
"Mobile Robots",
"Robot Vision",
"SLAM Robots",
"Three Term Control",
"SLAM Booster",
"Application Aware Online Controller",
"Mobile Agent",
"Dense SLAM Algorithms",
"Pixel Granularity",
"Low Power Resource Constrained Devices",
"Online Control System",
"Proportional Integral Derivative Controller",
"Application Agnostic Controller",
"Simultaneous Localization And Mapping",
"Trajectory",
"Approximation Algorithms",
"Navigation",
"Three Dimensional Displays",
"Cameras",
"Performance Evaluation",
"Approximate Computing",
"SLAM",
"Kinect Fusion",
"Control Theory"
],
"authors": [
{
"affiliation": "The University of Texas at Austin",
"fullName": "Yan Pei",
"givenName": "Yan",
"surname": "Pei",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Indian Institute of Technology Kanpur",
"fullName": "Swarnendu Biswas",
"givenName": "Swarnendu",
"surname": "Biswas",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "The University of Texas at Austin",
"fullName": "Donald S. Fussell",
"givenName": "Donald S.",
"surname": "Fussell",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "The University of Texas at Austin",
"fullName": "Keshav Pingali",
"givenName": "Keshav",
"surname": "Pingali",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "pact",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2019-09-01T00:00:00",
"pubType": "proceedings",
"pages": "296-310",
"year": "2019",
"issn": null,
"isbn": "978-1-7281-3613-4",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "361300a284",
"articleId": "1eLy18gAz5u",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "361300a311",
"articleId": "1eLy3WPTyLu",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/sbr-lars-r/2016/3656/0/07783548",
"title": "Effects of Water Currents in a Continuous Attractor Neural Network for SLAM Applications",
"doi": null,
"abstractUrl": "/proceedings-article/sbr-lars-r/2016/07783548/12OmNCfAPNh",
"parentPublication": {
"id": "proceedings/sbr-lars-r/2016/3656/0",
"title": "2016 XIII Latin-American Robotics Symposium and IV Brazilian Robotics Symposium (LARS/SBR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icarsc/2016/2255/0/07781977",
"title": "Indoor SLAM for Micro Aerial Vehicles Control Using Monocular Camera and Sensor Fusion",
"doi": null,
"abstractUrl": "/proceedings-article/icarsc/2016/07781977/12OmNCfjesr",
"parentPublication": {
"id": "proceedings/icarsc/2016/2255/0",
"title": "2016 International Conference on Autonomous Robot Systems and Competitions (ICARSC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/case/2012/0430/0/06386480",
"title": "Fast randomized planner for SLAM automation",
"doi": null,
"abstractUrl": "/proceedings-article/case/2012/06386480/12OmNqzu6R9",
"parentPublication": {
"id": "proceedings/case/2012/0430/0",
"title": "2012 IEEE International Conference on Automation Science and Engineering (CASE 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/irc/2019/9245/0/924500a602",
"title": "A Review of SLAM Techniques and Security in Autonomous Driving",
"doi": null,
"abstractUrl": "/proceedings-article/irc/2019/924500a602/18M7gCQ0uas",
"parentPublication": {
"id": "proceedings/irc/2019/9245/0",
"title": "2019 Third IEEE International Conference on Robotic Computing (IRC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600m2776",
"title": "NICE-SLAM: Neural Implicit Scalable Encoding for SLAM",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600m2776/1H1jhZSaE0M",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/asap/2019/1601/0/160100a083",
"title": "FPGA Architectures for Real-time Dense SLAM",
"doi": null,
"abstractUrl": "/proceedings-article/asap/2019/160100a083/1d5kDsB2fdu",
"parentPublication": {
"id": "proceedings/asap/2019/1601/2160-052X",
"title": "2019 IEEE 30th International Conference on Application-specific Systems, Architectures and Processors (ASAP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2019/3293/0/329300a134",
"title": "BAD SLAM: Bundle Adjusted Direct RGB-D SLAM",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2019/329300a134/1gyr8GIX9E4",
"parentPublication": {
"id": "proceedings/cvpr/2019/3293/0",
"title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cis/2019/6092/0/609200a301",
"title": "A Hybrid Loop Closure Detection Method Based on Lidar SLAM",
"doi": null,
"abstractUrl": "/proceedings-article/cis/2019/609200a301/1i5m22oSNOg",
"parentPublication": {
"id": "proceedings/cis/2019/6092/0",
"title": "2019 15th International Conference on Computational Intelligence and Security (CIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2021/8808/0/09412466",
"title": "AV-SLAM: Autonomous Vehicle SLAM with Gravity Direction Initialization",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2021/09412466/1tmj8Cfx70Y",
"parentPublication": {
"id": "proceedings/icpr/2021/8808/0",
"title": "2020 25th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900c814",
"title": "Differentiable SLAM-net: Learning Particle SLAM for Visual Navigation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900c814/1yeIHPaF1WU",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1gyr6w5YIIU",
"title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"acronym": "cvpr",
"groupId": "1000147",
"volume": "0",
"displayVolume": "0",
"year": "2019",
"__typename": "ProceedingType"
},
"article": {
"id": "1gyr8GIX9E4",
"doi": "10.1109/CVPR.2019.00022",
"title": "BAD SLAM: Bundle Adjusted Direct RGB-D SLAM",
"normalizedTitle": "BAD SLAM: Bundle Adjusted Direct RGB-D SLAM",
"abstract": "A key component of Simultaneous Localization and Mapping (SLAM) systems is the joint optimization of the estimated 3D map and camera trajectory. Bundle adjustment (BA) is the gold standard for this. Due to the large number of variables in dense RGB-D SLAM, previous work has focused on approximating BA. In contrast, in this paper we present a novel, fast direct BA formulation which we implement in a real-time dense RGB-D SLAM algorithm. In addition, we show that direct RGB-D SLAM systems are highly sensitive to rolling shutter, RGB and depth sensor synchronization, and calibration errors. In order to facilitate state-of-the-art research on direct RGB-D SLAM, we propose a novel, well-calibrated benchmark for this task that uses synchronized global shutter RGB and depth cameras. It includes a training set, a test set without public ground truth, and an online evaluation service. We observe that the ranking of methods changes on this dataset compared to existing ones, and our proposed algorithm outperforms all other evaluated SLAM methods. Our benchmark and our open source SLAM algorithm are available at: www.eth3d.net",
"abstracts": [
{
"abstractType": "Regular",
"content": "A key component of Simultaneous Localization and Mapping (SLAM) systems is the joint optimization of the estimated 3D map and camera trajectory. Bundle adjustment (BA) is the gold standard for this. Due to the large number of variables in dense RGB-D SLAM, previous work has focused on approximating BA. In contrast, in this paper we present a novel, fast direct BA formulation which we implement in a real-time dense RGB-D SLAM algorithm. In addition, we show that direct RGB-D SLAM systems are highly sensitive to rolling shutter, RGB and depth sensor synchronization, and calibration errors. In order to facilitate state-of-the-art research on direct RGB-D SLAM, we propose a novel, well-calibrated benchmark for this task that uses synchronized global shutter RGB and depth cameras. It includes a training set, a test set without public ground truth, and an online evaluation service. We observe that the ranking of methods changes on this dataset compared to existing ones, and our proposed algorithm outperforms all other evaluated SLAM methods. Our benchmark and our open source SLAM algorithm are available at: www.eth3d.net",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "A key component of Simultaneous Localization and Mapping (SLAM) systems is the joint optimization of the estimated 3D map and camera trajectory. Bundle adjustment (BA) is the gold standard for this. Due to the large number of variables in dense RGB-D SLAM, previous work has focused on approximating BA. In contrast, in this paper we present a novel, fast direct BA formulation which we implement in a real-time dense RGB-D SLAM algorithm. In addition, we show that direct RGB-D SLAM systems are highly sensitive to rolling shutter, RGB and depth sensor synchronization, and calibration errors. In order to facilitate state-of-the-art research on direct RGB-D SLAM, we propose a novel, well-calibrated benchmark for this task that uses synchronized global shutter RGB and depth cameras. It includes a training set, a test set without public ground truth, and an online evaluation service. We observe that the ranking of methods changes on this dataset compared to existing ones, and our proposed algorithm outperforms all other evaluated SLAM methods. Our benchmark and our open source SLAM algorithm are available at: www.eth3d.net",
"fno": "329300a134",
"keywords": [
"Calibration",
"Cameras",
"Image Colour Analysis",
"Mobile Robots",
"Optimisation",
"Robot Vision",
"SLAM Robots",
"Simultaneous Localization And Mapping System",
"Joint Optimization",
"Bundle Adjustment",
"Fast Direct BA Formulation",
"Real Time Dense RGB D SLAM Algorithm",
"Direct RGB D SLAM Systems",
"Rolling Shutter",
"Depth Sensor Synchronization",
"Global Shutter RGB",
"Depth Cameras",
"Open Source SLAM Algorithm",
"BAD SLAM",
"Bundle Adjusted Direct RGB D SLAM",
"Training",
"Simultaneous Localization And Mapping",
"Benchmark Testing",
"Cameras",
"Real Time Systems",
"Trajectory",
"Synchronization",
"3 D From Multiview And Sensors",
"Datasets And Evaluation",
"RGBD Sensors And Analytics"
],
"authors": [
{
"affiliation": "ETH Zurich",
"fullName": "Thomas Schöps",
"givenName": "Thomas",
"surname": "Schöps",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Chalmers Univ. of Technology",
"fullName": "Torsten Sattler",
"givenName": "Torsten",
"surname": "Sattler",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "ETH Zurich / Microsoft",
"fullName": "Marc Pollefeys",
"givenName": "Marc",
"surname": "Pollefeys",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2019-06-01T00:00:00",
"pubType": "proceedings",
"pages": "134-144",
"year": "2019",
"issn": null,
"isbn": "978-1-7281-3293-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "329300a124",
"articleId": "1gys5jEqTeM",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "329300a145",
"articleId": "1gyrT1dzUOI",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icarsc/2016/2255/0/07781972",
"title": "Histogram Based Visual Place Recognition for Improving SLAM Performance",
"doi": null,
"abstractUrl": "/proceedings-article/icarsc/2016/07781972/12OmNC1Y5kW",
"parentPublication": {
"id": "proceedings/icarsc/2016/2255/0",
"title": "2016 International Conference on Autonomous Robot Systems and Competitions (ICARSC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2015/8332/0/8332a452",
"title": "Planes Detection for Robust Localization and Mapping in RGB-D SLAM Systems",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2015/8332a452/12OmNqH9hdY",
"parentPublication": {
"id": "proceedings/3dv/2015/8332/0",
"title": "2015 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sbr-lars-r/2016/3656/0/07783496",
"title": "Object Subtraction Planar RGB-D SLAM",
"doi": null,
"abstractUrl": "/proceedings-article/sbr-lars-r/2016/07783496/12OmNxwENic",
"parentPublication": {
"id": "proceedings/sbr-lars-r/2016/3656/0",
"title": "2016 XIII Latin-American Robotics Symposium and IV Brazilian Robotics Symposium (LARS/SBR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ipsn/2017/4890/0/07944826",
"title": "Poster Abstract: Improving RGB-D SLAM Using Wi-Fi",
"doi": null,
"abstractUrl": "/proceedings-article/ipsn/2017/07944826/12OmNxymodC",
"parentPublication": {
"id": "proceedings/ipsn/2017/4890/0",
"title": "2017 16th ACM/IEEE International Conference on Information Processing in Sensor Networks (IPSN)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2016/0641/0/07477636",
"title": "CoRBS: Comprehensive RGB-D benchmark for SLAM using Kinect v2",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2016/07477636/12OmNzsJ7Hx",
"parentPublication": {
"id": "proceedings/wacv/2016/0641/0",
"title": "2016 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cse-euc/2017/3220/1/08005867",
"title": "Processed RGB-D Slam Using Open-Source Software",
"doi": null,
"abstractUrl": "/proceedings-article/cse-euc/2017/08005867/17D45XfSEUE",
"parentPublication": {
"id": "proceedings/cse-euc/2017/3220/1",
"title": "2017 IEEE International Conference on Computational Science and Engineering (CSE) and IEEE International Conference on Embedded and Ubiquitous Computing (EUC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/irc/2019/9245/0/924500a110",
"title": "HRPSlam: A Benchmark for RGB-D Dynamic SLAM and Humanoid Vision",
"doi": null,
"abstractUrl": "/proceedings-article/irc/2019/924500a110/18M7hZvA1JC",
"parentPublication": {
"id": "proceedings/irc/2019/9245/0",
"title": "2019 Third IEEE International Conference on Robotic Computing (IRC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hdis/2022/9144/0/09991394",
"title": "Pseudo Depth Maps for RGB-D SLAM",
"doi": null,
"abstractUrl": "/proceedings-article/hdis/2022/09991394/1JwQ1uhFF4s",
"parentPublication": {
"id": "proceedings/hdis/2022/9144/0",
"title": "2022 International Conference on High Performance Big Data and Intelligent Systems (HDIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2022/11/09521742",
"title": "Linear RGB-D SLAM for Structured Environments",
"doi": null,
"abstractUrl": "/journal/tp/2022/11/09521742/1wkrmZrcdcQ",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icwcsg/2021/2598/0/259800a327",
"title": "Visual SLAM algorithm based on RGB-D",
"doi": null,
"abstractUrl": "/proceedings-article/icwcsg/2021/259800a327/1yQB8ogDPRm",
"parentPublication": {
"id": "proceedings/icwcsg/2021/2598/0",
"title": "2021 International Conference on Wireless Communications and Smart Grid (ICWCSG)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1hQqfuoOyHu",
"title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)",
"acronym": "iccv",
"groupId": "1000149",
"volume": "0",
"displayVolume": "0",
"year": "2019",
"__typename": "ProceedingType"
},
"article": {
"id": "1hVlbuejvUI",
"doi": "10.1109/ICCV.2019.00595",
"title": "Learning Meshes for Dense Visual SLAM",
"normalizedTitle": "Learning Meshes for Dense Visual SLAM",
"abstract": "Estimating motion and surrounding geometry of a moving camera remains a challenging inference problem. From an information theoretic point of view, estimates should get better as more information is included, such as is done in dense SLAM, but this is strongly dependent on the validity of the underlying models. In the present paper, we use triangular meshes as both compact and dense geometry representation. To allow for simple and fast usage, we propose a view-based formulation for which we predict the in-plane vertex coordinates directly from images and then employ the remaining vertex depth components as free variables. Flexible and continuous integration of information is achieved through the use of a residual based inference technique. This so-called factor graph encodes all information as mapping from free variables to residuals, the squared sum of which is minimised during inference. We propose the use of different types of learnable residuals, which are trained end-to-end to increase their suitability as information bearing models and to enable accurate and reliable estimation. Detailed evaluation of all components is provided on both synthetic and real data which confirms the practicability of the presented approach.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Estimating motion and surrounding geometry of a moving camera remains a challenging inference problem. From an information theoretic point of view, estimates should get better as more information is included, such as is done in dense SLAM, but this is strongly dependent on the validity of the underlying models. In the present paper, we use triangular meshes as both compact and dense geometry representation. To allow for simple and fast usage, we propose a view-based formulation for which we predict the in-plane vertex coordinates directly from images and then employ the remaining vertex depth components as free variables. Flexible and continuous integration of information is achieved through the use of a residual based inference technique. This so-called factor graph encodes all information as mapping from free variables to residuals, the squared sum of which is minimised during inference. We propose the use of different types of learnable residuals, which are trained end-to-end to increase their suitability as information bearing models and to enable accurate and reliable estimation. Detailed evaluation of all components is provided on both synthetic and real data which confirms the practicability of the presented approach.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Estimating motion and surrounding geometry of a moving camera remains a challenging inference problem. From an information theoretic point of view, estimates should get better as more information is included, such as is done in dense SLAM, but this is strongly dependent on the validity of the underlying models. In the present paper, we use triangular meshes as both compact and dense geometry representation. To allow for simple and fast usage, we propose a view-based formulation for which we predict the in-plane vertex coordinates directly from images and then employ the remaining vertex depth components as free variables. Flexible and continuous integration of information is achieved through the use of a residual based inference technique. This so-called factor graph encodes all information as mapping from free variables to residuals, the squared sum of which is minimised during inference. We propose the use of different types of learnable residuals, which are trained end-to-end to increase their suitability as information bearing models and to enable accurate and reliable estimation. Detailed evaluation of all components is provided on both synthetic and real data which confirms the practicability of the presented approach.",
"fno": "480300f854",
"keywords": [
"Geometry",
"Graph Theory",
"Inference Mechanisms",
"Motion Estimation",
"SLAM Robots",
"Flexible Integration",
"Information Integration",
"Residual Based Inference Technique",
"Factor Graph",
"Squared Sum",
"Learnable Residuals",
"Moving Camera",
"Inference Problem",
"Dense SLAM",
"Underlying Models",
"Triangular Meshes",
"Geometry Representation",
"View Based Formulation",
"In Plane Vertex Coordinates",
"Mesh Learning",
"Dense Visual SLAM",
"Motion Estimation",
"Vertex Depth Components",
"Simultaneous Localization And Mapping",
"Geometry",
"Optimization",
"Visualization",
"Three Dimensional Displays",
"Neural Networks",
"Computational Modeling"
],
"authors": [
{
"affiliation": "Deepmind",
"fullName": "Michael Bloesch",
"givenName": "Michael",
"surname": "Bloesch",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Imperial College London",
"fullName": "Tristan Laidlow",
"givenName": "Tristan",
"surname": "Laidlow",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Imperial College London",
"fullName": "Ronald Clark",
"givenName": "Ronald",
"surname": "Clark",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Imperial College London",
"fullName": "Stefan Leutenegger",
"givenName": "Stefan",
"surname": "Leutenegger",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Imperial College London",
"fullName": "Andrew Davison",
"givenName": "Andrew",
"surname": "Davison",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iccv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2019-10-01T00:00:00",
"pubType": "proceedings",
"pages": "5854-5863",
"year": "2019",
"issn": null,
"isbn": "978-1-7281-4803-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "480300f844",
"articleId": "1hQqwUi1kn6",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "480300f864",
"articleId": "1hQqoQaNkBy",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ismar/2016/3641/0/3641a018",
"title": "σ-DVO: Sensor Noise Model Meets Dense Visual Odometry",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2016/3641a018/12OmNCwUmxA",
"parentPublication": {
"id": "proceedings/ismar/2016/3641/0",
"title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2014/6184/0/06948422",
"title": "Dense planar SLAM",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2014/06948422/12OmNx7G661",
"parentPublication": {
"id": "proceedings/ismar/2014/6184/0",
"title": "2014 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2017/0457/0/0457g565",
"title": "CNN-SLAM: Real-Time Dense Monocular SLAM with Learned Depth Prediction",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2017/0457g565/12OmNzTH0Qa",
"parentPublication": {
"id": "proceedings/cvpr/2017/0457/0",
"title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2017/2610/0/261001a185",
"title": "Relative Camera Refinement for Accurate Dense Reconstruction",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2017/261001a185/12OmNzmtWvT",
"parentPublication": {
"id": "proceedings/3dv/2017/2610/0",
"title": "2017 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/11/08007295",
"title": "Dense Visual SLAM with Probabilistic Surfel Map",
"doi": null,
"abstractUrl": "/journal/tg/2017/11/08007295/13rRUNvgz4n",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2018/6420/0/642000c560",
"title": "CodeSLAM - Learning a Compact, Optimisable Representation for Dense Visual SLAM",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2018/642000c560/17D45VUZMVf",
"parentPublication": {
"id": "proceedings/cvpr/2018/6420/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600m2776",
"title": "NICE-SLAM: Neural Implicit Scalable Encoding for SLAM",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600m2776/1H1jhZSaE0M",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2023/9346/0/934600d096",
"title": "Probabilistic Volumetric Fusion for Dense Monocular SLAM",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2023/934600d096/1L8qEHGGTlu",
"parentPublication": {
"id": "proceedings/wacv/2023/9346/0",
"title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/asap/2019/1601/0/160100a083",
"title": "FPGA Architectures for Real-time Dense SLAM",
"doi": null,
"abstractUrl": "/proceedings-article/asap/2019/160100a083/1d5kDsB2fdu",
"parentPublication": {
"id": "proceedings/asap/2019/1601/2160-052X",
"title": "2019 IEEE 30th International Conference on Application-specific Systems, Architectures and Processors (ASAP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvrv/2020/0497/0/049700a101",
"title": "Improved ORB-SLAM Based 3D Dense Reconstruction for Monocular Endoscopic Image",
"doi": null,
"abstractUrl": "/proceedings-article/icvrv/2020/049700a101/1vg8aYQPZi8",
"parentPublication": {
"id": "proceedings/icvrv/2020/0497/0",
"title": "2020 International Conference on Virtual Reality and Visualization (ICVRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1yQB741Nims",
"title": "2021 International Conference on Wireless Communications and Smart Grid (ICWCSG)",
"acronym": "icwcsg",
"groupId": "1837684",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1yQB8ogDPRm",
"doi": "10.1109/ICWCSG53609.2021.00071",
"title": "Visual SLAM algorithm based on RGB-D",
"normalizedTitle": "Visual SLAM algorithm based on RGB-D",
"abstract": "Aiming at the problem of poor real-time performance and low accuracy in visual simultaneous localization and mapping (SLAM), a visual SLAM algorithm based on RGB-D is designed. Firstly, it uses the improved oriented fast rotated brief (ORB) algorithm to extract feature points of RGB image; Secondly, the random sample consensus (RANSAC) method is employed for feature matching, and the iterative closest point (ICP) algorithm is applied to estimate camera pose according to the matched point pairs. In order to make localization more accurate, the closed-loop detection is added to reduce the cumulative error in localization process, and the general graph optimization(G20) is utilized to optimize the estimated camera pose graph to obtain the best global camera pose and motion trajectory; Finally, a 3D dense point cloud map is constructed. For FR1 data sets, the average root mean square error (RMSE) is 0.043m, the minimum RMSE is 0.029m, and the average image processing time per frame is 0.034s, which meets the requirements of visual SLAM.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Aiming at the problem of poor real-time performance and low accuracy in visual simultaneous localization and mapping (SLAM), a visual SLAM algorithm based on RGB-D is designed. Firstly, it uses the improved oriented fast rotated brief (ORB) algorithm to extract feature points of RGB image; Secondly, the random sample consensus (RANSAC) method is employed for feature matching, and the iterative closest point (ICP) algorithm is applied to estimate camera pose according to the matched point pairs. In order to make localization more accurate, the closed-loop detection is added to reduce the cumulative error in localization process, and the general graph optimization(G20) is utilized to optimize the estimated camera pose graph to obtain the best global camera pose and motion trajectory; Finally, a 3D dense point cloud map is constructed. For FR1 data sets, the average root mean square error (RMSE) is 0.043m, the minimum RMSE is 0.029m, and the average image processing time per frame is 0.034s, which meets the requirements of visual SLAM.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Aiming at the problem of poor real-time performance and low accuracy in visual simultaneous localization and mapping (SLAM), a visual SLAM algorithm based on RGB-D is designed. Firstly, it uses the improved oriented fast rotated brief (ORB) algorithm to extract feature points of RGB image; Secondly, the random sample consensus (RANSAC) method is employed for feature matching, and the iterative closest point (ICP) algorithm is applied to estimate camera pose according to the matched point pairs. In order to make localization more accurate, the closed-loop detection is added to reduce the cumulative error in localization process, and the general graph optimization(G20) is utilized to optimize the estimated camera pose graph to obtain the best global camera pose and motion trajectory; Finally, a 3D dense point cloud map is constructed. For FR1 data sets, the average root mean square error (RMSE) is 0.043m, the minimum RMSE is 0.029m, and the average image processing time per frame is 0.034s, which meets the requirements of visual SLAM.",
"fno": "259800a327",
"keywords": [
"Cameras",
"Chemical Technology",
"Feature Extraction",
"Image Colour Analysis",
"Image Matching",
"Image Registration",
"Image Sensors",
"Iterative Methods",
"Mean Square Error Methods",
"Mobile Robots",
"Pose Estimation",
"Robot Vision",
"SLAM Robots",
"Visual SLAM Algorithm",
"RGB D",
"Real Time Performance",
"Visual Simultaneous Localization",
"Improved Oriented Fast Rotated Brief Algorithm",
"Feature Points",
"RGB Image",
"Random Sample Consensus Method",
"Feature Matching",
"Iterative Closest Point Algorithm",
"Matched Point Pairs",
"Localization Process",
"General Graph Optimization",
"Estimated Camera",
"Global Camera Pose",
"3 D Dense Point Cloud Map",
"Average Image Processing Time",
"Size 0 043 M",
"Size 0 029 M",
"Time 0 034 S",
"Location Awareness",
"Wireless Communication",
"Visualization",
"Simultaneous Localization And Mapping",
"Three Dimensional Displays",
"Feature Extraction",
"Cameras",
"RGB D",
"Visual SLAM",
"Feature Extraction",
"Pose Estimation"
],
"authors": [
{
"affiliation": "Lanzhou JiaoTong University,School of Electronic and Information Engineering,Lanzhou,China",
"fullName": "Dong Shen",
"givenName": "Dong",
"surname": "Shen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Lanzhou JiaoTong University,School of Electronic and Information Engineering,Lanzhou,China",
"fullName": "Haoyu Fang",
"givenName": "Haoyu",
"surname": "Fang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Lanzhou JiaoTong University,School of Electronic and Information Engineering,Lanzhou,China",
"fullName": "Jiale Liu",
"givenName": "Jiale",
"surname": "Liu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Lanzhou JiaoTong University,School of Electronic and Information Engineering,Lanzhou,China",
"fullName": "Sheng Guo",
"givenName": "Sheng",
"surname": "Guo",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icwcsg",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-08-01T00:00:00",
"pubType": "proceedings",
"pages": "327-330",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-2598-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "259800a321",
"articleId": "1yQBdR9HWYU",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "259800a331",
"articleId": "1yQBmLmTvQA",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/sibgrapi/2014/4258/0/4258a227",
"title": "A Fast Feature Tracking Algorithm for Visual Odometry and Mapping Based on RGB-D Sensors",
"doi": null,
"abstractUrl": "/proceedings-article/sibgrapi/2014/4258a227/12OmNxbEtOb",
"parentPublication": {
"id": "proceedings/sibgrapi/2014/4258/0",
"title": "2014 27th SIBGRAPI Conference on Graphics, Patterns and Images (SIBGRAPI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2015/9711/0/5720a148",
"title": "Fusion of Inertial and Visual Measurements for RGB-D SLAM on Mobile Devices",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2015/5720a148/12OmNy7h36Q",
"parentPublication": {
"id": "proceedings/iccvw/2015/9711/0",
"title": "2015 IEEE International Conference on Computer Vision Workshop (ICCVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tm/5555/01/09866716",
"title": "Edge Assisted Mobile Semantic Visual SLAM",
"doi": null,
"abstractUrl": "/journal/tm/5555/01/09866716/1G6fkNkibbq",
"parentPublication": {
"id": "trans/tm",
"title": "IEEE Transactions on Mobile Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdiime/2022/9009/0/900900a157",
"title": "Research of Visual SLAM in Dynamic Environment using Convolutional Neural Network",
"doi": null,
"abstractUrl": "/proceedings-article/icdiime/2022/900900a157/1Iz536ZwFBm",
"parentPublication": {
"id": "proceedings/icdiime/2022/9009/0",
"title": "2022 International Conference on 3D Immersion, Interaction and Multi-sensory Experiences (ICDIIME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2022/5325/0/532500a720",
"title": "OA-SLAM: Leveraging Objects for Camera Relocalization in Visual SLAM",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2022/532500a720/1JrRdfQyove",
"parentPublication": {
"id": "proceedings/ismar/2022/5325/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hdis/2022/9144/0/09991394",
"title": "Pseudo Depth Maps for RGB-D SLAM",
"doi": null,
"abstractUrl": "/proceedings-article/hdis/2022/09991394/1JwQ1uhFF4s",
"parentPublication": {
"id": "proceedings/hdis/2022/9144/0",
"title": "2022 International Conference on High Performance Big Data and Intelligent Systems (HDIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2019/3293/0/329300a134",
"title": "BAD SLAM: Bundle Adjusted Direct RGB-D SLAM",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2019/329300a134/1gyr8GIX9E4",
"parentPublication": {
"id": "proceedings/cvpr/2019/3293/0",
"title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2020/8128/0/812800a959",
"title": "FC-vSLAM: Integrating Feature Credibility in Visual SLAM",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2020/812800a959/1qyxjlMBUYw",
"parentPublication": {
"id": "proceedings/3dv/2020/8128/0",
"title": "2020 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icoias/2021/4195/0/419500a169",
"title": "Dynamic Objects Recognizing and Masking for RGB-D SLAM",
"doi": null,
"abstractUrl": "/proceedings-article/icoias/2021/419500a169/1wG6epjTGTK",
"parentPublication": {
"id": "proceedings/icoias/2021/4195/0",
"title": "2021 4th International Conference on Intelligent Autonomous Systems (ICoIAS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2022/11/09521742",
"title": "Linear RGB-D SLAM for Structured Environments",
"doi": null,
"abstractUrl": "/journal/tp/2022/11/09521742/1wkrmZrcdcQ",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNxE2mWh",
"title": "2013 IEEE Virtual Reality (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2013",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNAgGwdn",
"doi": "10.1109/VR.2013.6549351",
"title": "An advanced interaction framework for augmented reality based exposure treatment",
"normalizedTitle": "An advanced interaction framework for augmented reality based exposure treatment",
"abstract": "In this paper we present a novel interaction framework for augmented reality, and demonstrate its application in an interactive AR exposure treatment system for the fear of spiders. We use data from the Microsoft Kinect to track and model real world objects in the AR environment, enabling realistic interaction between them and virtual content. Objects are tracked in three dimensions using the Iterative Closest Point algorithm and a point cloud model of the objects is incrementally developed. The approximate motion and shape of each object in the scene serve as inputs to the AR application. Very few restrictions are placed on the types of objects that can be used. In particular, we do not require objects to be marked in a certain way in order to be recognized, facilitating natural interaction. To demonstrate our interaction framework we present an AR exposure treatment system where virtual spiders can walk up, around, or behind real objects and can be carried, prodded and occluded by the user. We also discuss improvements we are making to the interaction framework and its potential for use in other applications.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this paper we present a novel interaction framework for augmented reality, and demonstrate its application in an interactive AR exposure treatment system for the fear of spiders. We use data from the Microsoft Kinect to track and model real world objects in the AR environment, enabling realistic interaction between them and virtual content. Objects are tracked in three dimensions using the Iterative Closest Point algorithm and a point cloud model of the objects is incrementally developed. The approximate motion and shape of each object in the scene serve as inputs to the AR application. Very few restrictions are placed on the types of objects that can be used. In particular, we do not require objects to be marked in a certain way in order to be recognized, facilitating natural interaction. To demonstrate our interaction framework we present an AR exposure treatment system where virtual spiders can walk up, around, or behind real objects and can be carried, prodded and occluded by the user. We also discuss improvements we are making to the interaction framework and its potential for use in other applications.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this paper we present a novel interaction framework for augmented reality, and demonstrate its application in an interactive AR exposure treatment system for the fear of spiders. We use data from the Microsoft Kinect to track and model real world objects in the AR environment, enabling realistic interaction between them and virtual content. Objects are tracked in three dimensions using the Iterative Closest Point algorithm and a point cloud model of the objects is incrementally developed. The approximate motion and shape of each object in the scene serve as inputs to the AR application. Very few restrictions are placed on the types of objects that can be used. In particular, we do not require objects to be marked in a certain way in order to be recognized, facilitating natural interaction. To demonstrate our interaction framework we present an AR exposure treatment system where virtual spiders can walk up, around, or behind real objects and can be carried, prodded and occluded by the user. We also discuss improvements we are making to the interaction framework and its potential for use in other applications.",
"fno": "06549351",
"keywords": [
"Augmented Reality",
"Three Dimensional Displays",
"Iterative Closest Point Algorithm",
"Tracking",
"Cameras",
"Real Time Systems",
"Computational Modeling",
"Exposure Treatment",
"Augmented Reality",
"3 D Interaction",
"Kinect",
"Environment Awareness"
],
"authors": [
{
"affiliation": "Human Interface Technol. Lab. NZ, Univ. of Canterbury, Christchurch, New Zealand",
"fullName": "Sam Corbett-Davies",
"givenName": "Sam",
"surname": "Corbett-Davies",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Human Interface Technol. Lab. NZ, Univ. of Canterbury, Christchurch, New Zealand",
"fullName": "Andreas Dunser",
"givenName": "Andreas",
"surname": "Dunser",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Dept. of Comput. Sci., Univ. of Canterbury, Christchurch, New Zealand",
"fullName": "Richard Green",
"givenName": "Richard",
"surname": "Green",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Human Interface Technol. Lab. NZ, Univ. of Canterbury, Christchurch, New Zealand",
"fullName": "Adrian Clark",
"givenName": "Adrian",
"surname": "Clark",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2013-03-01T00:00:00",
"pubType": "proceedings",
"pages": "19-22",
"year": "2013",
"issn": "1087-8270",
"isbn": "978-1-4673-4795-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "06549350",
"articleId": "12OmNzb7Znq",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "06549352",
"articleId": "12OmNzmclA3",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ismar/2013/2869/0/06671841",
"title": "Markerless 3D gesture-based interaction for handheld Augmented Reality interfaces",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2013/06671841/12OmNAIvcZU",
"parentPublication": {
"id": "proceedings/ismar/2013/2869/0",
"title": "2013 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cgiv/2009/3789/0/3789a153",
"title": "An Implementation Review of Occlusion-Based Interaction in Augmented Reality Environment",
"doi": null,
"abstractUrl": "/proceedings-article/cgiv/2009/3789a153/12OmNB7cjly",
"parentPublication": {
"id": "proceedings/cgiv/2009/3789/0",
"title": "2009 Sixth International Conference on Computer Graphics, Imaging and Visualization",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2015/7660/0/7660a084",
"title": "[POSTER] Natural 3D Interaction Using a See-Through Mobile AR System",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2015/7660a084/12OmNCcbE15",
"parentPublication": {
"id": "proceedings/ismar/2015/7660/0",
"title": "2015 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icalt/2013/5009/0/5009a439",
"title": "VECAR: Virtual English Classroom with Markerless Augmented Reality and Intuitive Gesture Interaction",
"doi": null,
"abstractUrl": "/proceedings-article/icalt/2013/5009a439/12OmNwJPMVw",
"parentPublication": {
"id": "proceedings/icalt/2013/5009/0",
"title": "2013 IEEE 13th International Conference on Advanced Learning Technologies (ICALT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ism/2009/3890/0/3890a187",
"title": "Dual Face Interaction in Handheld Augmented Reality Environments",
"doi": null,
"abstractUrl": "/proceedings-article/ism/2009/3890a187/12OmNxGj9VX",
"parentPublication": {
"id": "proceedings/ism/2009/3890/0",
"title": "2009 11th IEEE International Symposium on Multimedia",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-amh/2012/4663/0/06483998",
"title": "An interactive Augmented Reality system for exposure treatment",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-amh/2012/06483998/12OmNzWfp9s",
"parentPublication": {
"id": "proceedings/ismar-amh/2012/4663/0",
"title": "2012 IEEE International Symposium on Mixed and Augmented Reality - Arts, Media, and Humanities (ISMAR-AMH)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2016/0836/0/07504721",
"title": "Space-sharing AR interaction on multiple mobile devices with a depth camera",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2016/07504721/12OmNzZ5ogy",
"parentPublication": {
"id": "proceedings/vr/2016/0836/0",
"title": "2016 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2005/06/mcg2005060031",
"title": "Using Augmented Reality to Treat Phobias",
"doi": null,
"abstractUrl": "/magazine/cg/2005/06/mcg2005060031/13rRUzpzeEi",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2018/7592/0/08699326",
"title": "Evaluation of Direct Manipulation Methods in Augmented Reality Environments Using Google Glass",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699326/19F1Oa8ukP6",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2018/7592/0",
"title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2020/7675/0/767500a306",
"title": "Augmented Reality Narratives for Post-Traumatic Stress Disorder Treatment",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2020/767500a306/1pBMfCveZaw",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2020/7675/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNAsTgXc",
"title": "2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)",
"acronym": "iccvw",
"groupId": "1800041",
"volume": "0",
"displayVolume": "0",
"year": "2011",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNqIzh3f",
"doi": "10.1109/ICCVW.2011.6130357",
"title": "Real-time multi-person tracking with detector assisted structure propagation",
"normalizedTitle": "Real-time multi-person tracking with detector assisted structure propagation",
"abstract": "Classical tracking-by-detection approaches require a robust object detector that needs to be executed in each frame. However the detector is typically the most computationally expensive component, especially if more than one object class needs to be detected. In this paper we investigate how the usage of the object detector can be reduced by using stereo range data for following detected objects over time. To this end we propose a hybrid tracking framework consisting of a stereo based ICP (Iterative Closest Point) tracker and a high-level multi-hypothesis tracker. Initiated by a detector response, the ICP tracker follows individual pedestrians over time using just the raw depth information. Its output is then fed into the high-level tracker that is responsible for solving long-term data association and occlusion handling. In addition, we propose to constrain the detector to run only on some small regions of interest (ROIs) that are extracted from a 3D depth based occupancy map of the scene. The ROIs are tracked over time and only newly appearing ROIs are evaluated by the detector. We present experiments on real stereo sequences recorded from a moving camera setup in urban scenarios and show that our proposed approach achieves state of the art performance.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Classical tracking-by-detection approaches require a robust object detector that needs to be executed in each frame. However the detector is typically the most computationally expensive component, especially if more than one object class needs to be detected. In this paper we investigate how the usage of the object detector can be reduced by using stereo range data for following detected objects over time. To this end we propose a hybrid tracking framework consisting of a stereo based ICP (Iterative Closest Point) tracker and a high-level multi-hypothesis tracker. Initiated by a detector response, the ICP tracker follows individual pedestrians over time using just the raw depth information. Its output is then fed into the high-level tracker that is responsible for solving long-term data association and occlusion handling. In addition, we propose to constrain the detector to run only on some small regions of interest (ROIs) that are extracted from a 3D depth based occupancy map of the scene. The ROIs are tracked over time and only newly appearing ROIs are evaluated by the detector. We present experiments on real stereo sequences recorded from a moving camera setup in urban scenarios and show that our proposed approach achieves state of the art performance.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Classical tracking-by-detection approaches require a robust object detector that needs to be executed in each frame. However the detector is typically the most computationally expensive component, especially if more than one object class needs to be detected. In this paper we investigate how the usage of the object detector can be reduced by using stereo range data for following detected objects over time. To this end we propose a hybrid tracking framework consisting of a stereo based ICP (Iterative Closest Point) tracker and a high-level multi-hypothesis tracker. Initiated by a detector response, the ICP tracker follows individual pedestrians over time using just the raw depth information. Its output is then fed into the high-level tracker that is responsible for solving long-term data association and occlusion handling. In addition, we propose to constrain the detector to run only on some small regions of interest (ROIs) that are extracted from a 3D depth based occupancy map of the scene. The ROIs are tracked over time and only newly appearing ROIs are evaluated by the detector. We present experiments on real stereo sequences recorded from a moving camera setup in urban scenarios and show that our proposed approach achieves state of the art performance.",
"fno": "06130357",
"keywords": [
"Feature Extraction",
"Hidden Feature Removal",
"Image Sequences",
"Iterative Methods",
"Object Detection",
"Stereo Image Processing",
"Multiperson Tracking",
"Detector Assisted Structure Propagation",
"Tracking By Detection Approach",
"Object Detector",
"Stereo Range Data",
"Stereo Based ICP Tracker",
"Iterative Closest Point",
"Multihypothesis Tracker",
"Data Association",
"Occlusion Handling",
"Regions Of Interest Extraction",
"3 D Depth Based Occupancy Map",
"Stereo Sequence",
"Detectors",
"Iterative Closest Point Algorithm",
"Trajectory",
"Three Dimensional Displays",
"Tracking",
"Cameras",
"Kalman Filters"
],
"authors": [
{
"affiliation": "UMIC Research Centre, RWTH Aachen University, Germany",
"fullName": "Dennis Mitzel",
"givenName": "Dennis",
"surname": "Mitzel",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "UMIC Research Centre, RWTH Aachen University, Germany",
"fullName": "Bastian Leibe",
"givenName": "Bastian",
"surname": "Leibe",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iccvw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2011-11-01T00:00:00",
"pubType": "proceedings",
"pages": "974-981",
"year": "2011",
"issn": null,
"isbn": "978-1-4673-0063-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "06130356",
"articleId": "12OmNzYwceb",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "06130358",
"articleId": "12OmNyrqztk",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/hpcc/2016/4297/0/07828526",
"title": "Registration of Low Cost Maps within Large Scale MMS Maps",
"doi": null,
"abstractUrl": "/proceedings-article/hpcc/2016/07828526/12OmNApLGsF",
"parentPublication": {
"id": "proceedings/hpcc/2016/4297/0",
"title": "2016 IEEE 18th International Conference on High-Performance Computing and Communications, IEEE 14th International Conference on Smart City, and IEEE 2nd International Conference on Data Science and Systems (HPCC/SmartCity/DSS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2015/9711/0/5720a963",
"title": "Facial Landmark Tracking by Tree-Based Deformable Part Model Based Detector",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2015/5720a963/12OmNBDyAaQ",
"parentPublication": {
"id": "proceedings/iccvw/2015/9711/0",
"title": "2015 IEEE International Conference on Computer Vision Workshop (ICCVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmip/2017/5954/0/5954a058",
"title": "A Log-Polar Feature Guided Iterative Closest Point Method for Image Registration",
"doi": null,
"abstractUrl": "/proceedings-article/icmip/2017/5954a058/12OmNBvkdlJ",
"parentPublication": {
"id": "proceedings/icmip/2017/5954/0",
"title": "2017 2nd International Conference on Multimedia and Image Processing (ICMIP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2012/4711/0/4711a350",
"title": "Recognizing Occluded 3D Faces Using an Efficient ICP Variant",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2012/4711a350/12OmNqHqSAK",
"parentPublication": {
"id": "proceedings/icme/2012/4711/0",
"title": "2012 IEEE International Conference on Multimedia and Expo",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2008/2174/0/04761507",
"title": "Registration by using a pseudo color attribute",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2008/04761507/12OmNxjBfnz",
"parentPublication": {
"id": "proceedings/icpr/2008/2174/0",
"title": "ICPR 2008 19th International Conference on Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2009/4442/0/05457442",
"title": "A high speed iterative closest point tracker on an FPGA platform",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2009/05457442/12OmNy4r3Ob",
"parentPublication": {
"id": "proceedings/iccvw/2009/4442/0",
"title": "2009 IEEE 12th International Conference on Computer Vision Workshops, ICCV Workshops",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ecmsm/2015/6972/0/07208683",
"title": "FAST ICP-SLAM for a bi-steerable mobile robot in large environments",
"doi": null,
"abstractUrl": "/proceedings-article/ecmsm/2015/07208683/12OmNz2C1rh",
"parentPublication": {
"id": "proceedings/ecmsm/2015/6972/0",
"title": "2015 IEEE International Workshop of Electronics, Control, Measurement, Signals and their application to Mechatronics (ECMSM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2016/11/07368945",
"title": "Go-ICP: A Globally Optimal Solution to 3D ICP Point-Set Registration",
"doi": null,
"abstractUrl": "/journal/tp/2016/11/07368945/13rRUwfZC1L",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aiam/2021/1732/0/173200a407",
"title": "An Improved ICP Point Cloud Registration Algorithm Based on Three-Points Congruent Sets",
"doi": null,
"abstractUrl": "/proceedings-article/aiam/2021/173200a407/1BzTJDeh3Ms",
"parentPublication": {
"id": "proceedings/aiam/2021/1732/0",
"title": "2021 3rd International Conference on Artificial Intelligence and Advanced Manufacture (AIAM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2022/07/09336308",
"title": "Fast and Robust Iterative Closest Point",
"doi": null,
"abstractUrl": "/journal/tp/2022/07/09336308/1qHLSa3LUM8",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNy9Prj1",
"title": "2017 IEEE International Conference on Computer Vision Workshop (ICCVW)",
"acronym": "iccvw",
"groupId": "1800041",
"volume": "0",
"displayVolume": "0",
"year": "2017",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNrY3LA8",
"doi": "10.1109/ICCVW.2017.82",
"title": "Real-Time Hand Tracking Under Occlusion from an Egocentric RGB-D Sensor",
"normalizedTitle": "Real-Time Hand Tracking Under Occlusion from an Egocentric RGB-D Sensor",
"abstract": "We present an approach for real-time, robust and accurate hand pose estimation from moving egocentric RGB-D cameras in cluttered real environments. Existing methods typically fail for hand-object interactions in cluttered scenes imaged from egocentric viewpoints-common for virtual or augmented reality applications. Our approach uses two subsequently applied Convolutional Neural Networks (CNNs) to localize the hand and regress 3D joint locations. Hand localization is achieved by using a CNN to estimate the 2D position of the hand center in the input, even in the presence of clutter and occlusions. The localized hand position, together with the corresponding input depth value, is used to generate a normalized cropped image that is fed into a second CNN to regress relative 3D hand joint locations in real time. For added accuracy, robustness and temporal stability, we refine the pose estimates using a kinematic pose tracking energy. To train the CNNs, we introduce a new photorealistic dataset that uses a merged reality approach to capture and synthesize large amounts of annotated data of natural hand interaction in cluttered scenes. Through quantitative and qualitative evaluation, we show that our method is robust to self-occlusion and occlusions by objects, particularly in moving egocentric perspectives.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We present an approach for real-time, robust and accurate hand pose estimation from moving egocentric RGB-D cameras in cluttered real environments. Existing methods typically fail for hand-object interactions in cluttered scenes imaged from egocentric viewpoints-common for virtual or augmented reality applications. Our approach uses two subsequently applied Convolutional Neural Networks (CNNs) to localize the hand and regress 3D joint locations. Hand localization is achieved by using a CNN to estimate the 2D position of the hand center in the input, even in the presence of clutter and occlusions. The localized hand position, together with the corresponding input depth value, is used to generate a normalized cropped image that is fed into a second CNN to regress relative 3D hand joint locations in real time. For added accuracy, robustness and temporal stability, we refine the pose estimates using a kinematic pose tracking energy. To train the CNNs, we introduce a new photorealistic dataset that uses a merged reality approach to capture and synthesize large amounts of annotated data of natural hand interaction in cluttered scenes. Through quantitative and qualitative evaluation, we show that our method is robust to self-occlusion and occlusions by objects, particularly in moving egocentric perspectives.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We present an approach for real-time, robust and accurate hand pose estimation from moving egocentric RGB-D cameras in cluttered real environments. Existing methods typically fail for hand-object interactions in cluttered scenes imaged from egocentric viewpoints-common for virtual or augmented reality applications. Our approach uses two subsequently applied Convolutional Neural Networks (CNNs) to localize the hand and regress 3D joint locations. Hand localization is achieved by using a CNN to estimate the 2D position of the hand center in the input, even in the presence of clutter and occlusions. The localized hand position, together with the corresponding input depth value, is used to generate a normalized cropped image that is fed into a second CNN to regress relative 3D hand joint locations in real time. For added accuracy, robustness and temporal stability, we refine the pose estimates using a kinematic pose tracking energy. To train the CNNs, we introduce a new photorealistic dataset that uses a merged reality approach to capture and synthesize large amounts of annotated data of natural hand interaction in cluttered scenes. Through quantitative and qualitative evaluation, we show that our method is robust to self-occlusion and occlusions by objects, particularly in moving egocentric perspectives.",
"fno": "1034b284",
"keywords": [
"Three Dimensional Displays",
"Joints",
"Cameras",
"Kinematics",
"Real Time Systems",
"Tracking",
"Clutter"
],
"authors": [
{
"affiliation": null,
"fullName": "Franziska Mueller",
"givenName": "Franziska",
"surname": "Mueller",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Dushyant Mehta",
"givenName": "Dushyant",
"surname": "Mehta",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Oleksandr Sotnychenko",
"givenName": "Oleksandr",
"surname": "Sotnychenko",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Srinath Sridhar",
"givenName": "Srinath",
"surname": "Sridhar",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Dan Casas",
"givenName": "Dan",
"surname": "Casas",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Christian Theobalt",
"givenName": "Christian",
"surname": "Theobalt",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iccvw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2017-10-01T00:00:00",
"pubType": "proceedings",
"pages": "1284-1293",
"year": "2017",
"issn": "2473-9944",
"isbn": "978-1-5386-1034-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "1034b274",
"articleId": "12OmNC4eSDU",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "1034b294",
"articleId": "12OmNqGiuaa",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iccv/2015/8391/0/8391b868",
"title": "Depth-Based Hand Pose Estimation: Data, Methods, and Challenges",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2015/8391b868/12OmNAgY7lR",
"parentPublication": {
"id": "proceedings/iccv/2015/8391/0",
"title": "2015 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2015/8391/0/8391b949",
"title": "Lending A Hand: Detecting Hands and Recognizing Activities in Complex Egocentric Interactions",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2015/8391b949/12OmNAm4TJU",
"parentPublication": {
"id": "proceedings/iccv/2015/8391/0",
"title": "2015 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2017/1032/0/1032b163",
"title": "Real-Time Hand Tracking under Occlusion from an Egocentric RGB-D Sensor",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2017/1032b163/12OmNwJybPZ",
"parentPublication": {
"id": "proceedings/iccv/2017/1032/0",
"title": "2017 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2015/9711/0/5720a493",
"title": "Single-Frame Indexing for 3D Hand Pose Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2015/5720a493/12OmNxWcHnV",
"parentPublication": {
"id": "proceedings/iccvw/2015/9711/0",
"title": "2015 IEEE International Conference on Computer Vision Workshop (ICCVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2015/7082/0/07177448",
"title": "Egocentric hand pose estimation and distance recovery in a single RGB image",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2015/07177448/12OmNyuy9Ro",
"parentPublication": {
"id": "proceedings/icme/2015/7082/0",
"title": "2015 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2012/4711/0/4711a705",
"title": "Real-Time Hand Pose Estimation from RGB-D Sensor",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2012/4711a705/12OmNzlly47",
"parentPublication": {
"id": "proceedings/icme/2012/4711/0",
"title": "2012 IEEE International Conference on Multimedia and Expo",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2018/6420/0/642000a409",
"title": "First-Person Hand Action Benchmark with RGB-D Videos and 3D Hand Pose Annotations",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2018/642000a409/17D45WK5Amg",
"parentPublication": {
"id": "proceedings/cvpr/2018/6420/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2018/6420/0/642000i417",
"title": "Hand PointNet: 3D Hand Pose Estimation Using Point Sets",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2018/642000i417/17D45XERmmi",
"parentPublication": {
"id": "proceedings/cvpr/2018/6420/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvrv/2018/8497/0/849700a040",
"title": "A Robust Method for Hands Gesture Recognition from Egocentric Depth Sensor",
"doi": null,
"abstractUrl": "/proceedings-article/icvrv/2018/849700a040/1a3x7tWsXYI",
"parentPublication": {
"id": "proceedings/icvrv/2018/8497/0",
"title": "2018 International Conference on Virtual Reality and Visualization (ICVRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/12/09444887",
"title": "UNOC: Understanding Occlusion for Embodied Presence in Virtual Reality",
"doi": null,
"abstractUrl": "/journal/tg/2022/12/09444887/1u51yNn52s8",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNAXxXaK",
"title": "2017 IEEE International Conference on Computer Vision (ICCV)",
"acronym": "iccv",
"groupId": "1000149",
"volume": "0",
"displayVolume": "0",
"year": "2017",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNwJybPZ",
"doi": "10.1109/ICCV.2017.131",
"title": "Real-Time Hand Tracking under Occlusion from an Egocentric RGB-D Sensor",
"normalizedTitle": "Real-Time Hand Tracking under Occlusion from an Egocentric RGB-D Sensor",
"abstract": "We present an approach for real-time, robust and accurate hand pose estimation from moving egocentric RGB-D cameras in cluttered real environments. Existing methods typically fail for hand-object interactions in cluttered scenes imaged from egocentric viewpoints-common for virtual or augmented reality applications. Our approach uses two subsequently applied Convolutional Neural Networks (CNNs) to localize the hand and regress 3D joint locations. Hand localization is achieved by using a CNN to estimate the 2D position of the hand center in the input, even in the presence of clutter and occlusions. The localized hand position, together with the corresponding input depth value, is used to generate a normalized cropped image that is fed into a second CNN to regress relative 3D hand joint locations in real time. For added accuracy, robustness and temporal stability, we refine the pose estimates using a kinematic pose tracking energy. To train the CNNs, we introduce a new photorealistic dataset that uses a merged reality approach to capture and synthesize large amounts of annotated data of natural hand interaction in cluttered scenes. Through quantitative and qualitative evaluation, we show that our method is robust to self-occlusion and occlusions by objects, particularly in moving egocentric perspectives.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We present an approach for real-time, robust and accurate hand pose estimation from moving egocentric RGB-D cameras in cluttered real environments. Existing methods typically fail for hand-object interactions in cluttered scenes imaged from egocentric viewpoints-common for virtual or augmented reality applications. Our approach uses two subsequently applied Convolutional Neural Networks (CNNs) to localize the hand and regress 3D joint locations. Hand localization is achieved by using a CNN to estimate the 2D position of the hand center in the input, even in the presence of clutter and occlusions. The localized hand position, together with the corresponding input depth value, is used to generate a normalized cropped image that is fed into a second CNN to regress relative 3D hand joint locations in real time. For added accuracy, robustness and temporal stability, we refine the pose estimates using a kinematic pose tracking energy. To train the CNNs, we introduce a new photorealistic dataset that uses a merged reality approach to capture and synthesize large amounts of annotated data of natural hand interaction in cluttered scenes. Through quantitative and qualitative evaluation, we show that our method is robust to self-occlusion and occlusions by objects, particularly in moving egocentric perspectives.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We present an approach for real-time, robust and accurate hand pose estimation from moving egocentric RGB-D cameras in cluttered real environments. Existing methods typically fail for hand-object interactions in cluttered scenes imaged from egocentric viewpoints-common for virtual or augmented reality applications. Our approach uses two subsequently applied Convolutional Neural Networks (CNNs) to localize the hand and regress 3D joint locations. Hand localization is achieved by using a CNN to estimate the 2D position of the hand center in the input, even in the presence of clutter and occlusions. The localized hand position, together with the corresponding input depth value, is used to generate a normalized cropped image that is fed into a second CNN to regress relative 3D hand joint locations in real time. For added accuracy, robustness and temporal stability, we refine the pose estimates using a kinematic pose tracking energy. To train the CNNs, we introduce a new photorealistic dataset that uses a merged reality approach to capture and synthesize large amounts of annotated data of natural hand interaction in cluttered scenes. Through quantitative and qualitative evaluation, we show that our method is robust to self-occlusion and occlusions by objects, particularly in moving egocentric perspectives.",
"fno": "1032b163",
"keywords": [
"Augmented Reality",
"Cameras",
"Gesture Recognition",
"Image Colour Analysis",
"Image Motion Analysis",
"Image Segmentation",
"Image Sensors",
"Neural Nets",
"Pose Estimation",
"Tracking",
"Virtual Reality",
"Kinematic Pose Tracking Energy",
"CNN",
"Natural Hand Interaction",
"Cluttered Scenes",
"Egocentric Perspectives",
"Egocentric RGB D Sensor",
"Egocentric RGB D Cameras",
"Cluttered Real Environments",
"Hand Object Interactions",
"Egocentric Viewpoints",
"Virtual Reality Applications",
"Augmented Reality Applications",
"Hand Localization",
"Normalized Cropped Image",
"Relative 3 D Hand Joint Locations",
"Temporal Stability",
"Convolutional Neural Networks",
"Three Dimensional Displays",
"Joints",
"Cameras",
"Kinematics",
"Real Time Systems",
"Tracking",
"Clutter"
],
"authors": [
{
"affiliation": null,
"fullName": "Franziska Mueller",
"givenName": "Franziska",
"surname": "Mueller",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Dushyant Mehta",
"givenName": "Dushyant",
"surname": "Mehta",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Oleksandr Sotnychenko",
"givenName": "Oleksandr",
"surname": "Sotnychenko",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Srinath Sridhar",
"givenName": "Srinath",
"surname": "Sridhar",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Dan Casas",
"givenName": "Dan",
"surname": "Casas",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Christian Theobalt",
"givenName": "Christian",
"surname": "Theobalt",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iccv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2017-10-01T00:00:00",
"pubType": "proceedings",
"pages": "1163-1172",
"year": "2017",
"issn": "2380-7504",
"isbn": "978-1-5386-1032-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "1032b153",
"articleId": "12OmNBuL1c2",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "1032b173",
"articleId": "12OmNzGDsNa",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iccv/2015/8391/0/8391b868",
"title": "Depth-Based Hand Pose Estimation: Data, Methods, and Challenges",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2015/8391b868/12OmNAgY7lR",
"parentPublication": {
"id": "proceedings/iccv/2015/8391/0",
"title": "2015 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2015/8391/0/8391b949",
"title": "Lending A Hand: Detecting Hands and Recognizing Activities in Complex Egocentric Interactions",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2015/8391b949/12OmNAm4TJU",
"parentPublication": {
"id": "proceedings/iccv/2015/8391/0",
"title": "2015 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2017/1034/0/1034b284",
"title": "Real-Time Hand Tracking Under Occlusion from an Egocentric RGB-D Sensor",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2017/1034b284/12OmNrY3LA8",
"parentPublication": {
"id": "proceedings/iccvw/2017/1034/0",
"title": "2017 IEEE International Conference on Computer Vision Workshop (ICCVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2015/9711/0/5720a493",
"title": "Single-Frame Indexing for 3D Hand Pose Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2015/5720a493/12OmNxWcHnV",
"parentPublication": {
"id": "proceedings/iccvw/2015/9711/0",
"title": "2015 IEEE International Conference on Computer Vision Workshop (ICCVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2015/7082/0/07177448",
"title": "Egocentric hand pose estimation and distance recovery in a single RGB image",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2015/07177448/12OmNyuy9Ro",
"parentPublication": {
"id": "proceedings/icme/2015/7082/0",
"title": "2015 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2012/4711/0/4711a705",
"title": "Real-Time Hand Pose Estimation from RGB-D Sensor",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2012/4711a705/12OmNzlly47",
"parentPublication": {
"id": "proceedings/icme/2012/4711/0",
"title": "2012 IEEE International Conference on Multimedia and Expo",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2018/6420/0/642000a409",
"title": "First-Person Hand Action Benchmark with RGB-D Videos and 3D Hand Pose Annotations",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2018/642000a409/17D45WK5Amg",
"parentPublication": {
"id": "proceedings/cvpr/2018/6420/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2018/6420/0/642000i417",
"title": "Hand PointNet: 3D Hand Pose Estimation Using Point Sets",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2018/642000i417/17D45XERmmi",
"parentPublication": {
"id": "proceedings/cvpr/2018/6420/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvrv/2018/8497/0/849700a040",
"title": "A Robust Method for Hands Gesture Recognition from Egocentric Depth Sensor",
"doi": null,
"abstractUrl": "/proceedings-article/icvrv/2018/849700a040/1a3x7tWsXYI",
"parentPublication": {
"id": "proceedings/icvrv/2018/8497/0",
"title": "2018 International Conference on Virtual Reality and Visualization (ICVRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/12/09444887",
"title": "UNOC: Understanding Occlusion for Embodied Presence in Virtual Reality",
"doi": null,
"abstractUrl": "/journal/tg/2022/12/09444887/1u51yNn52s8",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNrAdsuf",
"title": "2015 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"acronym": "ismar",
"groupId": "1000465",
"volume": "0",
"displayVolume": "0",
"year": "2015",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNyKJicb",
"doi": "10.1109/ISMAR.2015.43",
"title": "[POSTER] Rubix: Dynamic Spatial Augmented Reality by Extraction of Plane Regions with a RGB-D Camera",
"normalizedTitle": "[POSTER] Rubix: Dynamic Spatial Augmented Reality by Extraction of Plane Regions with a RGB-D Camera",
"abstract": "Dynamic spatial augmented reality requires accurate real-time 3D pose information of the physical objects that are to be projected onto. Previous depth-based methods for tracking objects required strong features to enable recognition; making it difficult to estimate an accurate 6DOF pose for physical objects with a small set of recognizable features (such as a non-textured cube). We propose a more accurate method with fewer limitations for the pose estimation of a tangible object that has known planar faces and using depth data from an RGB-D camera only. In this paper, the physical object's shape is limited to cubes of different sizes. We apply this new tracking method to achieve dynamic projections onto these cubes. In our method, 3D points from an RGB-D camera are divided into a cluster of planar regions, and the point cloud inside each face of the object is fitted to an already-known geometric model of a cube. With the 6DOF pose of the physical object, SAR generated imagery is then projected correctly onto the physical object. The 6DOF tracking is designed to support tangible interactions with the physical object. We implemented example interactive applications with one or multiple cubes to show the capability of our method.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Dynamic spatial augmented reality requires accurate real-time 3D pose information of the physical objects that are to be projected onto. Previous depth-based methods for tracking objects required strong features to enable recognition; making it difficult to estimate an accurate 6DOF pose for physical objects with a small set of recognizable features (such as a non-textured cube). We propose a more accurate method with fewer limitations for the pose estimation of a tangible object that has known planar faces and using depth data from an RGB-D camera only. In this paper, the physical object's shape is limited to cubes of different sizes. We apply this new tracking method to achieve dynamic projections onto these cubes. In our method, 3D points from an RGB-D camera are divided into a cluster of planar regions, and the point cloud inside each face of the object is fitted to an already-known geometric model of a cube. With the 6DOF pose of the physical object, SAR generated imagery is then projected correctly onto the physical object. The 6DOF tracking is designed to support tangible interactions with the physical object. We implemented example interactive applications with one or multiple cubes to show the capability of our method.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Dynamic spatial augmented reality requires accurate real-time 3D pose information of the physical objects that are to be projected onto. Previous depth-based methods for tracking objects required strong features to enable recognition; making it difficult to estimate an accurate 6DOF pose for physical objects with a small set of recognizable features (such as a non-textured cube). We propose a more accurate method with fewer limitations for the pose estimation of a tangible object that has known planar faces and using depth data from an RGB-D camera only. In this paper, the physical object's shape is limited to cubes of different sizes. We apply this new tracking method to achieve dynamic projections onto these cubes. In our method, 3D points from an RGB-D camera are divided into a cluster of planar regions, and the point cloud inside each face of the object is fitted to an already-known geometric model of a cube. With the 6DOF pose of the physical object, SAR generated imagery is then projected correctly onto the physical object. The 6DOF tracking is designed to support tangible interactions with the physical object. We implemented example interactive applications with one or multiple cubes to show the capability of our method.",
"fno": "7660a148",
"keywords": [
"Three Dimensional Displays",
"Cameras",
"Iterative Closest Point Algorithm",
"Augmented Reality",
"Real Time Systems",
"Target Tracking",
"RGB D Camera",
"Spatial Augmented Reality",
"Six Degree Of Freedom Tracking"
],
"authors": [
{
"affiliation": null,
"fullName": "Masayuki Sano",
"givenName": "Masayuki",
"surname": "Sano",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Kazuki Matsumoto",
"givenName": "Kazuki",
"surname": "Matsumoto",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Bruce H. Thomas",
"givenName": "Bruce H.",
"surname": "Thomas",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Hideo Saito",
"givenName": "Hideo",
"surname": "Saito",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2015-09-01T00:00:00",
"pubType": "proceedings",
"pages": "148-151",
"year": "2015",
"issn": null,
"isbn": "978-1-4673-7660-0",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "7660a144",
"articleId": "12OmNBO3K9k",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "7660a152",
"articleId": "12OmNxYtucC",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2013/4795/0/06549351",
"title": "An advanced interaction framework for augmented reality based exposure treatment",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2013/06549351/12OmNAgGwdn",
"parentPublication": {
"id": "proceedings/vr/2013/4795/0",
"title": "2013 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dasc-picom-datacom-cyberscitech/2016/4065/0/07588815",
"title": "Surface Construction from Kinect RGB-D Stream",
"doi": null,
"abstractUrl": "/proceedings-article/dasc-picom-datacom-cyberscitech/2016/07588815/12OmNBKW9Ef",
"parentPublication": {
"id": "proceedings/dasc-picom-datacom-cyberscitech/2016/4065/0",
"title": "2016 IEEE 14th Intl Conf on Dependable, Autonomic and Secure Computing, 14th Intl Conf on Pervasive Intelligence and Computing, 2nd Intl Conf on Big Data Intelligence and Computing and Cyber Science and Technology Congress(DASC/PiCom/DataCom/CyberSciTech)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2017/1034/0/1034c165",
"title": "Efficient and Accurate Registration of Point Clouds with Plane to Plane Correspondences",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2017/1034c165/12OmNCf1DvT",
"parentPublication": {
"id": "proceedings/iccvw/2017/1034/0",
"title": "2017 IEEE International Conference on Computer Vision Workshop (ICCVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2013/2869/0/06671777",
"title": "Real-time RGB-D camera relocalization",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2013/06671777/12OmNqEAT3B",
"parentPublication": {
"id": "proceedings/ismar/2013/2869/0",
"title": "2013 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2014/6184/0/06948421",
"title": "Sticky projections — A new approach to interactive shader lamp tracking",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2014/06948421/12OmNwkzupV",
"parentPublication": {
"id": "proceedings/ismar/2014/6184/0",
"title": "2014 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2014/5209/0/5209c459",
"title": "RGB-D Multi-view System Calibration for Full 3D Scene Reconstruction",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2014/5209c459/12OmNx4Q6Bg",
"parentPublication": {
"id": "proceedings/icpr/2014/5209/0",
"title": "2014 22nd International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/case/2012/0430/0/06386413",
"title": "6DOF pose estimation using 2D-3D sensor fusion",
"doi": null,
"abstractUrl": "/proceedings-article/case/2012/06386413/12OmNxEjXUB",
"parentPublication": {
"id": "proceedings/case/2012/0430/0",
"title": "2012 IEEE International Conference on Automation Science and Engineering (CASE 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2012/2216/0/06460968",
"title": "Plane based multi camera calibration under unknown correspondence using ICP-like approach",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2012/06460968/12OmNz5JBUr",
"parentPublication": {
"id": "proceedings/icpr/2012/2216/0",
"title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/03/07138633",
"title": "Sticky Projections-A Model-Based Approach to Interactive Shader Lamps Tracking",
"doi": null,
"abstractUrl": "/journal/tg/2016/03/07138633/13rRUxly8XI",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/gc/2019/4129/0/412900a068",
"title": "An Object-Pose Estimation Acceleration Technique for Picking Robot Applications by Using Graph-Reusing k-NN Search",
"doi": null,
"abstractUrl": "/proceedings-article/gc/2019/412900a068/1ifeCmnEwzm",
"parentPublication": {
"id": "proceedings/gc/2019/4129/0",
"title": "2019 First International Conference on Graph Computing (GC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNy4r3R2",
"title": "2009 IEEE Conference on Computer Vision and Pattern Recognition",
"acronym": "cvpr",
"groupId": "1000147",
"volume": "0",
"displayVolume": "0",
"year": "2009",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNyugz2W",
"doi": "10.1109/CVPR.2009.5206776",
"title": "Linear solution to scale and rotation invariant object matching",
"normalizedTitle": "Linear solution to scale and rotation invariant object matching",
"abstract": "Images of an object undergoing ego- or camera-motion often appear to be scaled, rotated, and deformed versions of each other. To detect and match such distorted patterns to a single sample view of the object requires solving a hard computational problem that has eluded most object matching methods. We propose a linear formulation that simultaneously finds feature point correspondences and global geometrical transformations in a constrained solution space. Further reducing the search space based on the lower convex hull property of the formulation, our method scales well with the number of candidate features. Our results on a variety of images and videos demonstrate that our method is accurate, efficient, and robust over local deformation, occlusion, clutter, and large geometrical transformations.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Images of an object undergoing ego- or camera-motion often appear to be scaled, rotated, and deformed versions of each other. To detect and match such distorted patterns to a single sample view of the object requires solving a hard computational problem that has eluded most object matching methods. We propose a linear formulation that simultaneously finds feature point correspondences and global geometrical transformations in a constrained solution space. Further reducing the search space based on the lower convex hull property of the formulation, our method scales well with the number of candidate features. Our results on a variety of images and videos demonstrate that our method is accurate, efficient, and robust over local deformation, occlusion, clutter, and large geometrical transformations.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Images of an object undergoing ego- or camera-motion often appear to be scaled, rotated, and deformed versions of each other. To detect and match such distorted patterns to a single sample view of the object requires solving a hard computational problem that has eluded most object matching methods. We propose a linear formulation that simultaneously finds feature point correspondences and global geometrical transformations in a constrained solution space. Further reducing the search space based on the lower convex hull property of the formulation, our method scales well with the number of candidate features. Our results on a variety of images and videos demonstrate that our method is accurate, efficient, and robust over local deformation, occlusion, clutter, and large geometrical transformations.",
"fno": "05206776",
"keywords": [
"Cameras",
"Image Matching",
"Image Motion Analysis",
"Object Detection",
"Rotation Invariant Object Matching Method",
"Camera Motion",
"Object Detection",
"Linear Formulation",
"Feature Point Correspondence",
"Global Geometrical Transformation",
"Search Space",
"Convex Hull Property",
"Shape",
"Noise Robustness",
"Animation",
"Iterative Closest Point Algorithm",
"Art",
"Noise Shaping",
"Uncertainty",
"Calibration",
"Deformable Models",
"Spatial Resolution"
],
"authors": [
{
"affiliation": "Computer Science Department, Boston College, Chestnut Hill, MA 02467, USA",
"fullName": "Hao Jiang",
"givenName": "Hao",
"surname": "Jiang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Computer Science Department, Boston College, Chestnut Hill, MA 02467, USA",
"fullName": "Stella X. Yu",
"givenName": "Stella X.",
"surname": "Yu",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2009-06-01T00:00:00",
"pubType": "proceedings",
"pages": "2474-2481",
"year": "2009",
"issn": "1063-6919",
"isbn": "978-1-4244-3992-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "05206775",
"articleId": "12OmNzmtWHo",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "05206773",
"articleId": "12OmNBpEeUa",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icpr/2006/2521/3/252130336",
"title": "Rotation-Invariant Neoperceptron",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2006/252130336/12OmNASraWg",
"parentPublication": {
"id": "proceedings/icpr/2006/2521/3",
"title": "2006 18th International Conference on Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2011/0394/0/05995580",
"title": "Scale and rotation invariant matching using linearly augmented trees",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2011/05995580/12OmNApu5M7",
"parentPublication": {
"id": "proceedings/cvpr/2011/0394/0",
"title": "CVPR 2011",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/1988/0878/0/00028486",
"title": "Optical pattern recognition using correlation procedure for rotation- and scale-invariant feature extraction",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/1988/00028486/12OmNArthbm",
"parentPublication": {
"id": "proceedings/icpr/1988/0878/0",
"title": "9th International Conference on Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2012/1226/0/019P1A19",
"title": "Scale resilient, rotation invariant articulated object matching",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2012/019P1A19/12OmNwe2Ipo",
"parentPublication": {
"id": "proceedings/cvpr/2012/1226/0",
"title": "2012 IEEE Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/1991/2148/0/00139712",
"title": "Constrained deformable superquadrics and nonrigid motion tracking",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/1991/00139712/12OmNxzMnPf",
"parentPublication": {
"id": "proceedings/cvpr/1991/2148/0",
"title": "Proceedings. 1991 IEEE Computer Society Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2008/2174/0/04761510",
"title": "Object localization using affine invariant substructure constraints.",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2008/04761510/12OmNyvY9ra",
"parentPublication": {
"id": "proceedings/icpr/2008/2174/0",
"title": "ICPR 2008 19th International Conference on Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icip/1998/8821/1/882110156",
"title": "Rotation-invariant 3D reconstruction",
"doi": null,
"abstractUrl": "/proceedings-article/icip/1998/882110156/12OmNzV70zx",
"parentPublication": {
"id": "proceedings/icip/1998/8821/1",
"title": "Proceedings of IPCIP'98 International Conference on Image Processing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2009/3992/0/05206775",
"title": "Isometric registration of ambiguous and partial data",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2009/05206775/12OmNzmtWHo",
"parentPublication": {
"id": "proceedings/cvpr/2009/3992/0",
"title": "2009 IEEE Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2011/07/ttp2011071339",
"title": "Linear Scale and Rotation Invariant Matching",
"doi": null,
"abstractUrl": "/journal/tp/2011/07/ttp2011071339/13rRUB6Sq1A",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2015/12/07054480",
"title": "Scale and Rotation Invariant Matching Using Linearly Augmented Trees",
"doi": null,
"abstractUrl": "/journal/tp/2015/12/07054480/13rRUwIF6eY",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1m3n9N02qgE",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"acronym": "cvpr",
"groupId": "1000147",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1m3ne124gRa",
"doi": "10.1109/CVPR42600.2020.00101",
"title": "Scene Recomposition by Learning-Based ICP",
"normalizedTitle": "Scene Recomposition by Learning-Based ICP",
"abstract": "By moving a depth sensor around a room, we compute a 3D CAD model of the environment, capturing the room shape and contents such as chairs, desks, sofas, and tables. Rather than reconstructing geometry, we match, place, and align each object in the scene to thousands of CAD models of objects. In addition to the fully automatic system, the key technical contribution is a novel approach for aligning CAD models to 3D scans, based on deep reinforcement learning. This approach, which we call Learning-based ICP, outperforms prior ICP methods in the literature, by learning the best points to match and conditioning on object viewpoint. LICP learns to align using only synthetic data and does not require ground truth annotation of object pose or keypoint pair matching in real scene scans. While LICP is trained on synthetic data and without 3D real scene annotations, it outperforms both learned local deep feature matching and geometric based alignment methods in real scenes. The proposed method is evaluated on real scenes datasets of SceneNN and ScanNet as well as synthetic scenes of SUNCG. High quality results are demonstrated on a range of real world scenes, with robustness to clutter, viewpoint, and occlusion.",
"abstracts": [
{
"abstractType": "Regular",
"content": "By moving a depth sensor around a room, we compute a 3D CAD model of the environment, capturing the room shape and contents such as chairs, desks, sofas, and tables. Rather than reconstructing geometry, we match, place, and align each object in the scene to thousands of CAD models of objects. In addition to the fully automatic system, the key technical contribution is a novel approach for aligning CAD models to 3D scans, based on deep reinforcement learning. This approach, which we call Learning-based ICP, outperforms prior ICP methods in the literature, by learning the best points to match and conditioning on object viewpoint. LICP learns to align using only synthetic data and does not require ground truth annotation of object pose or keypoint pair matching in real scene scans. While LICP is trained on synthetic data and without 3D real scene annotations, it outperforms both learned local deep feature matching and geometric based alignment methods in real scenes. The proposed method is evaluated on real scenes datasets of SceneNN and ScanNet as well as synthetic scenes of SUNCG. High quality results are demonstrated on a range of real world scenes, with robustness to clutter, viewpoint, and occlusion.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "By moving a depth sensor around a room, we compute a 3D CAD model of the environment, capturing the room shape and contents such as chairs, desks, sofas, and tables. Rather than reconstructing geometry, we match, place, and align each object in the scene to thousands of CAD models of objects. In addition to the fully automatic system, the key technical contribution is a novel approach for aligning CAD models to 3D scans, based on deep reinforcement learning. This approach, which we call Learning-based ICP, outperforms prior ICP methods in the literature, by learning the best points to match and conditioning on object viewpoint. LICP learns to align using only synthetic data and does not require ground truth annotation of object pose or keypoint pair matching in real scene scans. While LICP is trained on synthetic data and without 3D real scene annotations, it outperforms both learned local deep feature matching and geometric based alignment methods in real scenes. The proposed method is evaluated on real scenes datasets of SceneNN and ScanNet as well as synthetic scenes of SUNCG. High quality results are demonstrated on a range of real world scenes, with robustness to clutter, viewpoint, and occlusion.",
"fno": "716800a927",
"keywords": [
"CAD",
"Computer Vision",
"Feature Extraction",
"Image Matching",
"Iterative Methods",
"Learning Artificial Intelligence",
"Solid Modelling",
"Iterative Closest Point Algorithm",
"Scene NN Real Scene Datasets",
"Learned Local Deep Feature Matching",
"Geometric Based Alignment Methods",
"Synthetic Scenes",
"Scene Recomposition",
"Depth Sensor",
"3 D CAD Model",
"Room Shape",
"Fully Automatic System",
"Deep Reinforcement Learning",
"ICP Methods",
"Object Viewpoint",
"Synthetic Data",
"Real Scene Scans",
"Geometry Reconstruction",
"Learning Based ICP",
"LICP",
"Scan Net Real Scene Datasets",
"Three Dimensional Displays",
"Solid Modeling",
"Shape",
"Iterative Closest Point Algorithm",
"Geometry",
"Computational Modeling",
"Cameras"
],
"authors": [
{
"affiliation": "University of Washington",
"fullName": "Hamid Izadinia",
"givenName": "Hamid",
"surname": "Izadinia",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Washington",
"fullName": "Steven M. Seitz",
"givenName": "Steven M.",
"surname": "Seitz",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-06-01T00:00:00",
"pubType": "proceedings",
"pages": "927-936",
"year": "2020",
"issn": null,
"isbn": "978-1-7281-7168-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "716800a916",
"articleId": "1m3nH4ehlvi",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "716800a937",
"articleId": "1m3nzh4rzwI",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iccvw/2013/3022/0/3022a307",
"title": "Single-View RGBD-Based Reconstruction of Dynamic Human Geometry",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2013/3022a307/12OmNAP1Z1Q",
"parentPublication": {
"id": "proceedings/iccvw/2013/3022/0",
"title": "2013 IEEE International Conference on Computer Vision Workshops (ICCVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2012/2216/0/06460066",
"title": "Online ICP forecast for patients with traumatic brain injury",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2012/06460066/12OmNBC8At9",
"parentPublication": {
"id": "proceedings/icpr/2012/2216/0",
"title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ecmsm/2013/6298/0/06648973",
"title": "ICP-SLAM methods implementation on a bi-steerable mobile robot",
"doi": null,
"abstractUrl": "/proceedings-article/ecmsm/2013/06648973/12OmNBVIUA9",
"parentPublication": {
"id": "proceedings/ecmsm/2013/6298/0",
"title": "2013 IEEE 11th International Workshop of Electronics, Control, Measurement, Signals and their application to Mechatronics (ECMSM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2012/4711/0/4711a350",
"title": "Recognizing Occluded 3D Faces Using an Efficient ICP Variant",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2012/4711a350/12OmNqHqSAK",
"parentPublication": {
"id": "proceedings/icme/2012/4711/0",
"title": "2012 IEEE International Conference on Multimedia and Expo",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/crv/2015/1986/0/1986a039",
"title": "Simultaneous Scene Reconstruction and Auto-Calibration Using Constrained Iterative Closest Point for 3D Depth Sensor Array",
"doi": null,
"abstractUrl": "/proceedings-article/crv/2015/1986a039/12OmNvDqsSA",
"parentPublication": {
"id": "proceedings/crv/2015/1986/0",
"title": "2015 12th Conference on Computer and Robot Vision (CRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2007/1016/0/04284894",
"title": "ICP with Bounded Scale for Registration of M-D Point Sets",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2007/04284894/12OmNvSbBmC",
"parentPublication": {
"id": "proceedings/icme/2007/1016/0",
"title": "2007 International Conference on Multimedia & Expo",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ecmsm/2015/6972/0/07208683",
"title": "FAST ICP-SLAM for a bi-steerable mobile robot in large environments",
"doi": null,
"abstractUrl": "/proceedings-article/ecmsm/2015/07208683/12OmNz2C1rh",
"parentPublication": {
"id": "proceedings/ecmsm/2015/6972/0",
"title": "2015 IEEE International Workshop of Electronics, Control, Measurement, Signals and their application to Mechatronics (ECMSM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2016/11/07368945",
"title": "Go-ICP: A Globally Optimal Solution to 3D ICP Point-Set Registration",
"doi": null,
"abstractUrl": "/journal/tp/2016/11/07368945/13rRUwfZC1L",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/focs/2006/2720/0/04031352",
"title": "Worst-case and Smoothed Analysis of the ICP Algorithm, with an Application to the k-means Method",
"doi": null,
"abstractUrl": "/proceedings-article/focs/2006/04031352/14jQfQ20k5w",
"parentPublication": {
"id": "proceedings/focs/2006/2720/0",
"title": "2006 47th Annual IEEE Symposium on Foundations of Computer Science (FOCS'06)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aiam/2021/1732/0/173200a407",
"title": "An Improved ICP Point Cloud Registration Algorithm Based on Three-Points Congruent Sets",
"doi": null,
"abstractUrl": "/proceedings-article/aiam/2021/173200a407/1BzTJDeh3Ms",
"parentPublication": {
"id": "proceedings/aiam/2021/1732/0",
"title": "2021 3rd International Conference on Artificial Intelligence and Advanced Manufacture (AIAM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.