data dict |
|---|
{
"proceeding": {
"id": "12OmNvRU0m1",
"title": "2009 IEEE International Symposium on Modeling, Analysis & Simulation of Computer and Telecommunication Systems",
"acronym": "mascots",
"groupId": "1000469",
"volume": "0",
"displayVolume": "0",
"year": "2009",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNCdBDOO",
"doi": "10.1109/MASCOT.2009.5366652",
"title": "Policies for probe-wear leveling in MEMS-based storage devices",
"normalizedTitle": "Policies for probe-wear leveling in MEMS-based storage devices",
"abstract": "Probes (or read/write heads) in MEMS-based storage devices are susceptible to wear. We study probe wear, and analyze the causes of probe uneven wear. We show that under real-world traces some probes can wear one order of magnitude faster than other probes leading to premature expiry of some probes. Premature expiry has severe consequences for the reliability, timing performance, energy-efficiency, and the lifetime of MEMS-based storage devices. Therefore, wear-leveling is a must to preclude premature expiry. We discuss how probe wear in MEMS-based storage is different from medium wear in Flash, calling for a different treatment. We present three policies to level probe wear. By simulation against three real-world traces, our work shows that an inevitable trade-off exists between lifetime, timing performance, and energy efficiency. The policies differ in the size of the trade-off. One of the policies maximizes the lifetime, so that it is optimal; and the other two are less optimal, and are used based on the configuration of the device.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Probes (or read/write heads) in MEMS-based storage devices are susceptible to wear. We study probe wear, and analyze the causes of probe uneven wear. We show that under real-world traces some probes can wear one order of magnitude faster than other probes leading to premature expiry of some probes. Premature expiry has severe consequences for the reliability, timing performance, energy-efficiency, and the lifetime of MEMS-based storage devices. Therefore, wear-leveling is a must to preclude premature expiry. We discuss how probe wear in MEMS-based storage is different from medium wear in Flash, calling for a different treatment. We present three policies to level probe wear. By simulation against three real-world traces, our work shows that an inevitable trade-off exists between lifetime, timing performance, and energy efficiency. The policies differ in the size of the trade-off. One of the policies maximizes the lifetime, so that it is optimal; and the other two are less optimal, and are used based on the configuration of the device.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Probes (or read/write heads) in MEMS-based storage devices are susceptible to wear. We study probe wear, and analyze the causes of probe uneven wear. We show that under real-world traces some probes can wear one order of magnitude faster than other probes leading to premature expiry of some probes. Premature expiry has severe consequences for the reliability, timing performance, energy-efficiency, and the lifetime of MEMS-based storage devices. Therefore, wear-leveling is a must to preclude premature expiry. We discuss how probe wear in MEMS-based storage is different from medium wear in Flash, calling for a different treatment. We present three policies to level probe wear. By simulation against three real-world traces, our work shows that an inevitable trade-off exists between lifetime, timing performance, and energy efficiency. The policies differ in the size of the trade-off. One of the policies maximizes the lifetime, so that it is optimal; and the other two are less optimal, and are used based on the configuration of the device.",
"fno": "05366652",
"keywords": [
"Micromechanical Devices",
"Probes",
"Reliability",
"Wear",
"Policies",
"Probe Wear Leveling",
"MEMS Based Storage Devices",
"Premature Expiry",
"Reliability",
"Timing Performance",
"Energy Efficiency",
"Probes",
"Energy Efficiency",
"Timing",
"Energy Storage",
"Micromechanical Devices",
"Microelectromechanical Devices",
"Buffer Storage",
"Mathematics",
"Computer Science",
"Physical Layer",
"Probe Wear",
"Wear Leveling",
"MEMS Based Storage",
"Probe Storage"
],
"authors": [
{
"affiliation": "Faculty of Electrical Engineering, Mathematics and Computer Science, University of Twente, P.O. Box 217, 7500 AE Enschede, The Netherlands",
"fullName": "Mohammed G. Khatib",
"givenName": "Mohammed G.",
"surname": "Khatib",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Faculty of Electrical Engineering, Mathematics and Computer Science, University of Twente, P.O. Box 217, 7500 AE Enschede, The Netherlands",
"fullName": "Pieter H. Hartel",
"givenName": "Pieter H.",
"surname": "Hartel",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "mascots",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2009-09-01T00:00:00",
"pubType": "proceedings",
"pages": "",
"year": "2009",
"issn": "1526-7539",
"isbn": "978-1-4244-4927-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "05366667",
"articleId": "12OmNxxNbQr",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "05366656",
"articleId": "12OmNqGA5dT",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/case/2007/1153/0/04341814",
"title": "Multi-Probe Micro-Assembly",
"doi": null,
"abstractUrl": "/proceedings-article/case/2007/04341814/12OmNAq3hFs",
"parentPublication": {
"id": "proceedings/case/2007/1153/0",
"title": "3rd Annual IEEE Conference on Automation Science and Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iciss/1997/4276/0/00630277",
"title": "RF MEMS for digitally-controlled front-end components",
"doi": null,
"abstractUrl": "/proceedings-article/iciss/1997/00630277/12OmNBOCWaP",
"parentPublication": {
"id": "proceedings/iciss/1997/4276/0",
"title": "1997 Proceedings Second Annual IEEE International Conference on Innovative Systems in Silicon",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccad/2002/7607/0/01167559",
"title": "Extraction and LVS for mixed-domain integrated MEMS layouts",
"doi": null,
"abstractUrl": "/proceedings-article/iccad/2002/01167559/12OmNBOCWfi",
"parentPublication": {
"id": "proceedings/iccad/2002/7607/0",
"title": "2002 IEEE/ACM International Conference on Computer Aided Design (ICCAD)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/test/2007/1127/0/04437618",
"title": "A matched expansion MEMS probe card with low CTE LTCC substrate",
"doi": null,
"abstractUrl": "/proceedings-article/test/2007/04437618/12OmNBpVPWo",
"parentPublication": {
"id": "proceedings/test/2007/1127/0",
"title": "International Test Conference 2007",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/memsys/1997/3744/0/00581868",
"title": "A new MEMS wafer probe card",
"doi": null,
"abstractUrl": "/proceedings-article/memsys/1997/00581868/12OmNrF2DIS",
"parentPublication": {
"id": "proceedings/memsys/1997/3744/0",
"title": "Proceedings IEEE The Tenth Annual International Workshop on Micro Electro Mechanical Systems. An Investigation of Micro Structures, Sensors, Actuators, Machines and Robots",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/idt/2009/5750/0/05404127",
"title": "RF MEMS resonators: Material properties extraction",
"doi": null,
"abstractUrl": "/proceedings-article/idt/2009/05404127/12OmNvwTGD2",
"parentPublication": {
"id": "proceedings/idt/2009/5750/0",
"title": "2009 4th International Design and Test Workshop (IDT 2009)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/memsys/1997/3744/0/00581894",
"title": "A flexible MEMS technology and its first application to shear stress sensor skin",
"doi": null,
"abstractUrl": "/proceedings-article/memsys/1997/00581894/12OmNwFidd2",
"parentPublication": {
"id": "proceedings/memsys/1997/3744/0",
"title": "Proceedings IEEE The Tenth Annual International Workshop on Micro Electro Mechanical Systems. An Investigation of Micro Structures, Sensors, Actuators, Machines and Robots",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ims3tw/2010/7793/0/05502999",
"title": "Simulating and monitoring the resonant frequency of MEMS for failure detection and prediction",
"doi": null,
"abstractUrl": "/proceedings-article/ims3tw/2010/05502999/12OmNymjN2n",
"parentPublication": {
"id": "proceedings/ims3tw/2010/7793/0",
"title": "2010 IEEE 16th International Mixed-Signals, Sensors and Systems Test Workshop (IMS3TW 2010)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iciss/1996/3639/0/00552415",
"title": "Microelectromechanical probe for an integrated electroantennographic system",
"doi": null,
"abstractUrl": "/proceedings-article/iciss/1996/00552415/12OmNzUgd6y",
"parentPublication": {
"id": "proceedings/iciss/1996/3639/0",
"title": "1996 Proceedings. Eighth Annual IEEE International Conference on Innovative Systems in Silicon",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccad/2003/762/0/01257674",
"title": "Physical and reduced-order dynamic analysis of MEMS",
"doi": null,
"abstractUrl": "/proceedings-article/iccad/2003/01257674/12OmNzd7bLS",
"parentPublication": {
"id": "proceedings/iccad/2003/762/0",
"title": "ICCAD-2003. International Conference on Computer Aided Design",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNCmpcNk",
"title": "Visualization Conference, IEEE",
"acronym": "ieee-vis",
"groupId": "1000796",
"volume": "0",
"displayVolume": "0",
"year": "2005",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNvlxJxx",
"doi": "10.1109/VIS.2005.78",
"title": "Phonon Tracing for Auralization and Visualization of Sound",
"normalizedTitle": "Phonon Tracing for Auralization and Visualization of Sound",
"abstract": "We present a new particle tracing approach for the simulation of mid- and high-frequency sound. Inspired by the photorealism obtained by methods like photon mapping, we develop a similar method for the physical simulation of sound within rooms. For given source and listener positions, our method computes a finiteresponse filter accounting for the different reflections at various surfaces with frequency-dependent absorption coefficients. Convoluting this filter with an anechoic input signal reproduces a realistic aural impression of the simulated room. We do not consider diffraction effects due to low frequencies, since these can be better computed by finite elements. Our method allows the visualization of a wavefront propagation using color-coded blobs traversing the paths of individual phonons.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We present a new particle tracing approach for the simulation of mid- and high-frequency sound. Inspired by the photorealism obtained by methods like photon mapping, we develop a similar method for the physical simulation of sound within rooms. For given source and listener positions, our method computes a finiteresponse filter accounting for the different reflections at various surfaces with frequency-dependent absorption coefficients. Convoluting this filter with an anechoic input signal reproduces a realistic aural impression of the simulated room. We do not consider diffraction effects due to low frequencies, since these can be better computed by finite elements. Our method allows the visualization of a wavefront propagation using color-coded blobs traversing the paths of individual phonons.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We present a new particle tracing approach for the simulation of mid- and high-frequency sound. Inspired by the photorealism obtained by methods like photon mapping, we develop a similar method for the physical simulation of sound within rooms. For given source and listener positions, our method computes a finiteresponse filter accounting for the different reflections at various surfaces with frequency-dependent absorption coefficients. Convoluting this filter with an anechoic input signal reproduces a realistic aural impression of the simulated room. We do not consider diffraction effects due to low frequencies, since these can be better computed by finite elements. Our method allows the visualization of a wavefront propagation using color-coded blobs traversing the paths of individual phonons.",
"fno": "27660020",
"keywords": [
"Acoustics",
"Auralization",
"Raytracing",
"Photon Mapping"
],
"authors": [
{
"affiliation": "TU Kaiserslautern",
"fullName": "Martin Bertram",
"givenName": "Martin",
"surname": "Bertram",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "TU Kaiserslautern",
"fullName": "Eduard Deines",
"givenName": "Eduard",
"surname": "Deines",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "ITWM Kaiserslautern",
"fullName": "Jan Mohring",
"givenName": "Jan",
"surname": "Mohring",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "ITWM Kaiserslautern",
"fullName": "Jevgenij Jegorovs",
"givenName": "Jevgenij",
"surname": "Jegorovs",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "TU Kaiserslautern",
"fullName": "Hans Hagen",
"givenName": "Hans",
"surname": "Hagen",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ieee-vis",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2005-10-01T00:00:00",
"pubType": "proceedings",
"pages": "20",
"year": "2005",
"issn": null,
"isbn": "0-7803-9462-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "01532805",
"articleId": "12OmNyQGS8K",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "01532806",
"articleId": "12OmNzUPpfH",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iciap/2001/1183/0/11830354",
"title": "Integrated Tracking with Vision and Sound",
"doi": null,
"abstractUrl": "/proceedings-article/iciap/2001/11830354/12OmNASILKr",
"parentPublication": {
"id": "proceedings/iciap/2001/1183/0",
"title": "Proceedings ICIAP 2001. 11th International Conference on Image Analysis and Processing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cw/2002/1862/0/18620191",
"title": "Robotic Spatial Sound Localization and Its 3-D Sound Human Interface",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2002/18620191/12OmNCf1DwV",
"parentPublication": {
"id": "proceedings/cw/2002/1862/0",
"title": "First International Symposium on Cyber Worlds, 2002. Proceedings.",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2007/1016/0/04284719",
"title": "Development of a Directional Loudspeaker System for Sound Reproduction",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2007/04284719/12OmNwdbVaK",
"parentPublication": {
"id": "proceedings/icme/2007/1016/0",
"title": "2007 International Conference on Multimedia & Expo",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/snpd/2012/2120/0/06299341",
"title": "Physical Factors and Spatial Impressions of Surround Sound Recording",
"doi": null,
"abstractUrl": "/proceedings-article/snpd/2012/06299341/12OmNx7ov3v",
"parentPublication": {
"id": "proceedings/snpd/2012/2120/0",
"title": "2012 13th ACIS International Conference on Software Engineering, Artificial Intelligence, Networking and Parallel & Distributed Computing (SNPD 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-vis/2005/2766/0/01532790",
"title": "Phonon tracing for auralization and visualization of sound",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/2005/01532790/12OmNxxdZyx",
"parentPublication": {
"id": "proceedings/ieee-vis/2005/2766/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/04/07384541",
"title": "SynCoPation: Interactive Synthesis-Coupled Sound Propagation",
"doi": null,
"abstractUrl": "/journal/tg/2016/04/07384541/13rRUx0xPIM",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/11/ttg2012111797",
"title": "Guided Multiview Ray Tracing for Fast Auralization",
"doi": null,
"abstractUrl": "/journal/tg/2012/11/ttg2012111797/13rRUxAAST4",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/11/07360212",
"title": "Tracing Analytic Ray Curves for Light and Sound Propagation in Non-Linear Media",
"doi": null,
"abstractUrl": "/journal/tg/2016/11/07360212/13rRUxYIN4c",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cs/1999/04/c4048",
"title": "Data Sonification and Sound Visualization",
"doi": null,
"abstractUrl": "/magazine/cs/1999/04/c4048/13rRUy08MzR",
"parentPublication": {
"id": "mags/cs",
"title": "Computing in Science & Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090655",
"title": "Physics-based Concatenative Sound Synthesis of Photogrammetric models for Aural and Haptic Feedback in Virtual Environments",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090655/1jIxxvNJ4CQ",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNx8wTfL",
"title": "ICPR 2008 19th International Conference on Pattern Recognition",
"acronym": "icpr",
"groupId": "1000545",
"volume": "0",
"displayVolume": "0",
"year": "2008",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNwp74ug",
"doi": "10.1109/ICPR.2008.4761758",
"title": "Classification method for microarray probe selection using sequence, thermodynamics and secondary structure parameters",
"normalizedTitle": "Classification method for microarray probe selection using sequence, thermodynamics and secondary structure parameters",
"abstract": "Probe design is the most important step for any microarray based assay. Accurate and efficient probe design and selection for the target sequence is critical in generating reliable and useful results. Several different approaches for probe design are reported in literature and an increasing number of bioinformatics tools are available for the same. However, based on the reported low accuracy, determining the hybridization efficiency of the probes is still a big computational challenge. Present study deals with the extraction of various novel features related to sequence composition, thermodynamics and secondary structure that may be essential for designing good probes. A feature selection method has been used to assess the relative importance of all these features. In this paper, we validate the importance of various features currently used for designing an oligonucleotide probe. Finally, a classification methodology is presented that can be used to predict the hybridization quality of a probe.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Probe design is the most important step for any microarray based assay. Accurate and efficient probe design and selection for the target sequence is critical in generating reliable and useful results. Several different approaches for probe design are reported in literature and an increasing number of bioinformatics tools are available for the same. However, based on the reported low accuracy, determining the hybridization efficiency of the probes is still a big computational challenge. Present study deals with the extraction of various novel features related to sequence composition, thermodynamics and secondary structure that may be essential for designing good probes. A feature selection method has been used to assess the relative importance of all these features. In this paper, we validate the importance of various features currently used for designing an oligonucleotide probe. Finally, a classification methodology is presented that can be used to predict the hybridization quality of a probe.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Probe design is the most important step for any microarray based assay. Accurate and efficient probe design and selection for the target sequence is critical in generating reliable and useful results. Several different approaches for probe design are reported in literature and an increasing number of bioinformatics tools are available for the same. However, based on the reported low accuracy, determining the hybridization efficiency of the probes is still a big computational challenge. Present study deals with the extraction of various novel features related to sequence composition, thermodynamics and secondary structure that may be essential for designing good probes. A feature selection method has been used to assess the relative importance of all these features. In this paper, we validate the importance of various features currently used for designing an oligonucleotide probe. Finally, a classification methodology is presented that can be used to predict the hybridization quality of a probe.",
"fno": "04761758",
"keywords": [
"Biology Computing",
"Feature Extraction",
"Pattern Classification",
"Thermodynamics",
"Classification Method",
"Microarray Probe Selection",
"Thermodynamics",
"Microarray Based Assay",
"Bioinformatics",
"Feature Extraction",
"Oligonucleotide Probe",
"Secondary Structure Parameter",
"Probes",
"Thermodynamics",
"Machine Learning",
"Artificial Neural Networks",
"Springs",
"Bioinformatics",
"Asia",
"Technological Innovation",
"Laboratories",
"Feature Extraction"
],
"authors": [
{
"affiliation": "Philips Research Asia - Bangalore, Philips Innovation Campus, Nagavara, India",
"fullName": "Lalit Gupta",
"givenName": "Lalit",
"surname": "Gupta",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Philips Research Asia - Bangalore, Philips Innovation Campus, Nagavara, India",
"fullName": "Sunil Kumar",
"givenName": "Sunil",
"surname": "Kumar",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Philips Research Asia - Bangalore, Philips Innovation Campus, Nagavara, India",
"fullName": "Randeep Singh",
"givenName": "Randeep",
"surname": "Singh",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Philips Research Asia - Bangalore, Philips Innovation Campus, Nagavara, India",
"fullName": "Rafi Shaik",
"givenName": "Rafi",
"surname": "Shaik",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Philips Research Asia - Bangalore, Philips Innovation Campus, Nagavara, India",
"fullName": "Nevenka Dimitrova",
"givenName": "Nevenka",
"surname": "Dimitrova",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Philips Research Asia - Bangalore, Philips Innovation Campus, Nagavara, India",
"fullName": "Aparna Gorthi",
"givenName": "Aparna",
"surname": "Gorthi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Cold Spring Harbor Laboratory: 1, Bungtown Road, NY 11724 USA",
"fullName": "B. Lakshmi",
"givenName": "B.",
"surname": "Lakshmi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Cold Spring Harbor Laboratory: 1, Bungtown Road, NY 11724 USA",
"fullName": "Deepa Pai",
"givenName": "Deepa",
"surname": "Pai",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Philips Research Asia - Bangalore, Philips Innovation Campus, Nagavara, India",
"fullName": "Sitharthan Kamalakaran",
"givenName": "Sitharthan",
"surname": "Kamalakaran",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Cold Spring Harbor Laboratory: 1, Bungtown Road, NY 11724 USA",
"fullName": "Xiaoyue Zhao",
"givenName": "Xiaoyue",
"surname": "Zhao",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Cold Spring Harbor Laboratory: 1, Bungtown Road, NY 11724 USA",
"fullName": "Michael Wigler",
"givenName": "Michael",
"surname": "Wigler",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2008-12-01T00:00:00",
"pubType": "proceedings",
"pages": "",
"year": "2008",
"issn": "1051-4651",
"isbn": "978-1-4244-2174-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "04761757",
"articleId": "12OmNBQ2VX8",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "04761759",
"articleId": "12OmNyQGS9H",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/bibm/2011/1799/0/06120464",
"title": "PRIMEGENSw3: A Web-Based Tool for High-Throughput Primer and Probe Design",
"doi": null,
"abstractUrl": "/proceedings-article/bibm/2011/06120464/12OmNqFJhzR",
"parentPublication": {
"id": "proceedings/bibm/2011/1799/0",
"title": "2011 IEEE International Conference on Bioinformatics and Biomedicine",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/lssa/2006/0277/0/04015799",
"title": "Estimating Oligo-nucleotide Microarray Expression by Hybridization Process Modelling",
"doi": null,
"abstractUrl": "/proceedings-article/lssa/2006/04015799/12OmNrMHOcf",
"parentPublication": {
"id": "proceedings/lssa/2006/0277/0",
"title": "2006 IEEE/NLM Life Science Systems and Applications Workshop",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/csb/2003/2000/0/20000084",
"title": "Group Testing With DNA Chips: Generating Designs and Decoding Experiments",
"doi": null,
"abstractUrl": "/proceedings-article/csb/2003/20000084/12OmNrMZpEZ",
"parentPublication": {
"id": "proceedings/csb/2003/2000/0",
"title": "Proceedings of the 2003 IEEE Bioinformatics Conference. CSB2003",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ipdps/2003/1926/0/19260154a",
"title": "Accurate Method for Fast Design of Diagnostic Oligonucleotide Probe Sets for DNA Microarrays",
"doi": null,
"abstractUrl": "/proceedings-article/ipdps/2003/19260154a/12OmNwNOaJU",
"parentPublication": {
"id": "proceedings/ipdps/2003/1926/0",
"title": "Parallel and Distributed Processing Symposium, International",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibmw/2011/1612/0/06112452",
"title": "Probe design optimization of HLA microarray: Data cleaning of probe signals from cDNA tiling microarray: Outlier detection, noise reduction, and identification of uninformative probes in HLA typing application",
"doi": null,
"abstractUrl": "/proceedings-article/bibmw/2011/06112452/12OmNxFaLEU",
"parentPublication": {
"id": "proceedings/bibmw/2011/1612/0",
"title": "2011 IEEE International Conference on Bioinformatics and Biomedicine Workshops (BIBMW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/biotechno/2008/3191/0/3191a137",
"title": "Design of Microarray Probes for Detection of Mutations",
"doi": null,
"abstractUrl": "/proceedings-article/biotechno/2008/3191a137/12OmNxWcHm2",
"parentPublication": {
"id": "proceedings/biotechno/2008/3191/0",
"title": "International Conference on Biocomputation, Bioinformatics, and Biomedical Technologies",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibmw/2010/8303/0/05703789",
"title": "A hybridization model for tiling array analysis",
"doi": null,
"abstractUrl": "/proceedings-article/bibmw/2010/05703789/12OmNyPQ4Nu",
"parentPublication": {
"id": "proceedings/bibmw/2010/8303/0",
"title": "2010 IEEE International Conference on Bioinformatics and Biomedicine Workshops (BIBMW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccad/2003/762/0/01257670",
"title": "Evaluation of placement techniques for DNA probe array layout",
"doi": null,
"abstractUrl": "/proceedings-article/iccad/2003/01257670/12OmNzTYC4b",
"parentPublication": {
"id": "proceedings/iccad/2003/762/0",
"title": "ICCAD-2003. International Conference on Computer Aided Design",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tb/2011/06/ttb2011061642",
"title": "Selecting Oligonucleotide Probes for Whole-Genome Tiling Arrays with a Cross-Hybridization Potential",
"doi": null,
"abstractUrl": "/journal/tb/2011/06/ttb2011061642/13rRUyhaIn9",
"parentPublication": {
"id": "trans/tb",
"title": "IEEE/ACM Transactions on Computational Biology and Bioinformatics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tb/2021/05/08928580",
"title": "APT: An Automated Probe Tracker From Gene Expression Data",
"doi": null,
"abstractUrl": "/journal/tb/2021/05/08928580/1gKuimeF2HS",
"parentPublication": {
"id": "trans/tb",
"title": "IEEE/ACM Transactions on Computational Biology and Bioinformatics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNvAiSpM",
"title": "International Conference on Software in Telecommunications and Computer Networks",
"acronym": "softcom",
"groupId": "1001541",
"volume": "0",
"displayVolume": "0",
"year": "2006",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNy3iFru",
"doi": "10.1109/SOFTCOM.2006.329717",
"title": "Usage Limitations of Field Strength Probes for RADHAZ Survey",
"normalizedTitle": "Usage Limitations of Field Strength Probes for RADHAZ Survey",
"abstract": "The radiation hazard (RADHAZ) survey is commonly done using field strength probes. These probes are inherently wideband and thus non-selective, what makes them a good choice for quick wideband surveys. On the other hand, this characteristic presents a problem when trying to discriminate signals in a multifrequency environment. The most commonly used diode-based probes have another significant drawback: some probes are very sensitive to the waveform of the measured signal. As shown by the measurement experiment described in this paper, the measurement result of such a probe can show overestimated value of the field strength, which is still usable for the worst-case RADHAZ analysis. Also, the experiment shows that such probes can show the underestimation of the field strength, which makes them unusable for RADHAZ survey. Knowing this, some guidelines on the probe usage and RADHAZ survey procedures are given",
"abstracts": [
{
"abstractType": "Regular",
"content": "The radiation hazard (RADHAZ) survey is commonly done using field strength probes. These probes are inherently wideband and thus non-selective, what makes them a good choice for quick wideband surveys. On the other hand, this characteristic presents a problem when trying to discriminate signals in a multifrequency environment. The most commonly used diode-based probes have another significant drawback: some probes are very sensitive to the waveform of the measured signal. As shown by the measurement experiment described in this paper, the measurement result of such a probe can show overestimated value of the field strength, which is still usable for the worst-case RADHAZ analysis. Also, the experiment shows that such probes can show the underestimation of the field strength, which makes them unusable for RADHAZ survey. Knowing this, some guidelines on the probe usage and RADHAZ survey procedures are given",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The radiation hazard (RADHAZ) survey is commonly done using field strength probes. These probes are inherently wideband and thus non-selective, what makes them a good choice for quick wideband surveys. On the other hand, this characteristic presents a problem when trying to discriminate signals in a multifrequency environment. The most commonly used diode-based probes have another significant drawback: some probes are very sensitive to the waveform of the measured signal. As shown by the measurement experiment described in this paper, the measurement result of such a probe can show overestimated value of the field strength, which is still usable for the worst-case RADHAZ analysis. Also, the experiment shows that such probes can show the underestimation of the field strength, which makes them unusable for RADHAZ survey. Knowing this, some guidelines on the probe usage and RADHAZ survey procedures are given",
"fno": "04129871",
"keywords": [
"Worst Case RADHAZ Analysis",
"RADHAZ Survey",
"Usage Limitations",
"Field Strength Probes",
"Radiation Hazard Survey",
"Diode Based Probes"
],
"authors": [
{
"affiliation": "FESB, Univ. of Split",
"fullName": "A. Sarolic",
"givenName": "A.",
"surname": "Sarolic",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "softcom",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2006-09-01T00:00:00",
"pubType": "proceedings",
"pages": "43-47",
"year": "2006",
"issn": null,
"isbn": "953-6114-90-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "04129870",
"articleId": "12OmNxdm4Br",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "04129872",
"articleId": "12OmNyvGylj",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/esem/2012/1056/0/06475408",
"title": "Survey on agile and lean usage in finnish software industry",
"doi": null,
"abstractUrl": "/proceedings-article/esem/2012/06475408/12OmNBrlPAo",
"parentPublication": {
"id": "proceedings/esem/2012/1056/0",
"title": "Proceedings of the 2012 ACM-IEEE International Symposium on Empirical Software Engineering and Measurement",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/issrew/2017/2387/0/2387a009",
"title": "Reliable Inspection of an Autonomous System At System Runtime with Built-in Data Probes",
"doi": null,
"abstractUrl": "/proceedings-article/issrew/2017/2387a009/12OmNwHhoZy",
"parentPublication": {
"id": "proceedings/issrew/2017/2387/0",
"title": "2017 IEEE International Symposium on Software Reliability Engineering Workshops (ISSREW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/biotechno/2008/3191/0/3191a137",
"title": "Design of Microarray Probes for Detection of Mutations",
"doi": null,
"abstractUrl": "/proceedings-article/biotechno/2008/3191a137/12OmNxWcHm2",
"parentPublication": {
"id": "proceedings/biotechno/2008/3191/0",
"title": "International Conference on Biocomputation, Bioinformatics, and Biomedical Technologies",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/scam/2013/5739/0/06648187",
"title": "Tracing with a minimal number of probes",
"doi": null,
"abstractUrl": "/proceedings-article/scam/2013/06648187/12OmNxveNFf",
"parentPublication": {
"id": "proceedings/scam/2013/5739/0",
"title": "2013 IEEE 13th International Working Conference on Source Code Analysis and Manipulation (SCAM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/test/1989/9999/0/00082296",
"title": "An analysis of tungsten probes' effect on yield in a production wafer probe environment",
"doi": null,
"abstractUrl": "/proceedings-article/test/1989/00082296/12OmNyv7mlu",
"parentPublication": {
"id": "proceedings/test/1989/9999/0",
"title": "1989 International Test Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/itc/2005/9038/0/01584024",
"title": "Bead probes in practice",
"doi": null,
"abstractUrl": "/proceedings-article/itc/2005/01584024/12OmNzd7bFS",
"parentPublication": {
"id": "proceedings/itc/2005/9038/0",
"title": "IEEE International Conference on Test, 2005.",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/test/1990/9064/0/00114109",
"title": "Improving wafer sort yields with radius-tip probes",
"doi": null,
"abstractUrl": "/proceedings-article/test/1990/00114109/12OmNzh5z1h",
"parentPublication": {
"id": "proceedings/test/1990/9064/0",
"title": "1990 International Test Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpads/2014/7615/0/07097880",
"title": "An efficient detection scheme for urban traffic condition using volunteer probes",
"doi": null,
"abstractUrl": "/proceedings-article/icpads/2014/07097880/12OmNzhnaef",
"parentPublication": {
"id": "proceedings/icpads/2014/7615/0",
"title": "2014 20th IEEE International Conference on Parallel and Distributed Systems (ICPADS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tb/2011/06/ttb2011061642",
"title": "Selecting Oligonucleotide Probes for Whole-Genome Tiling Arrays with a Cross-Hybridization Potential",
"doi": null,
"abstractUrl": "/journal/tb/2011/06/ttb2011061642/13rRUyhaIn9",
"parentPublication": {
"id": "trans/tb",
"title": "IEEE/ACM Transactions on Computational Biology and Bioinformatics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/percom-workshops/2019/9151/0/08730703",
"title": "HuMAn: Human Movement Analytics via WiFi Probes",
"doi": null,
"abstractUrl": "/proceedings-article/percom-workshops/2019/08730703/1aDSNmC9zws",
"parentPublication": {
"id": "proceedings/percom-workshops/2019/9151/0",
"title": "2019 IEEE International Conference on Pervasive Computing and Communications Workshops (PerCom Workshops)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNwpGgL8",
"title": "ICCAD-2005 International Conference on Computer Aided Design",
"acronym": "iccad",
"groupId": "1000151",
"volume": "0",
"displayVolume": "0",
"year": "2005",
"__typename": "ProceedingType"
},
"article": {
"id": "1fHHmbpZoT6",
"doi": "10.1109/ICCAD.2005.1560139",
"title": "A multi-harmonic probe technique for computing oscillator steady states",
"normalizedTitle": "A multi-harmonic probe technique for computing oscillator steady states",
"abstract": "We present a novel method for finding periodic steady states of general classes of oscillators robustly. The new method, which we term the multi-harmonic probe (MHP) technique, generalizes the well-known technique of augmenting harmonic balance (HB) for oscillators using an external probe. By using non-sinusoidal periodic probes, MHP enhances the applicability of the standard probe method (which uses purely sinusoidal probes) to broader classes of oscillators. We thus obtain a general and robust method for the periodic steady state of any kind of oscillator. Results on LC and ring oscillator circuits are presented that testify to the efficacy of our approach.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We present a novel method for finding periodic steady states of general classes of oscillators robustly. The new method, which we term the multi-harmonic probe (MHP) technique, generalizes the well-known technique of augmenting harmonic balance (HB) for oscillators using an external probe. By using non-sinusoidal periodic probes, MHP enhances the applicability of the standard probe method (which uses purely sinusoidal probes) to broader classes of oscillators. We thus obtain a general and robust method for the periodic steady state of any kind of oscillator. Results on LC and ring oscillator circuits are presented that testify to the efficacy of our approach.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We present a novel method for finding periodic steady states of general classes of oscillators robustly. The new method, which we term the multi-harmonic probe (MHP) technique, generalizes the well-known technique of augmenting harmonic balance (HB) for oscillators using an external probe. By using non-sinusoidal periodic probes, MHP enhances the applicability of the standard probe method (which uses purely sinusoidal probes) to broader classes of oscillators. We thus obtain a general and robust method for the periodic steady state of any kind of oscillator. Results on LC and ring oscillator circuits are presented that testify to the efficacy of our approach.",
"fno": "01560139",
"keywords": [
"Oscillators",
"Network Analysis",
"Probes",
"Multiharmonic Probe",
"Oscillator Steady States",
"Harmonic Balance",
"Nonsinusoidal Periodic Probe",
"Standard Probe Method",
"LC Oscillator Circuit",
"Ring Oscillator Circuit",
"Probes",
"Circuits",
"Injection Locked Oscillators",
"Nonlinear Equations",
"Voltage",
"Ring Oscillators",
"Frequency Conversion",
"Voltage Controlled Oscillators",
"Resistors",
"Jacobian Matrices"
],
"authors": [
{
"affiliation": "Dept. of Electr. & Comput. Eng., Minnesota Univ., Minneapolis, MN, USA",
"fullName": "K.D. Boianapally",
"givenName": "K.D.",
"surname": "Boianapally",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Dept. of Electr. & Comput. Eng., Minnesota Univ., Minneapolis, MN, USA",
"fullName": "Ting Mei",
"givenName": null,
"surname": "Ting Mei",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Dept. of Electr. & Comput. Eng., Minnesota Univ., Minneapolis, MN, USA",
"fullName": "J. Roychowdhury",
"givenName": "J.",
"surname": "Roychowdhury",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iccad",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2005-01-01T00:00:00",
"pubType": "proceedings",
"pages": "610-613",
"year": "2005",
"issn": "1092-3152",
"isbn": "0-7803-9254-X",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "9254604",
"articleId": "12OmNxYL5bE",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "9254614",
"articleId": "12OmNAWH9IV",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ssst/2005/8808/0/01460915",
"title": "Robust sliding mode harmonic oscillator suitable for low frequencies",
"doi": null,
"abstractUrl": "/proceedings-article/ssst/2005/01460915/12OmNApu5xg",
"parentPublication": {
"id": "proceedings/ssst/2005/8808/0",
"title": "Proceedings of the Thirty-Seventh Southeastern Symposium on System Theory (SSST05)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icat/2015/8146/0/07340508",
"title": "A numerically controlled oscillator based on De Moivre's identity and linear approximation: A ROMless NCO",
"doi": null,
"abstractUrl": "/proceedings-article/icat/2015/07340508/12OmNBRsVxu",
"parentPublication": {
"id": "proceedings/icat/2015/8146/0",
"title": "2015 XXV International Conference on Information, Communication and Automation Technologies (ICAT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/date/2012/2145/0/06176677",
"title": "Analysis and design of sub-harmonically injection locked oscillators",
"doi": null,
"abstractUrl": "/proceedings-article/date/2012/06176677/12OmNC2fGqT",
"parentPublication": {
"id": "proceedings/date/2012/2145/0",
"title": "Design, Automation & Test in Europe Conference & Exhibition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aspdac/2004/8175/0/01337641",
"title": "A performance comparison of PLLs for clock generation using ring oscillator VCO and LC oscillator in a digital CMOS process",
"doi": null,
"abstractUrl": "/proceedings-article/aspdac/2004/01337641/12OmNvjgWwn",
"parentPublication": {
"id": "proceedings/aspdac/2004/8175/0",
"title": "ASP-DAC 2004: Asia and South Pacific Design Automation Conference 2004",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccad/2006/3891/0/04110187",
"title": "PPV-HB: Harmonic Balance for Oscillator/PLL Phase Macromodels",
"doi": null,
"abstractUrl": "/proceedings-article/iccad/2006/04110187/12OmNxbW4Pv",
"parentPublication": {
"id": "proceedings/iccad/2006/3891/0",
"title": "Computer-Aided Design, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isvlsid/2007/2896/0/28960467",
"title": "A High Swing Low Power CMOS Differential Voltage-Controlled Ring Oscillator",
"doi": null,
"abstractUrl": "/proceedings-article/isvlsid/2007/28960467/12OmNz3bdM7",
"parentPublication": {
"id": "proceedings/isvlsid/2007/2896/0",
"title": "IEEE Computer Society Annual Symposium on VLSI (ISVLSI '07)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/asp-dac/2004/2543/0/01337641",
"title": "A performance comparison of PLLs for clock generation using ring oscillator VCO and LC oscillator in a digital CMOS process",
"doi": null,
"abstractUrl": "/proceedings-article/asp-dac/2004/01337641/12OmNzCWG76",
"parentPublication": {
"id": "proceedings/asp-dac/2004/2543/0",
"title": "Asia and South Pacific Design Automation Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/date/2008/3/0/04484699",
"title": "Analysis of Oscillator Injection Locking by Harmonic Balance Method",
"doi": null,
"abstractUrl": "/proceedings-article/date/2008/04484699/12OmNzVGcF2",
"parentPublication": {
"id": "proceedings/date/2008/3/0",
"title": "Design, Automation & Test in Europe. DATE'08",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/si/2020/07/09093963",
"title": "Low-Supply Sensitivity <italic>LC</italic> VCOs With Complementary Varactors",
"doi": null,
"abstractUrl": "/journal/si/2020/07/09093963/1jP8CWIUDpm",
"parentPublication": {
"id": "trans/si",
"title": "IEEE Transactions on Very Large Scale Integration (VLSI) Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icitbs/2021/4854/0/485400a342",
"title": "The Design and Implementation of an Oscillator Chip with Negative Resistance",
"doi": null,
"abstractUrl": "/proceedings-article/icitbs/2021/485400a342/1wB6XA69WUM",
"parentPublication": {
"id": "proceedings/icitbs/2021/4854/0",
"title": "2021 International Conference on Intelligent Transportation, Big Data & Smart City (ICITBS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1jIx7fmpQ9a",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1jIxdrlqQuc",
"doi": "10.1109/VR46266.2020.00111",
"title": "Outdoor Sound Propagation Based on Adaptive FDTD-PE",
"normalizedTitle": "Outdoor Sound Propagation Based on Adaptive FDTD-PE",
"abstract": "In outdoor scenes, the inhomogeneity of the atmosphere and the ground effect have a great impact on sound propagation, but these two effects are usually ignored in previous methods. We propose an adaptive FDTD-PE method to simulate sound propagation in 3D scenes taking into account atmospheric inhomogeneity and the ground effect to produce more realistic sound propagation results. In the simulation, the ground is considered as a porous medium with a certain thickness. The scene is categorized into a number of two-dimensional vertical ground planes in the three-dimensional cylindrical coordinate system. These planes are decomposed into the near-source complex regions and the far-source regions, which are solved by the FDTD solver and the parabolic equation (PE) solver, respectively. Furthermore, a novel encoding method was designed to process sound pressure data. In the far-source regions, the one-way sound propagation is only affected by the ground and atmosphere inhomogeneity, so we encode sound pressure data through function fitting. Finally, an efficient sound rendering method with this encoding representation is developed to perform auralization in the frequency-domain. We validated our method in various outdoor scenes, and the results indicate that our method can realistically simulate outdoor sound propagation, with quite higher speed and lower storage.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In outdoor scenes, the inhomogeneity of the atmosphere and the ground effect have a great impact on sound propagation, but these two effects are usually ignored in previous methods. We propose an adaptive FDTD-PE method to simulate sound propagation in 3D scenes taking into account atmospheric inhomogeneity and the ground effect to produce more realistic sound propagation results. In the simulation, the ground is considered as a porous medium with a certain thickness. The scene is categorized into a number of two-dimensional vertical ground planes in the three-dimensional cylindrical coordinate system. These planes are decomposed into the near-source complex regions and the far-source regions, which are solved by the FDTD solver and the parabolic equation (PE) solver, respectively. Furthermore, a novel encoding method was designed to process sound pressure data. In the far-source regions, the one-way sound propagation is only affected by the ground and atmosphere inhomogeneity, so we encode sound pressure data through function fitting. Finally, an efficient sound rendering method with this encoding representation is developed to perform auralization in the frequency-domain. We validated our method in various outdoor scenes, and the results indicate that our method can realistically simulate outdoor sound propagation, with quite higher speed and lower storage.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In outdoor scenes, the inhomogeneity of the atmosphere and the ground effect have a great impact on sound propagation, but these two effects are usually ignored in previous methods. We propose an adaptive FDTD-PE method to simulate sound propagation in 3D scenes taking into account atmospheric inhomogeneity and the ground effect to produce more realistic sound propagation results. In the simulation, the ground is considered as a porous medium with a certain thickness. The scene is categorized into a number of two-dimensional vertical ground planes in the three-dimensional cylindrical coordinate system. These planes are decomposed into the near-source complex regions and the far-source regions, which are solved by the FDTD solver and the parabolic equation (PE) solver, respectively. Furthermore, a novel encoding method was designed to process sound pressure data. In the far-source regions, the one-way sound propagation is only affected by the ground and atmosphere inhomogeneity, so we encode sound pressure data through function fitting. Finally, an efficient sound rendering method with this encoding representation is developed to perform auralization in the frequency-domain. We validated our method in various outdoor scenes, and the results indicate that our method can realistically simulate outdoor sound propagation, with quite higher speed and lower storage.",
"fno": "09089553",
"keywords": [
"Atmospheric Modeling",
"Acoustics",
"Finite Difference Methods",
"Nonhomogeneous Media",
"Time Domain Analysis",
"Solid Modeling",
"Adaptation Models",
"Applied Computing",
"Arts And Humanities",
"Sound And Music Computing",
"Computing Methodologies",
"Computer Graphics",
"Animation"
],
"authors": [
{
"affiliation": "Tianjin University,Division of Intelligence and Computing,P.R. China",
"fullName": "Shiguang Liu",
"givenName": "Shiguang",
"surname": "Liu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Tianjin University,Division of Intelligence and Computing,P.R. China",
"fullName": "Jin Liu",
"givenName": "Jin",
"surname": "Liu",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-03-01T00:00:00",
"pubType": "proceedings",
"pages": "859-867",
"year": "2020",
"issn": null,
"isbn": "978-1-7281-5608-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "09089483",
"articleId": "1jIx9FEs132",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09089578",
"articleId": "1jIxfimnIaY",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/sive/2014/5781/0/07006289",
"title": "Wave-based sound propagation for VR applications",
"doi": null,
"abstractUrl": "/proceedings-article/sive/2014/07006289/12OmNAXglTR",
"parentPublication": {
"id": "proceedings/sive/2014/5781/0",
"title": "2014 IEEE VR Workshop: Sonic Interaction in Virtual Environments (SIVE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hpcmp-ugc/2007/3088/0/30880247",
"title": "Verification of Acoustic Propagation Over Natural and Synthetic Terrain",
"doi": null,
"abstractUrl": "/proceedings-article/hpcmp-ugc/2007/30880247/12OmNrnJ6Lg",
"parentPublication": {
"id": "proceedings/hpcmp-ugc/2007/3088/0",
"title": "HPCMP Users Group Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/04/08307458",
"title": "Diffraction Kernels for Interactive Sound Propagation in Dynamic Environments",
"doi": null,
"abstractUrl": "/journal/tg/2018/04/08307458/13rRUwh80Hk",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/04/07384541",
"title": "SynCoPation: Interactive Synthesis-Coupled Sound Propagation",
"doi": null,
"abstractUrl": "/journal/tg/2016/04/07384541/13rRUx0xPIM",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/04/ttg2013040567",
"title": "Aural Proxies and Directionally-Varying Reverberation for Interactive Sound Propagation in Virtual Environments",
"doi": null,
"abstractUrl": "/journal/tg/2013/04/ttg2013040567/13rRUxD9gXG",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/11/07360212",
"title": "Tracing Analytic Ray Curves for Light and Sound Propagation in Non-Linear Media",
"doi": null,
"abstractUrl": "/journal/tg/2016/11/07360212/13rRUxYIN4c",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2008/06/ttg2008061707",
"title": "AD-Frustum: Adaptive Frustum Tracing for Interactive Sound Propagation",
"doi": null,
"abstractUrl": "/journal/tg/2008/06/ttg2008061707/13rRUygT7mO",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2015/04/07014276",
"title": "WAVE: Interactive Wave-based Sound Propagation for Virtual Environments",
"doi": null,
"abstractUrl": "/journal/tg/2015/04/07014276/13rRUygT7yf",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/04/ttg201404495",
"title": "Source and Listener Directivity for Interactive Wave-Based Sound Propagation",
"doi": null,
"abstractUrl": "/journal/tg/2014/04/ttg201404495/13rRUyogGAb",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/05/08642450",
"title": "Adaptive Sampling for Sound Propagation",
"doi": null,
"abstractUrl": "/journal/tg/2019/05/08642450/17PYEkUaUec",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNC8uRrH",
"title": "2010 Second International Conference on Modeling, Simulation and Visualization Methods (WMSVM 2010)",
"acronym": "wmsvm",
"groupId": "1800122",
"volume": "0",
"displayVolume": "0",
"year": "2010",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNy3Agyt",
"doi": "10.1109/WMSVM.2010.25",
"title": "BESO of Structures with Constrained Damping",
"normalizedTitle": "BESO of Structures with Constrained Damping",
"abstract": "Optimization of structures with constrained layer damping by bi-directional evolutionary structural optimization???BESO???was studied in this paper. The sensitivity equation of the objective function about deleted and added elements was built for maximization of modal loss factor of constrained layer damping structure. Firstly the FEA model of the metal structure without constrained layer damping are built, and the mode shapes are calculated. Secondly substitute the mode shapes into the sensitivity formula, to calculate the sensitivity of the elements of the metal structure with supposed constrained damping. Thirdly sort to find the element with negative or minimum modulus sensitivity, and stick the material to where those elements are to initialize the optimization configuration. BESO then was import into the optimization base on the initial optimization configuration. If a pair of deleted and added elements increase the certain stage modal damping ratio, the pair then should be implemented until no such pairs, then the optimal layout of the constrained layer damping materials we get. The mass added should be equal to the deleted to assure the mass of the constrained layer damping material. The example shows that BESO can get the optimal layout of the given mass of damping materials to maximize the certain stage loss factor and save 90% time compared to ESO. The reasonable result demonstrates the effectiveness and engineering value of the method.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Optimization of structures with constrained layer damping by bi-directional evolutionary structural optimization???BESO???was studied in this paper. The sensitivity equation of the objective function about deleted and added elements was built for maximization of modal loss factor of constrained layer damping structure. Firstly the FEA model of the metal structure without constrained layer damping are built, and the mode shapes are calculated. Secondly substitute the mode shapes into the sensitivity formula, to calculate the sensitivity of the elements of the metal structure with supposed constrained damping. Thirdly sort to find the element with negative or minimum modulus sensitivity, and stick the material to where those elements are to initialize the optimization configuration. BESO then was import into the optimization base on the initial optimization configuration. If a pair of deleted and added elements increase the certain stage modal damping ratio, the pair then should be implemented until no such pairs, then the optimal layout of the constrained layer damping materials we get. The mass added should be equal to the deleted to assure the mass of the constrained layer damping material. The example shows that BESO can get the optimal layout of the given mass of damping materials to maximize the certain stage loss factor and save 90% time compared to ESO. The reasonable result demonstrates the effectiveness and engineering value of the method.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Optimization of structures with constrained layer damping by bi-directional evolutionary structural optimization???BESO???was studied in this paper. The sensitivity equation of the objective function about deleted and added elements was built for maximization of modal loss factor of constrained layer damping structure. Firstly the FEA model of the metal structure without constrained layer damping are built, and the mode shapes are calculated. Secondly substitute the mode shapes into the sensitivity formula, to calculate the sensitivity of the elements of the metal structure with supposed constrained damping. Thirdly sort to find the element with negative or minimum modulus sensitivity, and stick the material to where those elements are to initialize the optimization configuration. BESO then was import into the optimization base on the initial optimization configuration. If a pair of deleted and added elements increase the certain stage modal damping ratio, the pair then should be implemented until no such pairs, then the optimal layout of the constrained layer damping materials we get. The mass added should be equal to the deleted to assure the mass of the constrained layer damping material. The example shows that BESO can get the optimal layout of the given mass of damping materials to maximize the certain stage loss factor and save 90% time compared to ESO. The reasonable result demonstrates the effectiveness and engineering value of the method.",
"fno": "05558288",
"keywords": [
"Damping",
"Evolutionary Computation",
"Finite Element Analysis",
"Sensitivity Analysis",
"Constrained Layer Damping Material",
"Bi Directional Evolutionary Structural Optimization",
"BESO",
"Sensitivity Equation",
"Objective Function",
"Maximization",
"Modal Loss Factor",
"FEA Model",
"Metal Structure",
"Mode Shapes",
"Sensitivity Formula",
"Modulus Sensitivity",
"Modal Damping Ratio",
"Certain Stage Loss Factor",
"Damping",
"Optimization",
"Materials",
"Sensitivity",
"Layout",
"Vibrations",
"Shape",
"BESO",
"Constrained Damping",
"Sensitivity",
"Optimization"
],
"authors": [
{
"affiliation": null,
"fullName": "Zhiying Mao",
"givenName": "Zhiying",
"surname": "Mao",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Guoping Chen",
"givenName": "Guoping",
"surname": "Chen",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "wmsvm",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2010-05-01T00:00:00",
"pubType": "proceedings",
"pages": "164-167",
"year": "2010",
"issn": null,
"isbn": "978-1-4244-7077-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "05558287",
"articleId": "12OmNCvumSX",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "05558285",
"articleId": "12OmNC4eSxW",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icdma/2011/4455/0/4455a339",
"title": "Damping Characteristic of Composite Material with Periodic Micro-Tetrahedron Structures",
"doi": null,
"abstractUrl": "/proceedings-article/icdma/2011/4455a339/12OmNB836Om",
"parentPublication": {
"id": "proceedings/icdma/2011/4455/0",
"title": "2011 Second International Conference on Digital Manufacturing & Automation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hicss/2011/9618/0/05718525",
"title": "Use of Modal Sensitivity to Operating Conditions for Damping Control in Power Systems",
"doi": null,
"abstractUrl": "/proceedings-article/hicss/2011/05718525/12OmNqIzh2P",
"parentPublication": {
"id": "proceedings/hicss/2011/9618/0",
"title": "2011 44th Hawaii International Conference on System Sciences",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ecmsm/2017/5582/0/07945874",
"title": "Self-adapting control structure for active damping of the servomechanism residual vibration",
"doi": null,
"abstractUrl": "/proceedings-article/ecmsm/2017/07945874/12OmNs0C9Cc",
"parentPublication": {
"id": "proceedings/ecmsm/2017/5582/0",
"title": "2017 IEEE International Workshop of Electronics, Control, Measurement, Signals and their Application to Mechatronics (ECMSM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icisce/2017/3013/0/3013b258",
"title": "Study on Elastic SDOF System Viscous Damping Coefficient Based on Energy Balance Principle",
"doi": null,
"abstractUrl": "/proceedings-article/icisce/2017/3013b258/12OmNxwncsU",
"parentPublication": {
"id": "proceedings/icisce/2017/3013/0",
"title": "2017 4th International Conference on Information Science and Control Engineering (ICISCE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdma/2013/5016/0/5016a397",
"title": "Damping Control Study on Concrete Frame Structure with Viscous Damping Walls",
"doi": null,
"abstractUrl": "/proceedings-article/icdma/2013/5016a397/12OmNz5JBWC",
"parentPublication": {
"id": "proceedings/icdma/2013/5016/0",
"title": "2013 Fourth International Conference on Digital Manufacturing & Automation (ICDMA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isdea/2010/4212/1/4212a683",
"title": "Estimation of Damping Matrices of Cable-Stayed Bridges Based on Known Data of Modal Damping",
"doi": null,
"abstractUrl": "/proceedings-article/isdea/2010/4212a683/12OmNzmclUx",
"parentPublication": {
"id": null,
"title": null,
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvisp/2021/0770/0/077000a118",
"title": "Comparison of Wake Oscillator Models with Different Damping Terms",
"doi": null,
"abstractUrl": "/proceedings-article/icvisp/2021/077000a118/1APq9yfO3U4",
"parentPublication": {
"id": "proceedings/icvisp/2021/0770/0",
"title": "2021 5th International Conference on Vision, Image and Signal Processing (ICVISP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmiae/2022/7396/0/739600a011",
"title": "Research on Composite Vibration Damping System Based on Adaptive Gasket",
"doi": null,
"abstractUrl": "/proceedings-article/icmiae/2022/739600a011/1JgrIFnk1nW",
"parentPublication": {
"id": "proceedings/icmiae/2022/7396/0",
"title": "2022 International Conference on Manufacturing, Industrial Automation and Electronics (ICMIAE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ifeea/2021/0088/0/008800a284",
"title": "Effect of stiffness and damping of spring on power flow in a vibration system",
"doi": null,
"abstractUrl": "/proceedings-article/ifeea/2021/008800a284/1LHcFEY4EuY",
"parentPublication": {
"id": "proceedings/ifeea/2021/0088/0",
"title": "2021 8th International Forum on Electrical Engineering and Automation (IFEEA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ifeea/2020/9627/0/962700a549",
"title": "Vibration characteristics analysis and treatment location optimization of complex cavity with porous media and constrained layer damping",
"doi": null,
"abstractUrl": "/proceedings-article/ifeea/2020/962700a549/1rvCHm3sAWk",
"parentPublication": {
"id": "proceedings/ifeea/2020/9627/0",
"title": "2020 7th International Forum on Electrical Engineering and Automation (IFEEA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": null,
"article": {
"id": "12OmNzmclUx",
"doi": "10.1109/ISDEA.2010.27",
"title": "Estimation of Damping Matrices of Cable-Stayed Bridges Based on Known Data of Modal Damping",
"normalizedTitle": "Estimation of Damping Matrices of Cable-Stayed Bridges Based on Known Data of Modal Damping",
"abstract": "The composition of damping in cable-stayed bridges is complicated. It is difficult to determine the damping matrices precisely. A method to calculate the estimation of damping matrices of cable-stayed bridges by known damping ratios is proposed in this paper. Based on the assumption that the damping coefficients of substructures (i.e. Girder, tower, cable, support etc.) in cable-stayed bridges don't change with frequency or amplitude of vibration, the complex eigenvector and natural frequency of cable-stayed bridges are derived by non-proportional damping state-space method. The results have high precision, which fulfill the requirement of dynamic calculation of structures. The damping matrices have more information than modal damping ratios, which has wide application in reality.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The composition of damping in cable-stayed bridges is complicated. It is difficult to determine the damping matrices precisely. A method to calculate the estimation of damping matrices of cable-stayed bridges by known damping ratios is proposed in this paper. Based on the assumption that the damping coefficients of substructures (i.e. Girder, tower, cable, support etc.) in cable-stayed bridges don't change with frequency or amplitude of vibration, the complex eigenvector and natural frequency of cable-stayed bridges are derived by non-proportional damping state-space method. The results have high precision, which fulfill the requirement of dynamic calculation of structures. The damping matrices have more information than modal damping ratios, which has wide application in reality.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The composition of damping in cable-stayed bridges is complicated. It is difficult to determine the damping matrices precisely. A method to calculate the estimation of damping matrices of cable-stayed bridges by known damping ratios is proposed in this paper. Based on the assumption that the damping coefficients of substructures (i.e. Girder, tower, cable, support etc.) in cable-stayed bridges don't change with frequency or amplitude of vibration, the complex eigenvector and natural frequency of cable-stayed bridges are derived by non-proportional damping state-space method. The results have high precision, which fulfill the requirement of dynamic calculation of structures. The damping matrices have more information than modal damping ratios, which has wide application in reality.",
"fno": "4212a683",
"keywords": [
"Vibrations",
"Bridges Structures",
"Damping",
"Eigenvalues And Eigenfunctions",
"Modal Analysis",
"State Space Methods",
"Nonproportional Damping State Space Method",
"Damping Matrices Estimation",
"Cable Stayed Bridges",
"Modal Damping",
"Damping Ratios",
"Substructures",
"Vibration",
"Complex Eigenvector",
"Natural Frequency",
"Damping",
"Mathematical Model",
"Equations",
"Bridges",
"Shape",
"Vibrations",
"Structural Beams",
"Non Proportional Damping",
"Cable Stayed Bridge",
"Damping Identification"
],
"authors": [],
"idPrefix": "isdea",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2010-10-01T00:00:00",
"pubType": "proceedings",
"pages": "683-688",
"year": "2010",
"issn": null,
"isbn": "978-0-7695-4212-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "4212a680",
"articleId": "12OmNAoUTk3",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "4212a689",
"articleId": "12OmNvkpl72",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icic/2010/4047/3/4047c013",
"title": "Geometrical Nonlinearities on the Static Analysis of Three Pylons Composite Girder Cable-stayed Bridge",
"doi": null,
"abstractUrl": "/proceedings-article/icic/2010/4047c013/12OmNAo45LF",
"parentPublication": {
"id": "proceedings/icic/2010/4047/3",
"title": "2010 Third International Conference on Information and Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccis/2010/4270/0/4270b166",
"title": "Comparison and Analysis of Aseismic Performance of Different Systems of Cable-Stayed Bridge",
"doi": null,
"abstractUrl": "/proceedings-article/iccis/2010/4270b166/12OmNBlFR1X",
"parentPublication": {
"id": "proceedings/iccis/2010/4270/0",
"title": "2010 International Conference on Computational and Information Sciences",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdma/2011/4455/0/4455b317",
"title": "The Research on Cable Broken Wires Inspection for a Cable Stayed Bridge by Discrete Wavelet Transformation",
"doi": null,
"abstractUrl": "/proceedings-article/icdma/2011/4455b317/12OmNscxj5S",
"parentPublication": {
"id": "proceedings/icdma/2011/4455/0",
"title": "2011 Second International Conference on Digital Manufacturing & Automation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icicta/2010/4077/1/4077a651",
"title": "Analysis of Dynamic Performance of Cable-Stayed Bridges with CFRP Cables",
"doi": null,
"abstractUrl": "/proceedings-article/icicta/2010/4077a651/12OmNwErpOB",
"parentPublication": {
"id": "proceedings/icicta/2010/4077/1",
"title": "Intelligent Computation Technology and Automation, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icece/2010/4031/0/4031a471",
"title": "Application of Kalman's Filtering Method in Construction Control for Cable Replacement of the Cable-Stayed Bridge",
"doi": null,
"abstractUrl": "/proceedings-article/icece/2010/4031a471/12OmNwbLVsJ",
"parentPublication": {
"id": "proceedings/icece/2010/4031/0",
"title": "Electrical and Control Engineering, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccis/2010/4270/0/4270b190",
"title": "The Application of BP Neural Network in Cable-Stayed Bridge Construction Monitoring",
"doi": null,
"abstractUrl": "/proceedings-article/iccis/2010/4270b190/12OmNxRnvP8",
"parentPublication": {
"id": "proceedings/iccis/2010/4270/0",
"title": "2010 International Conference on Computational and Information Sciences",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/itng/2009/3596/0/3596b701",
"title": "Study on the Parameter Vibration of the Large-Span Cable-Stayed Bridges",
"doi": null,
"abstractUrl": "/proceedings-article/itng/2009/3596b701/12OmNxecS8w",
"parentPublication": {
"id": "proceedings/itng/2009/3596/0",
"title": "Information Technology: New Generations, Third International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ccie/2010/4026/2/4026b426",
"title": "Study on Cable-Stayed Bridge Flutter Active Control by a Single Group of ADM",
"doi": null,
"abstractUrl": "/proceedings-article/ccie/2010/4026b426/12OmNyQGSgT",
"parentPublication": {
"id": "proceedings/ccie/2010/4026/2",
"title": "Computing, Control and Industrial Engineering, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icscse/2018/1366/0/08705544",
"title": "Retracted: Research on Estimation Method of Elastic Cable Stiffness of Cable-Stayed Bridge",
"doi": null,
"abstractUrl": "/proceedings-article/icscse/2018/08705544/19RShbntWYE",
"parentPublication": {
"id": "proceedings/icscse/2018/1366/0",
"title": "2018 3rd International Conference on Smart City and Systems Engineering (ICSCSE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icitbs/2020/6698/0/669800a281",
"title": "Energy-Based Seismic Design of Viscous Dampers for Long Span Bridges",
"doi": null,
"abstractUrl": "/proceedings-article/icitbs/2020/669800a281/1kuHOP8O6GI",
"parentPublication": {
"id": "proceedings/icitbs/2020/6698/0",
"title": "2020 International Conference on Intelligent Transportation, Big Data & Smart City (ICITBS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1JgrDl33Nv2",
"title": "2022 International Conference on Manufacturing, Industrial Automation and Electronics (ICMIAE)",
"acronym": "icmiae",
"groupId": "9980888",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1JgrIFnk1nW",
"doi": "10.1109/ICMIAE57032.2022.00010",
"title": "Research on Composite Vibration Damping System Based on Adaptive Gasket",
"normalizedTitle": "Research on Composite Vibration Damping System Based on Adaptive Gasket",
"abstract": "An adaptive small-scale mechanical vibration damping system based on hydraulic gasket is designed in this project. Its main function is to realize the active damping of small moving machinery and the self-adaptive automatic centering of shafting machinery. The system is mainly composed of hydraulic gasket, controller, sensor and external power source. The vibration damping device is small in size, suitable for use in small power machinery, has low cost, and has better applicability than the existing system. The vibration damping platform is designed with a unique hydraulic gasket based on a small power machine, and an active and passive composite vibration damping system is constructed. It has guiding significance for the vibration roduction design of machinery.",
"abstracts": [
{
"abstractType": "Regular",
"content": "An adaptive small-scale mechanical vibration damping system based on hydraulic gasket is designed in this project. Its main function is to realize the active damping of small moving machinery and the self-adaptive automatic centering of shafting machinery. The system is mainly composed of hydraulic gasket, controller, sensor and external power source. The vibration damping device is small in size, suitable for use in small power machinery, has low cost, and has better applicability than the existing system. The vibration damping platform is designed with a unique hydraulic gasket based on a small power machine, and an active and passive composite vibration damping system is constructed. It has guiding significance for the vibration roduction design of machinery.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "An adaptive small-scale mechanical vibration damping system based on hydraulic gasket is designed in this project. Its main function is to realize the active damping of small moving machinery and the self-adaptive automatic centering of shafting machinery. The system is mainly composed of hydraulic gasket, controller, sensor and external power source. The vibration damping device is small in size, suitable for use in small power machinery, has low cost, and has better applicability than the existing system. The vibration damping platform is designed with a unique hydraulic gasket based on a small power machine, and an active and passive composite vibration damping system is constructed. It has guiding significance for the vibration roduction design of machinery.",
"fno": "739600a011",
"keywords": [
"Damping",
"Design Engineering",
"Gaskets",
"Machinery",
"Shafts",
"Vibrations",
"Active Composite Vibration Damping System",
"Adaptive Gasket",
"Controller",
"External Power Source",
"Hydraulic Gasket",
"Mechanical Vibration Damping System",
"Passive Composite Vibration Damping System",
"Self Adaptive Automatic Centering",
"Sensor",
"Shafting Machinery",
"Small Power Machinery",
"Vibration Damping Device",
"Vibration Damping Platform",
"Vibrations",
"Damping",
"Adaptive Systems",
"Costs",
"Automation",
"Gaskets",
"Hydraulic Systems",
"Machinery",
"Shock Absorption",
"Hydraulic Pressure"
],
"authors": [
{
"affiliation": "School of Naval Architecture Ocean and Energy Power Engineering, Wuhan University of Technology,Wuhan,China,430070",
"fullName": "Minyu Xu",
"givenName": "Minyu",
"surname": "Xu",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icmiae",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-08-01T00:00:00",
"pubType": "proceedings",
"pages": "11-15",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-7396-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "739600a006",
"articleId": "1JgrHYvBMvS",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "739600a016",
"articleId": "1JgrOu5NyH6",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ecmsm/2017/5582/0/07945874",
"title": "Self-adapting control structure for active damping of the servomechanism residual vibration",
"doi": null,
"abstractUrl": "/proceedings-article/ecmsm/2017/07945874/12OmNs0C9Cc",
"parentPublication": {
"id": "proceedings/ecmsm/2017/5582/0",
"title": "2017 IEEE International Workshop of Electronics, Control, Measurement, Signals and their Application to Mechatronics (ECMSM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ewdts/2016/0693/0/07807740",
"title": "Information-measuring control system of active vibration protection RED",
"doi": null,
"abstractUrl": "/proceedings-article/ewdts/2016/07807740/12OmNyQGSoS",
"parentPublication": {
"id": "proceedings/ewdts/2016/0693/0",
"title": "2016 IEEE East-West Design & Test Symposium (EWDTS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdma/2013/5016/0/5016a397",
"title": "Damping Control Study on Concrete Frame Structure with Viscous Damping Walls",
"doi": null,
"abstractUrl": "/proceedings-article/icdma/2013/5016a397/12OmNz5JBWC",
"parentPublication": {
"id": "proceedings/icdma/2013/5016/0",
"title": "2013 Fourth International Conference on Digital Manufacturing & Automation (ICDMA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fskd/2008/3305/5/3305e075",
"title": "Fuzzy-PI Damping Control for Hydraulic Crane Tip",
"doi": null,
"abstractUrl": "/proceedings-article/fskd/2008/3305e075/12OmNzC5TqY",
"parentPublication": {
"id": "proceedings/fskd/2008/3305/5",
"title": "Fuzzy Systems and Knowledge Discovery, Fourth International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/m2vip/1997/8025/0/80250038",
"title": "An active vibration damping system of a driver's seat for off-road vehicles",
"doi": null,
"abstractUrl": "/proceedings-article/m2vip/1997/80250038/12OmNzZ5oiL",
"parentPublication": {
"id": "proceedings/m2vip/1997/8025/0",
"title": "Mechatronics and Machine Vision in Practice, Annual Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cdciem/2012/4639/0/4639a647",
"title": "Studies on Vibration Control of Beam by Damping Material",
"doi": null,
"abstractUrl": "/proceedings-article/cdciem/2012/4639a647/12OmNzaQozE",
"parentPublication": {
"id": "proceedings/cdciem/2012/4639/0",
"title": "Computer Distributed Control and Intelligent Environmental Monitoring, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isdea/2010/4212/1/4212a683",
"title": "Estimation of Damping Matrices of Cable-Stayed Bridges Based on Known Data of Modal Damping",
"doi": null,
"abstractUrl": "/proceedings-article/isdea/2010/4212a683/12OmNzmclUx",
"parentPublication": {
"id": null,
"title": null,
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvisp/2021/0770/0/077000a118",
"title": "Comparison of Wake Oscillator Models with Different Damping Terms",
"doi": null,
"abstractUrl": "/proceedings-article/icvisp/2021/077000a118/1APq9yfO3U4",
"parentPublication": {
"id": "proceedings/icvisp/2021/0770/0",
"title": "2021 5th International Conference on Vision, Image and Signal Processing (ICVISP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ifeea/2021/0088/0/008800a284",
"title": "Effect of stiffness and damping of spring on power flow in a vibration system",
"doi": null,
"abstractUrl": "/proceedings-article/ifeea/2021/008800a284/1LHcFEY4EuY",
"parentPublication": {
"id": "proceedings/ifeea/2021/0088/0",
"title": "2021 8th International Forum on Electrical Engineering and Automation (IFEEA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icctec/2017/5784/0/578400b036",
"title": "Semi-active Control of the Quiet Hydraulic Manifold Block for Ship Based on the Magnetorheological Vibration Absorber",
"doi": null,
"abstractUrl": "/proceedings-article/icctec/2017/578400b036/1cks3SYBLzO",
"parentPublication": {
"id": "proceedings/icctec/2017/5784/0",
"title": "2017 International Conference on Computer Technology, Electronics and Communication (ICCTEC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1LHcANpxJZu",
"title": "2021 8th International Forum on Electrical Engineering and Automation (IFEEA)",
"acronym": "ifeea",
"groupId": "10067864",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1LHcFEY4EuY",
"doi": "10.1109/IFEEA54171.2021.00064",
"title": "Effect of stiffness and damping of spring on power flow in a vibration system",
"normalizedTitle": "Effect of stiffness and damping of spring on power flow in a vibration system",
"abstract": "This study is related to the vibration transmission properties of a system with stiffness and damping. It aims to investigate the effect of stiffness and damping on the vibration of a single-isolation system based on the power flow approach. A power flow mode method based on the real part of the power is presented, implying the time-average energy flowing into the system. It is future applied to study the effect of stiffness and damping, making it possible to define the energy input, transmission, and dissipation with the vary of stiffness and damping. The results show that the maximum power is supplied when the excitation frequency is equal to the undamped natural frequency. If the excitation is at a fixed frequency, there exists damping or stiffness that the maximum power is supplied. The study provides essential guidelines for designing the stiffness and damping of a system to eliminate its vibration.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This study is related to the vibration transmission properties of a system with stiffness and damping. It aims to investigate the effect of stiffness and damping on the vibration of a single-isolation system based on the power flow approach. A power flow mode method based on the real part of the power is presented, implying the time-average energy flowing into the system. It is future applied to study the effect of stiffness and damping, making it possible to define the energy input, transmission, and dissipation with the vary of stiffness and damping. The results show that the maximum power is supplied when the excitation frequency is equal to the undamped natural frequency. If the excitation is at a fixed frequency, there exists damping or stiffness that the maximum power is supplied. The study provides essential guidelines for designing the stiffness and damping of a system to eliminate its vibration.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This study is related to the vibration transmission properties of a system with stiffness and damping. It aims to investigate the effect of stiffness and damping on the vibration of a single-isolation system based on the power flow approach. A power flow mode method based on the real part of the power is presented, implying the time-average energy flowing into the system. It is future applied to study the effect of stiffness and damping, making it possible to define the energy input, transmission, and dissipation with the vary of stiffness and damping. The results show that the maximum power is supplied when the excitation frequency is equal to the undamped natural frequency. If the excitation is at a fixed frequency, there exists damping or stiffness that the maximum power is supplied. The study provides essential guidelines for designing the stiffness and damping of a system to eliminate its vibration.",
"fno": "008800a284",
"keywords": [
"Damping",
"Rigidity",
"Vibration Isolation",
"Vibrations",
"Damping Effect",
"Energy Dissipation",
"Energy Input",
"Excitation Frequency",
"Power Flow Mode Method",
"Single Isolation System",
"Stiffness Effect",
"Time Average Energy Flow",
"Undamped Natural Frequency",
"Vibration System",
"Vibration Transmission Properties",
"Damping",
"Vibrations",
"Electrical Engineering",
"Automation",
"Springs",
"Load Flow",
"Guidelines",
"Vibration System",
"Power Flow",
"Stiffness",
"Damping"
],
"authors": [
{
"affiliation": "Shanghai Research Institute of Materials,Hongkou Shanghai,China",
"fullName": "Xiujie Jiang",
"givenName": "Xiujie",
"surname": "Jiang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Shanghai Research Institute of Materials,Hongkou Shanghai,China",
"fullName": "Yan Liu",
"givenName": "Yan",
"surname": "Liu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Shanghai Research Institute of Materials,Hongkou Shanghai,China",
"fullName": "Qiutong Li",
"givenName": "Qiutong",
"surname": "Li",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Shanghai Polytechnic University,Faculty of Engineering,Pudong. Shanghai,China",
"fullName": "Huan Liu",
"givenName": "Huan",
"surname": "Liu",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ifeea",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-09-01T00:00:00",
"pubType": "proceedings",
"pages": "284-288",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-0088-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "008800a279",
"articleId": "1LHcBwtdQas",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "008800a289",
"articleId": "1LHcKH9ZKak",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ecmsm/2017/5582/0/07945874",
"title": "Self-adapting control structure for active damping of the servomechanism residual vibration",
"doi": null,
"abstractUrl": "/proceedings-article/ecmsm/2017/07945874/12OmNs0C9Cc",
"parentPublication": {
"id": "proceedings/ecmsm/2017/5582/0",
"title": "2017 IEEE International Workshop of Electronics, Control, Measurement, Signals and their Application to Mechatronics (ECMSM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/m2vip/1997/8025/0/80250038",
"title": "An active vibration damping system of a driver's seat for off-road vehicles",
"doi": null,
"abstractUrl": "/proceedings-article/m2vip/1997/80250038/12OmNzZ5oiL",
"parentPublication": {
"id": "proceedings/m2vip/1997/8025/0",
"title": "Mechatronics and Machine Vision in Practice, Annual Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cdciem/2012/4639/0/4639a647",
"title": "Studies on Vibration Control of Beam by Damping Material",
"doi": null,
"abstractUrl": "/proceedings-article/cdciem/2012/4639a647/12OmNzaQozE",
"parentPublication": {
"id": "proceedings/cdciem/2012/4639/0",
"title": "Computer Distributed Control and Intelligent Environmental Monitoring, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isdea/2010/4212/1/4212a683",
"title": "Estimation of Damping Matrices of Cable-Stayed Bridges Based on Known Data of Modal Damping",
"doi": null,
"abstractUrl": "/proceedings-article/isdea/2010/4212a683/12OmNzmclUx",
"parentPublication": {
"id": null,
"title": null,
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvisp/2021/0770/0/077000a118",
"title": "Comparison of Wake Oscillator Models with Different Damping Terms",
"doi": null,
"abstractUrl": "/proceedings-article/icvisp/2021/077000a118/1APq9yfO3U4",
"parentPublication": {
"id": "proceedings/icvisp/2021/0770/0",
"title": "2021 5th International Conference on Vision, Image and Signal Processing (ICVISP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icedcs/2022/5541/0/554100a218",
"title": "Experimental Study on Influence of Tension on Self-Damping Characteristics of Large Span Transmission Lines",
"doi": null,
"abstractUrl": "/proceedings-article/icedcs/2022/554100a218/1JC1owBCTDi",
"parentPublication": {
"id": "proceedings/icedcs/2022/5541/0",
"title": "2022 International Conference on Electronics and Devices, Computational Science (ICEDCS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmiae/2022/7396/0/739600a011",
"title": "Research on Composite Vibration Damping System Based on Adaptive Gasket",
"doi": null,
"abstractUrl": "/proceedings-article/icmiae/2022/739600a011/1JgrIFnk1nW",
"parentPublication": {
"id": "proceedings/icmiae/2022/7396/0",
"title": "2022 International Conference on Manufacturing, Industrial Automation and Electronics (ICMIAE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ifeea/2021/0088/0/008800a238",
"title": "Effect of gap on self-damping characteristics of Large Span Transmission Lines",
"doi": null,
"abstractUrl": "/proceedings-article/ifeea/2021/008800a238/1LHcHnaBGNy",
"parentPublication": {
"id": "proceedings/ifeea/2021/0088/0",
"title": "2021 8th International Forum on Electrical Engineering and Automation (IFEEA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ifeea/2021/0088/0/008800a260",
"title": "Finite Element Analysis of Dynamic Characteristics of Large Span Transmission Line",
"doi": null,
"abstractUrl": "/proceedings-article/ifeea/2021/008800a260/1LHcOVl6KUU",
"parentPublication": {
"id": "proceedings/ifeea/2021/0088/0",
"title": "2021 8th International Forum on Electrical Engineering and Automation (IFEEA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ifeea/2020/9627/0/962700a161",
"title": "Research on the Influence of Throttle Valve Stiffness on Damping Characteristics of Shock Absorber",
"doi": null,
"abstractUrl": "/proceedings-article/ifeea/2020/962700a161/1rvCEQsrkhG",
"parentPublication": {
"id": "proceedings/ifeea/2020/9627/0",
"title": "2020 7th International Forum on Electrical Engineering and Automation (IFEEA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNvAiSpZ",
"title": "2015 IEEE Virtual Reality (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2015",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNC943Fw",
"doi": "10.1109/VR.2015.7223345",
"title": "Visual-olfactory immersive environment for product evaluation",
"normalizedTitle": "Visual-olfactory immersive environment for product evaluation",
"abstract": "Today smells are used for communicating information about products as household cleaners and food. However, smells can be also applied to any kind of products. Several researches have focused on integrating smells in virtual environments. The research questions addressed in this work concern whether Virtual Prototypes, including the sense of smell, can be used for evaluating products as effectively as studies performed in real environments, and whether smells can increase the users' sense of presence in the virtual environment. For this purpose, an experimental framework including a wearable olfactory display has been developed, and experimental tests have been performed.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Today smells are used for communicating information about products as household cleaners and food. However, smells can be also applied to any kind of products. Several researches have focused on integrating smells in virtual environments. The research questions addressed in this work concern whether Virtual Prototypes, including the sense of smell, can be used for evaluating products as effectively as studies performed in real environments, and whether smells can increase the users' sense of presence in the virtual environment. For this purpose, an experimental framework including a wearable olfactory display has been developed, and experimental tests have been performed.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Today smells are used for communicating information about products as household cleaners and food. However, smells can be also applied to any kind of products. Several researches have focused on integrating smells in virtual environments. The research questions addressed in this work concern whether Virtual Prototypes, including the sense of smell, can be used for evaluating products as effectively as studies performed in real environments, and whether smells can increase the users' sense of presence in the virtual environment. For this purpose, an experimental framework including a wearable olfactory display has been developed, and experimental tests have been performed.",
"fno": "07223345",
"keywords": [
"Olfactory",
"Virtual Environments",
"Prototypes",
"Virtual Prototyping",
"Washing Machines",
"Mechanical Engineering",
"Virtual Reality",
"Olfactory Display",
"Wearable Device",
"Presence"
],
"authors": [
{
"affiliation": "Department of Mechanical Engineering, Politecnico di Milano, Italy",
"fullName": "Marina Carulli",
"givenName": "Marina",
"surname": "Carulli",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Department of Mechanical Engineering, Politecnico di Milano, Italy",
"fullName": "Monica Bordegoni",
"givenName": "Monica",
"surname": "Bordegoni",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Department of Mechanical Engineering, Politecnico di Milano, Italy",
"fullName": "Umberto Cugini",
"givenName": "Umberto",
"surname": "Cugini",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2015-03-01T00:00:00",
"pubType": "proceedings",
"pages": "161-162",
"year": "2015",
"issn": null,
"isbn": "978-1-4799-1727-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07223344",
"articleId": "12OmNy2agQe",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07223346",
"articleId": "12OmNvjyxwr",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2007/0905/0/04161021",
"title": "Improvement of olfactory display using solenoid valves",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2007/04161021/12OmNqHItu5",
"parentPublication": {
"id": "proceedings/vr/2007/0905/0",
"title": "2007 IEEE Virtual Reality Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2013/4795/0/06549409",
"title": "Visual-olfactory presentation system using a miniaturized olfactory display based on SAW streaming and electroosmotic pumps",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2013/06549409/12OmNwDSduJ",
"parentPublication": {
"id": "proceedings/vr/2013/4795/0",
"title": "2013 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2011/0039/0/05759464",
"title": "Olfactory display using a miniaturized pump and a SAW atomizer for presenting low-volatile scents",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2011/05759464/12OmNy3AgAZ",
"parentPublication": {
"id": "proceedings/vr/2011/0039/0",
"title": "2011 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2010/6237/0/05444817",
"title": "Visual-olfactory display using olfactory sensory map",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2010/05444817/12OmNzd7bip",
"parentPublication": {
"id": "proceedings/vr/2010/6237/0",
"title": "2010 IEEE Virtual Reality Conference (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08445841",
"title": "Demonstration of Olfactory Display Based on Sniffing Action",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08445841/13bd1eTtWYE",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/05/08642346",
"title": "Audio-Visual-Olfactory Resource Allocation for Tri-modal Virtual Environments",
"doi": null,
"abstractUrl": "/journal/tg/2019/05/08642346/17PYElfKq78",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/svr/2018/0604/0/060400a057",
"title": "Using Olfactory Stimuli in Virtual Reality Applications",
"doi": null,
"abstractUrl": "/proceedings-article/svr/2018/060400a057/1cJ7zFkTA7m",
"parentPublication": {
"id": "proceedings/svr/2018/0604/0",
"title": "2018 20th Symposium on Virtual and Augmented Reality (SVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2020/5608/0/09089617",
"title": "Virtual environment with smell using wearable olfactory display and computational fluid dynamics simulation",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2020/09089617/1jIxfcDz7Ak",
"parentPublication": {
"id": "proceedings/vr/2020/5608/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090609",
"title": "Exploring Effect Of Different External Stimuli On Body Association In VR",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090609/1jIxuOtbTAQ",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a490",
"title": "Investigating Individual Differences in Olfactory Adaptation to Pulse Ejection Odor Display by Scaling Olfaction Sensitivity of Intensity",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a490/1tnXnAd9AK4",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNAYXWAF",
"title": "2016 IEEE Virtual Reality (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2016",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNzgwmQK",
"doi": "10.1109/VR.2016.7504712",
"title": "Olfactory display using surface acoustic wave device and micropumps for wearable applications",
"normalizedTitle": "Olfactory display using surface acoustic wave device and micropumps for wearable applications",
"abstract": "Olfaction is expected to provide reality and a sense of immersion in multimedia contents. Therefore, an olfactory display, a gadget to present scents to one or more user(s), has been developed. A wearable olfactory display has advantage from the viewpoint of reducing the odorant diffusion into the atmosphere and is suitable for virtual reality applications. In this study, we developed a portable olfactory display using surface acoustic wave (SAW) device and micropumps. In the experiment using quartz crystal microbalance (QCM) gas sensor, we confirmed that the olfactory display can present the odorant with intended intensity.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Olfaction is expected to provide reality and a sense of immersion in multimedia contents. Therefore, an olfactory display, a gadget to present scents to one or more user(s), has been developed. A wearable olfactory display has advantage from the viewpoint of reducing the odorant diffusion into the atmosphere and is suitable for virtual reality applications. In this study, we developed a portable olfactory display using surface acoustic wave (SAW) device and micropumps. In the experiment using quartz crystal microbalance (QCM) gas sensor, we confirmed that the olfactory display can present the odorant with intended intensity.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Olfaction is expected to provide reality and a sense of immersion in multimedia contents. Therefore, an olfactory display, a gadget to present scents to one or more user(s), has been developed. A wearable olfactory display has advantage from the viewpoint of reducing the odorant diffusion into the atmosphere and is suitable for virtual reality applications. In this study, we developed a portable olfactory display using surface acoustic wave (SAW) device and micropumps. In the experiment using quartz crystal microbalance (QCM) gas sensor, we confirmed that the olfactory display can present the odorant with intended intensity.",
"fno": "07504712",
"keywords": [
"Olfactory",
"Micropumps",
"Surface Acoustic Wave Devices",
"Liquids",
"Virtual Reality",
"Surface Acoustic Waves",
"Compounds",
"QCM Gas Sensor",
"Olfactory Display SAW",
"Micropump"
],
"authors": [
{
"affiliation": "Interdisciplinary graduate school of science and engineering, Tokyo Institute of Technology, Japan",
"fullName": "Kazuki Hashimoto",
"givenName": "Kazuki",
"surname": "Hashimoto",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Interdisciplinary graduate school of science and engineering, Tokyo Institute of Technology, Japan",
"fullName": "Takamichi Nakamoto",
"givenName": "Takamichi",
"surname": "Nakamoto",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2016-03-01T00:00:00",
"pubType": "proceedings",
"pages": "179-180",
"year": "2016",
"issn": "2375-5334",
"isbn": "978-1-5090-0836-0",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07504711",
"articleId": "12OmNyKJie9",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07504713",
"articleId": "12OmNAS9zzO",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2009/3943/0/04811065",
"title": "Demonstration of Improved Olfactory Display using Rapidly-Switching Solenoid Valves",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2009/04811065/12OmNAR1aSY",
"parentPublication": {
"id": "proceedings/vr/2009/3943/0",
"title": "2009 IEEE Virtual Reality Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2007/0905/0/04161021",
"title": "Improvement of olfactory display using solenoid valves",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2007/04161021/12OmNqHItu5",
"parentPublication": {
"id": "proceedings/vr/2007/0905/0",
"title": "2007 IEEE Virtual Reality Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2009/3943/0/04811062",
"title": "Odor Presentation with a Vivid Sense of Reality: Incorporating Fluid Dynamics Simulation into Olfactory Display",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2009/04811062/12OmNs0C9X2",
"parentPublication": {
"id": "proceedings/vr/2009/3943/0",
"title": "2009 IEEE Virtual Reality Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ichi/2013/5089/0/5089a099",
"title": "Olfactory Measurement System to Quantify the Ability to Smell Using Pulse Ejection",
"doi": null,
"abstractUrl": "/proceedings-article/ichi/2013/5089a099/12OmNvJXeDw",
"parentPublication": {
"id": "proceedings/ichi/2013/5089/0",
"title": "2013 IEEE International Conference on Healthcare Informatics (ICHI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2013/4795/0/06549409",
"title": "Visual-olfactory presentation system using a miniaturized olfactory display based on SAW streaming and electroosmotic pumps",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2013/06549409/12OmNwDSduJ",
"parentPublication": {
"id": "proceedings/vr/2013/4795/0",
"title": "2013 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2010/6237/0/05444817",
"title": "Visual-olfactory display using olfactory sensory map",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2010/05444817/12OmNzd7bip",
"parentPublication": {
"id": "proceedings/vr/2010/6237/0",
"title": "2010 IEEE Virtual Reality Conference (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08445841",
"title": "Demonstration of Olfactory Display Based on Sniffing Action",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08445841/13bd1eTtWYE",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446484",
"title": "Olfactory Display Based on Sniffing Action",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446484/13bd1fdV4kM",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/svr/2018/0604/0/060400a057",
"title": "Using Olfactory Stimuli in Virtual Reality Applications",
"doi": null,
"abstractUrl": "/proceedings-article/svr/2018/060400a057/1cJ7zFkTA7m",
"parentPublication": {
"id": "proceedings/svr/2018/0604/0",
"title": "2018 20th Symposium on Virtual and Augmented Reality (SVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2020/5608/0/09089617",
"title": "Virtual environment with smell using wearable olfactory display and computational fluid dynamics simulation",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2020/09089617/1jIxfcDz7Ak",
"parentPublication": {
"id": "proceedings/vr/2020/5608/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1CJbEwHHqEg",
"title": "2022 IEEE on Conference Virtual Reality and 3D User Interfaces (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1CJbU8KWWTS",
"doi": "10.1109/VR51125.2022.00067",
"title": "Simulating Olfactory Cocktail Party Effect in VR: A Multi-odor Display Approach Based on Attention",
"normalizedTitle": "Simulating Olfactory Cocktail Party Effect in VR: A Multi-odor Display Approach Based on Attention",
"abstract": "Odor display has been a popular approach in virtual reality (VR) to enhance users’ multi-sensory experience. The existing multi-odor presentation methods in VR are mostly based on spatiality of scent sources to produce mixed scents, which will possibly compromise users’ olfactory experience because humans normally have poor ability to analyze distinct odorant components from a mixture. To tackle this problem, we present a VR multi-odor display approach that dynamically changes the intensity combinations of different scent sources in the virtual environment according to the user’s attention, hence simulating a virtual cocktail party effect of smell. We acquire the user’s gaze information as attention from the eye-tracking sensors embedded in the head mounted display (HMD), and increase the display intensity of the scent that the user is focusing on to simulate the cocktail party effect of smell, enabling the user to distinguish their desired scent source. We conducted a user study to validate the perception and experience of 2 ways of intensity settings in response to the user’s attention shift, which were a strong level of focused scent mixed with weak levels of non-focused scents and strong focused scent only. The results showed that both of the two intensity settings were able to improve olfactory experience in VR compared to the non-dynamic odor display method. Meanwhile, only the method of presenting strengthened focused scent while maintaining the weaker mixture of background scents showed significant improvement on simulating the olfactory cocktail party effect by giving the users an enhanced sense of their own olfactory sensitivity.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Odor display has been a popular approach in virtual reality (VR) to enhance users’ multi-sensory experience. The existing multi-odor presentation methods in VR are mostly based on spatiality of scent sources to produce mixed scents, which will possibly compromise users’ olfactory experience because humans normally have poor ability to analyze distinct odorant components from a mixture. To tackle this problem, we present a VR multi-odor display approach that dynamically changes the intensity combinations of different scent sources in the virtual environment according to the user’s attention, hence simulating a virtual cocktail party effect of smell. We acquire the user’s gaze information as attention from the eye-tracking sensors embedded in the head mounted display (HMD), and increase the display intensity of the scent that the user is focusing on to simulate the cocktail party effect of smell, enabling the user to distinguish their desired scent source. We conducted a user study to validate the perception and experience of 2 ways of intensity settings in response to the user’s attention shift, which were a strong level of focused scent mixed with weak levels of non-focused scents and strong focused scent only. The results showed that both of the two intensity settings were able to improve olfactory experience in VR compared to the non-dynamic odor display method. Meanwhile, only the method of presenting strengthened focused scent while maintaining the weaker mixture of background scents showed significant improvement on simulating the olfactory cocktail party effect by giving the users an enhanced sense of their own olfactory sensitivity.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Odor display has been a popular approach in virtual reality (VR) to enhance users’ multi-sensory experience. The existing multi-odor presentation methods in VR are mostly based on spatiality of scent sources to produce mixed scents, which will possibly compromise users’ olfactory experience because humans normally have poor ability to analyze distinct odorant components from a mixture. To tackle this problem, we present a VR multi-odor display approach that dynamically changes the intensity combinations of different scent sources in the virtual environment according to the user’s attention, hence simulating a virtual cocktail party effect of smell. We acquire the user’s gaze information as attention from the eye-tracking sensors embedded in the head mounted display (HMD), and increase the display intensity of the scent that the user is focusing on to simulate the cocktail party effect of smell, enabling the user to distinguish their desired scent source. We conducted a user study to validate the perception and experience of 2 ways of intensity settings in response to the user’s attention shift, which were a strong level of focused scent mixed with weak levels of non-focused scents and strong focused scent only. The results showed that both of the two intensity settings were able to improve olfactory experience in VR compared to the non-dynamic odor display method. Meanwhile, only the method of presenting strengthened focused scent while maintaining the weaker mixture of background scents showed significant improvement on simulating the olfactory cocktail party effect by giving the users an enhanced sense of their own olfactory sensitivity.",
"fno": "961700a474",
"keywords": [
"Chemioception",
"Electronic Noses",
"Gaze Tracking",
"Helmet Mounted Displays",
"Nondynamic Odor Display Method",
"Background Scents",
"Olfactory Cocktail Party Effect",
"Olfactory Sensitivity",
"Virtual Reality Environment",
"Multisensory Experience",
"Distinct Odorant Components",
"VR Multiodor Display Approach",
"Intensity Combinations",
"Scent Sources",
"Multiodor Presentation Methods",
"HMD",
"Head Mounted Display",
"Sensitivity",
"Three Dimensional Displays",
"Conferences",
"Olfactory",
"Virtual Environments",
"Focusing",
"Resists",
"Virtual Reality",
"Odor Presentation",
"Attention"
],
"authors": [
{
"affiliation": "The University of Tokyo,Graduate School of Frontier Sciences",
"fullName": "Shangyin Zou",
"givenName": "Shangyin",
"surname": "Zou",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "The University of Tokyo,Graduate School of Frontier Sciences",
"fullName": "Xianyin Hu",
"givenName": "Xianyin",
"surname": "Hu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "The University of Tokyo,Graduate School of Frontier Sciences",
"fullName": "Yuki Ban",
"givenName": "Yuki",
"surname": "Ban",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "The University of Tokyo,Graduate School of Frontier Sciences",
"fullName": "Shin’ichi Warisawa",
"givenName": "Shin’ichi",
"surname": "Warisawa",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-03-01T00:00:00",
"pubType": "proceedings",
"pages": "474-482",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-9617-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "961700a464",
"articleId": "1CJc9xfqBSo",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "961700a483",
"articleId": "1CJcsRpGDQI",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ichi/2013/5089/0/5089a099",
"title": "Olfactory Measurement System to Quantify the Ability to Smell Using Pulse Ejection",
"doi": null,
"abstractUrl": "/proceedings-article/ichi/2013/5089a099/12OmNvJXeDw",
"parentPublication": {
"id": "proceedings/ichi/2013/5089/0",
"title": "2013 IEEE International Conference on Healthcare Informatics (ICHI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2011/0039/0/05759464",
"title": "Olfactory display using a miniaturized pump and a SAW atomizer for presenting low-volatile scents",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2011/05759464/12OmNy3AgAZ",
"parentPublication": {
"id": "proceedings/vr/2011/0039/0",
"title": "2011 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/saint/2010/4107/0/4107a001",
"title": "Development of a High-Performance Olfactory Display and Measurement of Olfactory Characteristics for Pulse Ejections",
"doi": null,
"abstractUrl": "/proceedings-article/saint/2010/4107a001/12OmNyrIawk",
"parentPublication": {
"id": "proceedings/saint/2010/4107/0",
"title": "2010 10th IEEE/IPSJ International Symposium on Applications and the Internet",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2010/6237/0/05444817",
"title": "Visual-olfactory display using olfactory sensory map",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2010/05444817/12OmNzd7bip",
"parentPublication": {
"id": "proceedings/vr/2010/6237/0",
"title": "2010 IEEE Virtual Reality Conference (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08445841",
"title": "Demonstration of Olfactory Display Based on Sniffing Action",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08445841/13bd1eTtWYE",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446484",
"title": "Olfactory Display Based on Sniffing Action",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446484/13bd1fdV4kM",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2008/01/mcg2008010075",
"title": "Cooking Up an Interactive Olfactory Game Display",
"doi": null,
"abstractUrl": "/magazine/cg/2008/01/mcg2008010075/13rRUwvT9lE",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a490",
"title": "Investigating Individual Differences in Olfactory Adaptation to Pulse Ejection Odor Display by Scaling Olfaction Sensitivity of Intensity",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a490/1tnXnAd9AK4",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2021/1838/0/255600a279",
"title": "Does Virtual Odor Representation Influence the Perception of Olfactory Intensity and Directionality in VR?",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2021/255600a279/1tuAlZRpf6E",
"parentPublication": {
"id": "proceedings/vr/2021/1838/0",
"title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/smartiot/2021/4511/0/451100a329",
"title": "The Odor Characterizations and Interactive Olfactory Display: A Survey",
"doi": null,
"abstractUrl": "/proceedings-article/smartiot/2021/451100a329/1xDQb2bELm0",
"parentPublication": {
"id": "proceedings/smartiot/2021/4511/0",
"title": "2021 IEEE International Conference on Smart Internet of Things (SmartIoT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1jIx7fmpQ9a",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1jIxfcDz7Ak",
"doi": "10.1109/VR46266.2020.00094",
"title": "Virtual environment with smell using wearable olfactory display and computational fluid dynamics simulation",
"normalizedTitle": "Virtual environment with smell using wearable olfactory display and computational fluid dynamics simulation",
"abstract": "It is important to include an olfactory cue to enhance the reality in the virtual environment. We have developed the virtual olfactory environment where a user searches for an odor source. The virtual olfactory environment was prepared using computational fluid dynamics calculation. It enables us to have the dynamic odor concentration distribution even if we have complicated obstacles in the virtual environment. Moreover, we developed the wearable olfactory display made up of multiple micro dispensers and SAW (Surface Acoustic Wave) device so that the rapid switching of the smells could be achieved. The wearable olfactory display was attached beneath a head mount display to present a smell quickly. We made the virtual environment of the two-story building where four rooms were located at each floor. A user searched for the source of smoke smell located at one room among four ones at the second floor since we simulated the fire at the early stage. A half of the users could reach the correct source locations in the experiment.",
"abstracts": [
{
"abstractType": "Regular",
"content": "It is important to include an olfactory cue to enhance the reality in the virtual environment. We have developed the virtual olfactory environment where a user searches for an odor source. The virtual olfactory environment was prepared using computational fluid dynamics calculation. It enables us to have the dynamic odor concentration distribution even if we have complicated obstacles in the virtual environment. Moreover, we developed the wearable olfactory display made up of multiple micro dispensers and SAW (Surface Acoustic Wave) device so that the rapid switching of the smells could be achieved. The wearable olfactory display was attached beneath a head mount display to present a smell quickly. We made the virtual environment of the two-story building where four rooms were located at each floor. A user searched for the source of smoke smell located at one room among four ones at the second floor since we simulated the fire at the early stage. A half of the users could reach the correct source locations in the experiment.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "It is important to include an olfactory cue to enhance the reality in the virtual environment. We have developed the virtual olfactory environment where a user searches for an odor source. The virtual olfactory environment was prepared using computational fluid dynamics calculation. It enables us to have the dynamic odor concentration distribution even if we have complicated obstacles in the virtual environment. Moreover, we developed the wearable olfactory display made up of multiple micro dispensers and SAW (Surface Acoustic Wave) device so that the rapid switching of the smells could be achieved. The wearable olfactory display was attached beneath a head mount display to present a smell quickly. We made the virtual environment of the two-story building where four rooms were located at each floor. A user searched for the source of smoke smell located at one room among four ones at the second floor since we simulated the fire at the early stage. A half of the users could reach the correct source locations in the experiment.",
"fno": "09089617",
"keywords": [
"Chemioception",
"Computational Fluid Dynamics",
"Computerised Instrumentation",
"Display Instrumentation",
"Surface Acoustic Wave Devices",
"Wearable Olfactory Display",
"Virtual Olfactory Environment",
"Computational Fluid Dynamics Calculation",
"Dynamic Odor Concentration Distribution",
"Microdispensers",
"SAW Device",
"Surface Acoustic Wave Device",
"Olfactory",
"Surface Acoustic Wave Devices",
"Liquids",
"Surface Acoustic Waves",
"Virtual Environments",
"Floors",
"Solid Modeling",
"Wearable Olfactory Display",
"CFD",
"Disaster Simulator",
"Micro Dispenser",
"SAW Atomizer"
],
"authors": [
{
"affiliation": "Tokyo Institute of Technology,Institute of Innovative Research",
"fullName": "Takamichi Nakamoto",
"givenName": "Takamichi",
"surname": "Nakamoto",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Tokyo Institute of Technology,School of Engineering",
"fullName": "Tatsuya Hirasawa",
"givenName": "Tatsuya",
"surname": "Hirasawa",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Tokyo Institute of Technology,School of Engineering",
"fullName": "Yukiko Hanyu",
"givenName": "Yukiko",
"surname": "Hanyu",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-03-01T00:00:00",
"pubType": "proceedings",
"pages": "713-720",
"year": "2020",
"issn": null,
"isbn": "978-1-7281-5608-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "09089450",
"articleId": "1jIxe2m67ZK",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09089461",
"articleId": "1jIxbelqcbC",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2009/3943/0/04811065",
"title": "Demonstration of Improved Olfactory Display using Rapidly-Switching Solenoid Valves",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2009/04811065/12OmNAR1aSY",
"parentPublication": {
"id": "proceedings/vr/2009/3943/0",
"title": "2009 IEEE Virtual Reality Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2009/3943/0/04811062",
"title": "Odor Presentation with a Vivid Sense of Reality: Incorporating Fluid Dynamics Simulation into Olfactory Display",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2009/04811062/12OmNs0C9X2",
"parentPublication": {
"id": "proceedings/vr/2009/3943/0",
"title": "2009 IEEE Virtual Reality Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2013/4795/0/06549409",
"title": "Visual-olfactory presentation system using a miniaturized olfactory display based on SAW streaming and electroosmotic pumps",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2013/06549409/12OmNwDSduJ",
"parentPublication": {
"id": "proceedings/vr/2013/4795/0",
"title": "2013 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2011/0039/0/05759464",
"title": "Olfactory display using a miniaturized pump and a SAW atomizer for presenting low-volatile scents",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2011/05759464/12OmNy3AgAZ",
"parentPublication": {
"id": "proceedings/vr/2011/0039/0",
"title": "2011 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2016/0836/0/07504712",
"title": "Olfactory display using surface acoustic wave device and micropumps for wearable applications",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2016/07504712/12OmNzgwmQK",
"parentPublication": {
"id": "proceedings/vr/2016/0836/0",
"title": "2016 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08445841",
"title": "Demonstration of Olfactory Display Based on Sniffing Action",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08445841/13bd1eTtWYE",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446484",
"title": "Olfactory Display Based on Sniffing Action",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446484/13bd1fdV4kM",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2008/01/mcg2008010075",
"title": "Cooking Up an Interactive Olfactory Game Display",
"doi": null,
"abstractUrl": "/magazine/cg/2008/01/mcg2008010075/13rRUwvT9lE",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2022/9617/0/961700a474",
"title": "Simulating Olfactory Cocktail Party Effect in VR: A Multi-odor Display Approach Based on Attention",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2022/961700a474/1CJbU8KWWTS",
"parentPublication": {
"id": "proceedings/vr/2022/9617/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2022/9617/0/961700a241",
"title": "The Smell Engine: A system for artificial odor synthesis in virtual environments",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2022/961700a241/1CJcfonMlRC",
"parentPublication": {
"id": "proceedings/vr/2022/9617/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1tuAeQeDJja",
"title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1tuAlZRpf6E",
"doi": "10.1109/VR50410.2021.00050",
"title": "Does Virtual Odor Representation Influence the Perception of Olfactory Intensity and Directionality in VR?",
"normalizedTitle": "Does Virtual Odor Representation Influence the Perception of Olfactory Intensity and Directionality in VR?",
"abstract": "Introducing olfactory display in the virtual reality (VR) system brings the immersive experience to new heights. However, it is intractable to simulate olfactory features (such as the intensity and the direction) with multiple levels. Visual stimuli have been proved to dominate human perception among multiple sensors in virtual environments. If visual stimuli can be used to guide the olfactory sense in VR, the design of the olfactory display can be simpler but still able to provide olfactory experience with more diversity. To understand the visual-olfactory effect on different olfactory characteristics, a portable olfactory display that can control the intensity and direction of odors was developed. An experimental study was conducted to investigate cross-modal human perception, i.e. how the visually virtual odor representation in VR influences human perception of real odor produced by the proposed olfactory display. The results showed that the perception of odor intensity and directionality can be modulated by visually virtual odor representation.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Introducing olfactory display in the virtual reality (VR) system brings the immersive experience to new heights. However, it is intractable to simulate olfactory features (such as the intensity and the direction) with multiple levels. Visual stimuli have been proved to dominate human perception among multiple sensors in virtual environments. If visual stimuli can be used to guide the olfactory sense in VR, the design of the olfactory display can be simpler but still able to provide olfactory experience with more diversity. To understand the visual-olfactory effect on different olfactory characteristics, a portable olfactory display that can control the intensity and direction of odors was developed. An experimental study was conducted to investigate cross-modal human perception, i.e. how the visually virtual odor representation in VR influences human perception of real odor produced by the proposed olfactory display. The results showed that the perception of odor intensity and directionality can be modulated by visually virtual odor representation.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Introducing olfactory display in the virtual reality (VR) system brings the immersive experience to new heights. However, it is intractable to simulate olfactory features (such as the intensity and the direction) with multiple levels. Visual stimuli have been proved to dominate human perception among multiple sensors in virtual environments. If visual stimuli can be used to guide the olfactory sense in VR, the design of the olfactory display can be simpler but still able to provide olfactory experience with more diversity. To understand the visual-olfactory effect on different olfactory characteristics, a portable olfactory display that can control the intensity and direction of odors was developed. An experimental study was conducted to investigate cross-modal human perception, i.e. how the visually virtual odor representation in VR influences human perception of real odor produced by the proposed olfactory display. The results showed that the perception of odor intensity and directionality can be modulated by visually virtual odor representation.",
"fno": "255600a279",
"keywords": [
"Chemioception",
"Display Instrumentation",
"Neurophysiology",
"Virtual Reality",
"Visual Stimuli",
"Olfactory Sense",
"VR",
"Olfactory Experience",
"Visual Olfactory Effect",
"Different Olfactory Characteristics",
"Portable Olfactory Display",
"Cross Modal Human Perception",
"Visually Virtual Odor Representation",
"Odor Intensity",
"Virtual Odor Representation Influence",
"Olfactory Intensity",
"Virtual Reality System",
"Olfactory Features",
"Virtual Environments",
"Visualization",
"Three Dimensional Displays",
"Olfactory",
"Virtual Environments",
"Prototypes",
"Immersive Experience",
"User Interfaces",
"Virtual Reality",
"Olfactory Display Human Centered Computing Virtual Reality"
],
"authors": [
{
"affiliation": "National Tsing Hua University,Department of Computer Science",
"fullName": "Shou-En Tsai",
"givenName": "Shou-En",
"surname": "Tsai",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "National Cheng Kung University,Department of Computer Science and Information Engineering",
"fullName": "Wan-Lun Tsai",
"givenName": "Wan-Lun",
"surname": "Tsai",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "National Tsing Hua University,Department of Computer Science",
"fullName": "Tse-Yu Pan",
"givenName": "Tse-Yu",
"surname": "Pan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "CityChaser",
"fullName": "Chia-Ming Kuo",
"givenName": "Chia-Ming",
"surname": "Kuo",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "National Tsing Hua University,Department of Computer Science",
"fullName": "Min-Chun Hu",
"givenName": "Min-Chun",
"surname": "Hu",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-03-01T00:00:00",
"pubType": "proceedings",
"pages": "279-285",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-1838-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "255600a270",
"articleId": "1tuAySECsz6",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "255600a286",
"articleId": "1tuAr7cnsYw",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2009/3943/0/04811062",
"title": "Odor Presentation with a Vivid Sense of Reality: Incorporating Fluid Dynamics Simulation into Olfactory Display",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2009/04811062/12OmNs0C9X2",
"parentPublication": {
"id": "proceedings/vr/2009/3943/0",
"title": "2009 IEEE Virtual Reality Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2006/0224/0/02240199",
"title": "Wearable Olfactory Display: Using Odor in Outdoor Environment",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2006/02240199/12OmNwK7o3V",
"parentPublication": {
"id": "proceedings/vr/2006/0224/0",
"title": "IEEE Virtual Reality Conference (VR 2006)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2009/3943/0/04811016",
"title": "Selection Method of Odor Components for Olfactory Display Using Mass Spectrum Database",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2009/04811016/12OmNxzuMBP",
"parentPublication": {
"id": "proceedings/vr/2009/3943/0",
"title": "2009 IEEE Virtual Reality Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08445841",
"title": "Demonstration of Olfactory Display Based on Sniffing Action",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08445841/13bd1eTtWYE",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446484",
"title": "Olfactory Display Based on Sniffing Action",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446484/13bd1fdV4kM",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/04/ttg2013040606",
"title": "Smelling Screen: Development and Evaluation of an Olfactory Display System for Presenting a Virtual Odor Source",
"doi": null,
"abstractUrl": "/journal/tg/2013/04/ttg2013040606/13rRUILLkvq",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2022/9617/0/961700a474",
"title": "Simulating Olfactory Cocktail Party Effect in VR: A Multi-odor Display Approach Based on Attention",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2022/961700a474/1CJbU8KWWTS",
"parentPublication": {
"id": "proceedings/vr/2022/9617/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2020/5608/0/09089617",
"title": "Virtual environment with smell using wearable olfactory display and computational fluid dynamics simulation",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2020/09089617/1jIxfcDz7Ak",
"parentPublication": {
"id": "proceedings/vr/2020/5608/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a490",
"title": "Investigating Individual Differences in Olfactory Adaptation to Pulse Ejection Odor Display by Scaling Olfaction Sensitivity of Intensity",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a490/1tnXnAd9AK4",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/smartiot/2021/4511/0/451100a329",
"title": "The Odor Characterizations and Interactive Olfactory Display: A Survey",
"doi": null,
"abstractUrl": "/proceedings-article/smartiot/2021/451100a329/1xDQb2bELm0",
"parentPublication": {
"id": "proceedings/smartiot/2021/4511/0",
"title": "2021 IEEE International Conference on Smart Internet of Things (SmartIoT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNCbU3aT",
"title": "2014 IEEE 22nd International Requirements Engineering Conference (RE)",
"acronym": "re",
"groupId": "1000630",
"volume": "0",
"displayVolume": "0",
"year": "2014",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNviHKnc",
"doi": "10.1109/RE.2014.6912244",
"title": "Towards a situation awareness design to improve visually impaired orientation in unfamiliar buildings: Requirements elicitation study",
"normalizedTitle": "Towards a situation awareness design to improve visually impaired orientation in unfamiliar buildings: Requirements elicitation study",
"abstract": "Requirements elicitation can be a challenging process in many systems. This challenge can be greater with a non-standard user population, such as visually impaired users. In this work, we report our experience and results of eliciting user requirements for a situation awareness indoor orientation system dedicated to the visually impaired. We elicited our initial system requirements through three different studies that focus on users along with orientation and mobility instructors. Also, we performed a knowledge elicitation through our studies to formulate our system's situation awareness requirements.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Requirements elicitation can be a challenging process in many systems. This challenge can be greater with a non-standard user population, such as visually impaired users. In this work, we report our experience and results of eliciting user requirements for a situation awareness indoor orientation system dedicated to the visually impaired. We elicited our initial system requirements through three different studies that focus on users along with orientation and mobility instructors. Also, we performed a knowledge elicitation through our studies to formulate our system's situation awareness requirements.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Requirements elicitation can be a challenging process in many systems. This challenge can be greater with a non-standard user population, such as visually impaired users. In this work, we report our experience and results of eliciting user requirements for a situation awareness indoor orientation system dedicated to the visually impaired. We elicited our initial system requirements through three different studies that focus on users along with orientation and mobility instructors. Also, we performed a knowledge elicitation through our studies to formulate our system's situation awareness requirements.",
"fno": "06912244",
"keywords": [
"Interviews",
"Indoor Environments",
"Navigation",
"Assistive Technology",
"Buildings",
"Visualization",
"Auditory System",
"Qualitative Analysis",
"Visual Impairment",
"Requirements Elicitation",
"Situation Awareness Requirements",
"Assistive Technology"
],
"authors": [
{
"affiliation": "Computing and Information Science Program, Rochester Institute of Technology (RIT), New York 14623, USA",
"fullName": "Abdulrhman Alkhanifer",
"givenName": "Abdulrhman",
"surname": "Alkhanifer",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Software Engineering Department, Rochester Institute of Technology (RIT), New York 14623, USA",
"fullName": "Stephanie Ludi",
"givenName": "Stephanie",
"surname": "Ludi",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "re",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2014-08-01T00:00:00",
"pubType": "proceedings",
"pages": "23-32",
"year": "2014",
"issn": null,
"isbn": "978-1-4799-3031-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "06912243",
"articleId": "12OmNCeK2ed",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "06912245",
"articleId": "12OmNzICEVd",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/reconfig/2008/3474/0/3474a001",
"title": "Reconfigurable PDA for the Visually Impaired Using FPGAs",
"doi": null,
"abstractUrl": "/proceedings-article/reconfig/2008/3474a001/12OmNBCZnSx",
"parentPublication": {
"id": "proceedings/reconfig/2008/3474/0",
"title": "Reconfigurable Computing and FPGAs, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vlsid/2018/3692/0/3692z031",
"title": "Tutorial T1A: Assistive Technology for Visually Impaired: Embedded & Vision Solutions",
"doi": null,
"abstractUrl": "/proceedings-article/vlsid/2018/3692z031/12OmNCtMM21",
"parentPublication": {
"id": "proceedings/vlsid/2018/3692/0",
"title": "2018 31st International Conference on VLSI Design and 2018 17th International Conference on Embedded Systems (VLSID)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/svr/2015/7204/0/7204a208",
"title": "A New 3D Interaction Technique Accessible to the Visually Impaired",
"doi": null,
"abstractUrl": "/proceedings-article/svr/2015/7204a208/12OmNqJ8tlc",
"parentPublication": {
"id": "proceedings/svr/2015/7204/0",
"title": "2015 XVII Symposium on Virtual and Augmented Reality (SVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2015/9711/0/5720a418",
"title": "Improving Indoor Mobility of the Visually Impaired with Depth-Based Spatial Sound",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2015/5720a418/12OmNxRWIdk",
"parentPublication": {
"id": "proceedings/iccvw/2015/9711/0",
"title": "2015 IEEE International Conference on Computer Vision Workshop (ICCVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/co/2014/04/mco2014040052",
"title": "New Opportunities for Computer Vision-Based Assistive Technology Systems for the Visually Impaired",
"doi": null,
"abstractUrl": "/magazine/co/2014/04/mco2014040052/13rRUwh80Os",
"parentPublication": {
"id": "mags/co",
"title": "Computer",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/th/2015/03/07271156",
"title": "Guest Editorial: Haptic assistive technology for individuals who are visually impaired",
"doi": null,
"abstractUrl": "/journal/th/2015/03/07271156/13rRUyuNsx7",
"parentPublication": {
"id": "trans/th",
"title": "IEEE Transactions on Haptics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bigcomp/2019/7789/0/08679289",
"title": "Event Venue Navigation for Visually Impaired People",
"doi": null,
"abstractUrl": "/proceedings-article/bigcomp/2019/08679289/18XkgEGveJW",
"parentPublication": {
"id": "proceedings/bigcomp/2019/7789/0",
"title": "2019 IEEE International Conference on Big Data and Smart Computing (BigComp)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iiai-aai/2018/7447/0/744701a326",
"title": "English Writing Support for Japanese EFL Visually-Impaired Learners: A Preliminary Study",
"doi": null,
"abstractUrl": "/proceedings-article/iiai-aai/2018/744701a326/19m3FXBkt68",
"parentPublication": {
"id": "proceedings/iiai-aai/2018/7447/0",
"title": "2018 7th International Congress on Advanced Applied Informatics (IIAI-AAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icore/2021/0210/0/021000a205",
"title": "PerSEEption: Mobile and Web Application Framework for Visually Impaired Individuals",
"doi": null,
"abstractUrl": "/proceedings-article/icore/2021/021000a205/1AqysLLMJ32",
"parentPublication": {
"id": "proceedings/icore/2021/0210/0",
"title": "2021 1st International Conference in Information and Computing Research (iCORE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/csci/2019/5584/0/558400b026",
"title": "Basic Study on Evaluation Method of Orientation and Mobility Skills Consideration for Visually Impaired Persons Based on Brain Activity",
"doi": null,
"abstractUrl": "/proceedings-article/csci/2019/558400b026/1jdDX7pg3za",
"parentPublication": {
"id": "proceedings/csci/2019/5584/0",
"title": "2019 International Conference on Computational Science and Computational Intelligence (CSCI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "17D45VtKirt",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"acronym": "cvpr",
"groupId": "1000147",
"volume": "0",
"displayVolume": "0",
"year": "2018",
"__typename": "ProceedingType"
},
"article": {
"id": "17D45VsBTZW",
"doi": "10.1109/CVPR.2018.00387",
"title": "Vision-and-Language Navigation: Interpreting Visually-Grounded Navigation Instructions in Real Environments",
"normalizedTitle": "Vision-and-Language Navigation: Interpreting Visually-Grounded Navigation Instructions in Real Environments",
"abstract": "A robot that can carry out a natural-language instruction has been a dream since before the Jetsons cartoon series imagined a life of leisure mediated by a fleet of attentive robot helpers. It is a dream that remains stubbornly distant. However, recent advances in vision and language methods have made incredible progress in closely related areas. This is significant because a robot interpreting a natural-language navigation instruction on the basis of what it sees is carrying out a vision and language process that is similar to Visual Question Answering. Both tasks can be interpreted as visually grounded sequence-to-sequence translation problems, and many of the same methods are applicable. To enable and encourage the application of vision and language methods to the problem of interpreting visually-grounded navigation instructions, we present the Matter-port3D Simulator - a large-scale reinforcement learning environment based on real imagery [11]. Using this simulator, which can in future support a range of embodied vision and language tasks, we provide the first benchmark dataset for visually-grounded natural language navigation in real buildings - the Room-to-Room (R2R) dataset1.",
"abstracts": [
{
"abstractType": "Regular",
"content": "A robot that can carry out a natural-language instruction has been a dream since before the Jetsons cartoon series imagined a life of leisure mediated by a fleet of attentive robot helpers. It is a dream that remains stubbornly distant. However, recent advances in vision and language methods have made incredible progress in closely related areas. This is significant because a robot interpreting a natural-language navigation instruction on the basis of what it sees is carrying out a vision and language process that is similar to Visual Question Answering. Both tasks can be interpreted as visually grounded sequence-to-sequence translation problems, and many of the same methods are applicable. To enable and encourage the application of vision and language methods to the problem of interpreting visually-grounded navigation instructions, we present the Matter-port3D Simulator - a large-scale reinforcement learning environment based on real imagery [11]. Using this simulator, which can in future support a range of embodied vision and language tasks, we provide the first benchmark dataset for visually-grounded natural language navigation in real buildings - the Room-to-Room (R2R) dataset1.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "A robot that can carry out a natural-language instruction has been a dream since before the Jetsons cartoon series imagined a life of leisure mediated by a fleet of attentive robot helpers. It is a dream that remains stubbornly distant. However, recent advances in vision and language methods have made incredible progress in closely related areas. This is significant because a robot interpreting a natural-language navigation instruction on the basis of what it sees is carrying out a vision and language process that is similar to Visual Question Answering. Both tasks can be interpreted as visually grounded sequence-to-sequence translation problems, and many of the same methods are applicable. To enable and encourage the application of vision and language methods to the problem of interpreting visually-grounded navigation instructions, we present the Matter-port3D Simulator - a large-scale reinforcement learning environment based on real imagery [11]. Using this simulator, which can in future support a range of embodied vision and language tasks, we provide the first benchmark dataset for visually-grounded natural language navigation in real buildings - the Room-to-Room (R2R) dataset1.",
"fno": "642000d674",
"keywords": [
"Human Robot Interaction",
"Learning Artificial Intelligence",
"Mobile Robots",
"Natural Language Processing",
"Robot Vision",
"Visually Grounded Navigation Instructions",
"Language Tasks",
"Sequence To Sequence Translation Problems",
"Visual Question Answering",
"Language Process",
"Natural Language Navigation Instruction",
"Language Methods",
"Attentive Robot Helpers",
"Jetsons Cartoon Series",
"Natural Language Instruction",
"Visually Grounded Natural Language Navigation",
"Navigation",
"Task Analysis",
"Robots",
"Visualization",
"Cameras",
"Three Dimensional Displays",
"Natural Languages"
],
"authors": [
{
"affiliation": null,
"fullName": "Peter Anderson",
"givenName": "Peter",
"surname": "Anderson",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Qi Wu",
"givenName": "Qi",
"surname": "Wu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Damien Teney",
"givenName": "Damien",
"surname": "Teney",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Jake Bruce",
"givenName": "Jake",
"surname": "Bruce",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Mark Johnson",
"givenName": "Mark",
"surname": "Johnson",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Niko Sünderhauf",
"givenName": "Niko",
"surname": "Sünderhauf",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Ian Reid",
"givenName": "Ian",
"surname": "Reid",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Stephen Gould",
"givenName": "Stephen",
"surname": "Gould",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Anton van den Hengel",
"givenName": "Anton",
"surname": "van den Hengel",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2018-06-01T00:00:00",
"pubType": "proceedings",
"pages": "3674-3683",
"year": "2018",
"issn": null,
"isbn": "978-1-5386-6420-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "642000d664",
"articleId": "17D45XDIXXI",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "642000d684",
"articleId": "17D45VtKiyr",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/bibe/2001/1423/0/00974434",
"title": "An intelligent assistant for navigation of visually impaired people",
"doi": null,
"abstractUrl": "/proceedings-article/bibe/2001/00974434/12OmNCu4ndg",
"parentPublication": {
"id": "proceedings/bibe/2001/1423/0",
"title": "Proceedings 2nd Annual IEEE International Symposium on Bioinformatics and Bioengineering (BIBE 2001)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2016/8851/0/8851e985",
"title": "VisualWord2Vec (Vis-W2V): Learning Visually Grounded Word Embeddings Using Abstract Scenes",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2016/8851e985/12OmNx76TJO",
"parentPublication": {
"id": "proceedings/cvpr/2016/8851/0",
"title": "2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmi/2002/1834/0/18340105",
"title": "Towards Visually-Grounded Spoken Language Acquisition",
"doi": null,
"abstractUrl": "/proceedings-article/icmi/2002/18340105/12OmNxIRxVV",
"parentPublication": {
"id": "proceedings/icmi/2002/1834/0",
"title": "Proceedings Fourth IEEE International Conference on Multimodal Interfaces",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200b645",
"title": "VLGrammar: Grounded Grammar Induction of Vision and Language",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200b645/1BmGBfsMybK",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600p5439",
"title": "Cross-modal Map Learning for Vision and Language Navigation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600p5439/1H0MZqH9FAY",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600p5407",
"title": "Less is More: Generating Grounded Navigation Instructions from Landmarks",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600p5407/1H1hxJiUhOw",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2021/12/08986691",
"title": "Vision-Language Navigation Policy Learning and Adaptation",
"doi": null,
"abstractUrl": "/journal/tp/2021/12/08986691/1hed5w5d4WY",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800k0737",
"title": "ALFRED: A Benchmark for Interpreting Grounded Instructions for Everyday Tasks",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800k0737/1m3nIzbUrDi",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800k0727",
"title": "Vision-Dialog Navigation by Exploring Cross-Modal Memory",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800k0727/1m3ooaBZ0k0",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2022/10/09488322",
"title": "Adversarial Reinforced Instruction Attacker for Robust Vision-Language Navigation",
"doi": null,
"abstractUrl": "/journal/tp/2022/10/09488322/1vhIavPG69W",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1BmEezmpGrm",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"acronym": "iccv",
"groupId": "1000149",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1BmFjj4jXva",
"doi": "10.1109/ICCV48922.2021.01581",
"title": "Auxiliary Tasks and Exploration Enable ObjectGoal Navigation",
"normalizedTitle": "Auxiliary Tasks and Exploration Enable ObjectGoal Navigation",
"abstract": "ObjectGoal Navigation (ObjectNav) is an embodied task wherein agents are to navigate to an object instance in an unseen environment. Prior works have shown that end-to-end ObjectNav agents that use vanilla visual and recurrent modules, e.g. a CNN+RNN, perform poorly due to overfitting and sample inefficiency. This has motivated current state-of-the-art methods to mix analytic and learned components and operate on explicit spatial maps of the environment. We instead re-enable a generic learned agent by adding auxiliary learning tasks and an exploration reward. Our agents achieve 24.5% success and 8.1% SPL, a 37% and 8% relative improvement over prior state-of-the-art, respectively, on the Habitat ObjectNav Challenge [35]. From our analysis, we propose that agents will act to simplify their visual inputs so as to smooth their RNN dynamics, and that auxiliary tasks reduce overfitting by minimizing effective RNN dimensionality; i.e. a performant ObjectNav agent that must maintain coherent plans over long horizons does so by learning smooth, low-dimensional recurrent dynamics. Site: joel99.github.io/objectnav/",
"abstracts": [
{
"abstractType": "Regular",
"content": "ObjectGoal Navigation (ObjectNav) is an embodied task wherein agents are to navigate to an object instance in an unseen environment. Prior works have shown that end-to-end ObjectNav agents that use vanilla visual and recurrent modules, e.g. a CNN+RNN, perform poorly due to overfitting and sample inefficiency. This has motivated current state-of-the-art methods to mix analytic and learned components and operate on explicit spatial maps of the environment. We instead re-enable a generic learned agent by adding auxiliary learning tasks and an exploration reward. Our agents achieve 24.5% success and 8.1% SPL, a 37% and 8% relative improvement over prior state-of-the-art, respectively, on the Habitat ObjectNav Challenge [35]. From our analysis, we propose that agents will act to simplify their visual inputs so as to smooth their RNN dynamics, and that auxiliary tasks reduce overfitting by minimizing effective RNN dimensionality; i.e. a performant ObjectNav agent that must maintain coherent plans over long horizons does so by learning smooth, low-dimensional recurrent dynamics. Site: joel99.github.io/objectnav/",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "ObjectGoal Navigation (ObjectNav) is an embodied task wherein agents are to navigate to an object instance in an unseen environment. Prior works have shown that end-to-end ObjectNav agents that use vanilla visual and recurrent modules, e.g. a CNN+RNN, perform poorly due to overfitting and sample inefficiency. This has motivated current state-of-the-art methods to mix analytic and learned components and operate on explicit spatial maps of the environment. We instead re-enable a generic learned agent by adding auxiliary learning tasks and an exploration reward. Our agents achieve 24.5% success and 8.1% SPL, a 37% and 8% relative improvement over prior state-of-the-art, respectively, on the Habitat ObjectNav Challenge [35]. From our analysis, we propose that agents will act to simplify their visual inputs so as to smooth their RNN dynamics, and that auxiliary tasks reduce overfitting by minimizing effective RNN dimensionality; i.e. a performant ObjectNav agent that must maintain coherent plans over long horizons does so by learning smooth, low-dimensional recurrent dynamics. Site: joel99.github.io/objectnav/",
"fno": "281200q6097",
"keywords": [
"Visualization",
"Computer Vision",
"Navigation",
"Task Analysis"
],
"authors": [
{
"affiliation": "Georgia Institute of Technology",
"fullName": "Joel Ye",
"givenName": "Joel",
"surname": "Ye",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Georgia Institute of Technology",
"fullName": "Dhruv Batra",
"givenName": "Dhruv",
"surname": "Batra",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Facebook AI Research",
"fullName": "Abhishek Das",
"givenName": "Abhishek",
"surname": "Das",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Georgia Institute of Technology",
"fullName": "Erik Wijmans",
"givenName": "Erik",
"surname": "Wijmans",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iccv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-10-01T00:00:00",
"pubType": "proceedings",
"pages": "16097-16106",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-2812-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "281200q6087",
"articleId": "1BmJNEYstkk",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "281200q6107",
"articleId": "1BmL4OyLuj6",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iccv/2021/2812/0/281200p5354",
"title": "THDA: Treasure Hunt Data Augmentation for Semantic Navigation",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200p5354/1BmJ4JXGXAs",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200p5671",
"title": "RobustNav: Towards Benchmarking Robustness in Embodied Navigation",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200p5671/1BmJL8Ucwh2",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200o4144",
"title": "CR-Fill: Generative Image Inpainting with Auxiliary Contextual Reconstruction",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200o4144/1BmKzxGOJeU",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600o4809",
"title": "Simple but Effective: CLIP Embeddings for Embodied AI",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600o4809/1H1llfiVtXa",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600p5386",
"title": "Envedit: Environment Editing for Vision-and-Language Navigation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600p5386/1H1n6u58BJS",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2023/9346/0/934600b104",
"title": "Structure-Encoding Auxiliary Tasks for Improved Visual Representation in Vision-and-Language Navigation",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2023/934600b104/1KxUN8QrZ1C",
"parentPublication": {
"id": "proceedings/wacv/2023/9346/0",
"title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aciiw/2019/3891/0/08925021",
"title": "Robust Emotion Navigation: Few-shot Visual Sentiment Analysis by Auxiliary Noisy Data",
"doi": null,
"abstractUrl": "/proceedings-article/aciiw/2019/08925021/1fHFeo4qOs0",
"parentPublication": {
"id": "proceedings/aciiw/2019/3891/0",
"title": "2019 8th International Conference on Affective Computing and Intelligent Interaction Workshops and Demos (ACIIW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800k0009",
"title": "Vision-Language Navigation With Self-Supervised Auxiliary Reasoning Tasks",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800k0009/1m3nTvLItDG",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2021/0477/0/047700a717",
"title": "Auxiliary Tasks for Efficient Learning of Point-Goal Navigation",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2021/047700a717/1uqGrMNPGBG",
"parentPublication": {
"id": "proceedings/wacv/2021/0477/0",
"title": "2021 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900j863",
"title": "Pushing it out of the Way: Interactive Visual Navigation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900j863/1yeJ7xA5QB2",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1MeoElmyyEo",
"title": "2022 16th International Conference on Signal-Image Technology & Internet-Based Systems (SITIS)",
"acronym": "sitis",
"groupId": "10089803",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1MeoIXaUWQ0",
"doi": "10.1109/SITIS57111.2022.00029",
"title": "Visual-auditory substitution device for indoor navigation based on fast visual marker detection",
"normalizedTitle": "Visual-auditory substitution device for indoor navigation based on fast visual marker detection",
"abstract": "This paper proposes a new navigation device to assist visually impaired people reach a defined destination safely in the indoor environment. This approach based on visual-auditory substitution provides the user a 2D spatial sound perception of the destination and of nearby and dangerous obstacles. Visual markers are placed at several relevant locations to create a mesh of the building where each marker is visually accessible from another marker. A graph representation of markers locations and their connection to each other defines by a way finding algorithm the shortest path reach to the wished position. The navigation task is achieved by moving from visual marker to visual marker until the desired destination is reached. These markers can be used independently of any other system or in addition to other solutions based on geolocalisation and/or a digital building model. Moreover, further information can be associated to the markers, and therefore verbalize to the user for instance a temporary hazards, a door presence or any other usual displayed information. The passive visual markers enables to deploy easily and quickly a scalable and low-cost solution to \"signpost\" the environment for users. Combined with our realtime implemented obstacle detection, their analysis enables the navigational abilities of visually impaired people to be improved.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper proposes a new navigation device to assist visually impaired people reach a defined destination safely in the indoor environment. This approach based on visual-auditory substitution provides the user a 2D spatial sound perception of the destination and of nearby and dangerous obstacles. Visual markers are placed at several relevant locations to create a mesh of the building where each marker is visually accessible from another marker. A graph representation of markers locations and their connection to each other defines by a way finding algorithm the shortest path reach to the wished position. The navigation task is achieved by moving from visual marker to visual marker until the desired destination is reached. These markers can be used independently of any other system or in addition to other solutions based on geolocalisation and/or a digital building model. Moreover, further information can be associated to the markers, and therefore verbalize to the user for instance a temporary hazards, a door presence or any other usual displayed information. The passive visual markers enables to deploy easily and quickly a scalable and low-cost solution to \"signpost\" the environment for users. Combined with our realtime implemented obstacle detection, their analysis enables the navigational abilities of visually impaired people to be improved.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper proposes a new navigation device to assist visually impaired people reach a defined destination safely in the indoor environment. This approach based on visual-auditory substitution provides the user a 2D spatial sound perception of the destination and of nearby and dangerous obstacles. Visual markers are placed at several relevant locations to create a mesh of the building where each marker is visually accessible from another marker. A graph representation of markers locations and their connection to each other defines by a way finding algorithm the shortest path reach to the wished position. The navigation task is achieved by moving from visual marker to visual marker until the desired destination is reached. These markers can be used independently of any other system or in addition to other solutions based on geolocalisation and/or a digital building model. Moreover, further information can be associated to the markers, and therefore verbalize to the user for instance a temporary hazards, a door presence or any other usual displayed information. The passive visual markers enables to deploy easily and quickly a scalable and low-cost solution to \"signpost\" the environment for users. Combined with our realtime implemented obstacle detection, their analysis enables the navigational abilities of visually impaired people to be improved.",
"fno": "649500a259",
"keywords": [
"Graph Theory",
"Handicapped Aids",
"Indoor Navigation",
"Mobile Robots",
"Navigation",
"Path Planning",
"Auditory Substitution Device",
"Defined Destination",
"Fast Visual Marker Detection",
"Indoor Navigation",
"Markers Locations",
"Navigation Device",
"Passive Visual Markers",
"Visual Auditory Substitution",
"Visually Impaired People",
"Visualization",
"Indoor Navigation",
"Geology",
"Buildings",
"Hazards",
"Indoor Environment",
"Task Analysis",
"Auditory Sensory Substitution",
"Wearable Assistive Device",
"Navigation Aid",
"Obstacle Avoidance",
"Visual Impairment",
"Sonification",
"Visual Marker Detection"
],
"authors": [
{
"affiliation": "Univ. Bourgogne-Franche-Comté,ImViA EA 7535,Dijon,France",
"fullName": "Florian Scalvini",
"givenName": "Florian",
"surname": "Scalvini",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Univ. Bourgogne-Franche-Comté,LEAD CNRS UMR 5022,Dijon,France",
"fullName": "Camille Bordeau",
"givenName": "Camille",
"surname": "Bordeau",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Univ. Bourgogne-Franche-Comté,LEAD CNRS UMR 5022,Dijon,France",
"fullName": "Maxime Ambard",
"givenName": "Maxime",
"surname": "Ambard",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Univ. Bourgogne-Franche-Comté,ImViA EA 7535,Dijon,France",
"fullName": "Cyrille Migniot",
"givenName": "Cyrille",
"surname": "Migniot",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Univ. Bourgogne-Franche-Comté,LEAD CNRS UMR 5022,Dijon,France",
"fullName": "Stéphane Argon",
"givenName": "Stéphane",
"surname": "Argon",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Univ. Bourgogne-Franche-Comté,ImViA EA 7535,Dijon,France",
"fullName": "Julien Dubois",
"givenName": "Julien",
"surname": "Dubois",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "sitis",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-10-01T00:00:00",
"pubType": "proceedings",
"pages": "259-266",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-6495-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "649500a254",
"articleId": "1MeoFaJX6ko",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "649500a267",
"articleId": "1MeoMns2fNm",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2007/0905/0/04161007",
"title": "Indoor Marker-based Localization Using Coded Seamless Pattern for Interior Decoration",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2007/04161007/12OmNC0y5HW",
"parentPublication": {
"id": "proceedings/vr/2007/0905/0",
"title": "2007 IEEE Virtual Reality Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2012/2216/0/06460264",
"title": "A visual marker for precise pose estimation based on a microlens array",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2012/06460264/12OmNC2OSLm",
"parentPublication": {
"id": "proceedings/icpr/2012/2216/0",
"title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2013/2869/0/06671813",
"title": "Further stabilization of a microlens-array-based fiducial marker",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2013/06671813/12OmNCfAPL4",
"parentPublication": {
"id": "proceedings/ismar/2013/2869/0",
"title": "2013 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2002/1781/0/17810097",
"title": "Visual Marker Detection and Decoding in AR Systems: A Comparative Study",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2002/17810097/12OmNrHB1TZ",
"parentPublication": {
"id": "proceedings/ismar/2002/1781/0",
"title": "Proceedings. International Symposium on Mixed and Augmented Reality",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ic3/2015/7947/0/07346707",
"title": "VISORV: Board reading, getting directions through Marker Detection for partially visually impaired people",
"doi": null,
"abstractUrl": "/proceedings-article/ic3/2015/07346707/12OmNvk7K5H",
"parentPublication": {
"id": "proceedings/ic3/2015/7947/0",
"title": "2015 Eighth International Conference on Contemporary Computing (IC3)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cgiv/2011/4484/0/4484a007",
"title": "Stabilizing Marker-Based Visual Tracking Using Markers with Scattering Materials and Multiple Cameras",
"doi": null,
"abstractUrl": "/proceedings-article/cgiv/2011/4484a007/12OmNwKoZdD",
"parentPublication": {
"id": "proceedings/cgiv/2011/4484/0",
"title": "2011 Eighth International Conference Computer Graphics, Imaging and Visualization",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismarw/2016/3740/0/07836466",
"title": "Mobile Augmented Reality Based on Invisible Marker",
"doi": null,
"abstractUrl": "/proceedings-article/ismarw/2016/07836466/12OmNx7G5Tm",
"parentPublication": {
"id": "proceedings/ismarw/2016/3740/0",
"title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2007/0905/0/04161037",
"title": "A Nested Marker for Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2007/04161037/12OmNy7yEcW",
"parentPublication": {
"id": "proceedings/vr/2007/0905/0",
"title": "2007 IEEE Virtual Reality Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cse/2022/9633/0/963300a017",
"title": "LED Dynamic Marker and Tracking Algorithm for External Camera Positioning",
"doi": null,
"abstractUrl": "/proceedings-article/cse/2022/963300a017/1Lz22xy43PW",
"parentPublication": {
"id": "proceedings/cse/2022/9633/0",
"title": "2022 IEEE 25th International Conference on Computational Science and Engineering (CSE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/compsac/2021/2463/0/246300a520",
"title": "Towards An Indoor Navigation System Using Monocular Visual SLAM",
"doi": null,
"abstractUrl": "/proceedings-article/compsac/2021/246300a520/1wLcJvglTqw",
"parentPublication": {
"id": "proceedings/compsac/2021/2463/0",
"title": "2021 IEEE 45th Annual Computers, Software, and Applications Conference (COMPSAC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1cI6akLvAuQ",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2019",
"__typename": "ProceedingType"
},
"article": {
"id": "1cJ12QpKiS4",
"doi": "10.1109/VR.2019.8797903",
"title": "Hearing with Eyes in Virtual Reality",
"normalizedTitle": "Hearing with Eyes in Virtual Reality",
"abstract": "Sound and light signal propagation have similar physical properties. This provides inspiration for creating an audio-visual echolocation system, where light is mapped to the sound signal, visually representing auralization of the virtual environment (VE). Some mammals navigate using echolocation; however humans are less successful with this. To the authors' knowledge, it remains to be seen if sound propagation and its visualization have been implemented in a perceptually pleasant way and is used for navigation purposes in the VE. Therefore, the core novelty of this research is navigation with visualized echolocation signal using a cognitive mental mapping activity in the VE.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Sound and light signal propagation have similar physical properties. This provides inspiration for creating an audio-visual echolocation system, where light is mapped to the sound signal, visually representing auralization of the virtual environment (VE). Some mammals navigate using echolocation; however humans are less successful with this. To the authors' knowledge, it remains to be seen if sound propagation and its visualization have been implemented in a perceptually pleasant way and is used for navigation purposes in the VE. Therefore, the core novelty of this research is navigation with visualized echolocation signal using a cognitive mental mapping activity in the VE.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Sound and light signal propagation have similar physical properties. This provides inspiration for creating an audio-visual echolocation system, where light is mapped to the sound signal, visually representing auralization of the virtual environment (VE). Some mammals navigate using echolocation; however humans are less successful with this. To the authors' knowledge, it remains to be seen if sound propagation and its visualization have been implemented in a perceptually pleasant way and is used for navigation purposes in the VE. Therefore, the core novelty of this research is navigation with visualized echolocation signal using a cognitive mental mapping activity in the VE.",
"fno": "08797903",
"keywords": [
"Audio Signal Processing",
"Bioacoustics",
"Cognition",
"Hearing",
"Mechanoception",
"Virtual Reality",
"Sound Propagation",
"Navigation Purposes",
"Visualized Echolocation Signal",
"Cognitive Mental Mapping Activity",
"Virtual Reality",
"Audio Visual Echolocation System",
"Sound Signal",
"Auralization",
"Virtual Environment",
"Mammals",
"Physical Properties",
"VE",
"Light Signal Propagation",
"Visualization",
"Navigation",
"Reverberation",
"Virtual Environments",
"Three Dimensional Displays",
"Solid Modeling",
"Human Centered Computing",
"Visualization Design And Evaluation Methods",
"Sound",
"Echolocation",
"Virtual Reality"
],
"authors": [
{
"affiliation": "Design and Media Technology, Aalborg University, Copenhagen",
"fullName": "Amalie Rosenkvist",
"givenName": "Amalie",
"surname": "Rosenkvist",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Design and Media Technology, Aalborg University, Copenhagen",
"fullName": "David Sebastian Eriksen",
"givenName": "David Sebastian",
"surname": "Eriksen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Design and Media Technology, Aalborg University, Copenhagen",
"fullName": "Jeppe Koehlert",
"givenName": "Jeppe",
"surname": "Koehlert",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Design and Media Technology, Aalborg University, Copenhagen",
"fullName": "Miicha Valimaa",
"givenName": "Miicha",
"surname": "Valimaa",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Design and Media Technology, Aalborg University, Copenhagen",
"fullName": "Mikkel Brogaard Vittrup",
"givenName": "Mikkel Brogaard",
"surname": "Vittrup",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Design and Media Technology, Aalborg University, Copenhagen",
"fullName": "Anastasia Andreasen",
"givenName": "Anastasia",
"surname": "Andreasen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Design and Media Technology, Aalborg University, Copenhagen",
"fullName": "George Palamas",
"givenName": "George",
"surname": "Palamas",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2019-03-01T00:00:00",
"pubType": "proceedings",
"pages": "1349-1350",
"year": "2019",
"issn": null,
"isbn": "978-1-7281-1377-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08798062",
"articleId": "1cJ0QAGmFc4",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08798005",
"articleId": "1cJ0JlRqqLm",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2013/4795/0/06549386",
"title": "Flexible spaces: A virtual step outside of reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2013/06549386/12OmNBOllfZ",
"parentPublication": {
"id": "proceedings/vr/2013/4795/0",
"title": "2013 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2017/6647/0/07892331",
"title": "Advertising perception with immersive virtual reality devices",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892331/12OmNvk7JO0",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/mu/2005/02/u2080",
"title": "Navigation with Auditory Cues in a Virtual Environment",
"doi": null,
"abstractUrl": "/magazine/mu/2005/02/u2080/13rRUwInvi2",
"parentPublication": {
"id": "mags/mu",
"title": "IEEE MultiMedia",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/05/08643846",
"title": "Auditory Feedback for Navigation with Echoes in Virtual Environments: Training Procedure and Orientation Strategies",
"doi": null,
"abstractUrl": "/journal/tg/2019/05/08643846/17PYEjJvAVJ",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a794",
"title": "An Investigation on the Relationship between Cybersickness and Heart Rate Variability When Navigating a Virtual Environment",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a794/1J7We4du3FC",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aivr/2022/5725/0/572500a082",
"title": "WiM-Based Group Navigation for Collaborative Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/aivr/2022/572500a082/1KmFfzv6fWo",
"parentPublication": {
"id": "proceedings/aivr/2022/5725/0",
"title": "2022 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797807",
"title": "Multi-Ray Jumping: Comprehensible Group Navigation for Collocated Users in Immersive Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797807/1cJ0MXFzine",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798345",
"title": "Investigation of Visual Self-Representation for a Walking-in-Place Navigation System in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798345/1cJ1hpkUgHS",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2021/3827/0/382700a137",
"title": "Vibrotactile feedback models to explore virtual reality without going round in circles",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2021/382700a137/1y4oKqbRCtq",
"parentPublication": {
"id": "proceedings/iv/2021/3827/0",
"title": "2021 25th International Conference Information Visualisation (IV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2021/1298/0/129800a364",
"title": "Positive Computing in Virtual Reality Industrial Training",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2021/129800a364/1yeQPzAwwog",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2021/1298/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1cI6akLvAuQ",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2019",
"__typename": "ProceedingType"
},
"article": {
"id": "1cJ16WXXAVa",
"doi": "10.1109/VR.2019.8797914",
"title": "P-Reverb: Perceptual Characterization of Early and Late Reflections for Auditory Displays",
"normalizedTitle": "P-Reverb: Perceptual Characterization of Early and Late Reflections for Auditory Displays",
"abstract": "We introduce a novel, perceptually derived metric (P - Reverb) that relates the just-noticeable difference (JND) of the early sound field (also called early reflections) to the late sound field (known as late reflections or reverberation). Early and late reflections are crucial components of the sound field and provide multiple perceptual cues for auditory displays. We conduct two extensive user evaluations that relate the JNDs of early reflections and late reverberation in terms of the mean-free path of the environment and present a novel P - Reverb metric. Our metric is used to estimate dynamic reverberation characteristics efficiently in terms of important parameters like reverberation time (RT60). We show the numerical accuracy of our P - Reverb metric in estimating RT60. Finally, we use our metric to design an interactive sound propagation algorithm and demonstrate its effectiveness on various benchmarks.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We introduce a novel, perceptually derived metric (P - Reverb) that relates the just-noticeable difference (JND) of the early sound field (also called early reflections) to the late sound field (known as late reflections or reverberation). Early and late reflections are crucial components of the sound field and provide multiple perceptual cues for auditory displays. We conduct two extensive user evaluations that relate the JNDs of early reflections and late reverberation in terms of the mean-free path of the environment and present a novel P - Reverb metric. Our metric is used to estimate dynamic reverberation characteristics efficiently in terms of important parameters like reverberation time (RT60). We show the numerical accuracy of our P - Reverb metric in estimating RT60. Finally, we use our metric to design an interactive sound propagation algorithm and demonstrate its effectiveness on various benchmarks.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We introduce a novel, perceptually derived metric (P - Reverb) that relates the just-noticeable difference (JND) of the early sound field (also called early reflections) to the late sound field (known as late reflections or reverberation). Early and late reflections are crucial components of the sound field and provide multiple perceptual cues for auditory displays. We conduct two extensive user evaluations that relate the JNDs of early reflections and late reverberation in terms of the mean-free path of the environment and present a novel P - Reverb metric. Our metric is used to estimate dynamic reverberation characteristics efficiently in terms of important parameters like reverberation time (RT60). We show the numerical accuracy of our P - Reverb metric in estimating RT60. Finally, we use our metric to design an interactive sound propagation algorithm and demonstrate its effectiveness on various benchmarks.",
"fno": "08797914",
"keywords": [
"Auditory Displays",
"Reverberation",
"Late Reflections",
"Multiple Perceptual Cues",
"Auditory Displays",
"Early Reflections",
"Late Reverberation",
"Reverb Metric",
"Dynamic Reverberation Characteristics",
"Interactive Sound Propagation Algorithm",
"P Reverb",
"Early Sound Field",
"Late Sound Field",
"Reverberation",
"Measurement",
"Erbium",
"Rendering Computer Graphics",
"Ray Tracing",
"Auditory Displays"
],
"authors": [
{
"affiliation": "University of North Carolina, Chapel Hill",
"fullName": "Atul Rungta",
"givenName": "Atul",
"surname": "Rungta",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of North Carolina, Chapel Hill",
"fullName": "Nicholas Rewkowski",
"givenName": "Nicholas",
"surname": "Rewkowski",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Carnegie Mellon University",
"fullName": "Roberta Klatzky",
"givenName": "Roberta",
"surname": "Klatzky",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Maryland, College Park",
"fullName": "Dinesh Manocha",
"givenName": "Dinesh",
"surname": "Manocha",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2019-03-01T00:00:00",
"pubType": "proceedings",
"pages": "455-463",
"year": "2019",
"issn": null,
"isbn": "978-1-7281-1377-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08798177",
"articleId": "1cJ13xpYvE4",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08797816",
"articleId": "1cJ0ZXuMnCM",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icassp/2009/2353/0/04960502",
"title": "A blind speech enhancement algorithm for the suppression of late reverberation and noise",
"doi": null,
"abstractUrl": "/proceedings-article/icassp/2009/04960502/12OmNBkP3xy",
"parentPublication": {
"id": "proceedings/icassp/2009/2353/0",
"title": "Acoustics, Speech, and Signal Processing, IEEE International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2008/1971/0/04480787",
"title": "Extending X3D with Perceptual Auditory Properties",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2008/04480787/12OmNC3Xhz8",
"parentPublication": {
"id": "proceedings/vr/2008/1971/0",
"title": "IEEE Virtual Reality 2008",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isspit/2015/0481/0/07394317",
"title": "Statistical modeling for suppression of late reverberation with inverse filtering for early reflections",
"doi": null,
"abstractUrl": "/proceedings-article/isspit/2015/07394317/12OmNqGA58J",
"parentPublication": {
"id": "proceedings/isspit/2015/0481/0",
"title": "2015 IEEE International Symposium on Signal Processing and Information Technology (ISSPIT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/visual/1990/2083/0/00146393",
"title": "A numerical method for rendering spherical reflections",
"doi": null,
"abstractUrl": "/proceedings-article/visual/1990/00146393/12OmNx2QULm",
"parentPublication": {
"id": "proceedings/visual/1990/2083/0",
"title": "1990 First IEEE Conference on Visualization",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/snpd/2012/2120/0/06299338",
"title": "Evaluation of Realism of Dynamic Sound Space Using a Virtual Auditory Display",
"doi": null,
"abstractUrl": "/proceedings-article/snpd/2012/06299338/12OmNz4SOxH",
"parentPublication": {
"id": "proceedings/snpd/2012/2120/0",
"title": "2012 13th ACIS International Conference on Software Engineering, Artificial Intelligence, Networking and Parallel & Distributed Computing (SNPD 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sbgames/2014/8065/0/8065a240",
"title": "Ray-Traced Reflections in Real-Time Using Heuristic Based Hybrid Rendering",
"doi": null,
"abstractUrl": "/proceedings-article/sbgames/2014/8065a240/12OmNzTYC4n",
"parentPublication": {
"id": "proceedings/sbgames/2014/8065/0",
"title": "2014 Brazilian Symposium on Computer Games and Digital Entertainment (SBGAMES)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/09/06803934",
"title": "Second-Order Feed-Forward Renderingfor Specular and Glossy Reflections",
"doi": null,
"abstractUrl": "/journal/tg/2014/09/06803934/13rRUwInvyA",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2000/03/v0253",
"title": "Perturbation Methods for Interactive Specular Reflections",
"doi": null,
"abstractUrl": "/journal/tg/2000/03/v0253/13rRUwj7coZ",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/04/ttg2013040567",
"title": "Aural Proxies and Directionally-Varying Reverberation for Interactive Sound Propagation in Virtual Environments",
"doi": null,
"abstractUrl": "/journal/tg/2013/04/ttg2013040567/13rRUxD9gXG",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/05/08643846",
"title": "Auditory Feedback for Navigation with Echoes in Virtual Environments: Training Procedure and Orientation Strategies",
"doi": null,
"abstractUrl": "/journal/tg/2019/05/08643846/17PYEjJvAVJ",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNAYXWAF",
"title": "2016 IEEE Virtual Reality (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2016",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNqBKTPQ",
"doi": "10.1109/VR.2016.7504706",
"title": "Animated self-avatars for motor rehabilitation applications that are biomechanically accurate, low-latency and easy to use",
"normalizedTitle": "Animated self-avatars for motor rehabilitation applications that are biomechanically accurate, low-latency and easy to use",
"abstract": "The emerging use of self-avatars for physical and motor rehabilitation leads to specific requirements for their real-time animation that combine properties from the fields of computer graphics and of biomechanics. We present a method for animating a self-avatar in real-time that allows for high-fidelity representation of whole-body kinematics using anatomical and reproducible bone-segment definition. The method requires little setup time and has low motion-to-photon latency.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The emerging use of self-avatars for physical and motor rehabilitation leads to specific requirements for their real-time animation that combine properties from the fields of computer graphics and of biomechanics. We present a method for animating a self-avatar in real-time that allows for high-fidelity representation of whole-body kinematics using anatomical and reproducible bone-segment definition. The method requires little setup time and has low motion-to-photon latency.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The emerging use of self-avatars for physical and motor rehabilitation leads to specific requirements for their real-time animation that combine properties from the fields of computer graphics and of biomechanics. We present a method for animating a self-avatar in real-time that allows for high-fidelity representation of whole-body kinematics using anatomical and reproducible bone-segment definition. The method requires little setup time and has low motion-to-photon latency.",
"fno": "07504706",
"keywords": [
"Avatars",
"Animation",
"Real Time Systems",
"Kinematics",
"Knee",
"Biomechanics",
"Calibration",
"Self Avatar",
"Animation",
"Forward Kinematics",
"Rehabilitation"
],
"authors": [
{
"affiliation": "École de technologie supérieure, Montreal, Canada",
"fullName": "Mikael Dallaire-Côté",
"givenName": "Mikael",
"surname": "Dallaire-Côté",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "École de technologie supérieure, Montreal, Canada",
"fullName": "Philippe Charbonneau",
"givenName": "Philippe",
"surname": "Charbonneau",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "École de technologie supérieure, Montreal, Canada",
"fullName": "Sara St-Pierre Côté",
"givenName": "Sara St-Pierre",
"surname": "Côté",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "École de technologie supérieure, Montreal, Canada",
"fullName": "Rachid Aissaoui",
"givenName": "Rachid",
"surname": "Aissaoui",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "École de technologie supérieure, Montreal, Canada",
"fullName": "David R. Labbe",
"givenName": "David R.",
"surname": "Labbe",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2016-03-01T00:00:00",
"pubType": "proceedings",
"pages": "167-168",
"year": "2016",
"issn": "2375-5334",
"isbn": "978-1-5090-0836-0",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07504705",
"articleId": "12OmNrJ11ys",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07504707",
"articleId": "12OmNx3HI96",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2014/2871/0/06802113",
"title": "Automatic acquisition and animation of virtual avatars",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2014/06802113/12OmNCeaQ1Z",
"parentPublication": {
"id": "proceedings/vr/2014/2871/0",
"title": "2014 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hicss/2016/5670/0/5670a051",
"title": "Introducing Avatarification: An Experimental Examination of How Avatars Influence Student Motivation",
"doi": null,
"abstractUrl": "/proceedings-article/hicss/2016/5670a051/12OmNvDqsPL",
"parentPublication": {
"id": "proceedings/hicss/2016/5670/0",
"title": "2016 49th Hawaii International Conference on System Sciences (HICSS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2017/6647/0/07892240",
"title": "Rapid one-shot acquisition of dynamic VR avatars",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892240/12OmNwGZNLp",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2013/4795/0/06549379",
"title": "Head motion animation using avatar gaze space",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2013/06549379/12OmNxRWI3d",
"parentPublication": {
"id": "proceedings/vr/2013/4795/0",
"title": "2013 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2017/6647/0/07892372",
"title": "Demonstration: Rapid one-shot acquisition of dynamic VR avatars",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892372/12OmNz2C1zq",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aivr/2018/9269/0/926900a237",
"title": "Integrating Biomechanical and Animation Motion Capture Methods in the Production of Participant Specific, Scaled Avatars",
"doi": null,
"abstractUrl": "/proceedings-article/aivr/2018/926900a237/17D45XeKgqk",
"parentPublication": {
"id": "proceedings/aivr/2018/9269/0",
"title": "2018 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2022/5325/0/532500a666",
"title": "Investigating User Embodiment of Inverse-Kinematic Avatars in Smartphone Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2022/532500a666/1JrR5i5jDhe",
"parentPublication": {
"id": "proceedings/ismar/2022/5325/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fg/2023/4544/0/10042759",
"title": "Signing Avatars - Multimodal Challenges for Text-to-sign Generation",
"doi": null,
"abstractUrl": "/proceedings-article/fg/2023/10042759/1KOuYqnUIRa",
"parentPublication": {
"id": "proceedings/fg/2023/4544/0",
"title": "2023 IEEE 17th International Conference on Automatic Face and Gesture Recognition (FG)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797907",
"title": "Digital Demons: Psychological Effects of Creating, and Engaging with, Virtual Avatars Representing Undesirable Aspects of the Self",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797907/1cJ1eAkUYNO",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aivr/2020/7463/0/746300a091",
"title": "MoveBox: Democratizing MoCap for the Microsoft Rocketbox Avatar Library",
"doi": null,
"abstractUrl": "/proceedings-article/aivr/2020/746300a091/1qpzzqGXwA0",
"parentPublication": {
"id": "proceedings/aivr/2020/7463/0",
"title": "2020 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNxV4itF",
"title": "2017 IEEE Virtual Reality (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2017",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNwGZNLp",
"doi": "10.1109/VR.2017.7892240",
"title": "Rapid one-shot acquisition of dynamic VR avatars",
"normalizedTitle": "Rapid one-shot acquisition of dynamic VR avatars",
"abstract": "We present a system for rapid acquisition of bespoke, animatable, full-body avatars including face texture and shape. A blendshape rig with a skeleton is used as a template for customization. Identity blendshapes are used to customize the body and face shape at the fitting stage, while animation blendshapes allow the face to be animated. The subject assumes a T-pose and a single snapshot is captured using a stereo RGB plus depth sensor rig. Our system automatically aligns a photo texture and fits the 3D shape of the face. The body shape is stylized according to body dimensions estimated from segmented depth. The face identity blendweights are optimised according to image-based facial landmarks, while a custom texture map for the face is generated by warping the input images to a reference texture according to the facial landmarks. The total capture and processing time is under 10 seconds and the output is a light-weight, game-engine-ready avatar which is recognizable as the subject. We demonstrate our system in a VR environment in which each user sees the other users' animated avatars through a VR headset with real-time audio-based facial animation and live body motion tracking, affording an enhanced level of presence and social engagement compared to generic avatars.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We present a system for rapid acquisition of bespoke, animatable, full-body avatars including face texture and shape. A blendshape rig with a skeleton is used as a template for customization. Identity blendshapes are used to customize the body and face shape at the fitting stage, while animation blendshapes allow the face to be animated. The subject assumes a T-pose and a single snapshot is captured using a stereo RGB plus depth sensor rig. Our system automatically aligns a photo texture and fits the 3D shape of the face. The body shape is stylized according to body dimensions estimated from segmented depth. The face identity blendweights are optimised according to image-based facial landmarks, while a custom texture map for the face is generated by warping the input images to a reference texture according to the facial landmarks. The total capture and processing time is under 10 seconds and the output is a light-weight, game-engine-ready avatar which is recognizable as the subject. We demonstrate our system in a VR environment in which each user sees the other users' animated avatars through a VR headset with real-time audio-based facial animation and live body motion tracking, affording an enhanced level of presence and social engagement compared to generic avatars.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We present a system for rapid acquisition of bespoke, animatable, full-body avatars including face texture and shape. A blendshape rig with a skeleton is used as a template for customization. Identity blendshapes are used to customize the body and face shape at the fitting stage, while animation blendshapes allow the face to be animated. The subject assumes a T-pose and a single snapshot is captured using a stereo RGB plus depth sensor rig. Our system automatically aligns a photo texture and fits the 3D shape of the face. The body shape is stylized according to body dimensions estimated from segmented depth. The face identity blendweights are optimised according to image-based facial landmarks, while a custom texture map for the face is generated by warping the input images to a reference texture according to the facial landmarks. The total capture and processing time is under 10 seconds and the output is a light-weight, game-engine-ready avatar which is recognizable as the subject. We demonstrate our system in a VR environment in which each user sees the other users' animated avatars through a VR headset with real-time audio-based facial animation and live body motion tracking, affording an enhanced level of presence and social engagement compared to generic avatars.",
"fno": "07892240",
"keywords": [
"Face",
"Avatars",
"Cameras",
"Three Dimensional Displays",
"Shape",
"Solid Modeling",
"Animation",
"Avatars",
"Capture",
"Virtual Reality"
],
"authors": [
{
"affiliation": "Disney Research, UK",
"fullName": "Charles Malleson",
"givenName": "Charles",
"surname": "Malleson",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Disney Research, UK",
"fullName": "Maggie Kosek",
"givenName": "Maggie",
"surname": "Kosek",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Disney Research, UK",
"fullName": "Martin Klaudiny",
"givenName": "Martin",
"surname": "Klaudiny",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Disney Research, UK",
"fullName": "Ivan Huerta",
"givenName": "Ivan",
"surname": "Huerta",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Disney Research, CH",
"fullName": "Jean-Charles Bazin",
"givenName": "Jean-Charles",
"surname": "Bazin",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Disney Research, CH",
"fullName": "Alexander Sorkine-Hornung",
"givenName": "Alexander",
"surname": "Sorkine-Hornung",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Walt Disney Imagineering, USA",
"fullName": "Mark Mine",
"givenName": "Mark",
"surname": "Mine",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Disney Research, UK",
"fullName": "Kenny Mitchell",
"givenName": "Kenny",
"surname": "Mitchell",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2017-01-01T00:00:00",
"pubType": "proceedings",
"pages": "131-140",
"year": "2017",
"issn": "2375-5334",
"isbn": "978-1-5090-6647-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07892239",
"articleId": "12OmNxWcHbV",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07892241",
"articleId": "12OmNxy4N0w",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2014/2871/0/06802113",
"title": "Automatic acquisition and animation of virtual avatars",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2014/06802113/12OmNCeaQ1Z",
"parentPublication": {
"id": "proceedings/vr/2014/2871/0",
"title": "2014 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/svr/2017/3588/0/3588a078",
"title": "The Development of a Facial Animation System Based on Performance and the Use of an RGB-D Camera",
"doi": null,
"abstractUrl": "/proceedings-article/svr/2017/3588a078/12OmNxV4iA1",
"parentPublication": {
"id": "proceedings/svr/2017/3588/0",
"title": "2017 19th Symposium on Virtual and Augmented Reality (SVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2017/6647/0/07892372",
"title": "Demonstration: Rapid one-shot acquisition of dynamic VR avatars",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892372/12OmNz2C1zq",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2018/8425/0/842500a098",
"title": "Detailed Human Avatars from Monocular Video",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2018/842500a098/17D45Vw15t7",
"parentPublication": {
"id": "proceedings/3dv/2018/8425/0",
"title": "2018 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/05/08648222",
"title": "The Virtual Caliper: Rapid Creation of Metrically Accurate Avatars from 3D Measurements",
"doi": null,
"abstractUrl": "/journal/tg/2019/05/08648222/17QjJf0qqr2",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200o4508",
"title": "EgoRenderer: Rendering Human Avatars from Egocentric Camera Images",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200o4508/1BmJxzOtk4w",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600n3535",
"title": "I M Avatar: Implicit Morphable Head Avatars from Videos",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600n3535/1H1j2BWBE2c",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2023/4815/0/481500a347",
"title": "Fully Automatic Blendshape Generation for Stylized Characters",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2023/481500a347/1MNgXaINwAg",
"parentPublication": {
"id": "proceedings/vr/2023/4815/0",
"title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798353",
"title": "Rapid 3D Avatar Creation System Using a Single Depth Camera",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798353/1cJ11TRykmY",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2019/3293/0/329300c382",
"title": "Textured Neural Avatars",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2019/329300c382/1gyrdPZ8U92",
"parentPublication": {
"id": "proceedings/cvpr/2019/3293/0",
"title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNxV4itF",
"title": "2017 IEEE Virtual Reality (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2017",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNz2C1zq",
"doi": "10.1109/VR.2017.7892372",
"title": "Demonstration: Rapid one-shot acquisition of dynamic VR avatars",
"normalizedTitle": "Demonstration: Rapid one-shot acquisition of dynamic VR avatars",
"abstract": "In this demonstration, we showcase a system for rapid acquisition of bespoke avatars for each participant (subject) in a social VR environment is presented. For each subject, the system automatically customizes a parametric avatar model to match the captured subject by adjusting its overall height, body and face shape parameters and generating a custom face texture.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this demonstration, we showcase a system for rapid acquisition of bespoke avatars for each participant (subject) in a social VR environment is presented. For each subject, the system automatically customizes a parametric avatar model to match the captured subject by adjusting its overall height, body and face shape parameters and generating a custom face texture.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this demonstration, we showcase a system for rapid acquisition of bespoke avatars for each participant (subject) in a social VR environment is presented. For each subject, the system automatically customizes a parametric avatar model to match the captured subject by adjusting its overall height, body and face shape parameters and generating a custom face texture.",
"fno": "07892372",
"keywords": [
"Face",
"Avatars",
"Shape",
"Three Dimensional Displays",
"Cameras",
"Solid Modeling",
"Animation",
"Avatars",
"Capture",
"Virtual Reality"
],
"authors": [
{
"affiliation": "Disney Research, UK",
"fullName": "Charles Malleson",
"givenName": "Charles",
"surname": "Malleson",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Disney Research, UK",
"fullName": "Maggie Kosek",
"givenName": "Maggie",
"surname": "Kosek",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Disney Research, UK",
"fullName": "Martin Klaudiny",
"givenName": "Martin",
"surname": "Klaudiny",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Disney Research, UK",
"fullName": "Ivan Huerta",
"givenName": "Ivan",
"surname": "Huerta",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Disney Research, CH",
"fullName": "Jean-Charles Bazin",
"givenName": "Jean-Charles",
"surname": "Bazin",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Disney Research, CH",
"fullName": "Alexander Sorkine-Hornung",
"givenName": "Alexander",
"surname": "Sorkine-Hornung",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Walt Disney Imagineering, USA",
"fullName": "Mark Mine",
"givenName": "Mark",
"surname": "Mine",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Disney Research, UK",
"fullName": "Kenny Mitchell",
"givenName": "Kenny",
"surname": "Mitchell",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2017-01-01T00:00:00",
"pubType": "proceedings",
"pages": "447-448",
"year": "2017",
"issn": "2375-5334",
"isbn": "978-1-5090-6647-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07892371",
"articleId": "12OmNz61djR",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07892373",
"articleId": "12OmNxG1ySA",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2014/2871/0/06802113",
"title": "Automatic acquisition and animation of virtual avatars",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2014/06802113/12OmNCeaQ1Z",
"parentPublication": {
"id": "proceedings/vr/2014/2871/0",
"title": "2014 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/svr/2013/5001/0/06655773",
"title": "Avatars Dance: Dynamic Control of Multiples 3D Articulated Characters through an Integration Layer",
"doi": null,
"abstractUrl": "/proceedings-article/svr/2013/06655773/12OmNvnOwwM",
"parentPublication": {
"id": "proceedings/svr/2013/5001/0",
"title": "2013 XV Symposium on Virtual and Augmented Reality (SVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2017/6647/0/07892240",
"title": "Rapid one-shot acquisition of dynamic VR avatars",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892240/12OmNwGZNLp",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2009/3943/0/04811044",
"title": "Crafting Personalized Facial Avatars Using Editable Portrait and Photograph Example",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2009/04811044/12OmNx7ouYf",
"parentPublication": {
"id": "proceedings/vr/2009/3943/0",
"title": "2009 IEEE Virtual Reality Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2013/4795/0/06549424",
"title": "Rapid generation of personalized avatars",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2013/06549424/12OmNyQGShm",
"parentPublication": {
"id": "proceedings/vr/2013/4795/0",
"title": "2013 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2019/1975/0/197500b626",
"title": "Eyemotion: Classifying Facial Expressions in VR Using Eye-Tracking Cameras",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2019/197500b626/18j8FIomLfi",
"parentPublication": {
"id": "proceedings/wacv/2019/1975/0",
"title": "2019 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600u0395",
"title": "gDNA: Towards Generative Detailed Neural Avatars",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600u0395/1H1kTUQK6Xe",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798318",
"title": "Evaluating Teacher Avatar Appearances in Educational VR",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798318/1cJ0P8vBhhm",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798353",
"title": "Rapid 3D Avatar Creation System Using a Single Depth Camera",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798353/1cJ11TRykmY",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/05/08998352",
"title": "Using Facial Animation to Increase the Enfacement Illusion and Avatar Self-Identification",
"doi": null,
"abstractUrl": "/journal/tg/2020/05/08998352/1hpPCCB7Bte",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "14qdcP8Ivdv",
"title": "2018 International Conference on 3D Vision (3DV)",
"acronym": "3dv",
"groupId": "1800494",
"volume": "0",
"displayVolume": "0",
"year": "2018",
"__typename": "ProceedingType"
},
"article": {
"id": "17D45Vw15t7",
"doi": "10.1109/3DV.2018.00022",
"title": "Detailed Human Avatars from Monocular Video",
"normalizedTitle": "Detailed Human Avatars from Monocular Video",
"abstract": "We present a novel method for high detail-preserving human avatar creation from monocular video. A parameterized body model is refined and optimized to maximally resemble subjects from a video showing them from all sides. Our avatars feature a natural face, hairstyle, clothes with garment wrinkles, and high-resolution texture. Our paper contributes facial landmark and shading-based human body shape refinement, a semantic texture prior, and a novel texture stitching strategy, resulting in the most sophisticated-looking human avatars obtained from a single video to date. Numerous results show the robustness and versatility of our method. A user study illustrates its superiority over the state-of-the-art in terms of identity preservation, level of detail, realism, and overall user preference.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We present a novel method for high detail-preserving human avatar creation from monocular video. A parameterized body model is refined and optimized to maximally resemble subjects from a video showing them from all sides. Our avatars feature a natural face, hairstyle, clothes with garment wrinkles, and high-resolution texture. Our paper contributes facial landmark and shading-based human body shape refinement, a semantic texture prior, and a novel texture stitching strategy, resulting in the most sophisticated-looking human avatars obtained from a single video to date. Numerous results show the robustness and versatility of our method. A user study illustrates its superiority over the state-of-the-art in terms of identity preservation, level of detail, realism, and overall user preference.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We present a novel method for high detail-preserving human avatar creation from monocular video. A parameterized body model is refined and optimized to maximally resemble subjects from a video showing them from all sides. Our avatars feature a natural face, hairstyle, clothes with garment wrinkles, and high-resolution texture. Our paper contributes facial landmark and shading-based human body shape refinement, a semantic texture prior, and a novel texture stitching strategy, resulting in the most sophisticated-looking human avatars obtained from a single video to date. Numerous results show the robustness and versatility of our method. A user study illustrates its superiority over the state-of-the-art in terms of identity preservation, level of detail, realism, and overall user preference.",
"fno": "842500a098",
"keywords": [
"Avatars",
"Face Recognition",
"Image Texture",
"Video Signal Processing",
"Facial Landmark",
"Shading Based Human Body Shape Refinement",
"Texture Stitching Strategy",
"Garment Wrinkles",
"Natural Face",
"Parameterized Body Model",
"High Detail Preserving Human Avatar Creation",
"Monocular Video",
"Detailed Human Avatars",
"Identity Preservation",
"Novel Texture Stitching Strategy",
"Semantic Texture",
"High Resolution Texture",
"Shape",
"Three Dimensional Displays",
"Clothing",
"Avatars",
"Image Reconstruction",
"Adaptation Models",
"Biological System Modeling",
"Human Shape",
"3 D Reconstruction",
"Monocular"
],
"authors": [
{
"affiliation": null,
"fullName": "Thiemo Alldieck",
"givenName": "Thiemo",
"surname": "Alldieck",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Marcus Magnor",
"givenName": "Marcus",
"surname": "Magnor",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Weipeng Xu",
"givenName": "Weipeng",
"surname": "Xu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Christian Theobalt",
"givenName": "Christian",
"surname": "Theobalt",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Gerard Pons-Moll",
"givenName": "Gerard",
"surname": "Pons-Moll",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "3dv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2018-09-01T00:00:00",
"pubType": "proceedings",
"pages": "98-109",
"year": "2018",
"issn": null,
"isbn": "978-1-5386-8425-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "842500a089",
"articleId": "17D45X2fUGj",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "842500a110",
"articleId": "17D45WrVfZF",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iccv/2015/8391/0/8391c300",
"title": "Detailed Full-Body Reconstructions of Moving People from Monocular RGB-D Sequences",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2015/8391c300/12OmNC4wtFD",
"parentPublication": {
"id": "proceedings/iccv/2015/8391/0",
"title": "2015 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2013/4795/0/06549424",
"title": "Rapid generation of personalized avatars",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2013/06549424/12OmNyQGShm",
"parentPublication": {
"id": "proceedings/vr/2013/4795/0",
"title": "2013 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200o4508",
"title": "EgoRenderer: Rendering Human Avatars from Egocentric Camera Images",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200o4508/1BmJxzOtk4w",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600s8632",
"title": "Neural Head Avatars from Monocular RGB Videos",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600s8632/1H1htwlAaNa",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600u0438",
"title": "PINA: Learning a Personalized Implicit Neural Avatar from a Single RGB-D Video Sequence",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600u0438/1H1k1cLWWWI",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600u0395",
"title": "gDNA: Towards Generative Detailed Neural Avatars",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600u0395/1H1kTUQK6Xe",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/04/09209171",
"title": "MulayCap: Multi-Layer Human Performance Capture Using a Monocular Video Camera",
"doi": null,
"abstractUrl": "/journal/tg/2022/04/09209171/1nwbhfo8G52",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2020/8128/0/812800a322",
"title": "MonoClothCap: Towards Temporally Coherent Clothing Capture from Monocular RGB Video",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2020/812800a322/1qyxk1bcV5S",
"parentPublication": {
"id": "proceedings/3dv/2020/8128/0",
"title": "2020 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900f147",
"title": "StylePeople: A Generative Model of Fullbody Human Avatars",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900f147/1yeILFPUeE8",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2021/2688/0/268800a889",
"title": "Human Performance Capture from Monocular Video in the Wild",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2021/268800a889/1zWE8unArNC",
"parentPublication": {
"id": "proceedings/3dv/2021/2688/0",
"title": "2021 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1cI6akLvAuQ",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2019",
"__typename": "ProceedingType"
},
"article": {
"id": "1cJ11TRykmY",
"doi": "10.1109/VR.2019.8798353",
"title": "Rapid 3D Avatar Creation System Using a Single Depth Camera",
"normalizedTitle": "Rapid 3D Avatar Creation System Using a Single Depth Camera",
"abstract": "We present a rapid and fully automatic 3D avatar creation system that can produce personalized 3D avatars within two minutes using a single depth camera and a motorized turntable. The created 3D avatar is able to make all the details of facial expressions and whole body motions including fingers. To our best knowledge, it is the first completely automatic system that can generate realistic 3D avatars in the common 3D file format, which is ready for the direct use in virtual reality applications or various services.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We present a rapid and fully automatic 3D avatar creation system that can produce personalized 3D avatars within two minutes using a single depth camera and a motorized turntable. The created 3D avatar is able to make all the details of facial expressions and whole body motions including fingers. To our best knowledge, it is the first completely automatic system that can generate realistic 3D avatars in the common 3D file format, which is ready for the direct use in virtual reality applications or various services.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We present a rapid and fully automatic 3D avatar creation system that can produce personalized 3D avatars within two minutes using a single depth camera and a motorized turntable. The created 3D avatar is able to make all the details of facial expressions and whole body motions including fingers. To our best knowledge, it is the first completely automatic system that can generate realistic 3D avatars in the common 3D file format, which is ready for the direct use in virtual reality applications or various services.",
"fno": "08798353",
"keywords": [
"Avatars",
"Cameras",
"Computer Animation",
"Face Recognition",
"Image Motion Analysis",
"Solid Modelling",
"Single Depth Camera",
"Realistic 3 D Avatars",
"Automatic 3 D Avatar Creation System",
"Facial Expressions",
"3 D File Format",
"Virtual Reality Application",
"Solid Modeling",
"Avatars",
"Three Dimensional Displays",
"Deformable Models",
"Optimization",
"Face",
"Animation",
"Computing Methodologies",
"Artificial Intelligence",
"Computer Vision",
"Computer Vision Problems",
"Computing Methodologies",
"Computer Graphics",
"Shape Modeling",
"Mesh Models"
],
"authors": [
{
"affiliation": "Korea Institute of Science and Technology",
"fullName": "Hwasup Lim",
"givenName": "Hwasup",
"surname": "Lim",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Korea Institute of Science and Technology",
"fullName": "Junseok Kang",
"givenName": "Junseok",
"surname": "Kang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Korea Institute of Science and Technology",
"fullName": "Sang Chul Ahn",
"givenName": "Sang Chul",
"surname": "Ahn",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2019-03-01T00:00:00",
"pubType": "proceedings",
"pages": "1329-1330",
"year": "2019",
"issn": null,
"isbn": "978-1-7281-1377-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08798001",
"articleId": "1cJ0G0mmKti",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08798049",
"articleId": "1cJ0UaezhG8",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cw/2015/9403/0/9403a325",
"title": "Instant Messenger with Personalized 3D Avatar",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2015/9403a325/12OmNAkEU6d",
"parentPublication": {
"id": "proceedings/cw/2015/9403/0",
"title": "2015 International Conference on Cyberworlds (CW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2017/6647/0/07892240",
"title": "Rapid one-shot acquisition of dynamic VR avatars",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892240/12OmNwGZNLp",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2013/4795/0/06549379",
"title": "Head motion animation using avatar gaze space",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2013/06549379/12OmNxRWI3d",
"parentPublication": {
"id": "proceedings/vr/2013/4795/0",
"title": "2013 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2017/6647/0/07892372",
"title": "Demonstration: Rapid one-shot acquisition of dynamic VR avatars",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892372/12OmNz2C1zq",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/uic-atc-scalcom/2015/7211/0/07518396",
"title": "Creating Personal 3D Avatar from a Single Depth Sensor",
"doi": null,
"abstractUrl": "/proceedings-article/uic-atc-scalcom/2015/07518396/12OmNzZWbKh",
"parentPublication": {
"id": "proceedings/uic-atc-scalcom/2015/7211/0",
"title": "2015 IEEE 12th Intl Conf on Ubiquitous Intelligence and Computing and 2015 IEEE 12th Intl Conf on Autonomic and Trusted Computing and 2015 IEEE 15th Intl Conf on Scalable Computing and Communications and Its Associated Workshops (UIC-ATC-ScalCom)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/05/08648222",
"title": "The Virtual Caliper: Rapid Creation of Metrically Accurate Avatars from 3D Measurements",
"doi": null,
"abstractUrl": "/journal/tg/2019/05/08648222/17QjJf0qqr2",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a429",
"title": "Real-time Expressive Avatar Animation Generation based on Monocular Videos",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a429/1J7Wj0kJrJm",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798318",
"title": "Evaluating Teacher Avatar Appearances in Educational VR",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798318/1cJ0P8vBhhm",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/05/08998352",
"title": "Using Facial Animation to Increase the Enfacement Illusion and Avatar Self-Identification",
"doi": null,
"abstractUrl": "/journal/tg/2020/05/08998352/1hpPCCB7Bte",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2019/5023/0/502300c100",
"title": "Landmark-Guided Deformation Transfer of Template Facial Expressions for Automatic Generation of Avatar Blendshapes",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2019/502300c100/1i5mNnnOzlu",
"parentPublication": {
"id": "proceedings/iccvw/2019/5023/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision Workshop (ICCVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1gyr6w5YIIU",
"title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"acronym": "cvpr",
"groupId": "1000147",
"volume": "0",
"displayVolume": "0",
"year": "2019",
"__typename": "ProceedingType"
},
"article": {
"id": "1gyrdPZ8U92",
"doi": "10.1109/CVPR.2019.00249",
"title": "Textured Neural Avatars",
"normalizedTitle": "Textured Neural Avatars",
"abstract": "We present a system for learning full body neural avatars, i.e. deep networks that produce full body renderings of a person for varying body pose and varying camera pose. Our system takes the middle path between the classical graphics pipeline and the recent deep learning approaches that generate images of humans using image-to-image translation. In particular, our system estimates an explicit two-dimensional texture map of the model surface. At the same time, it abstains from explicit shape modeling in 3D. Instead, at test time, the system uses a fully-convolutional network to directly map the configuration of body feature points w.r.t. the camera to the 2D texture coordinates of individual pixels in the image frame. We show that such system is capable of learning to generate realistic renderings while being trained on videos annotated with 3D poses and foreground masks. We also demonstrate that maintaining an explicit texture representation helps our system to achieve better generalization compared to systems that use direct image-to-image translation.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We present a system for learning full body neural avatars, i.e. deep networks that produce full body renderings of a person for varying body pose and varying camera pose. Our system takes the middle path between the classical graphics pipeline and the recent deep learning approaches that generate images of humans using image-to-image translation. In particular, our system estimates an explicit two-dimensional texture map of the model surface. At the same time, it abstains from explicit shape modeling in 3D. Instead, at test time, the system uses a fully-convolutional network to directly map the configuration of body feature points w.r.t. the camera to the 2D texture coordinates of individual pixels in the image frame. We show that such system is capable of learning to generate realistic renderings while being trained on videos annotated with 3D poses and foreground masks. We also demonstrate that maintaining an explicit texture representation helps our system to achieve better generalization compared to systems that use direct image-to-image translation.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We present a system for learning full body neural avatars, i.e. deep networks that produce full body renderings of a person for varying body pose and varying camera pose. Our system takes the middle path between the classical graphics pipeline and the recent deep learning approaches that generate images of humans using image-to-image translation. In particular, our system estimates an explicit two-dimensional texture map of the model surface. At the same time, it abstains from explicit shape modeling in 3D. Instead, at test time, the system uses a fully-convolutional network to directly map the configuration of body feature points w.r.t. the camera to the 2D texture coordinates of individual pixels in the image frame. We show that such system is capable of learning to generate realistic renderings while being trained on videos annotated with 3D poses and foreground masks. We also demonstrate that maintaining an explicit texture representation helps our system to achieve better generalization compared to systems that use direct image-to-image translation.",
"fno": "329300c382",
"keywords": [
"Avatars",
"Convolutional Neural Nets",
"Feature Extraction",
"Image Classification",
"Image Representation",
"Image Segmentation",
"Image Texture",
"Learning Artificial Intelligence",
"Rendering Computer Graphics",
"Deep Learning Approach",
"Direct Image To Image Translation",
"Texture Representation",
"Image Frame",
"2 D Texture Coordinates",
"Body Feature Points",
"Fully Convolutional Network",
"Two Dimensional Texture Map",
"Classical Graphics Pipeline",
"Body Renderings",
"Deep Networks",
"Body Neural Avatars",
"Textured Neural Avatars",
"Solid Modeling",
"Three Dimensional Displays",
"Shape",
"Avatars",
"Pipelines",
"Rendering Computer Graphics",
"Cameras",
"Image And Video Synthesis",
"Deep Learning",
"Vision Graphics",
"Vision Applications And Systems"
],
"authors": [
{
"affiliation": "Samsung",
"fullName": "Aliaksandra Shysheya",
"givenName": "Aliaksandra",
"surname": "Shysheya",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Skoltech",
"fullName": "Egor Zakharov",
"givenName": "Egor",
"surname": "Zakharov",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Samsung",
"fullName": "Kara-Ali Aliev",
"givenName": "Kara-Ali",
"surname": "Aliev",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Samsung",
"fullName": "Renat Bashirov",
"givenName": "Renat",
"surname": "Bashirov",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Skoltech",
"fullName": "Egor Burkov",
"givenName": "Egor",
"surname": "Burkov",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Skoltech",
"fullName": "Karim Iskakov",
"givenName": "Karim",
"surname": "Iskakov",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Samsung",
"fullName": "Aleksei Ivakhnenko",
"givenName": "Aleksei",
"surname": "Ivakhnenko",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Samsung",
"fullName": "Yury Malkov",
"givenName": "Yury",
"surname": "Malkov",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Samsung",
"fullName": "Igor Pasechnik",
"givenName": "Igor",
"surname": "Pasechnik",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Samsung",
"fullName": "Dmitry Ulyanov",
"givenName": "Dmitry",
"surname": "Ulyanov",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Samsung AI Research Center",
"fullName": "Alexander Vakhitov",
"givenName": "Alexander",
"surname": "Vakhitov",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Samsung",
"fullName": "Victor Lempitsky",
"givenName": "Victor",
"surname": "Lempitsky",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2019-06-01T00:00:00",
"pubType": "proceedings",
"pages": "2382-2392",
"year": "2019",
"issn": null,
"isbn": "978-1-7281-3293-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "329300c372",
"articleId": "1gyrcAk9eIE",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "329300c393",
"articleId": "1gyrIdPKpZS",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2017/6647/0/07892240",
"title": "Rapid one-shot acquisition of dynamic VR avatars",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892240/12OmNwGZNLp",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2017/6647/0/07892372",
"title": "Demonstration: Rapid one-shot acquisition of dynamic VR avatars",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892372/12OmNz2C1zq",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08447549",
"title": "Real-Time Re-Textured Geometry Modeling Using Microsoft HoloLens",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08447549/13bd1gQYgE7",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2018/8425/0/842500a495",
"title": "3DBodyTex: Textured 3D Body Dataset",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2018/842500a495/17D45VsBTTP",
"parentPublication": {
"id": "proceedings/3dv/2018/8425/0",
"title": "2018 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2018/8425/0/842500a098",
"title": "Detailed Human Avatars from Monocular Video",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2018/842500a098/17D45Vw15t7",
"parentPublication": {
"id": "proceedings/3dv/2018/8425/0",
"title": "2018 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200o4508",
"title": "EgoRenderer: Rendering Human Avatars from Egocentric Camera Images",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200o4508/1BmJxzOtk4w",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600u0395",
"title": "gDNA: Towards Generative Detailed Neural Avatars",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600u0395/1H1kTUQK6Xe",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fg/2023/4544/0/10042763",
"title": "CoNFies: Controllable Neural Face Avatars",
"doi": null,
"abstractUrl": "/proceedings-article/fg/2023/10042763/1KOv2T2hLLG",
"parentPublication": {
"id": "proceedings/fg/2023/4544/0",
"title": "2023 IEEE 17th International Conference on Automatic Face and Gesture Recognition (FG)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800h827",
"title": "Neural Point Cloud Rendering via Multi-Plane Projection",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800h827/1m3nmJqUK8o",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900d721",
"title": "ANR: Articulated Neural Rendering for Virtual Avatars",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900d721/1yeKjbSjlao",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNC3Xhhw",
"title": "Web Intelligence and Intelligent Agent Technology, IEEE/WIC/ACM International Conference on",
"acronym": "wi-iat",
"groupId": "1001411",
"volume": "2",
"displayVolume": "2",
"year": "2011",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNAoDi5k",
"doi": "10.1109/WI-IAT.2011.170",
"title": "Influence of Personality Traits on the Rational Process of Cognitive Agents",
"normalizedTitle": "Influence of Personality Traits on the Rational Process of Cognitive Agents",
"abstract": "In this paper we present an approach based on the principle that psychological capacities, especially personality traits, influence the decision making process of rational agents. While using the FFM/NEO PI-R taxonomy, we propose a model for the expression of personality traits in terms of so-called influence operators that add meta control rules to the cycle of rational BDI agents.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this paper we present an approach based on the principle that psychological capacities, especially personality traits, influence the decision making process of rational agents. While using the FFM/NEO PI-R taxonomy, we propose a model for the expression of personality traits in terms of so-called influence operators that add meta control rules to the cycle of rational BDI agents.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this paper we present an approach based on the principle that psychological capacities, especially personality traits, influence the decision making process of rational agents. While using the FFM/NEO PI-R taxonomy, we propose a model for the expression of personality traits in terms of so-called influence operators that add meta control rules to the cycle of rational BDI agents.",
"fno": "4513b081",
"keywords": [
"Agent Cognitive Modeling",
"Computational Personality",
"Personality Traits",
"Rational And Psychological Agents"
],
"authors": [
{
"affiliation": null,
"fullName": "François Bouchet",
"givenName": "François",
"surname": "Bouchet",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Jean-Paul Sansonnet",
"givenName": "Jean-Paul",
"surname": "Sansonnet",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "wi-iat",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2011-08-01T00:00:00",
"pubType": "proceedings",
"pages": "81-88",
"year": "2011",
"issn": null,
"isbn": "978-0-7695-4513-4",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "4513b072",
"articleId": "12OmNvAiSbc",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "4513b089",
"articleId": "12OmNyrIazp",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icee/2010/3997/0/3997a999",
"title": "Research on Senior Manager's Personality Traits and Corporate Lifecycles in Different Stages",
"doi": null,
"abstractUrl": "/proceedings-article/icee/2010/3997a999/12OmNC943xC",
"parentPublication": {
"id": "proceedings/icee/2010/3997/0",
"title": "International Conference on E-Business and E-Government",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icss/2010/4017/0/4017a148",
"title": "How do Consumer Personality Traits Affect their Perceptions and Evaluations of Service Quality?",
"doi": null,
"abstractUrl": "/proceedings-article/icss/2010/4017a148/12OmNqFJhCE",
"parentPublication": {
"id": "proceedings/icss/2010/4017/0",
"title": "Service Sciences, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hicss/2013/4892/0/4892e551",
"title": "Management of Technical Security Measures: An Empirical Examination of Personality Traits and Behavioral Intentions",
"doi": null,
"abstractUrl": "/proceedings-article/hicss/2013/4892e551/12OmNqFrGHq",
"parentPublication": {
"id": "proceedings/hicss/2013/4892/0",
"title": "2013 46th Hawaii International Conference on System Sciences",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wi-iat/2008/3496/3/3496c477",
"title": "Effects of Autonomy, Traffic Conditions and Driver Personality Traits on Attitudes and Trust towards In-Vehicle Agents",
"doi": null,
"abstractUrl": "/proceedings-article/wi-iat/2008/3496c477/12OmNrYlmSI",
"parentPublication": {
"id": "proceedings/wi-iat/2008/3496/3",
"title": "Web Intelligence and Intelligent Agent Technology, IEEE/WIC/ACM International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ijcss/2011/4421/0/4421a145",
"title": "Fairness is in the Eye of the Beholder: How Personality Traits Affect Perceived Fairness and Satisfaction in Hotel Service",
"doi": null,
"abstractUrl": "/proceedings-article/ijcss/2011/4421a145/12OmNvrvj9g",
"parentPublication": {
"id": "proceedings/ijcss/2011/4421/0",
"title": "Service Sciences, International Joint Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icalt/2011/4346/0/4346a088",
"title": "A Trust-Personality Mechanism for Emotion Compensation",
"doi": null,
"abstractUrl": "/proceedings-article/icalt/2011/4346a088/12OmNxG1yCo",
"parentPublication": {
"id": "proceedings/icalt/2011/4346/0",
"title": "Advanced Learning Technologies, IEEE International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccit/2008/3407/1/3407a220",
"title": "Graphology and Cattell's 16PF Traits Matrix (HoloCatT Matrix)",
"doi": null,
"abstractUrl": "/proceedings-article/iccit/2008/3407a220/12OmNyGbIeU",
"parentPublication": {
"id": "iccit/2008/3407/1",
"title": "Convergence Information Technology, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cse/2009/3823/4/3823e607",
"title": "Personality in Social Group Dynamics",
"doi": null,
"abstractUrl": "/proceedings-article/cse/2009/3823e607/12OmNyRg4hQ",
"parentPublication": {
"id": "proceedings/cse/2009/3823/2",
"title": "2009 International Conference on Computational Science and Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/asonam/2011/4375/0/4375a629",
"title": "Effects of Personality Traits on Usage of Social Networking Service",
"doi": null,
"abstractUrl": "/proceedings-article/asonam/2011/4375a629/12OmNz61d8N",
"parentPublication": {
"id": "proceedings/asonam/2011/4375/0",
"title": "2011 International Conference on Advances in Social Networks Analysis and Mining",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ta/2012/03/tta2012030273",
"title": "Automatic Personality Perception: Prediction of Trait Attribution Based on Prosodic Features",
"doi": null,
"abstractUrl": "/journal/ta/2012/03/tta2012030273/13rRUxBJhEa",
"parentPublication": {
"id": "trans/ta",
"title": "IEEE Transactions on Affective Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNrMHOd6",
"title": "2016 49th Hawaii International Conference on System Sciences (HICSS)",
"acronym": "hicss",
"groupId": "1000730",
"volume": "0",
"displayVolume": "0",
"year": "2016",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNvStcNn",
"doi": "10.1109/HICSS.2016.697",
"title": "Decisions and Disasters: Modeling Decisions that Contribute to Mishaps",
"normalizedTitle": "Decisions and Disasters: Modeling Decisions that Contribute to Mishaps",
"abstract": "Ever since the decision to launch the Challenger -- and the deadly explosion that followed -- it has been widely known that \"decision failure\" can lead to disaster. But despite this awareness and the availability of a wide variety of decision models, we found no single model that adequately describes all the ways that decisions can fail and how flawed decisions contribute to mishaps. In this paper, we present our model of decision failure. Then we show how we used this model to gain insight into that decisions that have contributed to NASA mishaps (including the Challenger). This work presents both the model and the insights from its application. The theoretical contribution is a new way to encode and analyze the decision data found in mishap reports, providing insight into the causes of decision failure. The practical contribution is the potential for using this to improve decision-making at NASA and other high-reliability organizations.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Ever since the decision to launch the Challenger -- and the deadly explosion that followed -- it has been widely known that \"decision failure\" can lead to disaster. But despite this awareness and the availability of a wide variety of decision models, we found no single model that adequately describes all the ways that decisions can fail and how flawed decisions contribute to mishaps. In this paper, we present our model of decision failure. Then we show how we used this model to gain insight into that decisions that have contributed to NASA mishaps (including the Challenger). This work presents both the model and the insights from its application. The theoretical contribution is a new way to encode and analyze the decision data found in mishap reports, providing insight into the causes of decision failure. The practical contribution is the potential for using this to improve decision-making at NASA and other high-reliability organizations.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Ever since the decision to launch the Challenger -- and the deadly explosion that followed -- it has been widely known that \"decision failure\" can lead to disaster. But despite this awareness and the availability of a wide variety of decision models, we found no single model that adequately describes all the ways that decisions can fail and how flawed decisions contribute to mishaps. In this paper, we present our model of decision failure. Then we show how we used this model to gain insight into that decisions that have contributed to NASA mishaps (including the Challenger). This work presents both the model and the insights from its application. The theoretical contribution is a new way to encode and analyze the decision data found in mishap reports, providing insight into the causes of decision failure. The practical contribution is the potential for using this to improve decision-making at NASA and other high-reliability organizations.",
"fno": "5670f635",
"keywords": [
"NASA",
"Decision Making",
"Biological System Modeling",
"Accidents",
"Uncertainty",
"Modeling",
"Mishaps",
"Decisions",
"Decision Theory",
"Accidents"
],
"authors": [
{
"affiliation": null,
"fullName": "Joel Wilf",
"givenName": "Joel",
"surname": "Wilf",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Dan Port",
"givenName": "Dan",
"surname": "Port",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "hicss",
"isOpenAccess": true,
"showRecommendedArticles": true,
"showBuyMe": false,
"hasPdf": true,
"pubDate": "2016-01-01T00:00:00",
"pubType": "proceedings",
"pages": "5635-5641",
"year": "2016",
"issn": "1530-1605",
"isbn": "978-0-7695-5670-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "5670f628",
"articleId": "12OmNAObbC4",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "5670f642",
"articleId": "12OmNAOKnR9",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/march/2016/2573/0/2573a030",
"title": "DecDoc: A Tool for Documenting Design Decisions Collaboratively and Incrementally",
"doi": null,
"abstractUrl": "/proceedings-article/march/2016/2573a030/12OmNA1Vntr",
"parentPublication": {
"id": "proceedings/march/2016/2573/0",
"title": "2016 1st International Workshop on decision Making in Software ARCHitecture (MARCH)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hicss/2016/5670/0/5670f642",
"title": "Developing a Value-Based Methodology for Satisfying NASA Software Assurance Requirements",
"doi": null,
"abstractUrl": "/proceedings-article/hicss/2016/5670f642/12OmNAOKnR9",
"parentPublication": {
"id": "proceedings/hicss/2016/5670/0",
"title": "2016 49th Hawaii International Conference on System Sciences (HICSS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccis/2013/5004/0/5004a332",
"title": "Dynamic Model Based on the Theory of Purchasing Decisions of the Full Life Cycle Costs",
"doi": null,
"abstractUrl": "/proceedings-article/iccis/2013/5004a332/12OmNvjQ8UT",
"parentPublication": {
"id": "proceedings/iccis/2013/5004/0",
"title": "2013 International Conference on Computational and Information Sciences",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ams/2007/2845/0/04148716",
"title": "Facilitating Group Decisions through Multicriteria Analysis and Agent Based Modeling",
"doi": null,
"abstractUrl": "/proceedings-article/ams/2007/04148716/12OmNyoiZeN",
"parentPublication": {
"id": "proceedings/ams/2007/2845/0",
"title": "Asia International Conference on Modelling & Simulation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/edoc/2013/5081/0/5081a139",
"title": "Relating Decisions in Enterprise Architecture Using Decision Design Graphs",
"doi": null,
"abstractUrl": "/proceedings-article/edoc/2013/5081a139/12OmNzYNN9J",
"parentPublication": {
"id": "proceedings/edoc/2013/5081/0",
"title": "2013 17th IEEE International Enterprise Distributed Object Computing Conference (EDOC 2013)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ase/2016/3845/0/07582835",
"title": "Developer targeted analytics: Supporting software development decisions with runtime information",
"doi": null,
"abstractUrl": "/proceedings-article/ase/2016/07582835/12OmNzuZUsw",
"parentPublication": {
"id": "proceedings/ase/2016/3845/0",
"title": "2016 31st IEEE/ACM International Conference on Automated Software Engineering (ASE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/05/08651471",
"title": "You or Me? Personality Traits Predict Sacrificial Decisions in an Accident Situation",
"doi": null,
"abstractUrl": "/journal/tg/2019/05/08651471/17WX58zhpC0",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bracis/2019/4253/0/425300a467",
"title": "Argumentation-Based Agents that Explain Their Decisions",
"doi": null,
"abstractUrl": "/proceedings-article/bracis/2019/425300a467/1fHkH71PDFu",
"parentPublication": {
"id": "proceedings/bracis/2019/4253/0",
"title": "2019 8th Brazilian Conference on Intelligent Systems (BRACIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/so/2021/06/09585145",
"title": "Decision-Making Principles for Better Software Design Decisions",
"doi": null,
"abstractUrl": "/magazine/so/2021/06/09585145/1xZlyVOzQWY",
"parentPublication": {
"id": "mags/so",
"title": "IEEE Software",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/trex/2021/1817/0/181700a014",
"title": "Making and Trusting Decisions in Visual Analytics",
"doi": null,
"abstractUrl": "/proceedings-article/trex/2021/181700a014/1yQB6h3HL6o",
"parentPublication": {
"id": "proceedings/trex/2021/1817/0",
"title": "2021 IEEE Workshop on TRust and EXpertise in Visual Analytics (TREX)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNAYXWAF",
"title": "2016 IEEE Virtual Reality (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2016",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNwwd2Jt",
"doi": "10.1109/VR.2016.7504690",
"title": "Personality differences predict decision-making in an accident situation in virtual driving",
"normalizedTitle": "Personality differences predict decision-making in an accident situation in virtual driving",
"abstract": "Understanding how humans make decisions in challenging situations — such as trying to save peoples' lives even though this endangers one's own life — is crucial in optimizing rescue operations and dealing with natural disasters and other crises. The experimental study of these decisions, however, has often been done using text-based surveys, which is known to emphasize rational and reflective judgments. Here, we used virtual reality to investigate decision-making in a real-world context, in which a decision needs to be made intuitively under time pressure — for this we simulated an accident situation in an immersive virtual driving scenario. In the scenario, participants were told to race a course as fast as possible. After training, participants were confronted with the sudden appearance of pedestrians on the race course. We observed three different behaviors: group one ignored the pedestrians and/or hit the accelerator, group two hit the brake, and group three tried to steer the car to avoid pedestrians. We found that most Avoid-group participants had more real and virtual driving experience compared to the other two groups and they also felt more competent during the game as measured by subjective game experience questionnaires. Importantly, results from established personality questionnaires showed that participants who did not brake (therefore potentially harming the pedestrians) had significantly lower scores in perspective-taking and higher scores in psychopathy compared to participants who tried to avoid the accident situation. Our results demonstrate that personality differences to some degree are able to predict intuitive decision-making and that such processes can be studied in a controlled, immersive VR simulation.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Understanding how humans make decisions in challenging situations — such as trying to save peoples' lives even though this endangers one's own life — is crucial in optimizing rescue operations and dealing with natural disasters and other crises. The experimental study of these decisions, however, has often been done using text-based surveys, which is known to emphasize rational and reflective judgments. Here, we used virtual reality to investigate decision-making in a real-world context, in which a decision needs to be made intuitively under time pressure — for this we simulated an accident situation in an immersive virtual driving scenario. In the scenario, participants were told to race a course as fast as possible. After training, participants were confronted with the sudden appearance of pedestrians on the race course. We observed three different behaviors: group one ignored the pedestrians and/or hit the accelerator, group two hit the brake, and group three tried to steer the car to avoid pedestrians. We found that most Avoid-group participants had more real and virtual driving experience compared to the other two groups and they also felt more competent during the game as measured by subjective game experience questionnaires. Importantly, results from established personality questionnaires showed that participants who did not brake (therefore potentially harming the pedestrians) had significantly lower scores in perspective-taking and higher scores in psychopathy compared to participants who tried to avoid the accident situation. Our results demonstrate that personality differences to some degree are able to predict intuitive decision-making and that such processes can be studied in a controlled, immersive VR simulation.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Understanding how humans make decisions in challenging situations — such as trying to save peoples' lives even though this endangers one's own life — is crucial in optimizing rescue operations and dealing with natural disasters and other crises. The experimental study of these decisions, however, has often been done using text-based surveys, which is known to emphasize rational and reflective judgments. Here, we used virtual reality to investigate decision-making in a real-world context, in which a decision needs to be made intuitively under time pressure — for this we simulated an accident situation in an immersive virtual driving scenario. In the scenario, participants were told to race a course as fast as possible. After training, participants were confronted with the sudden appearance of pedestrians on the race course. We observed three different behaviors: group one ignored the pedestrians and/or hit the accelerator, group two hit the brake, and group three tried to steer the car to avoid pedestrians. We found that most Avoid-group participants had more real and virtual driving experience compared to the other two groups and they also felt more competent during the game as measured by subjective game experience questionnaires. Importantly, results from established personality questionnaires showed that participants who did not brake (therefore potentially harming the pedestrians) had significantly lower scores in perspective-taking and higher scores in psychopathy compared to participants who tried to avoid the accident situation. Our results demonstrate that personality differences to some degree are able to predict intuitive decision-making and that such processes can be studied in a controlled, immersive VR simulation.",
"fno": "07504690",
"keywords": [
"Brakes",
"Games",
"Decision Making",
"Training",
"Accidents",
"Solid Modeling",
"Virtual Reality",
"Personality Differences",
"Virtual Reality",
"Decision Making",
"Driving Simulation",
"Moral Judgments"
],
"authors": [
{
"affiliation": "Department of Brain and Cognitive Engineering, Korea University, Seoul, Korea",
"fullName": "Uijong Ju",
"givenName": "Uijong",
"surname": "Ju",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Department of Biomedical Science, Korea University, Seoul, Korea",
"fullName": "June Kang",
"givenName": "June",
"surname": "Kang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Department of Brain and Cognitive Engineering, Korea University, Seoul, Korea",
"fullName": "Christian Wallraven",
"givenName": "Christian",
"surname": "Wallraven",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2016-03-01T00:00:00",
"pubType": "proceedings",
"pages": "77-82",
"year": "2016",
"issn": "2375-5334",
"isbn": "978-1-5090-0836-0",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07504689",
"articleId": "12OmNviHKla",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07504691",
"articleId": "12OmNBQC89A",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/hicss/2015/7367/0/7367a610",
"title": "Individual Differences that Predict Interactions in Mixed-Initiative Teams",
"doi": null,
"abstractUrl": "/proceedings-article/hicss/2015/7367a610/12OmNBU1jQX",
"parentPublication": {
"id": "proceedings/hicss/2015/7367/0",
"title": "2015 48th Hawaii International Conference on System Sciences (HICSS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hicss/2010/3869/0/03-07-02",
"title": "Using Personality Factors to Predict Interface Learning Performance",
"doi": null,
"abstractUrl": "/proceedings-article/hicss/2010/03-07-02/12OmNvD8RDj",
"parentPublication": {
"id": "proceedings/hicss/2010/3869/0",
"title": "2010 43rd Hawaii International Conference on System Sciences",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wi/2016/4470/0/4470a288",
"title": "Knowledge-Driven Approach to Predict Personality Traits by Leveraging Social Media Data",
"doi": null,
"abstractUrl": "/proceedings-article/wi/2016/4470a288/12OmNvTjZRz",
"parentPublication": {
"id": "proceedings/wi/2016/4470/0",
"title": "2016 IEEE/WIC/ACM International Conference on Web Intelligence (WI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2012/2216/0/06460690",
"title": "Traffic accident risk analysis based on relation of Common Route Models",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2012/06460690/12OmNxecRVc",
"parentPublication": {
"id": "proceedings/icpr/2012/2216/0",
"title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/05/08651471",
"title": "You or Me? Personality Traits Predict Sacrificial Decisions in an Accident Situation",
"doi": null,
"abstractUrl": "/journal/tg/2019/05/08651471/17WX58zhpC0",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ictech/2022/9694/0/969400a133",
"title": "Research on Simulation Test Method of Advanced Emergency Braking System of Commercial Vehicles",
"doi": null,
"abstractUrl": "/proceedings-article/ictech/2022/969400a133/1FWmofQbpeM",
"parentPublication": {
"id": "proceedings/ictech/2022/9694/0",
"title": "2022 11th International Conference of Information and Communication Technology (ICTech)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iiai-aai/2022/9755/0/975500a019",
"title": "Personality Traits Estimation of Participants Based on Multimodal Information in Knowledge-Transfer-type Discussion",
"doi": null,
"abstractUrl": "/proceedings-article/iiai-aai/2022/975500a019/1GU73HQwDy8",
"parentPublication": {
"id": "proceedings/iiai-aai/2022/9755/0",
"title": "2022 12th International Congress on Advanced Applied Informatics (IIAI-AAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/percom-workshops/2019/9151/0/08730779",
"title": "Analysis of Personality Dependent Differences in Pupillary Response and its Relation to Stress Recovery Ability",
"doi": null,
"abstractUrl": "/proceedings-article/percom-workshops/2019/08730779/1aDSKxbLAwo",
"parentPublication": {
"id": "proceedings/percom-workshops/2019/9151/0",
"title": "2019 IEEE International Conference on Pervasive Computing and Communications Workshops (PerCom Workshops)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wi/2019/6934/0/08909523",
"title": "Personality Recognition in Conversations using Capsule Neural Networks",
"doi": null,
"abstractUrl": "/proceedings-article/wi/2019/08909523/1febm2L5qgM",
"parentPublication": {
"id": "proceedings/wi/2019/6934/0",
"title": "2019 IEEE/WIC/ACM International Conference on Web Intelligence (WI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090622",
"title": "Assessing Personality Traits of Team Athletes in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090622/1jIxq3qAbba",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNqG0SWe",
"title": "2013 International Symposium on Computational and Business Intelligence (ISCBI)",
"acronym": "iscbi",
"groupId": "1803001",
"volume": "0",
"displayVolume": "0",
"year": "2013",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNxXCGEa",
"doi": "10.1109/ISCBI.2013.44",
"title": "Personality Traits Identification Using Rough Sets Based Machine Learning",
"normalizedTitle": "Personality Traits Identification Using Rough Sets Based Machine Learning",
"abstract": "Prediction of human behavior from his/her traits has long been sought by cognitive scientists. Human traits are often embedded in one's writings. Although some work has been done on identification of traits from essays, very little work can be found on extracting personality traits from written texts. Psychological studies suggest that extraction and prediction of rules from a data has been long pursued, and several methods have been proposed. In the present work we used Rough sets to extract the rules for prediction of personality traits. Rough Set is a comparatively recent method that has been effective in various fields such as medical, geological and other fields where intelligent decision making is required. Our experiments with rough sets in predicting personality traits produced encouraging results.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Prediction of human behavior from his/her traits has long been sought by cognitive scientists. Human traits are often embedded in one's writings. Although some work has been done on identification of traits from essays, very little work can be found on extracting personality traits from written texts. Psychological studies suggest that extraction and prediction of rules from a data has been long pursued, and several methods have been proposed. In the present work we used Rough sets to extract the rules for prediction of personality traits. Rough Set is a comparatively recent method that has been effective in various fields such as medical, geological and other fields where intelligent decision making is required. Our experiments with rough sets in predicting personality traits produced encouraging results.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Prediction of human behavior from his/her traits has long been sought by cognitive scientists. Human traits are often embedded in one's writings. Although some work has been done on identification of traits from essays, very little work can be found on extracting personality traits from written texts. Psychological studies suggest that extraction and prediction of rules from a data has been long pursued, and several methods have been proposed. In the present work we used Rough sets to extract the rules for prediction of personality traits. Rough Set is a comparatively recent method that has been effective in various fields such as medical, geological and other fields where intelligent decision making is required. Our experiments with rough sets in predicting personality traits produced encouraging results.",
"fno": "5066a182",
"keywords": [
"Machine Learning",
"Personality Recognition",
"Big Five Model Of Personality",
"Rough Sets"
],
"authors": [
{
"affiliation": null,
"fullName": "Umang Gupta",
"givenName": "Umang",
"surname": "Gupta",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Niladri Chatterjee",
"givenName": "Niladri",
"surname": "Chatterjee",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iscbi",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2013-08-01T00:00:00",
"pubType": "proceedings",
"pages": "182-185",
"year": "2013",
"issn": null,
"isbn": "978-0-7695-5066-4",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "5066a178",
"articleId": "12OmNyPQ4FV",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "5066a186",
"articleId": "12OmNCvLXYo",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/agile/2006/2562/0/25620089",
"title": "Critical Personality Traits in Successful Pair Programming",
"doi": null,
"abstractUrl": "/proceedings-article/agile/2006/25620089/12OmNBiygtm",
"parentPublication": {
"id": "proceedings/agile/2006/2562/0",
"title": "AGILE 2006",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wi/2016/4470/0/4470a288",
"title": "Knowledge-Driven Approach to Predict Personality Traits by Leveraging Social Media Data",
"doi": null,
"abstractUrl": "/proceedings-article/wi/2016/4470a288/12OmNvTjZRz",
"parentPublication": {
"id": "proceedings/wi/2016/4470/0",
"title": "2016 IEEE/WIC/ACM International Conference on Web Intelligence (WI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/seaa/2018/7383/0/738300a214",
"title": "Linking Personality Traits and Interpersonal Skills to Gamification Awards",
"doi": null,
"abstractUrl": "/proceedings-article/seaa/2018/738300a214/17D45WaTkgv",
"parentPublication": {
"id": "proceedings/seaa/2018/7383/0",
"title": "2018 44th Euromicro Conference on Software Engineering and Advanced Applications (SEAA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/co/2019/07/08747565",
"title": "Predicting Personality Traits From Physical Activity Intensity",
"doi": null,
"abstractUrl": "/magazine/co/2019/07/08747565/1bcFjqwvpLO",
"parentPublication": {
"id": "mags/co",
"title": "Computer",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/svr/2018/0604/0/060400a047",
"title": "Personality Traits Impacts in Virtual Reality’s User Experience",
"doi": null,
"abstractUrl": "/proceedings-article/svr/2018/060400a047/1cJ7ycHKjcc",
"parentPublication": {
"id": "proceedings/svr/2018/0604/0",
"title": "2018 20th Symposium on Virtual and Augmented Reality (SVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ts/2021/11/08935389",
"title": "Effects of Personality Traits on Pull Request Acceptance",
"doi": null,
"abstractUrl": "/journal/ts/2021/11/08935389/1fPUhns9Zlu",
"parentPublication": {
"id": "trans/ts",
"title": "IEEE Transactions on Software Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/avss/2019/0990/0/08909839",
"title": "Personality Traits Classification on Twitter",
"doi": null,
"abstractUrl": "/proceedings-article/avss/2019/08909839/1febNVOlteg",
"parentPublication": {
"id": "proceedings/avss/2019/0990/0",
"title": "2019 16th IEEE International Conference on Advanced Video and Signal Based Surveillance (AVSS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ispa-bdcloud-socialcom-sustaincom/2019/4328/0/09047374",
"title": "Facial-Based Personality Prediction Models for Estimating Individuals Private Traits",
"doi": null,
"abstractUrl": "/proceedings-article/ispa-bdcloud-socialcom-sustaincom/2019/09047374/1iC6BNzuL3G",
"parentPublication": {
"id": "proceedings/ispa-bdcloud-socialcom-sustaincom/2019/4328/0",
"title": "2019 IEEE Intl Conf on Parallel & Distributed Processing with Applications, Big Data & Cloud Computing, Sustainable Computing & Communications, Social Computing & Networking (ISPA/BDCloud/SocialCom/SustainCom)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/scc/2020/8789/0/878900a054",
"title": "Personality Traits Prediction Based on Users’ Digital Footprints in Social Networks via Attention RNN",
"doi": null,
"abstractUrl": "/proceedings-article/scc/2020/878900a054/1pttWLfhuRG",
"parentPublication": {
"id": "proceedings/scc/2020/8789/0",
"title": "2020 IEEE International Conference on Services Computing (SCC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ta/2023/01/09417627",
"title": "Incorporating Forthcoming Events and Personality Traits in Social Media Based Stress Prediction",
"doi": null,
"abstractUrl": "/journal/ta/2023/01/09417627/1taAGhGKixG",
"parentPublication": {
"id": "trans/ta",
"title": "IEEE Transactions on Affective Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "13bd1eJgoif",
"title": "2018 IEEE/ACM 40th International Conference on Software Engineering: Companion Proceedings (ICSE-Companion)",
"acronym": "icse-companion",
"groupId": "1002125",
"volume": "0",
"displayVolume": "0",
"year": "2018",
"__typename": "ProceedingType"
},
"article": {
"id": "13bd1sv5NyW",
"doi": null,
"title": "The Relationship between Personality and Value-Based Decision-Making",
"normalizedTitle": "The Relationship between Personality and Value-Based Decision-Making",
"abstract": "Within the context of software engineering, many decisions take place and such decisions should employ value propositions that focus on short as well as long-term goals. In 2003, Boehm coined the term Value-Based Software Engineering (VBSE), which entails the change from a value-neutral to a value-based approach. VBSE argues that decisions should be based on all key stakeholders' value propositions, and to balance both the short-term as well as the long-term goal(s). This paper details a PhD research plan that aims to study the relationship between personality and decision-making within the context of VBSE. The research focuses on group decision-making, considering three aspects: the interaction among decision-makers, their perception of the decision value and their personality traits. The research methodology will be experiment which will revolve around a hypothetical software development project and some decisions that need to be made, for example, the features to be included and the priority of each one. The contribution from a theoretical perspective is to understand the relationship among three main aspects: personality traits, decision-making process, and decision value. From practitioners' perspective, the contribution is to provide help on improving software project decision-making.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Within the context of software engineering, many decisions take place and such decisions should employ value propositions that focus on short as well as long-term goals. In 2003, Boehm coined the term Value-Based Software Engineering (VBSE), which entails the change from a value-neutral to a value-based approach. VBSE argues that decisions should be based on all key stakeholders' value propositions, and to balance both the short-term as well as the long-term goal(s). This paper details a PhD research plan that aims to study the relationship between personality and decision-making within the context of VBSE. The research focuses on group decision-making, considering three aspects: the interaction among decision-makers, their perception of the decision value and their personality traits. The research methodology will be experiment which will revolve around a hypothetical software development project and some decisions that need to be made, for example, the features to be included and the priority of each one. The contribution from a theoretical perspective is to understand the relationship among three main aspects: personality traits, decision-making process, and decision value. From practitioners' perspective, the contribution is to provide help on improving software project decision-making.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Within the context of software engineering, many decisions take place and such decisions should employ value propositions that focus on short as well as long-term goals. In 2003, Boehm coined the term Value-Based Software Engineering (VBSE), which entails the change from a value-neutral to a value-based approach. VBSE argues that decisions should be based on all key stakeholders' value propositions, and to balance both the short-term as well as the long-term goal(s). This paper details a PhD research plan that aims to study the relationship between personality and decision-making within the context of VBSE. The research focuses on group decision-making, considering three aspects: the interaction among decision-makers, their perception of the decision value and their personality traits. The research methodology will be experiment which will revolve around a hypothetical software development project and some decisions that need to be made, for example, the features to be included and the priority of each one. The contribution from a theoretical perspective is to understand the relationship among three main aspects: personality traits, decision-making process, and decision value. From practitioners' perspective, the contribution is to provide help on improving software project decision-making.",
"fno": "566301a460",
"keywords": [
"Decision Making",
"Software Engineering",
"Value Engineering",
"VBSE",
"Ph D Research Plan",
"Group Decision Making",
"Hypothetical Software Development Project",
"Software Project Decision Making",
"Value Neutral",
"Value Based Decision Making",
"Value Based Software Engineering",
"Decision Making",
"Software Engineering",
"Software",
"Companies",
"Stakeholders",
"Bibliographies",
"Systematics",
"Value Based Software Engineering",
"Personality",
"Decision Making"
],
"authors": [
{
"affiliation": null,
"fullName": "Fabiana Freitas Mendes",
"givenName": "Fabiana Freitas",
"surname": "Mendes",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icse-companion",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2018-05-01T00:00:00",
"pubType": "proceedings",
"pages": "460-461",
"year": "2018",
"issn": "2574-1934",
"isbn": "978-1-4503-5663-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "566301a458",
"articleId": "13bd1eNNYnz",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "566301a462",
"articleId": "13bd1tl2olV",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icsa/2018/6398/0/639801a085",
"title": "An Expert Recommendation System for Design Decision Making: Who Should be Involved in Making a Design Decision?",
"doi": null,
"abstractUrl": "/proceedings-article/icsa/2018/639801a085/12OmNqHItJr",
"parentPublication": {
"id": "proceedings/icsa/2018/6398/0",
"title": "2018 IEEE International Conference on Software Architecture (ICSA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mtd/2015/7378/0/07332627",
"title": "Decision-making framework for refactoring",
"doi": null,
"abstractUrl": "/proceedings-article/mtd/2015/07332627/12OmNqIzgYS",
"parentPublication": {
"id": "proceedings/mtd/2015/7378/0",
"title": "2015 IEEE 7th International Workshop on Managing Technical Debt (MTD)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/apsec/2017/3681/0/3681a279",
"title": "Value-Based Decision-Making Using a Web-Based Tool: A Multiple Case Study",
"doi": null,
"abstractUrl": "/proceedings-article/apsec/2017/3681a279/12OmNvqmUIm",
"parentPublication": {
"id": "proceedings/apsec/2017/3681/0",
"title": "2017 24th Asia-Pacific Software Engineering Conference (APSEC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2016/0836/0/07504690",
"title": "Personality differences predict decision-making in an accident situation in virtual driving",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2016/07504690/12OmNwwd2Jt",
"parentPublication": {
"id": "proceedings/vr/2016/0836/0",
"title": "2016 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icsa/2017/5729/0/5729a107",
"title": "Human Aspects in Software Architecture Decision Making: A Literature Review",
"doi": null,
"abstractUrl": "/proceedings-article/icsa/2017/5729a107/12OmNx1qV0K",
"parentPublication": {
"id": "proceedings/icsa/2017/5729/0",
"title": "2017 IEEE International Conference on Software Architecture (ICSA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ts/2020/12/08509148",
"title": "Key Stakeholders’ Value Propositions for Feature Selection in Software-Intensive Products: An Industrial Case Study",
"doi": null,
"abstractUrl": "/journal/ts/2020/12/08509148/14Fq0WXjmAs",
"parentPublication": {
"id": "trans/ts",
"title": "IEEE Transactions on Software Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/techdebt/2018/5713/0/571301a085",
"title": "Trade-Off Decisions across Time in Technical Debt Management: A Systematic Literature Review",
"doi": null,
"abstractUrl": "/proceedings-article/techdebt/2018/571301a085/17D45VTRovf",
"parentPublication": {
"id": "proceedings/techdebt/2018/5713/0",
"title": "2018 IEEE/ACM International Conference on Technical Debt (TechDebt)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/apsec/2019/4648/0/464800a323",
"title": "Decisions and Their Making in OSS Development: An Exploratory Study Using the Hibernate Developer Mailing List",
"doi": null,
"abstractUrl": "/proceedings-article/apsec/2019/464800a323/1gjRTkkDu36",
"parentPublication": {
"id": "proceedings/apsec/2019/4648/0",
"title": "2019 26th Asia-Pacific Software Engineering Conference (APSEC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ts/2022/02/09088281",
"title": "A Theory of Value for Value-Based Feature Selection in Software Engineering",
"doi": null,
"abstractUrl": "/journal/ts/2022/02/09088281/1jBRuMi77sQ",
"parentPublication": {
"id": "trans/ts",
"title": "IEEE Transactions on Software Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/so/2021/06/09585145",
"title": "Decision-Making Principles for Better Software Design Decisions",
"doi": null,
"abstractUrl": "/magazine/so/2021/06/09585145/1xZlyVOzQWY",
"parentPublication": {
"id": "mags/so",
"title": "IEEE Software",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1bzYnKROnN6",
"title": "2019 14th IEEE International Conference on Automatic Face & Gesture Recognition (FG 2019)",
"acronym": "fg",
"groupId": "1000065",
"volume": "0",
"displayVolume": "0",
"year": "2019",
"__typename": "ProceedingType"
},
"article": {
"id": "1bzYs0vbPUc",
"doi": "10.1109/FG.2019.8756531",
"title": "Analyzing group performance in small group interaction: Linking personality traits and group performance through the verbal content",
"normalizedTitle": "Analyzing group performance in small group interaction: Linking personality traits and group performance through the verbal content",
"abstract": "In this paper, we investigate the link between the personality traits and group performance in terms of the verbal content. We further study the variability in the verbal interaction between different performance groups. Towards this goal, we extract topics representing the content of meetings as well as term-frequencies of items that play a critical role in the decision task. We use a dataset where each group performs the winter survival task, in which the task is to decide on the ranking of different items with respect to the importance of each item for their survival. In the experiments, we contrast the ranking of items with respect to their term frequencies and compare the differences between topics both for distinct personality traits and group performances. Results of the term-frequency based approach show that influential people put correct emphasis on items more than dominant people. The topic-based method reveals that influential people consider the majority of items by providing usage instructions for alternative scenarios and that dominant people focus only on a small subset of items by stressing their significance. High-performance groups assess items in a similar manner to influential and dominant people, i.e. a wide range of items are considered and their importance is explained. Low-performance groups, on the other hand, concentrate on the situation they are in rather than the items and their usages.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this paper, we investigate the link between the personality traits and group performance in terms of the verbal content. We further study the variability in the verbal interaction between different performance groups. Towards this goal, we extract topics representing the content of meetings as well as term-frequencies of items that play a critical role in the decision task. We use a dataset where each group performs the winter survival task, in which the task is to decide on the ranking of different items with respect to the importance of each item for their survival. In the experiments, we contrast the ranking of items with respect to their term frequencies and compare the differences between topics both for distinct personality traits and group performances. Results of the term-frequency based approach show that influential people put correct emphasis on items more than dominant people. The topic-based method reveals that influential people consider the majority of items by providing usage instructions for alternative scenarios and that dominant people focus only on a small subset of items by stressing their significance. High-performance groups assess items in a similar manner to influential and dominant people, i.e. a wide range of items are considered and their importance is explained. Low-performance groups, on the other hand, concentrate on the situation they are in rather than the items and their usages.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this paper, we investigate the link between the personality traits and group performance in terms of the verbal content. We further study the variability in the verbal interaction between different performance groups. Towards this goal, we extract topics representing the content of meetings as well as term-frequencies of items that play a critical role in the decision task. We use a dataset where each group performs the winter survival task, in which the task is to decide on the ranking of different items with respect to the importance of each item for their survival. In the experiments, we contrast the ranking of items with respect to their term frequencies and compare the differences between topics both for distinct personality traits and group performances. Results of the term-frequency based approach show that influential people put correct emphasis on items more than dominant people. The topic-based method reveals that influential people consider the majority of items by providing usage instructions for alternative scenarios and that dominant people focus only on a small subset of items by stressing their significance. High-performance groups assess items in a similar manner to influential and dominant people, i.e. a wide range of items are considered and their importance is explained. Low-performance groups, on the other hand, concentrate on the situation they are in rather than the items and their usages.",
"fno": "08756531",
"keywords": [
"Feature Extraction",
"Psychology",
"Social Sciences Computing",
"Speech Processing",
"Group Interaction",
"Verbal Content",
"Verbal Interaction",
"Term Frequencies",
"Personality Traits",
"Group Performance",
"Social Psychology",
"Speech Features",
"Task Analysis",
"Computational Modeling",
"Organizations",
"Linguistics",
"Analytical Models",
"Accidents",
"Audio Recording"
],
"authors": [
{
"affiliation": "Faculty of Engineering, Yasar University, Izmir, Turkey",
"fullName": "Umut Avci",
"givenName": "Umut",
"surname": "Avci",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "College of Computer Studies, De La Salle University, Manila, Philippines",
"fullName": "Oya Aran",
"givenName": "Oya",
"surname": "Aran",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "fg",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2019-05-01T00:00:00",
"pubType": "proceedings",
"pages": "1-7",
"year": "2019",
"issn": null,
"isbn": "978-1-7281-0089-0",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08756565",
"articleId": "1bzYvBv5HuE",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08756577",
"articleId": "1bzYqLuvcWY",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iiai-aai/2016/8985/0/8985a386",
"title": "Verbal Behavior in Digital Board Game: The Association between Personality Traits and Lying Behavior",
"doi": null,
"abstractUrl": "/proceedings-article/iiai-aai/2016/8985a386/12OmNButq6V",
"parentPublication": {
"id": "proceedings/iiai-aai/2016/8985/0",
"title": "2016 5th IIAI International Congress on Advanced Applied Informatics (IIAI-AAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/seaa/2018/7383/0/738300a214",
"title": "Linking Personality Traits and Interpersonal Skills to Gamification Awards",
"doi": null,
"abstractUrl": "/proceedings-article/seaa/2018/738300a214/17D45WaTkgv",
"parentPublication": {
"id": "proceedings/seaa/2018/7383/0",
"title": "2018 44th Euromicro Conference on Software Engineering and Advanced Applications (SEAA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icime/2018/7616/0/761600a077",
"title": "Impact of Weibo User's Personality Traits on Loyalty",
"doi": null,
"abstractUrl": "/proceedings-article/icime/2018/761600a077/17D45Wc1IM7",
"parentPublication": {
"id": "proceedings/icime/2018/7616/0",
"title": "2018 International Joint Conference on Information, Media and Engineering (ICIME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/05/08651471",
"title": "You or Me? Personality Traits Predict Sacrificial Decisions in an Accident Situation",
"doi": null,
"abstractUrl": "/journal/tg/2019/05/08651471/17WX58zhpC0",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icore/2021/0210/0/021000a163",
"title": "Dr. Speaks: A Mobile and Web Application Framework for Filipinos with Verbal Apraxia",
"doi": null,
"abstractUrl": "/proceedings-article/icore/2021/021000a163/1AqyrhOSYFi",
"parentPublication": {
"id": "proceedings/icore/2021/0210/0",
"title": "2021 1st International Conference in Information and Computing Research (iCORE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/asonam/2022/5661/0/10068566",
"title": "Discovering Affinity Relationships between Personality Types",
"doi": null,
"abstractUrl": "/proceedings-article/asonam/2022/10068566/1LKwZl6tTJC",
"parentPublication": {
"id": "proceedings/asonam/2022/5661/0",
"title": "2022 IEEE/ACM International Conference on Advances in Social Networks Analysis and Mining (ASONAM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ts/2021/11/08935389",
"title": "Effects of Personality Traits on Pull Request Acceptance",
"doi": null,
"abstractUrl": "/journal/ts/2021/11/08935389/1fPUhns9Zlu",
"parentPublication": {
"id": "trans/ts",
"title": "IEEE Transactions on Software Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/avss/2019/0990/0/08909839",
"title": "Personality Traits Classification on Twitter",
"doi": null,
"abstractUrl": "/proceedings-article/avss/2019/08909839/1febNVOlteg",
"parentPublication": {
"id": "proceedings/avss/2019/0990/0",
"title": "2019 16th IEEE International Conference on Advanced Video and Signal Based Surveillance (AVSS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/csci/2018/1360/0/136000b332",
"title": "Extraction and Use of Personality Traits from Written Commentary",
"doi": null,
"abstractUrl": "/proceedings-article/csci/2018/136000b332/1gjRyA33Xz2",
"parentPublication": {
"id": "proceedings/csci/2018/1360/0",
"title": "2018 International Conference on Computational Science and Computational Intelligence (CSCI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2020/6553/0/09093642",
"title": "Multiparty Visual Co-Occurrences for Estimating Personality Traits in Group Meetings",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2020/09093642/1jPbnmUJEw8",
"parentPublication": {
"id": "proceedings/wacv/2020/6553/0",
"title": "2020 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1cJ7xyunODu",
"title": "2018 20th Symposium on Virtual and Augmented Reality (SVR)",
"acronym": "svr",
"groupId": "1800426",
"volume": "0",
"displayVolume": "0",
"year": "2018",
"__typename": "ProceedingType"
},
"article": {
"id": "1cJ7ycHKjcc",
"doi": "10.1109/SVR.2018.00019",
"title": "Personality Traits Impacts in Virtual Reality’s User Experience",
"normalizedTitle": "Personality Traits Impacts in Virtual Reality’s User Experience",
"abstract": "Psychological aspects of human beings, such as emotions and personality traits, have been investigated in order to improve computer applications and provide adequate responses to each individual. In Virtual Reality environments, specially when reproducing real situations, user experience can be impacted by the way his/her personality traits relates to the environment characteristics. By other hand, these environments are useful to simulate situations that may promote changes in user's behavior. Although there is a considerable number of researches that establish relationships between personality traits and virtual reality environments, no compilation of them was presented until now. This article aims to fill this gap, presenting a systematic literature review about this theme. A total of 387 articles were retrieved, from which 20 were included in the review, after application of inclusion and exclusion criteria. The main results indicate that user personality traits can influence their user experience with RV environments in various directions, including the perception of immersion and decision making. Additionally, VR environments could be used to help treat people with social disorders.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Psychological aspects of human beings, such as emotions and personality traits, have been investigated in order to improve computer applications and provide adequate responses to each individual. In Virtual Reality environments, specially when reproducing real situations, user experience can be impacted by the way his/her personality traits relates to the environment characteristics. By other hand, these environments are useful to simulate situations that may promote changes in user's behavior. Although there is a considerable number of researches that establish relationships between personality traits and virtual reality environments, no compilation of them was presented until now. This article aims to fill this gap, presenting a systematic literature review about this theme. A total of 387 articles were retrieved, from which 20 were included in the review, after application of inclusion and exclusion criteria. The main results indicate that user personality traits can influence their user experience with RV environments in various directions, including the perception of immersion and decision making. Additionally, VR environments could be used to help treat people with social disorders.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Psychological aspects of human beings, such as emotions and personality traits, have been investigated in order to improve computer applications and provide adequate responses to each individual. In Virtual Reality environments, specially when reproducing real situations, user experience can be impacted by the way his/her personality traits relates to the environment characteristics. By other hand, these environments are useful to simulate situations that may promote changes in user's behavior. Although there is a considerable number of researches that establish relationships between personality traits and virtual reality environments, no compilation of them was presented until now. This article aims to fill this gap, presenting a systematic literature review about this theme. A total of 387 articles were retrieved, from which 20 were included in the review, after application of inclusion and exclusion criteria. The main results indicate that user personality traits can influence their user experience with RV environments in various directions, including the perception of immersion and decision making. Additionally, VR environments could be used to help treat people with social disorders.",
"fno": "060400a047",
"keywords": [
"Human Factors",
"Psychology",
"User Experience",
"Virtual Reality",
"User Experience",
"Virtual Reality Environments",
"User Personality Traits",
"RV Environments",
"VR Environments",
"Psychological Aspects",
"People With Social Disorder Treatment",
"Virtual Reality",
"Personality Computing",
"Affective Computing"
],
"authors": [
{
"affiliation": "Escola Politécnica Universidade de São Paulo",
"fullName": "Renan Vinicius Aranha",
"givenName": "Renan Vinicius",
"surname": "Aranha",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Escola Politécnica Universidade de São Paulo",
"fullName": "Ricardo Nakamura",
"givenName": "Ricardo",
"surname": "Nakamura",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Escola Politécnica Universidade de São Paulo",
"fullName": "Romero Tori",
"givenName": "Romero",
"surname": "Tori",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Escola Politécnica Universidade de São Paulo",
"fullName": "Fátima L.S. Nunes",
"givenName": "Fátima L.S.",
"surname": "Nunes",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "svr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2018-10-01T00:00:00",
"pubType": "proceedings",
"pages": "47-56",
"year": "2018",
"issn": null,
"isbn": "978-1-7281-0604-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "060400a027",
"articleId": "1cJ7zf7Q2s0",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "060400a082",
"articleId": "1cJ7yFMMlq0",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/agile/2006/2562/0/25620089",
"title": "Critical Personality Traits in Successful Pair Programming",
"doi": null,
"abstractUrl": "/proceedings-article/agile/2006/25620089/12OmNBiygtm",
"parentPublication": {
"id": "proceedings/agile/2006/2562/0",
"title": "AGILE 2006",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wi/2016/4470/0/4470a288",
"title": "Knowledge-Driven Approach to Predict Personality Traits by Leveraging Social Media Data",
"doi": null,
"abstractUrl": "/proceedings-article/wi/2016/4470a288/12OmNvTjZRz",
"parentPublication": {
"id": "proceedings/wi/2016/4470/0",
"title": "2016 IEEE/WIC/ACM International Conference on Web Intelligence (WI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iscbi/2013/5066/0/5066a182",
"title": "Personality Traits Identification Using Rough Sets Based Machine Learning",
"doi": null,
"abstractUrl": "/proceedings-article/iscbi/2013/5066a182/12OmNxXCGEa",
"parentPublication": {
"id": "proceedings/iscbi/2013/5066/0",
"title": "2013 International Symposium on Computational and Business Intelligence (ISCBI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icalt/2022/9519/0/951900a049",
"title": "A multiplatform application for automatic recognition of personality traits for Learning Environments",
"doi": null,
"abstractUrl": "/proceedings-article/icalt/2022/951900a049/1FUU8Pm6jEQ",
"parentPublication": {
"id": "proceedings/icalt/2022/9519/0",
"title": "2022 International Conference on Advanced Learning Technologies (ICALT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/co/2019/07/08747565",
"title": "Predicting Personality Traits From Physical Activity Intensity",
"doi": null,
"abstractUrl": "/magazine/co/2019/07/08747565/1bcFjqwvpLO",
"parentPublication": {
"id": "mags/co",
"title": "Computer",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ts/2021/11/08935389",
"title": "Effects of Personality Traits on Pull Request Acceptance",
"doi": null,
"abstractUrl": "/journal/ts/2021/11/08935389/1fPUhns9Zlu",
"parentPublication": {
"id": "trans/ts",
"title": "IEEE Transactions on Software Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/avss/2019/0990/0/08909839",
"title": "Personality Traits Classification on Twitter",
"doi": null,
"abstractUrl": "/proceedings-article/avss/2019/08909839/1febNVOlteg",
"parentPublication": {
"id": "proceedings/avss/2019/0990/0",
"title": "2019 16th IEEE International Conference on Advanced Video and Signal Based Surveillance (AVSS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090622",
"title": "Assessing Personality Traits of Team Athletes in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090622/1jIxq3qAbba",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icbdie/2020/5900/0/09150263",
"title": "Study on the Influence of College Students’ Personality Traits on Employment Intention Choice",
"doi": null,
"abstractUrl": "/proceedings-article/icbdie/2020/09150263/1lPGMIcrFAY",
"parentPublication": {
"id": "proceedings/icbdie/2020/5900/0",
"title": "2020 International Conference on Big Data and Informatization Education (ICBDIE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/scc/2020/8789/0/878900a054",
"title": "Personality Traits Prediction Based on Users’ Digital Footprints in Social Networks via Attention RNN",
"doi": null,
"abstractUrl": "/proceedings-article/scc/2020/878900a054/1pttWLfhuRG",
"parentPublication": {
"id": "proceedings/scc/2020/8789/0",
"title": "2020 IEEE International Conference on Services Computing (SCC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1febI2L3NK0",
"title": "2019 16th IEEE International Conference on Advanced Video and Signal Based Surveillance (AVSS)",
"acronym": "avss",
"groupId": "1001307",
"volume": "0",
"displayVolume": "0",
"year": "2019",
"__typename": "ProceedingType"
},
"article": {
"id": "1febNVOlteg",
"doi": "10.1109/AVSS.2019.8909839",
"title": "Personality Traits Classification on Twitter",
"normalizedTitle": "Personality Traits Classification on Twitter",
"abstract": "Personality traits have been shown to have strong influences on important aspects of life such as success in the workplace, political temperament, and general emotional stability. Computer-based personality assessments using information from social networking platforms have shown to be more accurate than judgments made by people close to the subject. This paper presents a personality traits classification system that incorporates language-based features, based on count-based vectorization (TF-IDF) and the GloVe word embedding technique, with an ensemble prediction system consisting of gradient-boosted decision trees and an SVM classifier. This combination allows to reliably estimate certain personality traits using only the latest 50 tweets from a user's profile. The performance of the proposed system is validated on a large, publicly available dataset and compares favourably with other state-of-the-art methods.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Personality traits have been shown to have strong influences on important aspects of life such as success in the workplace, political temperament, and general emotional stability. Computer-based personality assessments using information from social networking platforms have shown to be more accurate than judgments made by people close to the subject. This paper presents a personality traits classification system that incorporates language-based features, based on count-based vectorization (TF-IDF) and the GloVe word embedding technique, with an ensemble prediction system consisting of gradient-boosted decision trees and an SVM classifier. This combination allows to reliably estimate certain personality traits using only the latest 50 tweets from a user's profile. The performance of the proposed system is validated on a large, publicly available dataset and compares favourably with other state-of-the-art methods.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Personality traits have been shown to have strong influences on important aspects of life such as success in the workplace, political temperament, and general emotional stability. Computer-based personality assessments using information from social networking platforms have shown to be more accurate than judgments made by people close to the subject. This paper presents a personality traits classification system that incorporates language-based features, based on count-based vectorization (TF-IDF) and the GloVe word embedding technique, with an ensemble prediction system consisting of gradient-boosted decision trees and an SVM classifier. This combination allows to reliably estimate certain personality traits using only the latest 50 tweets from a user's profile. The performance of the proposed system is validated on a large, publicly available dataset and compares favourably with other state-of-the-art methods.",
"fno": "08909839",
"keywords": [
"Decision Trees",
"Gradient Methods",
"Natural Language Processing",
"Pattern Classification",
"Social Networking Online",
"Support Vector Machines",
"Computer Based Personality Assessments",
"Language Based Features",
"Count Based Vectorization",
"Glo Ve Word Embedding Technique",
"Ensemble Prediction System",
"Gradient Boosted Decision Trees",
"Personality Traits Classification",
"Political Temperament",
"Emotional Stability",
"Twitter",
"Workplace",
"Social Networking",
"TF IDF",
"SVM Classifier",
"Twitter",
"Feature Extraction",
"Biological System Modeling",
"Support Vector Machines",
"Linguistics",
"Decision Trees"
],
"authors": [
{
"affiliation": "University of Calgary,Department of Computer Science,Calgary,Alberta,Canada",
"fullName": "K. N. Pavan Kumar",
"givenName": "K. N. Pavan",
"surname": "Kumar",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Calgary,Department of Computer Science,Calgary,Alberta,Canada",
"fullName": "Marina L. Gavrilova",
"givenName": "Marina L.",
"surname": "Gavrilova",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "avss",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2019-09-01T00:00:00",
"pubType": "proceedings",
"pages": "1-8",
"year": "2019",
"issn": null,
"isbn": "978-1-7281-0990-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08909867",
"articleId": "1febJx9uDK0",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08909888",
"articleId": "1febIVLPQHe",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/agile/2006/2562/0/25620089",
"title": "Critical Personality Traits in Successful Pair Programming",
"doi": null,
"abstractUrl": "/proceedings-article/agile/2006/25620089/12OmNBiygtm",
"parentPublication": {
"id": "proceedings/agile/2006/2562/0",
"title": "AGILE 2006",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/issre/2016/9002/0/9002a077",
"title": "On the Personality Traits of GitHub Contributors",
"doi": null,
"abstractUrl": "/proceedings-article/issre/2016/9002a077/12OmNvzJFY8",
"parentPublication": {
"id": "proceedings/issre/2016/9002/0",
"title": "2016 IEEE 27th International Symposium on Software Reliability Engineering (ISSRE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/trustcom-bigdatase-i-spa/2016/3205/0/07847160",
"title": "A Mobile Security-Related Behavior Prevention Model Based on Speech Personality Traits",
"doi": null,
"abstractUrl": "/proceedings-article/trustcom-bigdatase-i-spa/2016/07847160/12OmNwdL7pY",
"parentPublication": {
"id": "proceedings/trustcom-bigdatase-i-spa/2016/3205/0",
"title": "2016 IEEE Trustcom/BigDataSE/ISPA",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iscbi/2013/5066/0/5066a182",
"title": "Personality Traits Identification Using Rough Sets Based Machine Learning",
"doi": null,
"abstractUrl": "/proceedings-article/iscbi/2013/5066a182/12OmNxXCGEa",
"parentPublication": {
"id": "proceedings/iscbi/2013/5066/0",
"title": "2013 International Symposium on Computational and Business Intelligence (ISCBI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/seaa/2018/7383/0/738300a214",
"title": "Linking Personality Traits and Interpersonal Skills to Gamification Awards",
"doi": null,
"abstractUrl": "/proceedings-article/seaa/2018/738300a214/17D45WaTkgv",
"parentPublication": {
"id": "proceedings/seaa/2018/7383/0",
"title": "2018 44th Euromicro Conference on Software Engineering and Advanced Applications (SEAA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iiai-aai/2022/9755/0/975500a019",
"title": "Personality Traits Estimation of Participants Based on Multimodal Information in Knowledge-Transfer-type Discussion",
"doi": null,
"abstractUrl": "/proceedings-article/iiai-aai/2022/975500a019/1GU73HQwDy8",
"parentPublication": {
"id": "proceedings/iiai-aai/2022/9755/0",
"title": "2022 12th International Congress on Advanced Applied Informatics (IIAI-AAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/co/2019/07/08747565",
"title": "Predicting Personality Traits From Physical Activity Intensity",
"doi": null,
"abstractUrl": "/magazine/co/2019/07/08747565/1bcFjqwvpLO",
"parentPublication": {
"id": "mags/co",
"title": "Computer",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ts/2021/11/08935389",
"title": "Effects of Personality Traits on Pull Request Acceptance",
"doi": null,
"abstractUrl": "/journal/ts/2021/11/08935389/1fPUhns9Zlu",
"parentPublication": {
"id": "trans/ts",
"title": "IEEE Transactions on Software Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/csci/2018/1360/0/136000b332",
"title": "Extraction and Use of Personality Traits from Written Commentary",
"doi": null,
"abstractUrl": "/proceedings-article/csci/2018/136000b332/1gjRyA33Xz2",
"parentPublication": {
"id": "proceedings/csci/2018/1360/0",
"title": "2018 International Conference on Computational Science and Computational Intelligence (CSCI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ispa-bdcloud-socialcom-sustaincom/2019/4328/0/09047374",
"title": "Facial-Based Personality Prediction Models for Estimating Individuals Private Traits",
"doi": null,
"abstractUrl": "/proceedings-article/ispa-bdcloud-socialcom-sustaincom/2019/09047374/1iC6BNzuL3G",
"parentPublication": {
"id": "proceedings/ispa-bdcloud-socialcom-sustaincom/2019/4328/0",
"title": "2019 IEEE Intl Conf on Parallel & Distributed Processing with Applications, Big Data & Cloud Computing, Sustainable Computing & Communications, Social Computing & Networking (ISPA/BDCloud/SocialCom/SustainCom)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1jIxhEnA8IE",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"acronym": "vrw",
"groupId": "1836626",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1jIxq3qAbba",
"doi": "10.1109/VRW50115.2020.00024",
"title": "Assessing Personality Traits of Team Athletes in Virtual Reality",
"normalizedTitle": "Assessing Personality Traits of Team Athletes in Virtual Reality",
"abstract": "Assessment of personality traits is highly relevant in team sports in order to analyze the performance of an athlete under pressure when in competitive situations, for team-strategic decisions, to optimize command transmission, and ultimately to understand top-level performers. It further facilitates the development and application of personalized exercises, coaching to improve performance in competition, and can be considered a valuable criterion for talent scouting and development. The current state of the art method to assess personality traits in sports relies on validated questionnaires. However, these often provide non-sport-specific, subjective self-reported information and lack the ability to measure how these characteristics are reflected in context-based performance.We developed a virtual reality (VR) tool for the assessment of personality traits in team sports, in our case for soccer. An evaluation of this tool within a study with 24 subjects yielded a benchmark of its immersion through user experience and provided an objective description of athletes’ personalities based on performance indicators extracted from activity-tracking. Within the tool, we implemented two realistic virtual soccer environments to assess the motivational orientation of soccer players (i.e. action- and state-orientation) which we discerned from the gold standard questionnaire.Results show that user experience and presence of the implemented virtual environments scored significantly higher compared to benchmark measurements. Additionally, a significant difference between the two groups of action and state-oriented athletes could be observed. Measures of failure rate, pass accuracy, number of perceived opponents, and achieved bonus goals are parameters that differ significantly among the two athlete groups. 
These findings show that VR technology is applicable for the assessment of athletes’ motivational orientation and thus demonstrate the feasibility of virtual environments as functional game scenario-based assessment tools for athletes.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Assessment of personality traits is highly relevant in team sports in order to analyze the performance of an athlete under pressure when in competitive situations, for team-strategic decisions, to optimize command transmission, and ultimately to understand top-level performers. It further facilitates the development and application of personalized exercises, coaching to improve performance in competition, and can be considered a valuable criterion for talent scouting and development. The current state of the art method to assess personality traits in sports relies on validated questionnaires. However, these often provide non-sport-specific, subjective self-reported information and lack the ability to measure how these characteristics are reflected in context-based performance.We developed a virtual reality (VR) tool for the assessment of personality traits in team sports, in our case for soccer. An evaluation of this tool within a study with 24 subjects yielded a benchmark of its immersion through user experience and provided an objective description of athletes’ personalities based on performance indicators extracted from activity-tracking. Within the tool, we implemented two realistic virtual soccer environments to assess the motivational orientation of soccer players (i.e. action- and state-orientation) which we discerned from the gold standard questionnaire.Results show that user experience and presence of the implemented virtual environments scored significantly higher compared to benchmark measurements. Additionally, a significant difference between the two groups of action and state-oriented athletes could be observed. Measures of failure rate, pass accuracy, number of perceived opponents, and achieved bonus goals are parameters that differ significantly among the two athlete groups. 
These findings show that VR technology is applicable for the assessment of athletes’ motivational orientation and thus demonstrate the feasibility of virtual environments as functional game scenario-based assessment tools for athletes.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Assessment of personality traits is highly relevant in team sports in order to analyze the performance of an athlete under pressure when in competitive situations, for team-strategic decisions, to optimize command transmission, and ultimately to understand top-level performers. It further facilitates the development and application of personalized exercises, coaching to improve performance in competition, and can be considered a valuable criterion for talent scouting and development. The current state of the art method to assess personality traits in sports relies on validated questionnaires. However, these often provide non-sport-specific, subjective self-reported information and lack the ability to measure how these characteristics are reflected in context-based performance.We developed a virtual reality (VR) tool for the assessment of personality traits in team sports, in our case for soccer. An evaluation of this tool within a study with 24 subjects yielded a benchmark of its immersion through user experience and provided an objective description of athletes’ personalities based on performance indicators extracted from activity-tracking. Within the tool, we implemented two realistic virtual soccer environments to assess the motivational orientation of soccer players (i.e. action- and state-orientation) which we discerned from the gold standard questionnaire.Results show that user experience and presence of the implemented virtual environments scored significantly higher compared to benchmark measurements. Additionally, a significant difference between the two groups of action and state-oriented athletes could be observed. Measures of failure rate, pass accuracy, number of perceived opponents, and achieved bonus goals are parameters that differ significantly among the two athlete groups. 
These findings show that VR technology is applicable for the assessment of athletes’ motivational orientation and thus demonstrate the feasibility of virtual environments as functional game scenario-based assessment tools for athletes.",
"fno": "09090622",
"keywords": [
"Training",
"Task Analysis",
"Tools",
"Decision Making",
"Games",
"Virtual Reality",
"Human Centered Computing",
"Interactive Systems And Tools",
"Systems And Tools",
"Human Centered Computing",
"Computing Ubiquitous And Mobile Computing",
"Design And Evaluation Methods"
],
"authors": [
{
"affiliation": "Univ. of Erlangen-Nürnberg (FAU),Machine Learning & Data Analytics,Erlangen,Germany",
"fullName": "Markus Wirth",
"givenName": "Markus",
"surname": "Wirth",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Univ. of Erlangen-Nürnberg (FAU),Machine Learning & Data Analytics,Erlangen,Germany",
"fullName": "Stefan Gradl",
"givenName": "Stefan",
"surname": "Gradl",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Univ. of Erlangen-Nürnberg (FAU),Machine Learning & Data Analytics,Erlangen,Germany",
"fullName": "Wolfgang A. Mehringer",
"givenName": "Wolfgang A.",
"surname": "Mehringer",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University Rennes 2,MM2S Lab,Rennes,France",
"fullName": "Richard Kulpa",
"givenName": "Richard",
"surname": "Kulpa",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Univ. of Erlangen-Nürnberg (FAU),Machine Learning & Data Analytics,Erlangen,Germany",
"fullName": "Hannes Rupprecht",
"givenName": "Hannes",
"surname": "Rupprecht",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "FC Red Bull Salzburg Academy,Salzburg,Austria",
"fullName": "Dino Poimann",
"givenName": "Dino",
"surname": "Poimann",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Waterloo,Department of Kinesiology,Waterloo,Ontario,Canada",
"fullName": "Annemarie F. Laudanski",
"givenName": "Annemarie F.",
"surname": "Laudanski",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Univ. of Erlangen-Nürnberg (FAU),Machine Learning & Data Analytics,Erlangen,Germany",
"fullName": "Bjoern M. Eskofier",
"givenName": "Bjoern M.",
"surname": "Eskofier",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vrw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-03-01T00:00:00",
"pubType": "proceedings",
"pages": "101-108",
"year": "2020",
"issn": null,
"isbn": "978-1-7281-6532-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "09090440",
"articleId": "1jIxmMZKrSw",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09090551",
"articleId": "1jIxzA3zSuc",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/agile/2006/2562/0/25620089",
"title": "Critical Personality Traits in Successful Pair Programming",
"doi": null,
"abstractUrl": "/proceedings-article/agile/2006/25620089/12OmNBiygtm",
"parentPublication": {
"id": "proceedings/agile/2006/2562/0",
"title": "AGILE 2006",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wi/2016/4470/0/4470a288",
"title": "Knowledge-Driven Approach to Predict Personality Traits by Leveraging Social Media Data",
"doi": null,
"abstractUrl": "/proceedings-article/wi/2016/4470a288/12OmNvTjZRz",
"parentPublication": {
"id": "proceedings/wi/2016/4470/0",
"title": "2016 IEEE/WIC/ACM International Conference on Web Intelligence (WI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iscbi/2013/5066/0/5066a182",
"title": "Personality Traits Identification Using Rough Sets Based Machine Learning",
"doi": null,
"abstractUrl": "/proceedings-article/iscbi/2013/5066a182/12OmNxXCGEa",
"parentPublication": {
"id": "proceedings/iscbi/2013/5066/0",
"title": "2013 International Symposium on Computational and Business Intelligence (ISCBI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icsc/2010/4154/0/4154a134",
"title": "Automatically Assessing Personality from Speech",
"doi": null,
"abstractUrl": "/proceedings-article/icsc/2010/4154a134/12OmNyQYtpW",
"parentPublication": {
"id": "proceedings/icsc/2010/4154/0",
"title": "2010 IEEE Fourth International Conference on Semantic Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/05/08651471",
"title": "You or Me? Personality Traits Predict Sacrificial Decisions in an Accident Situation",
"doi": null,
"abstractUrl": "/journal/tg/2019/05/08651471/17WX58zhpC0",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mipr/2019/1198/0/119800a303",
"title": "Automated Athlete Haptic Training System for Soccer Sprinting",
"doi": null,
"abstractUrl": "/proceedings-article/mipr/2019/119800a303/19wB3VmXAnS",
"parentPublication": {
"id": "proceedings/mipr/2019/1198/0",
"title": "2019 IEEE Conference on Multimedia Information Processing and Retrieval (MIPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/co/2019/07/08747565",
"title": "Predicting Personality Traits From Physical Activity Intensity",
"doi": null,
"abstractUrl": "/magazine/co/2019/07/08747565/1bcFjqwvpLO",
"parentPublication": {
"id": "mags/co",
"title": "Computer",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ts/2021/11/08935389",
"title": "Effects of Personality Traits on Pull Request Acceptance",
"doi": null,
"abstractUrl": "/journal/ts/2021/11/08935389/1fPUhns9Zlu",
"parentPublication": {
"id": "trans/ts",
"title": "IEEE Transactions on Software Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/avss/2019/0990/0/08909839",
"title": "Personality Traits Classification on Twitter",
"doi": null,
"abstractUrl": "/proceedings-article/avss/2019/08909839/1febNVOlteg",
"parentPublication": {
"id": "proceedings/avss/2019/0990/0",
"title": "2019 16th IEEE International Conference on Advanced Video and Signal Based Surveillance (AVSS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sbgames/2021/0189/0/018900a117",
"title": "Perception of Personality Traits in Crowds of Virtual Humans",
"doi": null,
"abstractUrl": "/proceedings-article/sbgames/2021/018900a117/1zusqJ6D3Ne",
"parentPublication": {
"id": "proceedings/sbgames/2021/0189/0",
"title": "2021 20th Brazilian Symposium on Computer Games and Digital Entertainment (SBGames)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1pcSFPwAoww",
"title": "2020 IEEE/ACM 42nd International Conference on Software Engineering: Companion Proceedings (ICSE-Companion)",
"acronym": "icse-companion",
"groupId": "1002125",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1pcSMstuKZy",
"doi": null,
"title": "Real-world Ethics for Self-Driving Cars",
"normalizedTitle": "Real-world Ethics for Self-Driving Cars",
"abstract": "Ethical and social problems of the emerging technology of self-driving cars can best be addressed through an applied engineering ethical approach. However, currently social and ethical problems are typically being presented in terms of an idealized unsolvable decision-making problem, the so-called Trolley Problem. Instead, we propose that ethical analysis should focus on the study of ethics of complex real-world engineering problems. As software plays a crucial role in the control of self-driving cars, software engineering solutions should handle actual ethical and social considerations. We take a closer look at the regulative instruments, standards, design, and implementations of components, systems, and services and we present practical social and ethical challenges that must be met in the ecology of the socio-technological system of self-driving cars which implies novel expectations for software engineering in the automotive industry.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Ethical and social problems of the emerging technology of self-driving cars can best be addressed through an applied engineering ethical approach. However, currently social and ethical problems are typically being presented in terms of an idealized unsolvable decision-making problem, the so-called Trolley Problem. Instead, we propose that ethical analysis should focus on the study of ethics of complex real-world engineering problems. As software plays a crucial role in the control of self-driving cars, software engineering solutions should handle actual ethical and social considerations. We take a closer look at the regulative instruments, standards, design, and implementations of components, systems, and services and we present practical social and ethical challenges that must be met in the ecology of the socio-technological system of self-driving cars which implies novel expectations for software engineering in the automotive industry.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Ethical and social problems of the emerging technology of self-driving cars can best be addressed through an applied engineering ethical approach. However, currently social and ethical problems are typically being presented in terms of an idealized unsolvable decision-making problem, the so-called Trolley Problem. Instead, we propose that ethical analysis should focus on the study of ethics of complex real-world engineering problems. As software plays a crucial role in the control of self-driving cars, software engineering solutions should handle actual ethical and social considerations. We take a closer look at the regulative instruments, standards, design, and implementations of components, systems, and services and we present practical social and ethical challenges that must be met in the ecology of the socio-technological system of self-driving cars which implies novel expectations for software engineering in the automotive industry.",
"fno": "712200a328",
"keywords": [
"Decision Making",
"Ethical Aspects",
"Mobile Robots",
"Philosophical Aspects",
"Software Agents",
"Software Engineering",
"World Ethics",
"Self Driving Cars",
"Ethical Problems",
"Social Problems",
"Idealized Unsolvable Decision Making Problem",
"Trolley Problem",
"Ethical Analysis",
"Software Engineering Solutions",
"Social Considerations",
"Ethical Challenges",
"Real World Ethics",
"Engineering Ethical Approach",
"Ethics",
"Autonomous Automobiles",
"Automobiles",
"Software Engineering",
"Stakeholders",
"Decision Making",
"Accidents",
"Self Driving Cars",
"Autonomous Cars",
"Trolley Problem",
"Decision Making",
"Ethics",
"Social Aspects",
"Software Engineering",
"Challenges"
],
"authors": [
{
"affiliation": "Mälardalen University,Västerås,Sweden",
"fullName": "Tobias Holstein",
"givenName": "Tobias",
"surname": "Holstein",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Mälardalen University,Västerås,Sweden",
"fullName": "Gordana Dodig-Crnkovic",
"givenName": "Gordana",
"surname": "Dodig-Crnkovic",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Chalmers University of Technology | University of Gothenburg,Gothenburg,Sweden",
"fullName": "Patrizio Pelliccione",
"givenName": "Patrizio",
"surname": "Pelliccione",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icse-companion",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-10-01T00:00:00",
"pubType": "proceedings",
"pages": "328-329",
"year": "2020",
"issn": "2574-1926",
"isbn": "978-1-4503-7122-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "712200a326",
"articleId": "1pcSGZiPvO0",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "712200a331",
"articleId": "1pcSIlDGkSs",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icse-companion/2018/5663/0/566301a228",
"title": "Poster: Ethics-Aware Software Engineering",
"doi": null,
"abstractUrl": "/proceedings-article/icse-companion/2018/566301a228/13bd1tl2omc",
"parentPublication": {
"id": "proceedings/icse-companion/2018/5663/0",
"title": "2018 IEEE/ACM 40th International Conference on Software Engineering: Companion Proceedings (ICSE-Companion)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fairware/2018/5746/0/574601a015",
"title": "A Roadmap for Ethics-Aware Software Engineering",
"doi": null,
"abstractUrl": "/proceedings-article/fairware/2018/574601a015/13l5NWNrH6M",
"parentPublication": {
"id": "proceedings/fairware/2018/5746/0",
"title": "2018 IEEE/ACM International Workshop on Software Fairness (FairWare)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fairware/2018/5746/0/574601a032",
"title": "Avoiding the Intrinsic Unfairness of the Trolley Problem",
"doi": null,
"abstractUrl": "/proceedings-article/fairware/2018/574601a032/13l5NYHwPdv",
"parentPublication": {
"id": "proceedings/fairware/2018/5746/0",
"title": "2018 IEEE/ACM International Workshop on Software Fairness (FairWare)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sefaias/2018/5739/0/573901a035",
"title": "Deep Learning for Self-Driving Cars: Chances and Challenges",
"doi": null,
"abstractUrl": "/proceedings-article/sefaias/2018/573901a035/13l5O9MauRB",
"parentPublication": {
"id": "proceedings/sefaias/2018/5739/0",
"title": "2018 IEEE/ACM 1st International Workshop on Software Engineering for AI in Autonomous Systems (SEFAIAS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/re/2018/7418/0/741800a382",
"title": "Software Transparency as a Key Requirement for Self-Driving Cars",
"doi": null,
"abstractUrl": "/proceedings-article/re/2018/741800a382/17D45WaTkcU",
"parentPublication": {
"id": "proceedings/re/2018/7418/0",
"title": "2018 IEEE 26th International Requirements Engineering Conference (RE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icse-companion/2019/1764/0/176400a290",
"title": "Automatically Reconstructing Car Crashes from Police Reports for Testing Self-Driving Cars",
"doi": null,
"abstractUrl": "/proceedings-article/icse-companion/2019/176400a290/1cJ7nMlYJLW",
"parentPublication": {
"id": "proceedings/icse-companion/2019/1764/0",
"title": "2019 IEEE/ACM 41st International Conference on Software Engineering: Companion Proceedings (ICSE-Companion)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/conisoft/2020/8450/0/845000a174",
"title": "Blockchain Software System Proposal Applied to Electric Self-driving Cars Charging Stations: A TSP Academic Project",
"doi": null,
"abstractUrl": "/proceedings-article/conisoft/2020/845000a174/1q0FQeeGFNK",
"parentPublication": {
"id": "proceedings/conisoft/2020/8450/0",
"title": "2020 8th International Conference in Software Engineering Research and Innovation (CONISOFT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pmis/2021/3872/0/387200a292",
"title": "Tort Liability for Damage Caused by Self-driving Cars",
"doi": null,
"abstractUrl": "/proceedings-article/pmis/2021/387200a292/1t2mYKPDYEE",
"parentPublication": {
"id": "proceedings/pmis/2021/3872/0",
"title": "2021 International Conference on Public Management and Intelligent Society (PMIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/csci/2020/7624/0/762400a655",
"title": "Self Driving Cars: All You Need to Know",
"doi": null,
"abstractUrl": "/proceedings-article/csci/2020/762400a655/1uGYOT1cbDi",
"parentPublication": {
"id": "proceedings/csci/2020/7624/0",
"title": "2020 International Conference on Computational Science and Computational Intelligence (CSCI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/metrocad/2021/4594/0/459400a062",
"title": "A Survey on Simulators for Testing Self-Driving Cars",
"doi": null,
"abstractUrl": "/proceedings-article/metrocad/2021/459400a062/1vNjOxiNoK4",
"parentPublication": {
"id": "proceedings/metrocad/2021/4594/0",
"title": "2021 Fourth International Conference on Connected and Autonomous Driving (MetroCAD)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNylborE",
"title": "2018 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"acronym": "wacv",
"groupId": "1000040",
"volume": "0",
"displayVolume": "0",
"year": "2018",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNy68EOY",
"doi": "10.1109/WACV.2018.00222",
"title": "Delay Compensation for Actuated Stereoscopic 360 Degree Telepresence Systems with Probabilistic Head Motion Prediction",
"normalizedTitle": "Delay Compensation for Actuated Stereoscopic 360 Degree Telepresence Systems with Probabilistic Head Motion Prediction",
"abstract": "Communication delay is a major challenge for the acceptance of telepresence applications. It is particularly critical when the user experiences the remote environment via a Head-Mounted-Display. The lag between head motion and display response results in motion sickness, indisposition, and, at worst, abortion of the telepresence session. In this paper, we propose a delay compensation approach for 3D 360° telepresence systems realized with a mechanically actuated stereoscopic vision system. We further introduce a novel metric to evaluate the achievable level of delay compensation. We investigate state-of-the-art head motion predictors and propose a novel probabilistic prediction paradigm, which can half the mean prediction error and improve the level of delay compensation by up to 26%. The general validity of our approach is shown by means of two independent real head motion datasets. The experimental results verify that average compensation rates of more than 99% can be achieved for communication delays between 100-500ms.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Communication delay is a major challenge for the acceptance of telepresence applications. It is particularly critical when the user experiences the remote environment via a Head-Mounted-Display. The lag between head motion and display response results in motion sickness, indisposition, and, at worst, abortion of the telepresence session. In this paper, we propose a delay compensation approach for 3D 360° telepresence systems realized with a mechanically actuated stereoscopic vision system. We further introduce a novel metric to evaluate the achievable level of delay compensation. We investigate state-of-the-art head motion predictors and propose a novel probabilistic prediction paradigm, which can half the mean prediction error and improve the level of delay compensation by up to 26%. The general validity of our approach is shown by means of two independent real head motion datasets. The experimental results verify that average compensation rates of more than 99% can be achieved for communication delays between 100-500ms.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Communication delay is a major challenge for the acceptance of telepresence applications. It is particularly critical when the user experiences the remote environment via a Head-Mounted-Display. The lag between head motion and display response results in motion sickness, indisposition, and, at worst, abortion of the telepresence session. In this paper, we propose a delay compensation approach for 3D 360° telepresence systems realized with a mechanically actuated stereoscopic vision system. We further introduce a novel metric to evaluate the achievable level of delay compensation. We investigate state-of-the-art head motion predictors and propose a novel probabilistic prediction paradigm, which can half the mean prediction error and improve the level of delay compensation by up to 26%. The general validity of our approach is shown by means of two independent real head motion datasets. The experimental results verify that average compensation rates of more than 99% can be achieved for communication delays between 100-500ms.",
"fno": "488601c010",
"keywords": [
"Compensation",
"Delays",
"Helmet Mounted Displays",
"Probability",
"Stereo Image Processing",
"Average Compensation Rates",
"Communication Delay",
"Actuated Stereoscopic 360 Degree Telepresence Systems",
"Probabilistic Head Motion Prediction",
"Telepresence Applications",
"Head Mounted Display",
"Display Response Results",
"Motion Sickness",
"Telepresence Session",
"Delay Compensation Approach",
"3 D 360 Telepresence Systems",
"Mechanically Actuated Stereoscopic Vision System",
"Mean Prediction Error",
"Probabilistic Prediction Paradigm",
"Delays",
"Head",
"Cameras",
"Telepresence",
"Three Dimensional Displays",
"Resists",
"Stereo Image Processing"
],
"authors": [
{
"affiliation": null,
"fullName": "Tamay Aykut",
"givenName": "Tamay",
"surname": "Aykut",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Christoph Burgmair",
"givenName": "Christoph",
"surname": "Burgmair",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Mojtaba Karimi",
"givenName": "Mojtaba",
"surname": "Karimi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Jingyi Xu",
"givenName": "Jingyi",
"surname": "Xu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Eckehard Steinbach",
"givenName": "Eckehard",
"surname": "Steinbach",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "wacv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2018-03-01T00:00:00",
"pubType": "proceedings",
"pages": "2010-2018",
"year": "2018",
"issn": null,
"isbn": "978-1-5386-4886-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "488601c001",
"articleId": "12OmNzTH0XN",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "488601c019",
"articleId": "12OmNyS6RGf",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2018/3365/0/08446471",
"title": "Towards Mobile 3D Telepresence Using Head-Worn Devices and Dual-Purpose Screens",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446471/13bd1AITn9Y",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/03/07792698",
"title": "JackIn Head: Immersive Visual Telepresence System with Omnidirectional Wearable Camera",
"doi": null,
"abstractUrl": "/journal/tg/2017/03/07792698/13rRUx0geq0",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/04/08314105",
"title": "Detection Thresholds for Rotation and Translation Gains in 360° Video-Based Telepresence Systems",
"doi": null,
"abstractUrl": "/journal/tg/2018/04/08314105/13rRUxASubD",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/05/08642375",
"title": "Immersive Telepresence and Remote Collaboration using Mobile and Wearable Devices",
"doi": null,
"abstractUrl": "/journal/tg/2019/05/08642375/17PYEk3WIil",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/05/09714051",
"title": "Augmenting Immersive Telepresence Experience with a Virtual Body",
"doi": null,
"abstractUrl": "/journal/tg/2022/05/09714051/1B0Y0I5xWyk",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/crv/2022/9774/0/977400a167",
"title": "Attention based Occlusion Removal for Hybrid Telepresence Systems",
"doi": null,
"abstractUrl": "/proceedings-article/crv/2022/977400a167/1GeCvG8dPna",
"parentPublication": {
"id": "proceedings/crv/2022/9774/0",
"title": "2022 19th Conference on Robots and Vision (CRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/11/09873991",
"title": "Predict-and-Drive: Avatar Motion Adaption in Room-Scale Augmented Reality Telepresence with Heterogeneous Spaces",
"doi": null,
"abstractUrl": "/journal/tg/2022/11/09873991/1GjwGcGrRmg",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797876",
"title": "Live Stereoscopic 3D Image with Constant Capture Direction of 360° Cameras for High-Quality Visual Telepresence",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797876/1cJ0HMTqjOU",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798011",
"title": "Hybrid Camera System for Telepresence with Foveated Imaging",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798011/1cJ0KGEU288",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797819",
"title": "Localizing Teleoperator Gaze in 360° Hosted Telepresence",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797819/1cJ1d3MdShi",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1cI6akLvAuQ",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2019",
"__typename": "ProceedingType"
},
"article": {
"id": "1cJ0KGEU288",
"doi": "10.1109/VR.2019.8798011",
"title": "Hybrid Camera System for Telepresence with Foveated Imaging",
"normalizedTitle": "Hybrid Camera System for Telepresence with Foveated Imaging",
"abstract": "To improve the telepresence sense of a local HMD user, a high-resolution view of the remote environment is necessary. However, current commodity omnidirectional camera could not support enough resolution for the human eye. Using a higher resolution omnidirectional camera is also infeasible because it will increase the streaming bandwidth. We propose a hybrid camera system that can convey a higher resolution for the HMD user viewport ROI region in available bandwidth range. The hybrid camera consists of a pair of omnidirectional and PTZ camera which is close to each other. The HMD user head orientation controls the PTZ camera orientation. The HMD user also controls the zooming level of the PTZ camera to achieve higher resolution up to PTZ camera maximum optical zoom level. The remote environment view obtained from each camera is streamed to the HMD user and then stitched into one combined view. This combined view simulates human visual system (HVS) phenomenon called foveation, where only a small part in the human view is in high resolution, and the rests are in low resolution.",
"abstracts": [
{
"abstractType": "Regular",
"content": "To improve the telepresence sense of a local HMD user, a high-resolution view of the remote environment is necessary. However, current commodity omnidirectional camera could not support enough resolution for the human eye. Using a higher resolution omnidirectional camera is also infeasible because it will increase the streaming bandwidth. We propose a hybrid camera system that can convey a higher resolution for the HMD user viewport ROI region in available bandwidth range. The hybrid camera consists of a pair of omnidirectional and PTZ camera which is close to each other. The HMD user head orientation controls the PTZ camera orientation. The HMD user also controls the zooming level of the PTZ camera to achieve higher resolution up to PTZ camera maximum optical zoom level. The remote environment view obtained from each camera is streamed to the HMD user and then stitched into one combined view. This combined view simulates human visual system (HVS) phenomenon called foveation, where only a small part in the human view is in high resolution, and the rests are in low resolution.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "To improve the telepresence sense of a local HMD user, a high-resolution view of the remote environment is necessary. However, current commodity omnidirectional camera could not support enough resolution for the human eye. Using a higher resolution omnidirectional camera is also infeasible because it will increase the streaming bandwidth. We propose a hybrid camera system that can convey a higher resolution for the HMD user viewport ROI region in available bandwidth range. The hybrid camera consists of a pair of omnidirectional and PTZ camera which is close to each other. The HMD user head orientation controls the PTZ camera orientation. The HMD user also controls the zooming level of the PTZ camera to achieve higher resolution up to PTZ camera maximum optical zoom level. The remote environment view obtained from each camera is streamed to the HMD user and then stitched into one combined view. This combined view simulates human visual system (HVS) phenomenon called foveation, where only a small part in the human view is in high resolution, and the rests are in low resolution.",
"fno": "08798011",
"keywords": [
"Biomimetics",
"Cameras",
"Helmet Mounted Displays",
"Image Resolution",
"Image Sensors",
"Remote Environment View",
"Hybrid Camera System",
"Foveated Imaging",
"Local HMD User",
"Human Visual System Phenomenon",
"Omnidirectional Camera",
"PTZ Camera",
"Optical Zoom Level",
"Human Visual System",
"Foveation Phenomenon",
"Cameras",
"Resists",
"Telepresence",
"Bandwidth",
"Visualization",
"Servomotors",
"Integrated Optics",
"Telepresence Hybrid Camera Foveation HMD"
],
"authors": [
{
"affiliation": "Korea Institute of Science and Technology, University of Science and Technology, Korea",
"fullName": "Muhammad Firdaus Syawaludin",
"givenName": "Muhammad Firdaus",
"surname": "Syawaludin",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Korea Institute of Science and Technology, University of Science and Technology, Korea",
"fullName": "Chanho Kim",
"givenName": "Chanho",
"surname": "Kim",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Korea Institute of Science and Technology",
"fullName": "Jae-In Hwana",
"givenName": "Jae-In",
"surname": "Hwana",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2019-03-01T00:00:00",
"pubType": "proceedings",
"pages": "1173-1174",
"year": "2019",
"issn": null,
"isbn": "978-1-7281-1377-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08797956",
"articleId": "1cJ17BLEK88",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08798351",
"articleId": "1cJ0W5YaAtq",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/fg/2004/2122/0/21220427",
"title": "Hierarchical Face Tracking by Using PTZ camera",
"doi": null,
"abstractUrl": "/proceedings-article/fg/2004/21220427/12OmNCwlag3",
"parentPublication": {
"id": "proceedings/fg/2004/2122/0",
"title": "Sixth IEEE International Conference on Automatic Face and Gesture Recognition, 2004. Proceedings.",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/visapp/2014/8133/3/07295109",
"title": "Control of a PTZ camera in a hybrid vision system",
"doi": null,
"abstractUrl": "/proceedings-article/visapp/2014/07295109/12OmNqNos76",
"parentPublication": {
"id": "proceedings/visapp/2014/8133/2",
"title": "2014 International Conference on Computer Vision Theory and Applications (VISAPP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/avss/2006/2688/0/26880046",
"title": "Fusion of Omnidirectional and PTZ Cameras for Accurate Cooperative Tracking",
"doi": null,
"abstractUrl": "/proceedings-article/avss/2006/26880046/12OmNxX3uJG",
"parentPublication": {
"id": "proceedings/avss/2006/2688/0",
"title": "2006 IEEE International Conference on Video and Signal Based Surveillance",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/05/09714051",
"title": "Augmenting Immersive Telepresence Experience with a Virtual Body",
"doi": null,
"abstractUrl": "/journal/tg/2022/05/09714051/1B0Y0I5xWyk",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/crv/2022/9774/0/977400a167",
"title": "Attention based Occlusion Removal for Hybrid Telepresence Systems",
"doi": null,
"abstractUrl": "/proceedings-article/crv/2022/977400a167/1GeCvG8dPna",
"parentPublication": {
"id": "proceedings/crv/2022/9774/0",
"title": "2022 19th Conference on Robots and Vision (CRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2022/9062/0/09956197",
"title": "On Depth Error from Spherical Camera Calibration within Omnidirectional Stereo Vision",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2022/09956197/1IHqnsyNjbO",
"parentPublication": {
"id": "proceedings/icpr/2022/9062/0",
"title": "2022 26th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798156",
"title": "Self Bird's Eye View with Omnidirectional Camera on HMD",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798156/1cJ0GmpY7a8",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2020/5608/0/09089437",
"title": "The Effect of a Foveated Field-of-view Restrictor on VR Sickness",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2020/09089437/1jIxcfT0Wt2",
"parentPublication": {
"id": "proceedings/vr/2020/5608/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090433",
"title": "Virtual Tour: An Immersive Low Cost Telepresence System",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090433/1jIxrSY8cZa",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2021/1298/0/129800a451",
"title": "The Owl: Immersive Telepresence Communication for Hybrid Conferences",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2021/129800a451/1yeQG4fi6Dm",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2021/1298/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1pystLSz19C",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"acronym": "ismar",
"groupId": "1000465",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1pysxMcaE2Q",
"doi": "10.1109/ISMAR50242.2020.00080",
"title": "Evaluating Remote Virtual Hands Models on Social Presence in Hand-based 3D Remote Collaboration",
"normalizedTitle": "Evaluating Remote Virtual Hands Models on Social Presence in Hand-based 3D Remote Collaboration",
"abstract": "This study investigates the effects of a virtual hand representation on the user experience including social presence during hand-based 3D remote collaboration. Although a remote hand appearance is a critical parts of a hand-based telepresence, it has been rarely studied in comparison to studies on the self-embodiment of virtual hands in a 3D environment. Thus, we conducted a user study comparing the three virtual hands models (Skeleton, Low Polygon and Realistic) while performing a remote collaborative task based on the American Sign Language (ASL) using both Augmented Reality (AR) and Virtual Reality (VR) environments. We found that the realistic type was perceived as the most sense of being together, human-like, and trustable representation. The low polygon model could also convey a clear sign and moderate level of social presence. Although the system was configured asymmetrically in AR and VR, little difference in perception was found except for the participant's mental load and message understanding. We then discuss the results and suggest design implications for future hand-based 3D telepresence systems.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This study investigates the effects of a virtual hand representation on the user experience including social presence during hand-based 3D remote collaboration. Although a remote hand appearance is a critical parts of a hand-based telepresence, it has been rarely studied in comparison to studies on the self-embodiment of virtual hands in a 3D environment. Thus, we conducted a user study comparing the three virtual hands models (Skeleton, Low Polygon and Realistic) while performing a remote collaborative task based on the American Sign Language (ASL) using both Augmented Reality (AR) and Virtual Reality (VR) environments. We found that the realistic type was perceived as the most sense of being together, human-like, and trustable representation. The low polygon model could also convey a clear sign and moderate level of social presence. Although the system was configured asymmetrically in AR and VR, little difference in perception was found except for the participant's mental load and message understanding. We then discuss the results and suggest design implications for future hand-based 3D telepresence systems.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This study investigates the effects of a virtual hand representation on the user experience including social presence during hand-based 3D remote collaboration. Although a remote hand appearance is a critical parts of a hand-based telepresence, it has been rarely studied in comparison to studies on the self-embodiment of virtual hands in a 3D environment. Thus, we conducted a user study comparing the three virtual hands models (Skeleton, Low Polygon and Realistic) while performing a remote collaborative task based on the American Sign Language (ASL) using both Augmented Reality (AR) and Virtual Reality (VR) environments. We found that the realistic type was perceived as the most sense of being together, human-like, and trustable representation. The low polygon model could also convey a clear sign and moderate level of social presence. Although the system was configured asymmetrically in AR and VR, little difference in perception was found except for the participant's mental load and message understanding. We then discuss the results and suggest design implications for future hand-based 3D telepresence systems.",
"fno": "850800a520",
"keywords": [
"Groupware",
"Image Representation",
"Sign Language Recognition",
"Solid Modelling",
"Virtual Reality",
"Social Presence",
"Hand Based 3 D Remote Collaboration",
"Virtual Hand Representation",
"Remote Hand Appearance",
"Hand Based Telepresence",
"Remote Collaborative Task",
"Virtual Reality Environments",
"Remote Virtual Hands Models",
"American Sign Language",
"Augmented Reality",
"Low Polygon Model",
"Hand Based 3 D Telepresence Systems",
"Solid Modeling",
"Three Dimensional Displays",
"Telepresence",
"Collaboration",
"User Experience",
"Augmented Reality",
"Load Modeling",
"Human Centered Computing — Human Computer Interaction HCI — Interaction Paradigms — Mixed Augmented Reality",
"Human Centered Computing — Human Computer Interaction HCI — HCI Design And Evaluation Methods — User Studies"
],
"authors": [
{
"affiliation": "KAIST UVR Lab",
"fullName": "Boram Yoon",
"givenName": "Boram",
"surname": "Yoon",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "KAIST UVR Lab",
"fullName": "Hyung-il Kim",
"givenName": "Hyung-il",
"surname": "Kim",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "KAIST UVR Lab",
"fullName": "Seo Young Oh",
"givenName": "Seo Young",
"surname": "Oh",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "KAIST UVR Lab",
"fullName": "Woontack Woo",
"givenName": "Woontack",
"surname": "Woo",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-11-01T00:00:00",
"pubType": "proceedings",
"pages": "520-532",
"year": "2020",
"issn": "1554-7868",
"isbn": "978-1-7281-8508-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "850800a509",
"articleId": "1pysyoGwmze",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "850800a533",
"articleId": "1pysvpmrClG",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "trans/tg/2019/05/08642375",
"title": "Immersive Telepresence and Remote Collaboration using Mobile and Wearable Devices",
"doi": null,
"abstractUrl": "/journal/tg/2019/05/08642375/17PYEk3WIil",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a524",
"title": "Synthesizing Novel Spaces for Remote Telepresence Experiences",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a524/1J7WaFB7xNC",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a718",
"title": "How Far is It? Distance Estimation and Reproduction Through a Double 3 Telepresence Robot",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a718/1J7Wq3RYsx2",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797719",
"title": "The Effect of Avatar Appearance on Social Presence in an Augmented Reality Remote Collaboration",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797719/1cJ1dVsXQDS",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798152",
"title": "The Influence of Size in Augmented Reality Telepresence Avatars",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798152/1cJ1djEUmv6",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090552",
"title": "[DC] Quality, Presence, and Emotions in Virtual Reality Communications",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090552/1jIxiyFygXS",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090433",
"title": "Virtual Tour: An Immersive Low Cost Telepresence System",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090433/1jIxrSY8cZa",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/12/09206143",
"title": "Spatial Presence, Performance, and Behavior between Real, Remote, and Virtual Immersive Environments",
"doi": null,
"abstractUrl": "/journal/tg/2020/12/09206143/1npxM6fDN7i",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2021/1298/0/129800a346",
"title": "Tactile Telepresence for Isolated Patients",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2021/129800a346/1yeQGRM0HLi",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2021/1298/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNAR1b0Z",
"title": "2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"acronym": "cvprw",
"groupId": "1001809",
"volume": "0",
"displayVolume": "0",
"year": "2017",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNy7yEfW",
"doi": "10.1109/CVPRW.2017.209",
"title": "Optical Acceleration for Motion Description in Videos",
"normalizedTitle": "Optical Acceleration for Motion Description in Videos",
"abstract": "Modern techniques for describing motion in videos are centred around velocity descriptors based on optical flow. Realizing that acceleration is as important as velocity for describing motion information, in this paper first we propose two different algorithms to compute optical acceleration. Delving deeper into the concept of optical acceleration, we use two descriptors: histogram of optical acceleration (HOA) and histogram of spatial gradient of acceleration (HSGA), to effectively encode the motion information. To assess the effectiveness of these descriptors for motion encoding, we applied it for human action recognition and abnormal event detection in videos. In fact, we used acceleration descriptors in conjunction with velocity descriptors to get a better description of motion in videos. Our experiments reveal that acceleration descriptors could provide additional information that velocity descriptors missed and hence combining them results in a superior motion descriptor.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Modern techniques for describing motion in videos are centred around velocity descriptors based on optical flow. Realizing that acceleration is as important as velocity for describing motion information, in this paper first we propose two different algorithms to compute optical acceleration. Delving deeper into the concept of optical acceleration, we use two descriptors: histogram of optical acceleration (HOA) and histogram of spatial gradient of acceleration (HSGA), to effectively encode the motion information. To assess the effectiveness of these descriptors for motion encoding, we applied it for human action recognition and abnormal event detection in videos. In fact, we used acceleration descriptors in conjunction with velocity descriptors to get a better description of motion in videos. Our experiments reveal that acceleration descriptors could provide additional information that velocity descriptors missed and hence combining them results in a superior motion descriptor.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Modern techniques for describing motion in videos are centred around velocity descriptors based on optical flow. Realizing that acceleration is as important as velocity for describing motion information, in this paper first we propose two different algorithms to compute optical acceleration. Delving deeper into the concept of optical acceleration, we use two descriptors: histogram of optical acceleration (HOA) and histogram of spatial gradient of acceleration (HSGA), to effectively encode the motion information. To assess the effectiveness of these descriptors for motion encoding, we applied it for human action recognition and abnormal event detection in videos. In fact, we used acceleration descriptors in conjunction with velocity descriptors to get a better description of motion in videos. Our experiments reveal that acceleration descriptors could provide additional information that velocity descriptors missed and hence combining them results in a superior motion descriptor.",
"fno": "0733b642",
"keywords": [
"Gradient Methods",
"Motion Estimation",
"Video Signal Processing",
"Optical Acceleration",
"Video Motion Description",
"Velocity Descriptors",
"Optical Flow",
"Motion Information",
"Histogram Of Optical Acceleration",
"HOA",
"Histogram Of Spatial Gradient Of Acceleration",
"Motion Encoding",
"Acceleration",
"Optical Imaging",
"Videos",
"Histograms",
"Biomedical Optical Imaging",
"Adaptive Optics",
"High Speed Optical Techniques"
],
"authors": [
{
"affiliation": "Coll. of Eng., Trivandrum, India",
"fullName": "Anitha Edison",
"givenName": "Anitha",
"surname": "Edison",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Coll. of Eng., Trivandrum, India",
"fullName": "C.V. Jiji",
"givenName": "C.V.",
"surname": "Jiji",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvprw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2017-07-01T00:00:00",
"pubType": "proceedings",
"pages": "1642-1650",
"year": "2017",
"issn": "2160-7516",
"isbn": "978-1-5386-0733-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "0733b632",
"articleId": "12OmNqJ8tdD",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "0733b651",
"articleId": "12OmNqFrGIR",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cvpr/2017/0457/0/0457b406",
"title": "Slow Flow: Exploiting High-Speed Cameras for Accurate and Diverse Optical Flow Reference Data",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2017/0457b406/12OmNAnMut3",
"parentPublication": {
"id": "proceedings/cvpr/2017/0457/0",
"title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2017/6647/0/07892293",
"title": "Measurement of 3D-velocity by high-frame-rate optical mouse sensors to extrapolate 3D position captured by a low-frame-rate stereo camera",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892293/12OmNrJAe6h",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sitis/2017/4283/0/4283a289",
"title": "Analysing Acceleration for Motion Analysis",
"doi": null,
"abstractUrl": "/proceedings-article/sitis/2017/4283a289/12OmNvD8RCX",
"parentPublication": {
"id": "proceedings/sitis/2017/4283/0",
"title": "2017 13th International Conference on Signal-Image Technology & Internet-Based Systems (SITIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/apcase/2015/7588/0/7588a173",
"title": "Optical Flow as a Tool for Cardiac Motion Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/apcase/2015/7588a173/12OmNweBUHh",
"parentPublication": {
"id": "proceedings/apcase/2015/7588/0",
"title": "2015 Asia-Pacific Conference on Computer Aided System Engineering (APCASE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sibgrapi/2012/4829/0/4829a298",
"title": "A Tensor Based on Optical Flow for Global Description of Motion in Videos",
"doi": null,
"abstractUrl": "/proceedings-article/sibgrapi/2012/4829a298/12OmNxxdZFF",
"parentPublication": {
"id": "proceedings/sibgrapi/2012/4829/0",
"title": "2012 25th SIBGRAPI Conference on Graphics, Patterns and Images",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2016/5407/0/5407a212",
"title": "Optical Flow for Rigid Multi-Motion Scenes",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2016/5407a212/12OmNxzMnZg",
"parentPublication": {
"id": "proceedings/3dv/2016/5407/0",
"title": "2016 Fourth International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmeae/2016/5290/0/07796023",
"title": "Calculation of Optical Flow Using Color Models to Improve the Accuracy of the Identification of Objects in Motion",
"doi": null,
"abstractUrl": "/proceedings-article/icmeae/2016/07796023/12OmNyRxFnv",
"parentPublication": {
"id": "proceedings/icmeae/2016/5290/0",
"title": "2016 International Conference on Mechatronics, Electronics and Automotive Engineering (ICMEAE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/enc/2013/5087/0/5087a105",
"title": "Description of Motion of Segmented Regions",
"doi": null,
"abstractUrl": "/proceedings-article/enc/2013/5087a105/12OmNyUWQYo",
"parentPublication": {
"id": "proceedings/enc/2013/5087/0",
"title": "2013 Mexican International Conference on Computer Science (ENC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2011/03/ttp2011030500",
"title": "Large Displacement Optical Flow: Descriptor Matching in Variational Motion Estimation",
"doi": null,
"abstractUrl": "/journal/tp/2011/03/ttp2011030500/13rRUyYSWm8",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dsd/2018/7377/0/737700a672",
"title": "On Designing All-Optical Multipliers Using Mach-Zender Interferometers",
"doi": null,
"abstractUrl": "/proceedings-article/dsd/2018/737700a672/17D45Xq6dA4",
"parentPublication": {
"id": "proceedings/dsd/2018/7377/0",
"title": "2018 21st Euromicro Conference on Digital System Design (DSD)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNBSBk7e",
"title": "2015 International Conference on Cyber-Enabled Distributed Computing and Knowledge Discovery (CyberC)",
"acronym": "cyberc",
"groupId": "1002974",
"volume": "0",
"displayVolume": "0",
"year": "2015",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNySosMj",
"doi": "10.1109/CyberC.2015.19",
"title": "Video Minor Motion Magnification System for Telemedicine",
"normalizedTitle": "Video Minor Motion Magnification System for Telemedicine",
"abstract": "Telemedicine system requires video support in which much key information of patients is included such as pulse motion and skin color changes. But those motions are too small to be well perceived by the naked eye. In this paper we proposed a video minor motion magnification system that can magnify the minor motion of pulse of neck and wrist and magnify the color changes of human skin in videos of patient. The experiments show that our system can magnify the pulse motion and skin color changes to the extent that they can be obviously perceived by the naked eye. So that experts can fetch more information from the video of patients through the proposed video minor motion magnification system.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Telemedicine system requires video support in which much key information of patients is included such as pulse motion and skin color changes. But those motions are too small to be well perceived by the naked eye. In this paper we proposed a video minor motion magnification system that can magnify the minor motion of pulse of neck and wrist and magnify the color changes of human skin in videos of patient. The experiments show that our system can magnify the pulse motion and skin color changes to the extent that they can be obviously perceived by the naked eye. So that experts can fetch more information from the video of patients through the proposed video minor motion magnification system.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Telemedicine system requires video support in which much key information of patients is included such as pulse motion and skin color changes. But those motions are too small to be well perceived by the naked eye. In this paper we proposed a video minor motion magnification system that can magnify the minor motion of pulse of neck and wrist and magnify the color changes of human skin in videos of patient. The experiments show that our system can magnify the pulse motion and skin color changes to the extent that they can be obviously perceived by the naked eye. So that experts can fetch more information from the video of patients through the proposed video minor motion magnification system.",
"fno": "9200a350",
"keywords": [
"Image Color Analysis",
"Spatiotemporal Phenomena",
"Skin",
"Telemedicine",
"Optical Filters",
"Wrist",
"Optical Imaging",
"Color Magnification",
"Telemedicine",
"Video Motion Magnification",
"Steerable Pyramid"
],
"authors": [
{
"affiliation": null,
"fullName": "Yi Zhang",
"givenName": "Yi",
"surname": "Zhang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Tao Yang",
"givenName": "Tao",
"surname": "Yang",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cyberc",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2015-09-01T00:00:00",
"pubType": "proceedings",
"pages": "350-353",
"year": "2015",
"issn": null,
"isbn": "978-1-4673-9200-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "9200a343",
"articleId": "12OmNzRHOTe",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "9200a354",
"articleId": "12OmNCdk2FG",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cmvit/2017/4993/0/07878717",
"title": "Scope of Video Magnification in Human Pulse Rate Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/cmvit/2017/07878717/12OmNApLGLE",
"parentPublication": {
"id": "proceedings/cmvit/2017/4993/0",
"title": "2017 International Conference on Machine Vision and Information Technology (CMVIT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/crv/2015/1986/0/1986a261",
"title": "Preprocessing Realistic Video for Contactless Heart Rate Monitoring Using Video Magnification",
"doi": null,
"abstractUrl": "/proceedings-article/crv/2015/1986a261/12OmNBpmDR2",
"parentPublication": {
"id": "proceedings/crv/2015/1986/0",
"title": "2015 12th Conference on Computer and Robot Vision (CRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2017/0457/0/0457a502",
"title": "Video Acceleration Magnification",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2017/0457a502/12OmNCu4nc3",
"parentPublication": {
"id": "proceedings/cvpr/2017/0457/0",
"title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icesssymposia/2008/3288/0/3288a386",
"title": "Adaptive Motion Gesture Segmentation",
"doi": null,
"abstractUrl": "/proceedings-article/icesssymposia/2008/3288a386/12OmNwudQSG",
"parentPublication": {
"id": "proceedings/icesssymposia/2008/3288/0",
"title": "Embedded Software and Systems, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fg/2015/6026/1/07163160",
"title": "Optical flow based lip reading using non rectangular ROI and head motion reduction",
"doi": null,
"abstractUrl": "/proceedings-article/fg/2015/07163160/12OmNxeM46r",
"parentPublication": {
"id": "proceedings/fg/2015/6026/5",
"title": "2015 11th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (FG)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2015/6964/0/07299039",
"title": "Video magnification in presence of large motions",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2015/07299039/12OmNzuZUBe",
"parentPublication": {
"id": "proceedings/cvpr/2015/6964/0",
"title": "2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/th/2010/03/tth2010030166",
"title": "Rotational Skin Stretch Feedback: A Wearable Haptic Display for Motion",
"doi": null,
"abstractUrl": "/journal/th/2010/03/tth2010030166/13rRUILtJr1",
"parentPublication": {
"id": "trans/th",
"title": "IEEE Transactions on Haptics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600r7348",
"title": "Bilateral Video Magnification Filter",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600r7348/1H0NBEO2ZC8",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/percom-workshops/2021/0424/0/09430986",
"title": "WristO2: Reliable Peripheral Oxygen Saturation Readings from Wrist-Worn Pulse Oximeters",
"doi": null,
"abstractUrl": "/proceedings-article/percom-workshops/2021/09430986/1tROOUD6jJe",
"parentPublication": {
"id": "proceedings/percom-workshops/2021/0424/0",
"title": "2021 IEEE International Conference on Pervasive Computing and Communications Workshops and other Affiliated Events (PerCom Workshops)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2021/0191/0/019100c385",
"title": "MANet: a Motion-Driven Attention Network for Detecting the Pulse from a Facial Video with Drastic Motions",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2021/019100c385/1yNhZ2bBxHa",
"parentPublication": {
"id": "proceedings/iccvw/2021/0191/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision Workshops (ICCVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "17D45VtKirt",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"acronym": "cvpr",
"groupId": "1000147",
"volume": "0",
"displayVolume": "0",
"year": "2018",
"__typename": "ProceedingType"
},
"article": {
"id": "17D45WZZ7Cv",
"doi": "10.1109/CVPR.2018.00707",
"title": "PoseFlow: A Deep Motion Representation for Understanding Human Behaviors in Videos",
"normalizedTitle": "PoseFlow: A Deep Motion Representation for Understanding Human Behaviors in Videos",
"abstract": "Motion of the human body is the critical cue for understanding and characterizing human behavior in videos. Most existing approaches explore the motion cue using optical flows. However, optical flow usually contains motion on both the interested human bodies and the undesired background. This \"noisy\" motion representation makes it very challenging for pose estimation and action recognition in real scenarios. To address this issue, this paper presents a novel deep motion representation, called PoseFlow, which reveals human motion in videos while suppressing background and motion blur, and being robust to occlusion. For learning PoseFlow with mild computational cost, we propose a functionally structured spatial-temporal deep network, PoseFlow Net (PFN), to jointly solve the skeleton localization and matching problems of PoseFlow. Comprehensive experiments show that PFN outperforms the state-of-the-art deep flow estimation models in generating PoseFlow. Moreover, PoseFlow demonstrates its potential on improving two challenging tasks in human video analysis: pose estimation and action recognition.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Motion of the human body is the critical cue for understanding and characterizing human behavior in videos. Most existing approaches explore the motion cue using optical flows. However, optical flow usually contains motion on both the interested human bodies and the undesired background. This \"noisy\" motion representation makes it very challenging for pose estimation and action recognition in real scenarios. To address this issue, this paper presents a novel deep motion representation, called PoseFlow, which reveals human motion in videos while suppressing background and motion blur, and being robust to occlusion. For learning PoseFlow with mild computational cost, we propose a functionally structured spatial-temporal deep network, PoseFlow Net (PFN), to jointly solve the skeleton localization and matching problems of PoseFlow. Comprehensive experiments show that PFN outperforms the state-of-the-art deep flow estimation models in generating PoseFlow. Moreover, PoseFlow demonstrates its potential on improving two challenging tasks in human video analysis: pose estimation and action recognition.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Motion of the human body is the critical cue for understanding and characterizing human behavior in videos. Most existing approaches explore the motion cue using optical flows. However, optical flow usually contains motion on both the interested human bodies and the undesired background. This \"noisy\" motion representation makes it very challenging for pose estimation and action recognition in real scenarios. To address this issue, this paper presents a novel deep motion representation, called PoseFlow, which reveals human motion in videos while suppressing background and motion blur, and being robust to occlusion. For learning PoseFlow with mild computational cost, we propose a functionally structured spatial-temporal deep network, PoseFlow Net (PFN), to jointly solve the skeleton localization and matching problems of PoseFlow. Comprehensive experiments show that PFN outperforms the state-of-the-art deep flow estimation models in generating PoseFlow. Moreover, PoseFlow demonstrates its potential on improving two challenging tasks in human video analysis: pose estimation and action recognition.",
"fno": "642000g762",
"keywords": [
"Computer Vision",
"Feature Extraction",
"Image Motion Analysis",
"Image Representation",
"Image Sequences",
"Learning Artificial Intelligence",
"Motion Estimation",
"Object Detection",
"Pose Estimation",
"Video Signal Processing",
"Motion Cue",
"Optical Flow",
"Noisy Motion Representation",
"Action Recognition",
"Deep Motion Representation",
"Human Motion",
"Spatial Temporal Deep Network",
"Pose Flow Net",
"State Of The Art Deep Flow Estimation Models",
"Human Video Analysis",
"Human Body",
"Human Behavior",
"Videos",
"Optical Imaging",
"Pose Estimation",
"Task Analysis",
"Skeleton",
"Optical Computing"
],
"authors": [
{
"affiliation": null,
"fullName": "Dingwen Zhang",
"givenName": "Dingwen",
"surname": "Zhang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Guangyu Guo",
"givenName": "Guangyu",
"surname": "Guo",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Dong Huang",
"givenName": "Dong",
"surname": "Huang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Junwei Han",
"givenName": "Junwei",
"surname": "Han",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2018-06-01T00:00:00",
"pubType": "proceedings",
"pages": "6762-6770",
"year": "2018",
"issn": null,
"isbn": "978-1-5386-6420-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "642000g752",
"articleId": "17D45WODaoG",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "642000g771",
"articleId": "17D45XfSEUL",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iccv/2017/1032/0/1032c877",
"title": "Learning View-Invariant Features for Person Identification in Temporally Synchronized Videos Taken by Wearable Cameras",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2017/1032c877/12OmNxGAL1n",
"parentPublication": {
"id": "proceedings/iccv/2017/1032/0",
"title": "2017 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2017/0733/0/0733b642",
"title": "Optical Acceleration for Motion Description in Videos",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2017/0733b642/12OmNy7yEfW",
"parentPublication": {
"id": "proceedings/cvprw/2017/0733/0",
"title": "2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2018/6420/0/642000d282",
"title": "Boundary Flow: A Siamese Network that Predicts Boundary Motion Without Training on Motion",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2018/642000d282/17D45WKWnHY",
"parentPublication": {
"id": "proceedings/cvpr/2018/6420/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200l1699",
"title": "Motion Adaptive Pose Estimation from Compressed Videos",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200l1699/1BmH7IcYI6Y",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200l1512",
"title": "Physics-based Human Motion Estimation and Synthesis from Videos",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200l1512/1BmIT74w9rO",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2022/8563/0/09859776",
"title": "Unpaired Motion Style Transfer with Motion-Oriented Projection Flow Network",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2022/09859776/1G9E21CBr44",
"parentPublication": {
"id": "proceedings/icme/2022/8563/0",
"title": "2022 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2020/10/08769907",
"title": "Every Pixel Counts ++: Joint Learning of Geometry and Motion with 3D Holistic Understanding",
"doi": null,
"abstractUrl": "/journal/tp/2020/10/08769907/1bTR1RCJO4E",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798178",
"title": "Real-time Human Motion Forecasting using a RGB Camera",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798178/1cJ0Zj1wlMs",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2020/6553/0/09093395",
"title": "Do As I Do: Transferring Human Motion and Appearance between Monocular Videos with Spatial and Temporal Constraints",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2020/09093395/1jPbrEovs3e",
"parentPublication": {
"id": "proceedings/wacv/2020/6553/0",
"title": "2020 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800e653",
"title": "Distilled Semantics for Comprehensive Scene Understanding from Videos",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800e653/1m3os132gw0",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "19F1LC52tjO",
"title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"acronym": "ismar-adjunct",
"groupId": "1810084",
"volume": "0",
"displayVolume": "0",
"year": "2018",
"__typename": "ProceedingType"
},
"article": {
"id": "19F1U8eRyMw",
"doi": "10.1109/ISMAR-Adjunct.2018.00077",
"title": "Visually Induced Motion Sickness in 360° Videos: Comparing and Combining Visual Optimization Techniques",
"normalizedTitle": "Visually Induced Motion Sickness in 360° Videos: Comparing and Combining Visual Optimization Techniques",
"abstract": "As head mounted displays (HMDs) become everyday consumer items, the potential of immersive Virtual Reality (VR) as a design space becomes ever more promising. However, their usage is impeded by human factors inherent to the technology itself, such as visually induced motion sickness (VIMS), caused by the disconnect between what is visually and physically perceived. Previous work on VIMS reduction has explored techniques targeting HMDs, while others explored techniques that target the multimedia content itself through visual optimization. The latter are often studied individually and cannot be applied to certain VR content such as 360° video. Consequently, this paper describes an exploratory study comparing and combining such techniques (independent visual background and restricted field of view) in 360° video. The work provides constructive insights for VR designers, while also exploring how analytics of VR content and user experience can be used for VIMS prevention and evaluation.",
"abstracts": [
{
"abstractType": "Regular",
"content": "As head mounted displays (HMDs) become everyday consumer items, the potential of immersive Virtual Reality (VR) as a design space becomes ever more promising. However, their usage is impeded by human factors inherent to the technology itself, such as visually induced motion sickness (VIMS), caused by the disconnect between what is visually and physically perceived. Previous work on VIMS reduction has explored techniques targeting HMDs, while others explored techniques that target the multimedia content itself through visual optimization. The latter are often studied individually and cannot be applied to certain VR content such as 360° video. Consequently, this paper describes an exploratory study comparing and combining such techniques (independent visual background and restricted field of view) in 360° video. The work provides constructive insights for VR designers, while also exploring how analytics of VR content and user experience can be used for VIMS prevention and evaluation.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "As head mounted displays (HMDs) become everyday consumer items, the potential of immersive Virtual Reality (VR) as a design space becomes ever more promising. However, their usage is impeded by human factors inherent to the technology itself, such as visually induced motion sickness (VIMS), caused by the disconnect between what is visually and physically perceived. Previous work on VIMS reduction has explored techniques targeting HMDs, while others explored techniques that target the multimedia content itself through visual optimization. The latter are often studied individually and cannot be applied to certain VR content such as 360° video. Consequently, this paper describes an exploratory study comparing and combining such techniques (independent visual background and restricted field of view) in 360° video. The work provides constructive insights for VR designers, while also exploring how analytics of VR content and user experience can be used for VIMS prevention and evaluation.",
"fno": "08699261",
"keywords": [
"Data Visualisation",
"Helmet Mounted Displays",
"Human Factors",
"Virtual Reality",
"Visually Induced Motion Sickness",
"360° Videos",
"VIMS Reduction",
"Visual Optimization",
"Immersive Virtual Reality",
"Head Mounted Displays",
"Human Factors",
"Visualization",
"Optical Flow",
"Optimization",
"Prototypes",
"Streaming Media",
"User Experience",
"Resists",
"VR Sickness",
"Cybersickness",
"360° Video",
"Cinematic VR",
"Field Of View",
"Content Analysis",
"Optical Flow",
"H 5 1 Information Interfaces And Presentation E G HCI Multimedia Information Systems"
],
"authors": [
{
"affiliation": "U. Nova Lisboa, Madeira-ITI",
"fullName": "Paulo Bala",
"givenName": "Paulo",
"surname": "Bala",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Madeira-ITI",
"fullName": "Dina Dionísio",
"givenName": "Dina",
"surname": "Dionísio",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "U. Madeira, Madeira-ITI",
"fullName": "Valentina Nisi",
"givenName": "Valentina",
"surname": "Nisi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "IST - U. Lisbon, Madeira-ITI",
"fullName": "Nuno Nunes",
"givenName": "Nuno",
"surname": "Nunes",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar-adjunct",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2018-10-01T00:00:00",
"pubType": "proceedings",
"pages": "244-249",
"year": "2018",
"issn": null,
"isbn": "978-1-5386-7592-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08699283",
"articleId": "19F1V9Ax9Be",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08699259",
"articleId": "19F1SLp4sx2",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icmew/2018/4195/0/08551577",
"title": "Viewport-Driven Rate-Distortion Optimized Scalable Live 360° Video Network Multicast",
"doi": null,
"abstractUrl": "/proceedings-article/icmew/2018/08551577/17D45WZZ7Db",
"parentPublication": {
"id": "proceedings/icmew/2018/4195/0",
"title": "2018 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wowmom/2022/0876/0/087600a281",
"title": "Head Movement-aware MPEG-DASH SRD-based 360° Video VR Streaming System over Wireless Network",
"doi": null,
"abstractUrl": "/proceedings-article/wowmom/2022/087600a281/1FHqcfLbws0",
"parentPublication": {
"id": "proceedings/wowmom/2022/0876/0",
"title": "2022 IEEE 23rd International Symposium on a World of Wireless, Mobile and Multimedia Networks (WoWMoM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a491",
"title": "Implementation of Attention-Based Spatial Audio for 360° Environments",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a491/1J7Wlf9IrNC",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090670",
"title": "SiSiMo: Towards Simulator Sickness Modeling for 360<sup>°</sup> Videos Viewed with an HMD",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090670/1jIxwAw9Z9C",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090490",
"title": "Evaluation of Simulator Sickness for 360° Videos on an HMD Subject to Participants’ Experience with Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090490/1jIxwgIdgsw",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wowmom/2020/7374/0/737400a191",
"title": "A QoE and Visual Attention Evaluation on the Influence of Audio in 360° Videos",
"doi": null,
"abstractUrl": "/proceedings-article/wowmom/2020/737400a191/1nMQCKTCoeY",
"parentPublication": {
"id": "proceedings/wowmom/2020/7374/0",
"title": "2020 IEEE 21st International Symposium on \"A World of Wireless, Mobile and Multimedia Networks\" (WoWMoM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ucc/2020/2394/0/239400a414",
"title": "Accuracy Analysis on 360° Virtual Reality Video Quality Assessment Methods",
"doi": null,
"abstractUrl": "/proceedings-article/ucc/2020/239400a414/1pZ0Z6h4ERq",
"parentPublication": {
"id": "proceedings/ucc/2020/2394/0",
"title": "2020 IEEE/ACM 13th International Conference on Utility and Cloud Computing (UCC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ucc/2020/2394/0/239400a402",
"title": "A 360° Video Adaptive Streaming Scheme Based on Multiple Video Qualities",
"doi": null,
"abstractUrl": "/proceedings-article/ucc/2020/239400a402/1pZ0ZIjk5vq",
"parentPublication": {
"id": "proceedings/ucc/2020/2394/0",
"title": "2020 IEEE/ACM 13th International Conference on Utility and Cloud Computing (UCC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/msn/2020/9916/0/991600a291",
"title": "MEC-Assisted FoV-Aware and QoE-Driven Adaptive 360° Video Streaming for Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/msn/2020/991600a291/1sBO3kw7jnq",
"parentPublication": {
"id": "proceedings/msn/2020/9916/0",
"title": "2020 16th International Conference on Mobility, Sensing and Networking (MSN)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2021/1298/0/129800a183",
"title": "Enabling Collaborative Interaction with 360° Panoramas between Large-scale Displays and Immersive Headsets",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2021/129800a183/1yeQBWUxple",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2021/1298/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1cI6akLvAuQ",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2019",
"__typename": "ProceedingType"
},
"article": {
"id": "1cJ13JSUePK",
"doi": "10.1109/VR.2019.8798297",
"title": "Unifying Research to Address Motion Sickness",
"normalizedTitle": "Unifying Research to Address Motion Sickness",
"abstract": "Be it discussed as cybersickness, immersive sickness, simulator sickness, or virtual reality sickness, the ill effects of visuo-vestibular mismatch in immersive environments are of great concern for the wider adoption of virtual reality and related technologies. In this position paper, we discuss a unified research approach that may address motion sickness and identify critical research topics.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Be it discussed as cybersickness, immersive sickness, simulator sickness, or virtual reality sickness, the ill effects of visuo-vestibular mismatch in immersive environments are of great concern for the wider adoption of virtual reality and related technologies. In this position paper, we discuss a unified research approach that may address motion sickness and identify critical research topics.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Be it discussed as cybersickness, immersive sickness, simulator sickness, or virtual reality sickness, the ill effects of visuo-vestibular mismatch in immersive environments are of great concern for the wider adoption of virtual reality and related technologies. In this position paper, we discuss a unified research approach that may address motion sickness and identify critical research topics.",
"fno": "08798297",
"keywords": [
"Human Computer Interaction",
"Virtual Reality",
"Immersive Sickness",
"Simulator Sickness",
"Virtual Reality Sickness",
"Visuo Vestibular Mismatch",
"Immersive Environments",
"Unified Research Approach",
"Motion Sickness",
"Cybersickness",
"Virtual Reality",
"Visualization",
"Solid Modeling",
"Biology",
"Real Time Systems",
"Conferences",
"Machine Learning",
"Human Centered Computing",
"Human Computer Interaction HCI",
"Interaction Paradigms",
"Virtual Reality"
],
"authors": [
{
"affiliation": "U.S. Army Research Laboratory West",
"fullName": "Mark S. Dennison",
"givenName": "Mark S.",
"surname": "Dennison",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "USC Institute for Creative Technologies",
"fullName": "David M. Krum",
"givenName": "David M.",
"surname": "Krum",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2019-03-01T00:00:00",
"pubType": "proceedings",
"pages": "1858-1859",
"year": "2019",
"issn": null,
"isbn": "978-1-7281-1377-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08798107",
"articleId": "1cJ12M9tKM0",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08797781",
"articleId": "1cJ0Sg2UoQE",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2017/6647/0/07892307",
"title": "Air cushion: A pilot study of the passive technique to mitigate simulator sickness by responding to vection",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892307/12OmNClQ0yz",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2017/6647/0/07892284",
"title": "Diminished reality for acceleration stimulus: Motion sickness reduction with vection for autonomous driving",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892284/12OmNwx3QdZ",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a812",
"title": "Adding Difference Flow between Virtual and Actual Motion to Reduce Sensory Mismatch and VR Sickness while Moving",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a812/1CJcDou9mdG",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cost/2022/6248/0/624800a169",
"title": "Development of VR Motion Sickness Test Platform Based on UE",
"doi": null,
"abstractUrl": "/proceedings-article/cost/2022/624800a169/1H2pqPKjkAg",
"parentPublication": {
"id": "proceedings/cost/2022/6248/0",
"title": "2022 International Conference on Culture-Oriented Science and Technology (CoST)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a682",
"title": "Geometric simplification for reducing optic flow in VR",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a682/1J7WqYsXIuA",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2023/4815/0/481500a328",
"title": "Exploring Neural Biomarkers in Young Adults Resistant to VR Motion Sickness: A Pilot Study of EEG",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2023/481500a328/1MNgLSkIsUw",
"parentPublication": {
"id": "proceedings/vr/2023/4815/0",
"title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798158",
"title": "PhantomLegs: Reducing Virtual Reality Sickness Using Head-Worn Haptic Devices",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798158/1cJ16zT3GdW",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2020/5608/0/09089437",
"title": "The Effect of a Foveated Field-of-view Restrictor on VR Sickness",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2020/09089437/1jIxcfT0Wt2",
"parentPublication": {
"id": "proceedings/vr/2020/5608/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2021/8808/0/09412423",
"title": "VR Sickness Assessment with Perception Prior and Hybrid Temporal Features",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2021/09412423/1tmiMP82mre",
"parentPublication": {
"id": "proceedings/icpr/2021/8808/0",
"title": "2020 25th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a380",
"title": "Evaluating VR Sickness in VR Locomotion Techniques",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a380/1tnXc1raaxq",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1jIx7fmpQ9a",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1jIxcfT0Wt2",
"doi": "10.1109/VR46266.2020.00087",
"title": "The Effect of a Foveated Field-of-view Restrictor on VR Sickness",
"normalizedTitle": "The Effect of a Foveated Field-of-view Restrictor on VR Sickness",
"abstract": "Virtual reality sickness typically results from visual-vestibular conflict. Because self-motion from optical flow is driven most strongly by motion at the periphery of the retina, reducing the user’s field-of-view (FOV) during locomotion has proven to be an effective strategy to minimize visual vestibular conflict and VR sickness. Current FOV restrictor implementations reduce the user’s FOV by rendering a restrictor whose center is fixed at the center of the head mounted display (HMD), which is effective when the user’s eye gaze is aligned with head gaze. However, during eccentric eye gaze, users may look at the FOV restrictor itself, exposing them to peripheral optical flow which could lead to increased VR sickness. To address these limitations, we develop a foveated FOV restrictor and we explore the effect of dynamically moving the center of the FOV restrictor according to the user’s eye gaze position. We conducted a user study (n=22) where each participant uses a foveated FOV restrictor and a head-fixed FOV restrictor while navigating a virtual environment. We found no statistically significant difference in VR sickness measures or noticeability between both restrictors. However, there was a significant difference in eye gaze behavior, as measured by eye gaze dispersion, with the foveated FOV restrictor allowing participants to have a wider visual scan area compared to the head-fixed FOV restrictor, which confined their eye gaze to the center of the FOV.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Virtual reality sickness typically results from visual-vestibular conflict. Because self-motion from optical flow is driven most strongly by motion at the periphery of the retina, reducing the user’s field-of-view (FOV) during locomotion has proven to be an effective strategy to minimize visual vestibular conflict and VR sickness. Current FOV restrictor implementations reduce the user’s FOV by rendering a restrictor whose center is fixed at the center of the head mounted display (HMD), which is effective when the user’s eye gaze is aligned with head gaze. However, during eccentric eye gaze, users may look at the FOV restrictor itself, exposing them to peripheral optical flow which could lead to increased VR sickness. To address these limitations, we develop a foveated FOV restrictor and we explore the effect of dynamically moving the center of the FOV restrictor according to the user’s eye gaze position. We conducted a user study (n=22) where each participant uses a foveated FOV restrictor and a head-fixed FOV restrictor while navigating a virtual environment. We found no statistically significant difference in VR sickness measures or noticeability between both restrictors. However, there was a significant difference in eye gaze behavior, as measured by eye gaze dispersion, with the foveated FOV restrictor allowing participants to have a wider visual scan area compared to the head-fixed FOV restrictor, which confined their eye gaze to the center of the FOV.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Virtual reality sickness typically results from visual-vestibular conflict. Because self-motion from optical flow is driven most strongly by motion at the periphery of the retina, reducing the user’s field-of-view (FOV) during locomotion has proven to be an effective strategy to minimize visual vestibular conflict and VR sickness. Current FOV restrictor implementations reduce the user’s FOV by rendering a restrictor whose center is fixed at the center of the head mounted display (HMD), which is effective when the user’s eye gaze is aligned with head gaze. However, during eccentric eye gaze, users may look at the FOV restrictor itself, exposing them to peripheral optical flow which could lead to increased VR sickness. To address these limitations, we develop a foveated FOV restrictor and we explore the effect of dynamically moving the center of the FOV restrictor according to the user’s eye gaze position. We conducted a user study (n=22) where each participant uses a foveated FOV restrictor and a head-fixed FOV restrictor while navigating a virtual environment. We found no statistically significant difference in VR sickness measures or noticeability between both restrictors. However, there was a significant difference in eye gaze behavior, as measured by eye gaze dispersion, with the foveated FOV restrictor allowing participants to have a wider visual scan area compared to the head-fixed FOV restrictor, which confined their eye gaze to the center of the FOV.",
"fno": "09089437",
"keywords": [
"Optical Sensors",
"Visualization",
"Virtual Reality",
"Integrated Optics",
"Navigation",
"Retina",
"Resists",
"Virtual Reality",
"VR Sickness",
"Field Of View Manipulation",
"Eye Tracking"
],
"authors": [
{
"affiliation": "University of Nevada,Computer Science",
"fullName": "Isayas Berhe Adhanom",
"givenName": "Isayas Berhe",
"surname": "Adhanom",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Nevada,Computer Science",
"fullName": "Nathan Navarro Griffin",
"givenName": "Nathan",
"surname": "Navarro Griffin",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Nevada,Psychology",
"fullName": "Paul MacNeilage",
"givenName": "Paul",
"surname": "MacNeilage",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Nevada,Computer Science",
"fullName": "Eelke Folmer",
"givenName": "Eelke",
"surname": "Folmer",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-03-01T00:00:00",
"pubType": "proceedings",
"pages": "645-652",
"year": "2020",
"issn": null,
"isbn": "978-1-7281-5608-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "09089661",
"articleId": "1jIxfrGAC8o",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09089534",
"articleId": "1jIx8B84p5C",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/3dui/2016/0842/0/07460053",
"title": "Combating VR sickness through subtle dynamic field-of-view modification",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2016/07460053/12OmNBubORd",
"parentPublication": {
"id": "proceedings/3dui/2016/0842/0",
"title": "2016 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2002/1492/0/14920164",
"title": "Effects of Field of View on Presence, Enjoyment, Memory, and Simulator Sickness in a Virtual Environment",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2002/14920164/12OmNvUsoqB",
"parentPublication": {
"id": "proceedings/vr/2002/1492/0",
"title": "Proceedings IEEE Virtual Reality 2002",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446346",
"title": "Reducing VR Sickness Through Peripheral Visual Effects",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446346/13bd1fHrlRY",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446345",
"title": "Investigating a Sparse Peripheral Display in a Head-Mounted Display for VR Locomotion",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446345/13bd1fZBGbI",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2023/9346/0/934600b766",
"title": "Cross-Resolution Flow Propagation for Foveated Video Super-Resolution",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2023/934600b766/1KxV3bSa3Fm",
"parentPublication": {
"id": "proceedings/wacv/2023/9346/0",
"title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2023/4815/0/481500a205",
"title": "Power, Performance, and Image Quality Tradeoffs in Foveated Rendering",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2023/481500a205/1MNgQoZswDu",
"parentPublication": {
"id": "proceedings/vr/2023/4815/0",
"title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798136",
"title": "VR Sickness in Continuous Exposure to Live-action 180°Video",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798136/1cJ1gPJX2og",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/05/08999630",
"title": "Toward Standardized Classification of Foveated Displays",
"doi": null,
"abstractUrl": "/journal/tg/2020/05/08999630/1hpPDGcaf9C",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a735",
"title": "[DC] Towards Universal VR Sickness Mitigation Strategies",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a735/1tnXDI2lhHq",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a380",
"title": "Evaluating VR Sickness in VR Locomotion Techniques",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a380/1tnXc1raaxq",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1tnWwqMuCzu",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"acronym": "vrw",
"groupId": "1836626",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1tnXYDa4Wcg",
"doi": "10.1109/VRW52623.2021.00095",
"title": "Is Virtual Reality Sickness Elicited by Illusory Motion Affected by Gender and Prior Video Gaming Experience?",
"normalizedTitle": "Is Virtual Reality Sickness Elicited by Illusory Motion Affected by Gender and Prior Video Gaming Experience?",
"abstract": "Gaming using VR headsets is becoming increasingly popular; however, these displays can cause VR sickness. To investigate the effects of gender and gamer type on VR sickness motion illusions are used as stimuli, being a novel method of inducing the perception of motion whilst minimising the \"accommodation vergence conflict\". Females and those who do not play action games experienced more severe VR sickness symptoms compared to males and experienced action gamers. The interaction of the gender and gamer type revealed that prior video gaming experience was beneficial for females, however, for males, it did not show the same positive effects.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Gaming using VR headsets is becoming increasingly popular; however, these displays can cause VR sickness. To investigate the effects of gender and gamer type on VR sickness motion illusions are used as stimuli, being a novel method of inducing the perception of motion whilst minimising the \"accommodation vergence conflict\". Females and those who do not play action games experienced more severe VR sickness symptoms compared to males and experienced action gamers. The interaction of the gender and gamer type revealed that prior video gaming experience was beneficial for females, however, for males, it did not show the same positive effects.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Gaming using VR headsets is becoming increasingly popular; however, these displays can cause VR sickness. To investigate the effects of gender and gamer type on VR sickness motion illusions are used as stimuli, being a novel method of inducing the perception of motion whilst minimising the \"accommodation vergence conflict\". Females and those who do not play action games experienced more severe VR sickness symptoms compared to males and experienced action gamers. The interaction of the gender and gamer type revealed that prior video gaming experience was beneficial for females, however, for males, it did not show the same positive effects.",
"fno": "405700a426",
"keywords": [
"Computer Games",
"Three Dimensional Displays",
"Virtual Reality",
"Visual Perception",
"Gamer Type",
"VR Sickness Motion Illusions",
"Accommodation Vergence Conflict",
"Action Games",
"Severe VR Sickness Symptoms",
"Experienced Action Gamers",
"Gender",
"Prior Video Gaming Experience",
"Virtual Reality Sickness Elicited",
"Illusory Motion",
"VR Headsets",
"Training",
"Headphones",
"Visualization",
"Three Dimensional Displays",
"Conferences",
"Virtual Environments",
"Games",
"Virtual Reality",
"Fraser Wilcox Illusions",
"VR Sickness",
"Discomfort",
"Head Movements",
"Gender",
"Adaptation",
"Habituation"
],
"authors": [
{
"affiliation": "University of Lincoln",
"fullName": "Katharina Margareta Theresa Pöhlmann",
"givenName": "Katharina Margareta",
"surname": "Theresa Pöhlmann",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Nottingham Trent University",
"fullName": "Louise O’Hare",
"givenName": "Louise",
"surname": "O’Hare",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Lincoln",
"fullName": "Julia Föcker",
"givenName": "Julia",
"surname": "Föcker",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of the West of Scotland",
"fullName": "Adrian Parke",
"givenName": "Adrian",
"surname": "Parke",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Lincoln",
"fullName": "Patrick Dickinson",
"givenName": "Patrick",
"surname": "Dickinson",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vrw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-03-01T00:00:00",
"pubType": "proceedings",
"pages": "426-427",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-4057-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "405700a424",
"articleId": "1tnWC56l7u8",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "405700a428",
"articleId": "1tnXMGH9lRK",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/3dui/2016/0842/0/07460053",
"title": "Combating VR sickness through subtle dynamic field-of-view modification",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2016/07460053/12OmNBubORd",
"parentPublication": {
"id": "proceedings/3dui/2016/0842/0",
"title": "2016 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2017/6647/0/07892307",
"title": "Air cushion: A pilot study of the passive technique to mitigate simulator sickness by responding to vection",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892307/12OmNClQ0yz",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446382",
"title": "Please Don't Puke: Early Detection of Severe Motion Sickness in VR",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446382/13bd1f3HvEJ",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a542",
"title": "Resolution Tradeoff in Gameplay Experience, Performance, and Simulator Sickness in Virtual Reality Games",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a542/1CJcAVYrJew",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/mu/2022/02/09779506",
"title": "Why VR Games Sickness? An Empirical Study of Capturing and Analyzing VR Games Head Movement Dataset",
"doi": null,
"abstractUrl": "/magazine/mu/2022/02/09779506/1DwUBBXPkVG",
"parentPublication": {
"id": "mags/mu",
"title": "IEEE MultiMedia",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2023/4815/0/481500a094",
"title": "An EEG-based Experiment on VR Sickness and Postural Instability While Walking in Virtual Environments",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2023/481500a094/1MNgWtYsR5S",
"parentPublication": {
"id": "proceedings/vr/2023/4815/0",
"title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798213",
"title": "VR Sickness Prediction for Navigation in Immersive Virtual Environments using a Deep Long Short Term Memory Model",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798213/1cJ0RYruJIA",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090580",
"title": "A Study on the Effects of Head Mounted Displays Movement and Image Movement on Virtual Reality Sickness",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090580/1jIxns5TwxG",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a735",
"title": "[DC] Towards Universal VR Sickness Mitigation Strategies",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a735/1tnXDI2lhHq",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a380",
"title": "Evaluating VR Sickness in VR Locomotion Techniques",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a380/1tnXc1raaxq",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNyGbI4V",
"title": "2016 IEEE International Conference on Multimedia and Expo (ICME)",
"acronym": "icme",
"groupId": "1000477",
"volume": "0",
"displayVolume": "0",
"year": "2016",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNAlNixT",
"doi": "10.1109/ICME.2016.7552965",
"title": "Content-adaptive focus configuration for near-eye multi-focal displays",
"normalizedTitle": "Content-adaptive focus configuration for near-eye multi-focal displays",
"abstract": "Near-eye multi-focal (light field) displays are known to be more advantageous than conventional stereoscopic 3D displays, because they can alleviate the vergence-accommodation conflict. While stereoscopic 3D displays are restricted to projecting images on a single focal plane, near-eye light field displays can form multiple focal planes and volumetrically render 3D data. Most existing near-eye light field displays use simplistic, uniformly spaced focal plane configuration (in dioptric space). In this paper, we present a novel technique that optimizes the focal plane configuration based on characteristics of the content to be rendered. We show that this technique can significantly improve the perceived visual quality of content visualized on such displays.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Near-eye multi-focal (light field) displays are known to be more advantageous than conventional stereoscopic 3D displays, because they can alleviate the vergence-accommodation conflict. While stereoscopic 3D displays are restricted to projecting images on a single focal plane, near-eye light field displays can form multiple focal planes and volumetrically render 3D data. Most existing near-eye light field displays use simplistic, uniformly spaced focal plane configuration (in dioptric space). In this paper, we present a novel technique that optimizes the focal plane configuration based on characteristics of the content to be rendered. We show that this technique can significantly improve the perceived visual quality of content visualized on such displays.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Near-eye multi-focal (light field) displays are known to be more advantageous than conventional stereoscopic 3D displays, because they can alleviate the vergence-accommodation conflict. While stereoscopic 3D displays are restricted to projecting images on a single focal plane, near-eye light field displays can form multiple focal planes and volumetrically render 3D data. Most existing near-eye light field displays use simplistic, uniformly spaced focal plane configuration (in dioptric space). In this paper, we present a novel technique that optimizes the focal plane configuration based on characteristics of the content to be rendered. We show that this technique can significantly improve the perceived visual quality of content visualized on such displays.",
"fno": "07552965",
"keywords": [
"Three Dimensional Displays",
"Measurement",
"Optical Imaging",
"Visualization",
"Optical Sensors",
"Retina",
"Adaptive Optics",
"Content Adaptation",
"Light Field",
"Depth Blending",
"Multi Focal",
"Near Eye Display"
],
"authors": [
{
"affiliation": "Ricoh Innovations Corp., 10050 N. Wolfe Road, Suite SW2-260, Cupertino, California 95014",
"fullName": "Wanmin Wu",
"givenName": "Wanmin",
"surname": "Wu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Duke University, 10050 N. Wolfe Road, Suite SW2-260, Cupertino, California 95014",
"fullName": "Patrick Llull",
"givenName": "Patrick",
"surname": "Llull",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Ricoh Innovations Corp., 10050 N. Wolfe Road, Suite SW2-260, Cupertino, California 95014",
"fullName": "Ivana Tosic",
"givenName": "Ivana",
"surname": "Tosic",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Ricoh Innovations Corp., 10050 N. Wolfe Road, Suite SW2-260, Cupertino, California 95014",
"fullName": "Noah Bedard",
"givenName": "Noah",
"surname": "Bedard",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Ricoh Innovations Corp., 10050 N. Wolfe Road, Suite SW2-260, Cupertino, California 95014",
"fullName": "Kathrin Berkner",
"givenName": "Kathrin",
"surname": "Berkner",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Ricoh Innovations Corp., 10050 N. Wolfe Road, Suite SW2-260, Cupertino, California 95014",
"fullName": "Nikhil Balram",
"givenName": "Nikhil",
"surname": "Balram",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icme",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2016-07-01T00:00:00",
"pubType": "proceedings",
"pages": "1-6",
"year": "2016",
"issn": "1945-788X",
"isbn": "978-1-4673-7258-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07552964",
"articleId": "12OmNC8Msp8",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07552966",
"articleId": "12OmNzl3WNK",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2016/0836/0/07504749",
"title": "SharpView: Improved clarity of defocussed content on optical see-through head-mounted displays",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2016/07504749/12OmNBBhN9g",
"parentPublication": {
"id": "proceedings/vr/2016/0836/0",
"title": "2016 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2017/6327/0/6327a245",
"title": "[POSTER] An Accurate Calibration Method for Optical See-Through Head-Mounted Displays Based on Actual Eye-Observation Model",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2017/6327a245/12OmNwErpLb",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2017/6327/0",
"title": "2017 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wccct/2014/2877/0/2877a120",
"title": "Identification of Retinal Image Features Using Bitplane Separation and Mathematical Morphology",
"doi": null,
"abstractUrl": "/proceedings-article/wccct/2014/2877a120/12OmNy5hRc8",
"parentPublication": {
"id": "proceedings/wccct/2014/2877/0",
"title": "2014 World Congress on Computing and Communication Technologies (WCCCT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/04/07829412",
"title": "Wide Field Of View Varifocal Near-Eye Display Using See-Through Deformable Membrane Mirrors",
"doi": null,
"abstractUrl": "/journal/tg/2017/04/07829412/13rRUwcS1D1",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/07/07226865",
"title": "Resolving the Vergence-Accommodation Conflict in Head-Mounted Displays",
"doi": null,
"abstractUrl": "/journal/tg/2016/07/07226865/13rRUxASuhD",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/05/08642529",
"title": "Manufacturing Application-Driven Foveated Near-Eye Displays",
"doi": null,
"abstractUrl": "/journal/tg/2019/05/08642529/17PYEjG6pn1",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/05/10049728",
"title": "Off-Axis Layered Displays: Hybrid Direct-View/Near-Eye Mixed Reality with Focus Cues",
"doi": null,
"abstractUrl": "/journal/tg/2023/05/10049728/1KYotSK4YMM",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/03/09186170",
"title": "Parallax Free Registration for Augmented Reality Optical See-Through Displays in the Peripersonal Space",
"doi": null,
"abstractUrl": "/journal/tg/2022/03/09186170/1mP2AYgyLQY",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/05/09383112",
"title": "Beaming Displays",
"doi": null,
"abstractUrl": "/journal/tg/2021/05/09383112/1saZzKxYSqI",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2021/1298/0/129800a207",
"title": "Focus-Aware Retinal Projection-based Near-Eye Display",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2021/129800a207/1yfxNuG3Mju",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2021/1298/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNs0C9QD",
"title": "Proceedings IEEE The Tenth Annual International Workshop on Micro Electro Mechanical Systems. An Investigation of Micro Structures, Sensors, Actuators, Machines and Robots",
"acronym": "memsys",
"groupId": "1000438",
"volume": "0",
"displayVolume": "0",
"year": "1997",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNxw5B9m",
"doi": "10.1109/MEMSYS.1997.581761",
"title": "Near field optics for nanometric sensing and control",
"normalizedTitle": "Near field optics for nanometric sensing and control",
"abstract": "It has been believed that one of the major reasons for the popularity of optical sensing is its non-destructive and remote-sensing capability. We can measure an object remotely far from the sensor without any damage to the object. However, in the near-field optical sensing which is described in this presentation a probe is nearly in contact to the object in a distance of some nanometers. While near-field optics loses the merit of remotely accessible capability with ordinary optics, it has significant advantages of super-resolving imaging, field-enhanced sensing and control, and nanometric surface fabrication. These advantages enable optics for contributing to the advanced micro-electro-mechanical systems. In this presentation, I describe the principle of this near-field optical sensing and control, with some latest experimental examples. The research of the near-field optics is still now progressing as an advanced optical technology, coupled with the related high technologies such as very precise mechanical scanner, nanometric fabrication, and very high-sensitive photodetection.",
"abstracts": [
{
"abstractType": "Regular",
"content": "It has been believed that one of the major reasons for the popularity of optical sensing is its non-destructive and remote-sensing capability. We can measure an object remotely far from the sensor without any damage to the object. However, in the near-field optical sensing which is described in this presentation a probe is nearly in contact to the object in a distance of some nanometers. While near-field optics loses the merit of remotely accessible capability with ordinary optics, it has significant advantages of super-resolving imaging, field-enhanced sensing and control, and nanometric surface fabrication. These advantages enable optics for contributing to the advanced micro-electro-mechanical systems. In this presentation, I describe the principle of this near-field optical sensing and control, with some latest experimental examples. The research of the near-field optics is still now progressing as an advanced optical technology, coupled with the related high technologies such as very precise mechanical scanner, nanometric fabrication, and very high-sensitive photodetection.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "It has been believed that one of the major reasons for the popularity of optical sensing is its non-destructive and remote-sensing capability. We can measure an object remotely far from the sensor without any damage to the object. However, in the near-field optical sensing which is described in this presentation a probe is nearly in contact to the object in a distance of some nanometers. While near-field optics loses the merit of remotely accessible capability with ordinary optics, it has significant advantages of super-resolving imaging, field-enhanced sensing and control, and nanometric surface fabrication. These advantages enable optics for contributing to the advanced micro-electro-mechanical systems. In this presentation, I describe the principle of this near-field optical sensing and control, with some latest experimental examples. The research of the near-field optics is still now progressing as an advanced optical technology, coupled with the related high technologies such as very precise mechanical scanner, nanometric fabrication, and very high-sensitive photodetection.",
"fno": "00581761",
"keywords": [
"Nanotechnology",
"Optical Sensors",
"Optical Microscopy",
"Laser Beam Machining",
"Micromachining",
"Surface Plasmons",
"Nanometric Sensing",
"Near Field Optics",
"Evanescent Photon",
"Nondestructive Sensing",
"Remote Sensing",
"Near Field Optical Sensing",
"Super Resolving Imaging",
"Field Enhanced Sensing",
"Nanometric Surface Fabrication",
"Microelectromechanical Systems",
"Mechanical Scanner",
"Nanometric Fabrication",
"Photodetection",
"Optical Sensors",
"Optical Control",
"Optical Surface Waves",
"Holography",
"Optical Attenuators",
"Holographic Optical Components",
"Nonlinear Optics",
"Optical Refraction",
"Optical Variables Control",
"Spectroscopy"
],
"authors": [
{
"affiliation": "Dept. of Appl. Phys., Osaka Univ., Japan",
"fullName": "S. Kawata",
"givenName": "S.",
"surname": "Kawata",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "memsys",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "1997-01-01T00:00:00",
"pubType": "proceedings",
"pages": "37,38,39,40,41,42",
"year": "1997",
"issn": "1084-6999",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "00581760",
"articleId": "12OmNAXPyje",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "00581762",
"articleId": "12OmNwkR5Cb",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iri/2014/5880/0/07051914",
"title": "Towards ray optics formalization of optical imaging systems",
"doi": null,
"abstractUrl": "/proceedings-article/iri/2014/07051914/12OmNvq5jzp",
"parentPublication": {
"id": "proceedings/iri/2014/5880/0",
"title": "2014 IEEE International Conference on Information Reuse and Integration (IRI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iciev/2016/1269/0/07760037",
"title": "Parameter optimization of free space optics in the perspective of Bangladesh",
"doi": null,
"abstractUrl": "/proceedings-article/iciev/2016/07760037/12OmNvqEvNs",
"parentPublication": {
"id": "proceedings/iciev/2016/1269/0",
"title": "2016 International Conference on Informatics, Electronics and Vision (ICIEV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icip/1994/6952/2/00413504",
"title": "Calibration for peripheral attenuation in intensity images",
"doi": null,
"abstractUrl": "/proceedings-article/icip/1994/00413504/12OmNvsDHJ5",
"parentPublication": {
"id": "proceedings/icip/1994/6952/2",
"title": "Proceedings of 1st International Conference on Image Processing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/05/07127036",
"title": "Effects of Optical Combiner and IPD Change for Convergence on Near-Field Depth Perception in an Optical See-Through HMD",
"doi": null,
"abstractUrl": "/journal/tg/2016/05/07127036/13rRUILc8fe",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/04/07829412",
"title": "Wide Field Of View Varifocal Near-Eye Display Using See-Through Deformable Membrane Mirrors",
"doi": null,
"abstractUrl": "/journal/tg/2017/04/07829412/13rRUwcS1D1",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tk/1989/01/k0111",
"title": "The Impact of Optics on Data and Knowledge Base Systems",
"doi": null,
"abstractUrl": "/journal/tk/1989/01/k0111/13rRUxNEqQa",
"parentPublication": {
"id": "trans/tk",
"title": "IEEE Transactions on Knowledge & Data Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/11/07523375",
"title": "Gaussian Light Field: Estimation of Viewpoint-Dependent Blur for Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/journal/tg/2016/11/07523375/13rRUxYINfi",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/05/08642529",
"title": "Manufacturing Application-Driven Foveated Near-Eye Displays",
"doi": null,
"abstractUrl": "/journal/tg/2019/05/08642529/17PYEjG6pn1",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/05/08676153",
"title": "Light Attenuation Display: Subtractive See-Through Near-Eye Display via Spatial Color Filtering",
"doi": null,
"abstractUrl": "/journal/tg/2019/05/08676153/18LFbQfp6x2",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/11/08794584",
"title": "Towards a Switchable AR/VR Near-eye Display with Accommodation-Vergence and Eyeglass Prescription Support",
"doi": null,
"abstractUrl": "/journal/tg/2019/11/08794584/1dNHlOrNW5W",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1kwqyDCYmas",
"title": "2020 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)",
"acronym": "icmew",
"groupId": "1801805",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1kwqMlDE9KE",
"doi": "10.1109/ICMEW46912.2020.9106014",
"title": "Computational Multifocal Near-Eye Display with Hybrid Refractive-Diffractive Optics",
"normalizedTitle": "Computational Multifocal Near-Eye Display with Hybrid Refractive-Diffractive Optics",
"abstract": "We present a multifocal computational near-eye display that employs a static diffractive optical element (DOE) in tandem with a refractive lens. The DOE is co-optimized with the con-volutional neural network-based preprocessing to achieve desired multifocal display point spread function in an optimal manner. In the simulations, we demonstrate a multifocal display that can deliver sharp images for three distinct depths sampling the dioptric depth range uniformly from 3 diopters to infinity.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We present a multifocal computational near-eye display that employs a static diffractive optical element (DOE) in tandem with a refractive lens. The DOE is co-optimized with the con-volutional neural network-based preprocessing to achieve desired multifocal display point spread function in an optimal manner. In the simulations, we demonstrate a multifocal display that can deliver sharp images for three distinct depths sampling the dioptric depth range uniformly from 3 diopters to infinity.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We present a multifocal computational near-eye display that employs a static diffractive optical element (DOE) in tandem with a refractive lens. The DOE is co-optimized with the con-volutional neural network-based preprocessing to achieve desired multifocal display point spread function in an optimal manner. In the simulations, we demonstrate a multifocal display that can deliver sharp images for three distinct depths sampling the dioptric depth range uniformly from 3 diopters to infinity.",
"fno": "09106014",
"keywords": [
"Diffractive Optical Elements",
"Eye",
"Lenses",
"Optical Design Techniques",
"Optical Transfer Function",
"Vision Defects",
"Optimal Manner",
"Multifocal Display Point Spread Function",
"Con Volutional Neural Network Based",
"DOE",
"Refractive Lens",
"Static Diffractive Optical Element",
"Hybrid Refractive Diffractive Optics",
"Computational Multifocal Near Eye Display",
"Integrated Optics",
"Headphones",
"Optical Diffraction",
"Deconvolution",
"Conferences",
"Computational Modeling",
"Prototypes",
"Computational Display",
"Optics",
"Neural Network"
],
"authors": [
{
"affiliation": "Tampere University,Faculty of Information Technology and Communication Sciences,Tampere,Finland",
"fullName": "Ugur Akpinar",
"givenName": "Ugur",
"surname": "Akpinar",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Tampere University,Faculty of Information Technology and Communication Sciences,Tampere,Finland",
"fullName": "Erdem Sahin",
"givenName": "Erdem",
"surname": "Sahin",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Tampere University,Faculty of Information Technology and Communication Sciences,Tampere,Finland",
"fullName": "Atanas Gotchev",
"givenName": "Atanas",
"surname": "Gotchev",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icmew",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-07-01T00:00:00",
"pubType": "proceedings",
"pages": "1-6",
"year": "2020",
"issn": null,
"isbn": "978-1-7281-1485-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "09106027",
"articleId": "1kwqCxearf2",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09105979",
"articleId": "1kwqLzKxXbO",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iccp/2018/2526/0/08368469",
"title": "Focal sweep imaging with multi-focal diffractive optics",
"doi": null,
"abstractUrl": "/proceedings-article/iccp/2018/08368469/12OmNBV9Ii2",
"parentPublication": {
"id": "proceedings/iccp/2018/2526/0",
"title": "2018 IEEE International Conference on Computational Photography (ICCP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2013/2869/0/06671761",
"title": "Computational augmented reality eyeglasses",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2013/06671761/12OmNx5piSJ",
"parentPublication": {
"id": "proceedings/ismar/2013/2869/0",
"title": "2013 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pi/1999/0440/0/04400043",
"title": "1 Gb/s VCSEL/CMOS Flip-Chip 2-D-Array Interconnects and Associated Diffractive Optics",
"doi": null,
"abstractUrl": "/proceedings-article/pi/1999/04400043/12OmNybfqVC",
"parentPublication": {
"id": "proceedings/pi/1999/0440/0",
"title": "Parallel Interconnects, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200c631",
"title": "Single-shot Hyperspectral-Depth Imaging with Learned Diffractive Optics",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200c631/1BmFxzJc1tC",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccp/2022/5851/0/09887757",
"title": "Analyzing phase masks for wide étendue holographic displays",
"doi": null,
"abstractUrl": "/proceedings-article/iccp/2022/09887757/1GZivOBcOnS",
"parentPublication": {
"id": "proceedings/iccp/2022/5851/0",
"title": "2022 IEEE International Conference on Computational Photography (ICCP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600t9748",
"title": "Quantization-aware Deep Optics for Diffractive Snapshot Hyperspectral Imaging",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600t9748/1H0NBTZAs48",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2023/4815/0/481500a449",
"title": "Extended Depth-of-Field Projector using Learned Diffractive Optics",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2023/481500a449/1MNgNe272U0",
"parentPublication": {
"id": "proceedings/vr/2023/4815/0",
"title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2019/4765/0/476500a438",
"title": "Computational Glasses: Vision Augmentations Using Computational Near-Eye Optics and Displays",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2019/476500a438/1gyslg8NM8o",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2019/4765/0",
"title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/05/08998293",
"title": "ThinVR: Heterogeneous microlens arrays for compact, 180 degree FOV VR near-eye displays",
"doi": null,
"abstractUrl": "/journal/tg/2020/05/08998293/1hrXiCmKkak",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800b383",
"title": "Learning Rank-1 Diffractive Optics for Single-Shot High Dynamic Range Imaging",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800b383/1m3nHdJQcFi",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1pystLSz19C",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"acronym": "ismar",
"groupId": "1000465",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1pysyaCOe76",
"doi": "10.1109/ISMAR50242.2020.00057",
"title": "Towards Eyeglass-style Holographic Near-eye Displays with Statically",
"normalizedTitle": "Towards Eyeglass-style Holographic Near-eye Displays with Statically",
"abstract": "Holography is perhaps the only method demonstrated so far that can achieve a wide field of view (FOV) and a compact eyeglass-style form factor for augmented reality (AR) near-eye displays (NEDs). Unfortunately, the eyebox of such NEDs is impractically small (~ <; 1mm). In this paper, we introduce and demonstrate a design for holographic NEDs with a practical, wide eyebox of ~ 10mm and without any moving parts, based on holographic lenslets. In our design, a holographic optical element (HOE) based on a lenslet array was fabricated as the image combiner with expanded eyebox. A phase spatial light modulator (SLM) alters the phase of the incident laser light projected onto the HOE combiner such that the virtual image can be perceived at different focus distances, which can reduce the vergence-accommodation conflict (VAC). We have successfully implemented a bench-top prototype following the proposed design. The experimental results show effective eyebox expansion to a size of ~ 10mm. With further work, we hope that these design concepts can be incorporated into eyeglass-size NEDs.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Holography is perhaps the only method demonstrated so far that can achieve a wide field of view (FOV) and a compact eyeglass-style form factor for augmented reality (AR) near-eye displays (NEDs). Unfortunately, the eyebox of such NEDs is impractically small (~ <; 1mm). In this paper, we introduce and demonstrate a design for holographic NEDs with a practical, wide eyebox of ~ 10mm and without any moving parts, based on holographic lenslets. In our design, a holographic optical element (HOE) based on a lenslet array was fabricated as the image combiner with expanded eyebox. A phase spatial light modulator (SLM) alters the phase of the incident laser light projected onto the HOE combiner such that the virtual image can be perceived at different focus distances, which can reduce the vergence-accommodation conflict (VAC). We have successfully implemented a bench-top prototype following the proposed design. The experimental results show effective eyebox expansion to a size of ~ 10mm. With further work, we hope that these design concepts can be incorporated into eyeglass-size NEDs.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Holography is perhaps the only method demonstrated so far that can achieve a wide field of view (FOV) and a compact eyeglass-style form factor for augmented reality (AR) near-eye displays (NEDs). Unfortunately, the eyebox of such NEDs is impractically small (~ <; 1mm). In this paper, we introduce and demonstrate a design for holographic NEDs with a practical, wide eyebox of ~ 10mm and without any moving parts, based on holographic lenslets. In our design, a holographic optical element (HOE) based on a lenslet array was fabricated as the image combiner with expanded eyebox. A phase spatial light modulator (SLM) alters the phase of the incident laser light projected onto the HOE combiner such that the virtual image can be perceived at different focus distances, which can reduce the vergence-accommodation conflict (VAC). We have successfully implemented a bench-top prototype following the proposed design. The experimental results show effective eyebox expansion to a size of ~ 10mm. With further work, we hope that these design concepts can be incorporated into eyeglass-size NEDs.",
"fno": "850800a312",
"keywords": [
"Augmented Reality",
"Eye",
"Holographic Displays",
"Holographic Optical Elements",
"Lenses",
"Optical Arrays",
"Optical Projectors",
"Spatial Light Modulators",
"Expanded Eyebox",
"Phase Spatial Light Modulator",
"Incident Laser Light",
"HOE Combiner",
"Virtual Image",
"Effective Eyebox Expansion",
"Compact Eyeglass Style Form Factor",
"Augmented Reality Near Eye Displays",
"Holographic Lenslets",
"Holographic Optical Element",
"Lenslet Array",
"Image Combiner",
"Eyeglass Style Holographic Near Eye Displays",
"Field Of View",
"Focus Distances",
"Vergence Accommodation Conflict",
"Bench Top Prototype",
"Phase Modulation",
"Optical Device Fabrication",
"Prototypes",
"Holography",
"Optical Imaging",
"Optical Modulation",
"Augmented Reality",
"Near Eye Displays",
"Augmented Reality",
"Holographic Displays",
"Expanded Eyebox Computing Methodologies X 2014 Computer Graphics X 2014 Graphics Systems And Interfaces X 2014 Mixed Augmented Reality",
"Com Puting Methodologies X 2014 Computer Graphics X 2014 Graphics Systems And Interfaces X 2014 Virtual Reality",
"Hardware X 2014 Communication Hardware",
"Interfaces And Storage X 2014 Displays And Imagers"
],
"authors": [
{
"affiliation": "Shanghai University,School of Mechatronic Engineering and Automation",
"fullName": "Xinxing Xia",
"givenName": "Xinxing",
"surname": "Xia",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Singapore Institute of Technology,Infocomm Technology Cluster",
"fullName": "Yunqing Guan",
"givenName": "Yunqing",
"surname": "Guan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of North Carolina at Chapel Hill,Department of Computer Science",
"fullName": "Andrei State",
"givenName": "Andrei",
"surname": "State",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of North Carolina at Chapel Hill,Department of Computer Science",
"fullName": "Praneeth Chakravarthula",
"givenName": "Praneeth",
"surname": "Chakravarthula",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Nanyang Technological University,School of Computer Science and Engineering",
"fullName": "Tat-Jen Cham",
"givenName": "Tat-Jen",
"surname": "Cham",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of North Carolina at Chapel Hill,Department of Computer Science",
"fullName": "Henry Fuchs",
"givenName": "Henry",
"surname": "Fuchs",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-11-01T00:00:00",
"pubType": "proceedings",
"pages": "312-319",
"year": "2020",
"issn": "1554-7868",
"isbn": "978-1-7281-8508-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "850800a301",
"articleId": "1pysxIK95Yc",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "850800a320",
"articleId": "1pysxaykIAo",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icig/2013/5050/0/5050a761",
"title": "Holographic Projection Using Converging Spherical Wave Illumination",
"doi": null,
"abstractUrl": "/proceedings-article/icig/2013/5050a761/12OmNASraPv",
"parentPublication": {
"id": "proceedings/icig/2013/5050/0",
"title": "2013 Seventh International Conference on Image and Graphics (ICIG)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mppoi/1994/5832/0/00336622",
"title": "Holographic optical interconnections",
"doi": null,
"abstractUrl": "/proceedings-article/mppoi/1994/00336622/12OmNBqv286",
"parentPublication": {
"id": "proceedings/mppoi/1994/5832/0",
"title": "First International Workshop on Massively Parallel Processing Using Optical Interconnections",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/nvmt/1996/3510/0/00534666",
"title": "Holographic 3D disks",
"doi": null,
"abstractUrl": "/proceedings-article/nvmt/1996/00534666/12OmNxzuMLd",
"parentPublication": {
"id": "proceedings/nvmt/1996/3510/0",
"title": "Proceedings of Nonvolatile Memory Technology Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2005/2459/0/24590024",
"title": "ASTOR: An Autostereoscopic Optical See-through Augmented Reality System",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2005/24590024/12OmNzd7bgV",
"parentPublication": {
"id": "proceedings/ismar/2005/2459/0",
"title": "Fourth IEEE and ACM International Symposium on Mixed and Augmented Reality (ISMAR'05)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tc/1975/04/01672829",
"title": "Ultrasonic Holographic Fourier Spectroscopy via Optical Fourier Transforms",
"doi": null,
"abstractUrl": "/journal/tc/1975/04/01672829/13rRUIIVlbc",
"parentPublication": {
"id": "trans/tc",
"title": "IEEE Transactions on Computers",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2022/9617/0/961700a553",
"title": "Sparse Nanophotonic Phased Arrays for Energy-Efficient Holographic Displays",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2022/961700a553/1CJczHyWyjK",
"parentPublication": {
"id": "proceedings/vr/2022/9617/0",
"title": "2022 IEEE on Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mipr/2022/9548/0/954800a246",
"title": "Comparison of Virtual-Real Integration Efficiency between Light Field and Conventional Near-Eye AR Displays",
"doi": null,
"abstractUrl": "/proceedings-article/mipr/2022/954800a246/1GvditqC14Q",
"parentPublication": {
"id": "proceedings/mipr/2022/9548/0",
"title": "2022 IEEE 5th International Conference on Multimedia Information Processing and Retrieval (MIPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2023/4815/0/481500a581",
"title": "HoloBeam: Paper-Thin Near-Eye Displays",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2023/481500a581/1MNgR9rZSCc",
"parentPublication": {
"id": "proceedings/vr/2023/4815/0",
"title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/11/09523842",
"title": "Gaze-Contingent Retinal Speckle Suppression for Perceptually-Matched Foveated Holographic Displays",
"doi": null,
"abstractUrl": "/journal/tg/2021/11/09523842/1wpqr1B6wA8",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1yfxDjRGMmc",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"acronym": "ismar-adjunct",
"groupId": "1810084",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1yfxNuG3Mju",
"doi": "10.1109/ISMAR-Adjunct54149.2021.00049",
"title": "Focus-Aware Retinal Projection-based Near-Eye Display",
"normalizedTitle": "Focus-Aware Retinal Projection-based Near-Eye Display",
"abstract": "The primary challenge in optical see-through near-eye displays lies in providing correct optical focus cues. Established approaches such as varifocal or light field display essentially sacrifice temporal or spatial resolution of the resulting 3D images. This paper explores a new direction to address the trade-off by combining a retinal projection display (RPD) with ocular wavefront sensing (OWS). Our core idea is to display a depth of field-simulated image on an RPD to produce visually consistent optical focus cues while maintaining the spatial and temporal resolution of the image. To obtain the current accommodation of the eye, we integrate OWS. We demonstrate that our proof-of-concept system successfully renders virtual contents with proper depth cues while covering the eye accommodation range from 28.5 cm (3.5 D) to infinity (0.0 D).",
"abstracts": [
{
"abstractType": "Regular",
"content": "The primary challenge in optical see-through near-eye displays lies in providing correct optical focus cues. Established approaches such as varifocal or light field display essentially sacrifice temporal or spatial resolution of the resulting 3D images. This paper explores a new direction to address the trade-off by combining a retinal projection display (RPD) with ocular wavefront sensing (OWS). Our core idea is to display a depth of field-simulated image on an RPD to produce visually consistent optical focus cues while maintaining the spatial and temporal resolution of the image. To obtain the current accommodation of the eye, we integrate OWS. We demonstrate that our proof-of-concept system successfully renders virtual contents with proper depth cues while covering the eye accommodation range from 28.5 cm (3.5 D) to infinity (0.0 D).",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The primary challenge in optical see-through near-eye displays lies in providing correct optical focus cues. Established approaches such as varifocal or light field display essentially sacrifice temporal or spatial resolution of the resulting 3D images. This paper explores a new direction to address the trade-off by combining a retinal projection display (RPD) with ocular wavefront sensing (OWS). Our core idea is to display a depth of field-simulated image on an RPD to produce visually consistent optical focus cues while maintaining the spatial and temporal resolution of the image. To obtain the current accommodation of the eye, we integrate OWS. We demonstrate that our proof-of-concept system successfully renders virtual contents with proper depth cues while covering the eye accommodation range from 28.5 cm (3.5 D) to infinity (0.0 D).",
"fno": "129800a207",
"keywords": [
"Eye",
"Image Resolution",
"Optical Focusing",
"Rendering Computer Graphics",
"Retinal Projection Display",
"RPD",
"Ocular Wavefront Sensing",
"OWS",
"Field Simulated Image",
"Visually Consistent Optical Focus Cues",
"Spatial Resolution",
"Temporal Resolution",
"Eye Accommodation Range",
"Focus Aware Retinal Projection Based Near Eye Display",
"Correct Optical Focus Cues",
"Varifocal Field Display",
"Light Field Display",
"Resulting 3 D Images",
"Virtual Contents Rendering",
"Size 28 5 Cm",
"Three Dimensional Displays",
"Optical Variables Measurement",
"Optical Imaging",
"Retina",
"Rendering Computer Graphics",
"Adaptive Optics",
"Light Fields",
"Augmented Reality",
"Accommodation Sensing",
"Retinal Projection",
"Near Eye Display"
],
"authors": [
{
"affiliation": "Tokyo Institute of Technology",
"fullName": "Mayu Kaneko",
"givenName": "Mayu",
"surname": "Kaneko",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Tokyo Institute of Technology",
"fullName": "Yuichi Hiroi",
"givenName": "Yuichi",
"surname": "Hiroi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "The University of Tokyo / RIKEN AIP",
"fullName": "Yuta Itoh",
"givenName": "Yuta",
"surname": "Itoh",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar-adjunct",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-10-01T00:00:00",
"pubType": "proceedings",
"pages": "207-208",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-1298-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [
{
"id": "1yfxNaBVVcI",
"name": "pismar-adjunct202112980-09585807s1-mm_129800a207.zip",
"size": "54.6 MB",
"location": "https://www.computer.org/csdl/api/v1/extra/pismar-adjunct202112980-09585807s1-mm_129800a207.zip",
"__typename": "WebExtraType"
}
],
"adjacentArticles": {
"previous": {
"fno": "129800a201",
"articleId": "1yfxMXu7XhK",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "129800a209",
"articleId": "1yeQYNSYkSY",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icme/2016/7258/0/07552965",
"title": "Content-adaptive focus configuration for near-eye multi-focal displays",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2016/07552965/12OmNAlNixT",
"parentPublication": {
"id": "proceedings/icme/2016/7258/0",
"title": "2016 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2000/0478/0/04780125",
"title": "Eye Mark Pointer in Immersive Projection Display",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2000/04780125/12OmNqH9her",
"parentPublication": {
"id": "proceedings/vr/2000/0478/0",
"title": "Virtual Reality Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wccct/2014/2877/0/2877a120",
"title": "Identification of Retinal Image Features Using Bitplane Separation and Mathematical Morphology",
"doi": null,
"abstractUrl": "/proceedings-article/wccct/2014/2877a120/12OmNy5hRc8",
"parentPublication": {
"id": "proceedings/wccct/2014/2877/0",
"title": "2014 World Congress on Computing and Communication Technologies (WCCCT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/07/07226865",
"title": "Resolving the Vergence-Accommodation Conflict in Head-Mounted Displays",
"doi": null,
"abstractUrl": "/journal/tg/2016/07/07226865/13rRUxASuhD",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2010/03/ttg2010030381",
"title": "A Novel Prototype for an Optical See-Through Head-Mounted Display with Addressable Focus Cues",
"doi": null,
"abstractUrl": "/journal/tg/2010/03/ttg2010030381/13rRUyYSWsN",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/05/08642529",
"title": "Manufacturing Application-Driven Foveated Near-Eye Displays",
"doi": null,
"abstractUrl": "/journal/tg/2019/05/08642529/17PYEjG6pn1",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/11/08794584",
"title": "Towards a Switchable AR/VR Near-eye Display with Accommodation-Vergence and Eyeglass Prescription Support",
"doi": null,
"abstractUrl": "/journal/tg/2019/11/08794584/1dNHlOrNW5W",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2020/8508/0/850800a080",
"title": "Can Retinal Projection Displays Improve Spatial Perception in Augmented Reality?",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2020/850800a080/1pysvYTZF6w",
"parentPublication": {
"id": "proceedings/ismar/2020/8508/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2020/8508/0/850800a312",
"title": "Towards Eyeglass-style Holographic Near-eye Displays with Statically",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2020/850800a312/1pysyaCOe76",
"parentPublication": {
"id": "proceedings/ismar/2020/8508/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/11/09523847",
"title": "Multifocal Stereoscopic Projection Mapping",
"doi": null,
"abstractUrl": "/journal/tg/2021/11/09523847/1wpqmNfLX9e",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNwdbV00",
"title": "2012 IEEE Conference on Computer Vision and Pattern Recognition",
"acronym": "cvpr",
"groupId": "1000147",
"volume": "0",
"displayVolume": "0",
"year": "2012",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNAJ4phI",
"doi": "10.1109/CVPR.2012.6247764",
"title": "An analysis of color demosaicing in plenoptic cameras",
"normalizedTitle": "An analysis of color demosaicing in plenoptic cameras",
"abstract": "A plenoptic camera captures the 4D radiance about a scene. Recent practical solutions mount a microlens array on top of a commodity SLR to directly acquire these rays. However, they suffer from low resolution as hundreds of thousands of views need to be captured in a single shot. In this paper, we develop a simple but effective technique for improving the image resolution of the plenoptic camera by maneuvering the demosaicing process. We first show that the traditional solution by demosaicing each individual microlens image and then blending them for view synthesis is suboptimal. In particular, this demosaicing process often suffers from aliasing artifacts, and it damages high frequency information recorded by each microlens image hence degrades the image quality. We instead propose to de-mosaic the synthesized view at the rendering stage. Specifically, we first transform the radiance to the desired focal plane and then apply frequency domain plenoptic resampling. A full resolution color filtered image is then created by performing a 2D integral projection from the reparam-eterized radiance. Finally, we conduct demosacing to obtain the color result. We show that our solution can achieve visible resolution enhancement on dynamic refocusing and depth-assisted deep focus rendering.",
"abstracts": [
{
"abstractType": "Regular",
"content": "A plenoptic camera captures the 4D radiance about a scene. Recent practical solutions mount a microlens array on top of a commodity SLR to directly acquire these rays. However, they suffer from low resolution as hundreds of thousands of views need to be captured in a single shot. In this paper, we develop a simple but effective technique for improving the image resolution of the plenoptic camera by maneuvering the demosaicing process. We first show that the traditional solution by demosaicing each individual microlens image and then blending them for view synthesis is suboptimal. In particular, this demosaicing process often suffers from aliasing artifacts, and it damages high frequency information recorded by each microlens image hence degrades the image quality. We instead propose to de-mosaic the synthesized view at the rendering stage. Specifically, we first transform the radiance to the desired focal plane and then apply frequency domain plenoptic resampling. A full resolution color filtered image is then created by performing a 2D integral projection from the reparam-eterized radiance. Finally, we conduct demosacing to obtain the color result. We show that our solution can achieve visible resolution enhancement on dynamic refocusing and depth-assisted deep focus rendering.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "A plenoptic camera captures the 4D radiance about a scene. Recent practical solutions mount a microlens array on top of a commodity SLR to directly acquire these rays. However, they suffer from low resolution as hundreds of thousands of views need to be captured in a single shot. In this paper, we develop a simple but effective technique for improving the image resolution of the plenoptic camera by maneuvering the demosaicing process. We first show that the traditional solution by demosaicing each individual microlens image and then blending them for view synthesis is suboptimal. In particular, this demosaicing process often suffers from aliasing artifacts, and it damages high frequency information recorded by each microlens image hence degrades the image quality. We instead propose to de-mosaic the synthesized view at the rendering stage. Specifically, we first transform the radiance to the desired focal plane and then apply frequency domain plenoptic resampling. A full resolution color filtered image is then created by performing a 2D integral projection from the reparam-eterized radiance. Finally, we conduct demosacing to obtain the color result. We show that our solution can achieve visible resolution enhancement on dynamic refocusing and depth-assisted deep focus rendering.",
"fno": "114P1C06",
"keywords": [
"Rendering Computer Graphics",
"Image Colour Analysis",
"Image Resolution",
"Image Segmentation",
"Microlenses",
"Reparameterized Radiance",
"Color Demosaicing",
"Plenoptic Cameras",
"4 D Radiance",
"Microlens Array",
"Commodity SLR",
"Image Resolution",
"Microlens Image",
"Image Quality",
"Rendering",
"Frequency Domain Plenoptic Resampling",
"Color Filtered Image",
"Lenses",
"Microoptics",
"Image Resolution",
"Image Color Analysis",
"Cameras",
"Arrays",
"Rendering Computer Graphics"
],
"authors": [
{
"affiliation": "Qualcomm, San Diego, CA, USA",
"fullName": "T. Georgiev",
"givenName": "T.",
"surname": "Georgiev",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Indiana Univ., Bloomington, IN, USA",
"fullName": "A. Lumsdaine",
"givenName": "A.",
"surname": "Lumsdaine",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Univ. of Delaware, Newark, DE, USA",
"fullName": "Jingyi Yu",
"givenName": null,
"surname": "Jingyi Yu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Univ. of Delaware, Newark, DE, USA",
"fullName": "Zhan Yu",
"givenName": null,
"surname": "Zhan Yu",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2012-06-01T00:00:00",
"pubType": "proceedings",
"pages": "901-908",
"year": "2012",
"issn": "1063-6919",
"isbn": "978-1-4673-1226-4",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "113P1C05",
"articleId": "12OmNrK9q4i",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "115P1C07",
"articleId": "12OmNqOwQCT",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iccp/2010/7023/0/05585092",
"title": "Rich image capture with plenoptic cameras",
"doi": null,
"abstractUrl": "/proceedings-article/iccp/2010/05585092/12OmNAYGls3",
"parentPublication": {
"id": "proceedings/iccp/2010/7023/0",
"title": "2010 IEEE International Conference on Computational Photography (ICCP 2010)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccp/2009/4534/0/05559008",
"title": "The focused plenoptic camera",
"doi": null,
"abstractUrl": "/proceedings-article/iccp/2009/05559008/12OmNqI04FS",
"parentPublication": {
"id": "proceedings/iccp/2009/4534/0",
"title": "IEEE International Conference on Computational Photography (ICCP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icig/2013/5050/0/5050a612",
"title": "All-in-Focus Image Reconstruction Based on Plenoptic Cameras",
"doi": null,
"abstractUrl": "/proceedings-article/icig/2013/5050a612/12OmNs0C9JA",
"parentPublication": {
"id": "proceedings/icig/2013/5050/0",
"title": "2013 Seventh International Conference on Image and Graphics (ICIG)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2015/7082/0/07177510",
"title": "Coding of plenoptic images by using a sparse set and disparities",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2015/07177510/12OmNvzJG8m",
"parentPublication": {
"id": "proceedings/icme/2015/7082/0",
"title": "2015 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccp/2012/1662/0/06215210",
"title": "Fourier Slice Super-resolution in plenoptic cameras",
"doi": null,
"abstractUrl": "/proceedings-article/iccp/2012/06215210/12OmNwEJ0Kt",
"parentPublication": {
"id": "proceedings/iccp/2012/1662/0",
"title": "2012 IEEE International Conference on Computational Photography (ICCP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2014/4308/0/4308a455",
"title": "Dictionary Learning Based Color Demosaicing for Plenoptic Cameras",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2014/4308a455/12OmNwFzO2X",
"parentPublication": {
"id": "proceedings/cvprw/2014/4308/0",
"title": "2014 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2011/01/mcg2011010062",
"title": "Using Focused Plenoptic Cameras for Rich Image Capture",
"doi": null,
"abstractUrl": "/magazine/cg/2011/01/mcg2011010062/13rRUyfKIKD",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2019/9552/0/955200a115",
"title": "Blind Calibration for Focused Plenoptic Cameras",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2019/955200a115/1cdOJf1lggo",
"parentPublication": {
"id": "proceedings/icme/2019/9552/0",
"title": "2019 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2020/1331/0/09102725",
"title": "Imaging-Correlated Intra Prediction for Plenoptic 2.0 Video Coding",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2020/09102725/1kwr4GDUR56",
"parentPublication": {
"id": "proceedings/icme/2020/1331/0",
"title": "2020 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2021/2688/0/268800b125",
"title": "Ray Tracing-Guided Design of Plenoptic Cameras",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2021/268800b125/1zWEpFekVbi",
"parentPublication": {
"id": "proceedings/3dv/2021/2688/0",
"title": "2021 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNB8Cj92",
"title": "2014 IEEE International Conference on Multimedia and Expo Workshops (ICMEW)",
"acronym": "icmew",
"groupId": "1801805",
"volume": "0",
"displayVolume": "0",
"year": "2014",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNBrlPAz",
"doi": "10.1109/ICMEW.2014.6890717",
"title": "Stereoacuity-guided depth image based rendering",
"normalizedTitle": "Stereoacuity-guided depth image based rendering",
"abstract": "This paper presents a novel method to improve the stereopsis and visual comfort of 3D images based on an idea that apply the stereoacuity prior to the process of synthesizing a virtual view by DIBR (Depth image based rendering) technique. DIBR technique is widely used in 3D film and TV programs. However, an emphatic issue is that the stereo video has a weak stereo-effect and viewing discomfort since without considering the true depth perception experience of human visual system (HVS) when directly observes a natural scene. Stereoacuity is a representation of the depth perception nature of HVS and the novel method will explain how the stereoacuity prior can be used to guide and optimize the process of DIBR. In this paper, the presented method does neither refer to the intrinsic nature of HVS nor try to construct an accurate computational model of stereoacuity. Instead, it is accomplished by using the contrast stretch method which is a nonlinear image processing operation to approximately simulate the distribution rule of disparities around the focal plane. Experimental results show that the proposed method provides a more precise depth perception around the focal plane, as is consistent with the nature of HVS, and thus the viewers are able to enjoy a much more comfort 3D experience.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper presents a novel method to improve the stereopsis and visual comfort of 3D images based on an idea that apply the stereoacuity prior to the process of synthesizing a virtual view by DIBR (Depth image based rendering) technique. DIBR technique is widely used in 3D film and TV programs. However, an emphatic issue is that the stereo video has a weak stereo-effect and viewing discomfort since without considering the true depth perception experience of human visual system (HVS) when directly observes a natural scene. Stereoacuity is a representation of the depth perception nature of HVS and the novel method will explain how the stereoacuity prior can be used to guide and optimize the process of DIBR. In this paper, the presented method does neither refer to the intrinsic nature of HVS nor try to construct an accurate computational model of stereoacuity. Instead, it is accomplished by using the contrast stretch method which is a nonlinear image processing operation to approximately simulate the distribution rule of disparities around the focal plane. Experimental results show that the proposed method provides a more precise depth perception around the focal plane, as is consistent with the nature of HVS, and thus the viewers are able to enjoy a much more comfort 3D experience.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper presents a novel method to improve the stereopsis and visual comfort of 3D images based on an idea that apply the stereoacuity prior to the process of synthesizing a virtual view by DIBR (Depth image based rendering) technique. DIBR technique is widely used in 3D film and TV programs. However, an emphatic issue is that the stereo video has a weak stereo-effect and viewing discomfort since without considering the true depth perception experience of human visual system (HVS) when directly observes a natural scene. Stereoacuity is a representation of the depth perception nature of HVS and the novel method will explain how the stereoacuity prior can be used to guide and optimize the process of DIBR. In this paper, the presented method does neither refer to the intrinsic nature of HVS nor try to construct an accurate computational model of stereoacuity. Instead, it is accomplished by using the contrast stretch method which is a nonlinear image processing operation to approximately simulate the distribution rule of disparities around the focal plane. Experimental results show that the proposed method provides a more precise depth perception around the focal plane, as is consistent with the nature of HVS, and thus the viewers are able to enjoy a much more comfort 3D experience.",
"fno": "06890717",
"keywords": [
"Three Dimensional Displays",
"Visualization",
"Dynamic Range",
"Stereo Image Processing",
"Rendering Computer Graphics",
"Visual Systems",
"Optical Imaging",
"Stereopsis Enhancement",
"Stereoacuity",
"Virtual View Synthesis",
"DIBR",
"Visual Comfort"
],
"authors": [
{
"affiliation": "School of Electronic Science and Engineering, Nanjing University, Jiangsu 210023, China",
"fullName": "Jinjie Xu",
"givenName": "Jinjie",
"surname": "Xu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "School of Electronic Science and Engineering, Nanjing University, Jiangsu 210023, China",
"fullName": "Feng Yan",
"givenName": "Feng",
"surname": "Yan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "School of Electronic Science and Engineering, Nanjing University, Jiangsu 210023, China",
"fullName": "Xun Cao",
"givenName": "Xun",
"surname": "Cao",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icmew",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2014-07-01T00:00:00",
"pubType": "proceedings",
"pages": "1-6",
"year": "2014",
"issn": "1945-7871",
"isbn": "978-1-4799-4717-1",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "06890716",
"articleId": "12OmNBA9oBi",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "06890718",
"articleId": "12OmNwlHSZp",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cyberc/2015/9200/0/9200a324",
"title": "Real-Time Depth-Image-Based Rendering on GPU",
"doi": null,
"abstractUrl": "/proceedings-article/cyberc/2015/9200a324/12OmNBCqbGM",
"parentPublication": {
"id": "proceedings/cyberc/2015/9200/0",
"title": "2015 International Conference on Cyber-Enabled Distributed Computing and Knowledge Discovery (CyberC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icccnt/2013/3926/0/06726473",
"title": "Protection of depth-image-based rendering 3D images using blind watermarking",
"doi": null,
"abstractUrl": "/proceedings-article/icccnt/2013/06726473/12OmNBU1jMB",
"parentPublication": {
"id": "proceedings/icccnt/2013/3926/0",
"title": "2013 Fourth International Conference on Computing, Communications and Networking Technologies (ICCCNT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2017/6067/0/08019542",
"title": "Quality assessment of multi-view-plus-depth images",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2017/08019542/12OmNBfZSmq",
"parentPublication": {
"id": "proceedings/icme/2017/6067/0",
"title": "2017 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2005/9331/0/01521671",
"title": "Efficient Depth Image Based Rendering with Edge Dependent Depth Filter and Interpolation",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2005/01521671/12OmNCeaPXz",
"parentPublication": {
"id": "proceedings/icme/2005/9331/0",
"title": "2005 IEEE International Conference on Multimedia and Expo",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icig/2011/4541/0/4541a254",
"title": "Improvement of Virtual View Rendering Based on Depth Image",
"doi": null,
"abstractUrl": "/proceedings-article/icig/2011/4541a254/12OmNCgJecv",
"parentPublication": {
"id": "proceedings/icig/2011/4541/0",
"title": "Image and Graphics, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmew/2012/2027/0/06266452",
"title": "Depth Map Super-Resolution Using Synthesized View Matching for Depth-Image-Based Rendering",
"doi": null,
"abstractUrl": "/proceedings-article/icmew/2012/06266452/12OmNs59JV0",
"parentPublication": {
"id": "proceedings/icmew/2012/2027/0",
"title": "2012 IEEE International Conference on Multimedia & Expo Workshops (ICMEW 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fgcns/2008/3546/3/3546c031",
"title": "A Novel Approach to Depth Image Based Rendering Based on Non-Uniform Scaling of Depth Values",
"doi": null,
"abstractUrl": "/proceedings-article/fgcns/2008/3546c031/12OmNwJybNz",
"parentPublication": {
"id": "proceedings/fgcns/2008/3546/3",
"title": "Future Generation Communication and Networking Symposia, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2011/348/0/06011849",
"title": "Sift-based improvement of depth imagery",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2011/06011849/12OmNwlHT1T",
"parentPublication": {
"id": "proceedings/icme/2011/348/0",
"title": "2011 IEEE International Conference on Multimedia and Expo",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2010/7491/0/05583559",
"title": "Depth image based rendering with advanced texture synthesis",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2010/05583559/12OmNylbov7",
"parentPublication": {
"id": "proceedings/icme/2010/7491/0",
"title": "2010 IEEE International Conference on Multimedia and Expo",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798283",
"title": "Hybrid Mono-Stereo Rendering in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798283/1cJ11Kv4Dn2",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1BmEezmpGrm",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"acronym": "iccv",
"groupId": "1000149",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1BmGlV9Kg6c",
"doi": "10.1109/ICCV48922.2021.01245",
"title": "NeRD: Neural Reflectance Decomposition from Image Collections",
"normalizedTitle": "NeRD: Neural Reflectance Decomposition from Image Collections",
"abstract": "Decomposing a scene into its shape, reflectance, and illumination is a challenging but important problem in computer vision and graphics. This problem is inherently more challenging when the illumination is not a single light source under laboratory conditions but is instead an unconstrained environmental illumination. Though recent work has shown that implicit representations can be used to model the radiance field of an object, most of these techniques only enable view synthesis and not relighting. Additionally, evaluating these radiance fields is resource and time-intensive. We propose a neural reflectance decomposition (NeRD) technique that uses physically-based rendering to decompose the scene into spatially varying BRDF material properties. In contrast to existing techniques, our input images can be captured under different illumination conditions. In addition, we also propose techniques to convert the learned reflectance volume into a relightable textured mesh enabling fast real-time rendering with novel illuminations. We demonstrate the potential of the proposed approach with experiments on both synthetic and real datasets, where we are able to obtain high-quality relightable 3D assets from image collections. The datasets and code are available at the project page: https://markboss.me/publication/2021-nerd/.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Decomposing a scene into its shape, reflectance, and illumination is a challenging but important problem in computer vision and graphics. This problem is inherently more challenging when the illumination is not a single light source under laboratory conditions but is instead an unconstrained environmental illumination. Though recent work has shown that implicit representations can be used to model the radiance field of an object, most of these techniques only enable view synthesis and not relighting. Additionally, evaluating these radiance fields is resource and time-intensive. We propose a neural reflectance decomposition (NeRD) technique that uses physically-based rendering to decompose the scene into spatially varying BRDF material properties. In contrast to existing techniques, our input images can be captured under different illumination conditions. In addition, we also propose techniques to convert the learned reflectance volume into a relightable textured mesh enabling fast real-time rendering with novel illuminations. We demonstrate the potential of the proposed approach with experiments on both synthetic and real datasets, where we are able to obtain high-quality relightable 3D assets from image collections. The datasets and code are available at the project page: https://markboss.me/publication/2021-nerd/.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Decomposing a scene into its shape, reflectance, and illumination is a challenging but important problem in computer vision and graphics. This problem is inherently more challenging when the illumination is not a single light source under laboratory conditions but is instead an unconstrained environmental illumination. Though recent work has shown that implicit representations can be used to model the radiance field of an object, most of these techniques only enable view synthesis and not relighting. Additionally, evaluating these radiance fields is resource and time-intensive. We propose a neural reflectance decomposition (NeRD) technique that uses physically-based rendering to decompose the scene into spatially varying BRDF material properties. In contrast to existing techniques, our input images can be captured under different illumination conditions. In addition, we also propose techniques to convert the learned reflectance volume into a relightable textured mesh enabling fast real-time rendering with novel illuminations. We demonstrate the potential of the proposed approach with experiments on both synthetic and real datasets, where we are able to obtain high-quality relightable 3D assets from image collections. The datasets and code are available at the project page: https://markboss.me/publication/2021-nerd/.",
"fno": "281200m2664",
"keywords": [
"Reflectivity",
"Computer Vision",
"Three Dimensional Displays",
"Shape",
"Computational Modeling",
"Lighting",
"Rendering Computer Graphics",
"3 D From A Single Image And Shape From X",
"Stereo",
"3 D From Multiview And Other Sensors"
],
"authors": [
{
"affiliation": "University of Tübingen",
"fullName": "Mark Boss",
"givenName": "Mark",
"surname": "Boss",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Tübingen",
"fullName": "Raphael Braun",
"givenName": "Raphael",
"surname": "Braun",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Google Research",
"fullName": "Varun Jampani",
"givenName": "Varun",
"surname": "Jampani",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Google Research",
"fullName": "Jonathan T. Barron",
"givenName": "Jonathan T.",
"surname": "Barron",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Google Research",
"fullName": "Ce Liu",
"givenName": "Ce",
"surname": "Liu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Tübingen",
"fullName": "Hendrik P.A. Lensch",
"givenName": "Hendrik P.A.",
"surname": "Lensch",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iccv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-10-01T00:00:00",
"pubType": "proceedings",
"pages": "12664-12674",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-2812-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "281200m2654",
"articleId": "1BmJ9U7w5A4",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "281200m2675",
"articleId": "1BmHVmOWiic",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cvpr/2009/3992/0/05206753",
"title": "Relighting objects from image collections",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2009/05206753/12OmNBOCWfB",
"parentPublication": {
"id": "proceedings/cvpr/2009/3992/0",
"title": "2009 IEEE Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/1999/0164/2/00790314",
"title": "Illumination distribution from brightness in shadows: Adaptive estimation of illumination distribution with unknown reflectance properties in shadow regions",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/1999/00790314/12OmNvAiSCT",
"parentPublication": {
"id": "proceedings/iccv/1999/0164/2",
"title": "Proceedings of the Seventh IEEE International Conference on Computer Vision",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cadgraphics/2011/4497/0/4497a450",
"title": "Single Image Based Illumination Estimation for Lighting Virtual Object in Real Scene",
"doi": null,
"abstractUrl": "/proceedings-article/cadgraphics/2011/4497a450/12OmNx0RIM6",
"parentPublication": {
"id": "proceedings/cadgraphics/2011/4497/0",
"title": "Computer-Aided Design and Computer Graphics, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2009/4420/0/05459239",
"title": "Image segmentation with simultaneous illumination and reflectance estimation: An energy minimization approach",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2009/05459239/12OmNzlUKIf",
"parentPublication": {
"id": "proceedings/iccv/2009/4420/0",
"title": "2009 IEEE 12th International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/1993/3880/0/00341163",
"title": "Diffuse reflectance from rough surfaces",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/1993/00341163/12OmNzwpU3S",
"parentPublication": {
"id": "proceedings/cvpr/1993/3880/0",
"title": "Proceedings of IEEE Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2022/05/09712406",
"title": "Predicting Surface Reflectance Properties of Outdoor Scenes Under Unknown Natural Illumination",
"doi": null,
"abstractUrl": "/magazine/cg/2022/05/09712406/1AZLEpMIeME",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200m2799",
"title": "Towards High Fidelity Monocular Face Reconstruction with Rich Reflectance using Self-supervised Learning and Ray Tracing",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200m2799/1BmJb3RcOGY",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2019/4803/0/480300i597",
"title": "Neural Inverse Rendering of an Indoor Scene From a Single Image",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2019/480300i597/1hVlOrVOpck",
"parentPublication": {
"id": "proceedings/iccv/2019/4803/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900f449",
"title": "PhySG: Inverse Rendering with Spherical Gaussians for Physics-based Material Editing and Relighting",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900f449/1yeIKNwhdsI",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900h491",
"title": "NeRV: Neural Reflectance and Visibility Fields for Relighting and View Synthesis",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900h491/1yeJnjys7Is",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1H1gVMlkl32",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"acronym": "cvpr",
"groupId": "1000147",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1H0KCZGVSPm",
"doi": "10.1109/CVPR52688.2022.00380",
"title": "Focal Length and Object Pose Estimation via Render and Compare",
"normalizedTitle": "Focal Length and Object Pose Estimation via Render and Compare",
"abstract": "We introduce FocalPose, a neural render-and-compare method for jointly estimating the camera-object 6D pose and camera focal length given a single RGB input image depicting a known object. The contributions of this work are twofold. First, we derive a focal length update rule that extends an existing state-of-the-art render-and-compare 6D pose estimator to address the joint estimation task. Second, we investigate several different loss functions for jointly estimating the object pose and focal length. We find that a combination of direct focal length regression with a reprojection loss disentangling the contribution of translation, rotation, and focal length leads to improved results. We show results on three challenging benchmark datasets that depict known 3D models in uncontrolled settings. We demonstrate that our focal length and 6D pose estimates have lower error than the existing state-of-the-art methods.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We introduce FocalPose, a neural render-and-compare method for jointly estimating the camera-object 6D pose and camera focal length given a single RGB input image depicting a known object. The contributions of this work are twofold. First, we derive a focal length update rule that extends an existing state-of-the-art render-and-compare 6D pose estimator to address the joint estimation task. Second, we investigate several different loss functions for jointly estimating the object pose and focal length. We find that a combination of direct focal length regression with a reprojection loss disentangling the contribution of translation, rotation, and focal length leads to improved results. We show results on three challenging benchmark datasets that depict known 3D models in uncontrolled settings. We demonstrate that our focal length and 6D pose estimates have lower error than the existing state-of-the-art methods.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We introduce FocalPose, a neural render-and-compare method for jointly estimating the camera-object 6D pose and camera focal length given a single RGB input image depicting a known object. The contributions of this work are twofold. First, we derive a focal length update rule that extends an existing state-of-the-art render-and-compare 6D pose estimator to address the joint estimation task. Second, we investigate several different loss functions for jointly estimating the object pose and focal length. We find that a combination of direct focal length regression with a reprojection loss disentangling the contribution of translation, rotation, and focal length leads to improved results. We show results on three challenging benchmark datasets that depict known 3D models in uncontrolled settings. We demonstrate that our focal length and 6D pose estimates have lower error than the existing state-of-the-art methods.",
"fno": "694600d815",
"keywords": [
"Cameras",
"Image Colour Analysis",
"Pose Estimation",
"Rendering Computer Graphics",
"Camera Focal Length",
"Single RGB Input Image",
"Known Object",
"Focal Length Update Rule",
"Existing State Of The Art Render",
"Joint Estimation Task",
"Direct Focal Length",
"6 D Pose Estimates",
"Object Pose Estimation",
"Neural Render",
"Compare Method",
"Training",
"Solid Modeling",
"Three Dimensional Displays",
"Pose Estimation",
"Cameras",
"Rendering Computer Graphics",
"Pattern Recognition"
],
"authors": [
{
"affiliation": "LIGM, École des Ponts, Univ Gustave Eiffel, CNRS",
"fullName": "Georgy Ponimatkin",
"givenName": "Georgy",
"surname": "Ponimatkin",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "ENS/Inria",
"fullName": "Yann Labbé",
"givenName": "Yann",
"surname": "Labbé",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Adobe Research",
"fullName": "Bryan Russell",
"givenName": "Bryan",
"surname": "Russell",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "LIGM, École des Ponts, Univ Gustave Eiffel, CNRS",
"fullName": "Mathieu Aubry",
"givenName": "Mathieu",
"surname": "Aubry",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "CIIRC CTU",
"fullName": "Josef Sivic",
"givenName": "Josef",
"surname": "Sivic",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-06-01T00:00:00",
"pubType": "proceedings",
"pages": "3815-3824",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-6946-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [
{
"id": "1H0KCTlvQxW",
"name": "pcvpr202269460-09879557s1-mm_694600d815.zip",
"size": "19.4 MB",
"location": "https://www.computer.org/csdl/api/v1/extra/pcvpr202269460-09879557s1-mm_694600d815.zip",
"__typename": "WebExtraType"
}
],
"adjacentArticles": {
"previous": {
"fno": "694600d804",
"articleId": "1H0OlF7Jwys",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "694600d825",
"articleId": "1H1muC7wD0Q",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iccv/2013/2840/0/2840a529",
"title": "Pose Estimation with Unknown Focal Length Using Points, Directions and Lines",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2013/2840a529/12OmNxw5BwV",
"parentPublication": {
"id": "proceedings/iccv/2013/2840/0",
"title": "2013 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2015/6964/0/07298858",
"title": "P3.5P: Pose estimation with unknown focal length",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2015/07298858/12OmNyp9Mol",
"parentPublication": {
"id": "proceedings/cvpr/2015/6964/0",
"title": "2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2013/10/ttp2013102387",
"title": "Exhaustive Linearization for Robust Camera Pose and Focal Length Estimation",
"doi": null,
"abstractUrl": "/journal/tp/2013/10/ttp2013102387/13rRUILLkER",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2019/4803/0/480300c222",
"title": "GP2C: Geometric Projection Parameter Consensus for Joint 3D Pose and Focal Length Estimation in the Wild",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2019/480300c222/1hQquqjcsDe",
"parentPublication": {
"id": "proceedings/iccv/2019/4803/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2019/5023/0/502300c777",
"title": "Unsupervised Joint 3D Object Model Learning and 6D Pose Estimation for Depth-Based Instance Segmentation",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2019/502300c777/1i5mKggU8Sc",
"parentPublication": {
"id": "proceedings/iccvw/2019/5023/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision Workshop (ICCVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800l1451",
"title": "PFRL: Pose-Free Reinforcement Learning for 6D Pose Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800l1451/1m3nHGv0jMA",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800h043",
"title": "Minimal Solutions to Relative Pose Estimation From Two Views Sharing a Common Direction With Unknown Focal Length",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800h043/1m3ooY0KPO8",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800k0707",
"title": "LatentFusion: End-to-End Differentiable Reconstruction and Rendering for Unseen Object Pose Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800k0707/1m3oqm1dfm8",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900b654",
"title": "Single-view robot pose and joint angle estimation via render & compare",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900b654/1yeIjUcLXxe",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/5555/01/09655492",
"title": "Occlusion-Aware Self-Supervised Monocular 6D Object Pose Estimation",
"doi": null,
"abstractUrl": "/journal/tp/5555/01/09655492/1zpnDrdJAty",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1hQqfuoOyHu",
"title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)",
"acronym": "iccv",
"groupId": "1000149",
"volume": "0",
"displayVolume": "0",
"year": "2019",
"__typename": "ProceedingType"
},
"article": {
"id": "1hVlzJY2Ft6",
"doi": "10.1109/ICCV.2019.00547",
"title": "Object-Driven Multi-Layer Scene Decomposition From a Single Image",
"normalizedTitle": "Object-Driven Multi-Layer Scene Decomposition From a Single Image",
"abstract": "We present a method that tackles the challenge of predicting color and depth behind the visible content of an image. Our approach aims at building up a Layered Depth Image (LDI) from a single RGB input, which is an efficient representation that arranges the scene in layers, including originally occluded regions. Unlike previous work, we enable an adaptive scheme for the number of layers and incorporate semantic encoding for better hallucination of partly occluded objects. Additionally, our approach is object-driven, which especially boosts the accuracy for the occluded intermediate objects. The framework consists of two steps. First, we individually complete each object in terms of color and depth, while estimating the scene layout. Second, we rebuild the scene based on the regressed layers and enforce the recomposed image to resemble the structure of the original input. The learned representation enables various applications, such as 3D photography and diminished reality, all from a single RGB image.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We present a method that tackles the challenge of predicting color and depth behind the visible content of an image. Our approach aims at building up a Layered Depth Image (LDI) from a single RGB input, which is an efficient representation that arranges the scene in layers, including originally occluded regions. Unlike previous work, we enable an adaptive scheme for the number of layers and incorporate semantic encoding for better hallucination of partly occluded objects. Additionally, our approach is object-driven, which especially boosts the accuracy for the occluded intermediate objects. The framework consists of two steps. First, we individually complete each object in terms of color and depth, while estimating the scene layout. Second, we rebuild the scene based on the regressed layers and enforce the recomposed image to resemble the structure of the original input. The learned representation enables various applications, such as 3D photography and diminished reality, all from a single RGB image.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We present a method that tackles the challenge of predicting color and depth behind the visible content of an image. Our approach aims at building up a Layered Depth Image (LDI) from a single RGB input, which is an efficient representation that arranges the scene in layers, including originally occluded regions. Unlike previous work, we enable an adaptive scheme for the number of layers and incorporate semantic encoding for better hallucination of partly occluded objects. Additionally, our approach is object-driven, which especially boosts the accuracy for the occluded intermediate objects. The framework consists of two steps. First, we individually complete each object in terms of color and depth, while estimating the scene layout. Second, we rebuild the scene based on the regressed layers and enforce the recomposed image to resemble the structure of the original input. The learned representation enables various applications, such as 3D photography and diminished reality, all from a single RGB image.",
"fno": "480300f368",
"keywords": [
"Computer Vision",
"Image Colour Analysis",
"Image Reconstruction",
"Image Representation",
"Image Resolution",
"Learning Artificial Intelligence",
"Object Recognition",
"Regression Analysis",
"Layered Depth Image",
"Single RGB Input",
"Originally Occluded Regions",
"Adaptive Scheme",
"Semantic Encoding",
"Partly Occluded Objects",
"Occluded Intermediate Objects",
"Scene Layout",
"Regressed Layers",
"Recomposed Image",
"Original Input",
"Single RGB Image",
"Object Driven Multilayer Scene Decomposition",
"Single Image",
"Visible Content",
"LDI",
"3 D Photography",
"Three Dimensional Displays",
"Layout",
"Semantics",
"Color",
"Solid Modeling",
"Rendering Computer Graphics",
"Image Color Analysis"
],
"authors": [
{
"affiliation": "Technical University of Munich",
"fullName": "Helisa Dhamo",
"givenName": "Helisa",
"surname": "Dhamo",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "TU Munich. Germany",
"fullName": "Nassir Navab",
"givenName": "Nassir",
"surname": "Navab",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "TUM. Google",
"fullName": "Federico Tombari",
"givenName": "Federico",
"surname": "Tombari",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iccv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2019-10-01T00:00:00",
"pubType": "proceedings",
"pages": "5368-5377",
"year": "2019",
"issn": null,
"isbn": "978-1-7281-4803-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "480300f358",
"articleId": "1hQqmw36PUA",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "480300f378",
"articleId": "1hVlsyIvyuI",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cvpr/2012/1226/0/007P1A07",
"title": "Scene warping: Layer-based stereoscopic image resizing",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2012/007P1A07/12OmNAiFI8D",
"parentPublication": {
"id": "proceedings/cvpr/2012/1226/0",
"title": "2012 IEEE Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cad-graphics/2013/2576/0/06814989",
"title": "Screen-Space Ambient Occlusion Using A-Buffer Techniques",
"doi": null,
"abstractUrl": "/proceedings-article/cad-graphics/2013/06814989/12OmNAs2tqk",
"parentPublication": {
"id": "proceedings/cad-graphics/2013/2576/0",
"title": "2013 International Conference on Computer-Aided Design and Computer Graphics (CAD/Graphics)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2009/4420/0/05459382",
"title": "Factorizing Scene Albedo and Depth from a Single Foggy Image",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2009/05459382/12OmNzTYBSS",
"parentPublication": {
"id": "proceedings/iccv/2009/4420/0",
"title": "2009 IEEE 12th International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2017/1032/0/1032c262",
"title": "Learning to Synthesize a 4D RGBD Light Field from a Single Image",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2017/1032c262/12OmNzmclkx",
"parentPublication": {
"id": "proceedings/iccv/2017/1032/0",
"title": "2017 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2016/04/07115131",
"title": "Intrinsic Scene Properties from a Single RGB-D Image",
"doi": null,
"abstractUrl": "/journal/tp/2016/04/07115131/13rRUNvyamf",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/07/07938419",
"title": "Layered Scene Models from Single Hazy Images",
"doi": null,
"abstractUrl": "/journal/tg/2018/07/07938419/13rRUygT7sL",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/05/08643583",
"title": "A Perception-driven Hybrid Decomposition for Multi-layer Accommodative Displays",
"doi": null,
"abstractUrl": "/journal/tg/2019/05/08643583/18K0hdQEpoI",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/10045018",
"title": "Monocular Depth Decomposition of Semi-Transparent Volume Renderings",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/10045018/1KMLV0zTt1m",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2019/3293/0/329300h695",
"title": "Neural Scene Decomposition for Multi-Person Motion Capture",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2019/329300h695/1gyrSCQUIzm",
"parentPublication": {
"id": "proceedings/cvpr/2019/3293/0",
"title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800d753",
"title": "End-to-End Optimization of Scene Layout",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800d753/1m3ooUhHlVC",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "13bd1eJgoia",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2018",
"__typename": "ProceedingType"
},
"article": {
"id": "13bd1sv5NxY",
"doi": "10.1109/VR.2018.8446441",
"title": "BrightView: Increasing Perceived Brightness of Optical See-Through Head-Mounted Displays Through Unnoticeable Incident Light Reduction",
"normalizedTitle": "BrightView: Increasing Perceived Brightness of Optical See-Through Head-Mounted Displays Through Unnoticeable Incident Light Reduction",
"abstract": "Optical See-Through Head-Mounted Displays (OST-HMDs) lose the visibility of virtual contents under bright environment illumination due to their see-through nature. We demonstrate how a liquid crystal (LC) filter attached to an OST-HMD can be used to dynamically increase the perceived brightness of virtual content without impacting the perceived brightness of the real scene. We present a prototype OST-HMD that continuously adjusts the opacity of the LC filter to attenuate the environment light without users becoming aware of the change. Consequently, virtual content appears to be brighter. The proposed approach is evaluated in psychophysical experiments in three scenes, with 16, 31, and 31 participants, respectively. The participants were asked to compare the magnitude of brightness changes of both real and virtual objects, before and after dimming the LC filter over a period of 5, 10, and 20 seconds. The results showed that the participants felt increases in the brightness of virtual objects while they were less conscious of reductions of the real scene luminance. These results provide evidence for the effectiveness of our display design. Our design can be applied to a wide range of OST-HMDs to improve the brightness and hence realism of virtual content in augmented reality applications.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Optical See-Through Head-Mounted Displays (OST-HMDs) lose the visibility of virtual contents under bright environment illumination due to their see-through nature. We demonstrate how a liquid crystal (LC) filter attached to an OST-HMD can be used to dynamically increase the perceived brightness of virtual content without impacting the perceived brightness of the real scene. We present a prototype OST-HMD that continuously adjusts the opacity of the LC filter to attenuate the environment light without users becoming aware of the change. Consequently, virtual content appears to be brighter. The proposed approach is evaluated in psychophysical experiments in three scenes, with 16, 31, and 31 participants, respectively. The participants were asked to compare the magnitude of brightness changes of both real and virtual objects, before and after dimming the LC filter over a period of 5, 10, and 20 seconds. The results showed that the participants felt increases in the brightness of virtual objects while they were less conscious of reductions of the real scene luminance. These results provide evidence for the effectiveness of our display design. Our design can be applied to a wide range of OST-HMDs to improve the brightness and hence realism of virtual content in augmented reality applications.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Optical See-Through Head-Mounted Displays (OST-HMDs) lose the visibility of virtual contents under bright environment illumination due to their see-through nature. We demonstrate how a liquid crystal (LC) filter attached to an OST-HMD can be used to dynamically increase the perceived brightness of virtual content without impacting the perceived brightness of the real scene. We present a prototype OST-HMD that continuously adjusts the opacity of the LC filter to attenuate the environment light without users becoming aware of the change. Consequently, virtual content appears to be brighter. The proposed approach is evaluated in psychophysical experiments in three scenes, with 16, 31, and 31 participants, respectively. The participants were asked to compare the magnitude of brightness changes of both real and virtual objects, before and after dimming the LC filter over a period of 5, 10, and 20 seconds. The results showed that the participants felt increases in the brightness of virtual objects while they were less conscious of reductions of the real scene luminance. These results provide evidence for the effectiveness of our display design. Our design can be applied to a wide range of OST-HMDs to improve the brightness and hence realism of virtual content in augmented reality applications.",
"fno": "08446441",
"keywords": [
"Augmented Reality",
"Brightness",
"Helmet Mounted Displays",
"Perceived Brightness",
"See Through Head Mounted Displays",
"Unnoticeable Incident Light Reduction",
"Virtual Content",
"Bright Environment Illumination",
"Liquid Crystal Filter",
"Prototype OST HMD",
"LC Filter",
"Environment Light",
"Brightness Changes",
"Virtual Objects",
"Display Design",
"Bright View",
"Augmented Reality Applications",
"Brightness",
"Visualization",
"Prototypes",
"Lighting",
"Retina",
"Optical Imaging",
"Optical Attenuators",
"Human Centered Computing Human Computer Interaction HCI Interaction Paradigms Mixed Augmented Reality Interaction Devices Displays And Imagers"
],
"authors": [
{
"affiliation": "Keio University",
"fullName": "Shohei Mori",
"givenName": "Shohei",
"surname": "Mori",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Ritsumeikan University",
"fullName": "Sei Ikeda",
"givenName": "Sei",
"surname": "Ikeda",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Nara Institute of Science and Technology",
"fullName": "Alexander Plopski",
"givenName": "Alexander",
"surname": "Plopski",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Nara Institute of Science and Technology",
"fullName": "Christian Sandor",
"givenName": "Christian",
"surname": "Sandor",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2018-03-01T00:00:00",
"pubType": "proceedings",
"pages": "251-258",
"year": "2018",
"issn": null,
"isbn": "978-1-5386-3365-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08447557",
"articleId": "13bd1fZBGbP",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08448287",
"articleId": "13bd1fWcuDr",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ismar-adjunct/2017/6327/0/6327a202",
"title": "[POSTER] BrightView: Increasing Perceived Brightness in Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2017/6327a202/12OmNqI04YU",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2017/6327/0",
"title": "2017 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446052",
"title": "Casting Virtual Shadows Based on Brightness Induction for Optical See-Through Displays",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446052/13bd1hyoTyc",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/09/08052554",
"title": "A Survey of Calibration Methods for Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/journal/tg/2018/09/08052554/13rRUILtJqY",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2015/11/07165643",
"title": "Semi-Parametric Color Reproduction Method for Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/journal/tg/2015/11/07165643/13rRUILtJzB",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/11/07523375",
"title": "Gaussian Light Field: Estimation of Viewpoint-Dependent Blur for Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/journal/tg/2016/11/07523375/13rRUxYINfi",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/05/08676153",
"title": "Light Attenuation Display: Subtractive See-Through Near-Eye Display via Spatial Color Filtering",
"doi": null,
"abstractUrl": "/journal/tg/2019/05/08676153/18LFbQfp6x2",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/05/08676155",
"title": "Varifocal Occlusion for Optical See-Through Head-Mounted Displays using a Slide Occlusion Mask",
"doi": null,
"abstractUrl": "/journal/tg/2019/05/08676155/18LFfGhc49i",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a409",
"title": "Adapting Michelson Contrast for use with Optical See-Through Displays",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a409/1J7WpecpAwU",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a389",
"title": "Objective Measurements of Background Color Shifts Caused by Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a389/1J7WuL68jAY",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/12/09463728",
"title": "Color Contrast Enhanced Rendering for Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/journal/tg/2022/12/09463728/1uFxo1ImlpK",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1J7W6LmbCw0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"acronym": "ismar-adjunct",
"groupId": "9973799",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1J7WpecpAwU",
"doi": "10.1109/ISMAR-Adjunct57072.2022.00088",
"title": "Adapting Michelson Contrast for use with Optical See-Through Displays",
"normalizedTitle": "Adapting Michelson Contrast for use with Optical See-Through Displays",
"abstract": "Due to the additive light model employed by current optical see-through head-mounted displays (OST-HMDs), the perceived contrast of displayed imagery is reduced with increased environment lumi-nance, often to the point where it becomes difficult for the user to accurately distinguish the presence of visual imagery. While existing contrast models, such as Weber contrast and Michelson contrast, can be used to predict when the observer will experience difficulty distinguishing and interpreting stimuli on traditional dis-plays, these models must be adapted for use with additive displays. In this paper, we present a simplified model of luminance contrast for optical see-through displays derived from Michelson's contrast equation and demonstrate two applications of the model: informing design decisions involving the color of virtual imagery and optimizing environment light attenuation through the use of neutral density filters.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Due to the additive light model employed by current optical see-through head-mounted displays (OST-HMDs), the perceived contrast of displayed imagery is reduced with increased environment lumi-nance, often to the point where it becomes difficult for the user to accurately distinguish the presence of visual imagery. While existing contrast models, such as Weber contrast and Michelson contrast, can be used to predict when the observer will experience difficulty distinguishing and interpreting stimuli on traditional dis-plays, these models must be adapted for use with additive displays. In this paper, we present a simplified model of luminance contrast for optical see-through displays derived from Michelson's contrast equation and demonstrate two applications of the model: informing design decisions involving the color of virtual imagery and optimizing environment light attenuation through the use of neutral density filters.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Due to the additive light model employed by current optical see-through head-mounted displays (OST-HMDs), the perceived contrast of displayed imagery is reduced with increased environment lumi-nance, often to the point where it becomes difficult for the user to accurately distinguish the presence of visual imagery. While existing contrast models, such as Weber contrast and Michelson contrast, can be used to predict when the observer will experience difficulty distinguishing and interpreting stimuli on traditional dis-plays, these models must be adapted for use with additive displays. In this paper, we present a simplified model of luminance contrast for optical see-through displays derived from Michelson's contrast equation and demonstrate two applications of the model: informing design decisions involving the color of virtual imagery and optimizing environment light attenuation through the use of neutral density filters.",
"fno": "536500a409",
"keywords": [
"Brightness",
"Helmet Mounted Displays",
"Visual Perception",
"Additive Displays",
"Additive Light Model",
"Contrast Models",
"Displayed Imagery",
"Environment Light Attenuation",
"Environment Luminance",
"Head Mounted Displays",
"Luminance Contrast",
"Michelson Contrast",
"Michelsons Contrast Equation",
"Optical See Through Displays",
"OST HM Ds",
"Perceived Contrast",
"Virtual Imagery",
"Visual Imagery",
"Weber Contrast",
"Optical Filters",
"Adaptation Models",
"Visualization",
"Additives",
"Optical Design",
"Optical Attenuators",
"Predictive Models",
"Human Centered Computing Human Computer Interaction HCI Interaction Paradigms Mixed Augmented Re Ality",
"Human Centered Computing Visualization Visualization Design And Evaluation Methods"
],
"authors": [
{
"affiliation": "University of Central Florida",
"fullName": "Austin Erickson",
"givenName": "Austin",
"surname": "Erickson",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Central Florida",
"fullName": "Gerd Bruder",
"givenName": "Gerd",
"surname": "Bruder",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Central Florida",
"fullName": "Gregory F. Welch",
"givenName": "Gregory F.",
"surname": "Welch",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar-adjunct",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-10-01T00:00:00",
"pubType": "proceedings",
"pages": "409-410",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-5365-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "536500a403",
"articleId": "1J7WjIHPIoE",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "536500a411",
"articleId": "1J7WewHTQ76",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/3dui/2016/0842/0/07460049",
"title": "SharpView: Improved clarity of defocused content on optical see-through head-mounted displays",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2016/07460049/12OmNBWzHQi",
"parentPublication": {
"id": "proceedings/3dui/2016/0842/0",
"title": "2016 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2017/6327/0/6327a202",
"title": "[POSTER] BrightView: Increasing Perceived Brightness in Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2017/6327a202/12OmNqI04YU",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2017/6327/0",
"title": "2017 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2014/6184/0/06948426",
"title": "SmartColor: Real-time color correction and contrast for optical see-through head-mounted displays",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2014/06948426/12OmNzaQoFo",
"parentPublication": {
"id": "proceedings/ismar/2014/6184/0",
"title": "2014 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2013/4795/0/06549352",
"title": "General-purpose telepresence with head-worn optical see-through displays and projector-based lighting",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2013/06549352/12OmNzmclA3",
"parentPublication": {
"id": "proceedings/vr/2013/4795/0",
"title": "2013 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446441",
"title": "BrightView: Increasing Perceived Brightness of Optical See-Through Head-Mounted Displays Through Unnoticeable Incident Light Reduction",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446441/13bd1sv5NxY",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2015/12/07138644",
"title": "SmartColor: Real-Time Color and Contrast Correction for Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/journal/tg/2015/12/07138644/13rRUwfZC0k",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/05/08676153",
"title": "Light Attenuation Display: Subtractive See-Through Near-Eye Display via Spatial Color Filtering",
"doi": null,
"abstractUrl": "/journal/tg/2019/05/08676153/18LFbQfp6x2",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/12/09463728",
"title": "Color Contrast Enhanced Rendering for Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/journal/tg/2022/12/09463728/1uFxo1ImlpK",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2021/1298/0/129800a115",
"title": "Perceived Transparency in Optical See-Through Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2021/129800a115/1yeQLPBHFBe",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2021/1298/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1MNgk3BHlS0",
"title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2023",
"__typename": "ProceedingType"
},
"article": {
"id": "1MNgTZ7ZNLO",
"doi": "10.1109/VR55154.2023.00039",
"title": "A Compact Photochromic Occlusion Capable See-through Display with Holographic Lenses",
"normalizedTitle": "A Compact Photochromic Occlusion Capable See-through Display with Holographic Lenses",
"abstract": "Occlusion is a crucial visual element in optical see-through (OST) augmented reality, however, implementing occlusion in OST displays while addressing various design trade-offs is a difficult problem. In contrast to the traditional method of using spatial light modulators (SLMs) for the occlusion mask, using photochromic materials as occlusion masks can effectively eliminate diffraction artifacts in see-through views due to the lack of electronic pixels, thus providing superior see-through image quality. However, this design requires UV illumination to activate the photochromic mate-rial, which traditionally requires multiple SLMs, resulting in a larger form factor for the system. This paper presents a compact photochromic occlusion-capable OST design using multilayer, wavelength-dependent holographic optical lenses (HOLs). Our approach employs a single digital mi-cromirror display (DMD) to form both the occlusion mask with UV light and a virtual image with visible light in a time-multiplexed man-ner. We demonstrate our proof-of-concept system on a bench-top setup and assess the appearance and contrasts of the displayed image. We also suggest potential improvements for current prototypes to encourage the community to explore this occlusion approach.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Occlusion is a crucial visual element in optical see-through (OST) augmented reality, however, implementing occlusion in OST displays while addressing various design trade-offs is a difficult problem. In contrast to the traditional method of using spatial light modulators (SLMs) for the occlusion mask, using photochromic materials as occlusion masks can effectively eliminate diffraction artifacts in see-through views due to the lack of electronic pixels, thus providing superior see-through image quality. However, this design requires UV illumination to activate the photochromic mate-rial, which traditionally requires multiple SLMs, resulting in a larger form factor for the system. This paper presents a compact photochromic occlusion-capable OST design using multilayer, wavelength-dependent holographic optical lenses (HOLs). Our approach employs a single digital mi-cromirror display (DMD) to form both the occlusion mask with UV light and a virtual image with visible light in a time-multiplexed man-ner. We demonstrate our proof-of-concept system on a bench-top setup and assess the appearance and contrasts of the displayed image. We also suggest potential improvements for current prototypes to encourage the community to explore this occlusion approach.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Occlusion is a crucial visual element in optical see-through (OST) augmented reality, however, implementing occlusion in OST displays while addressing various design trade-offs is a difficult problem. In contrast to the traditional method of using spatial light modulators (SLMs) for the occlusion mask, using photochromic materials as occlusion masks can effectively eliminate diffraction artifacts in see-through views due to the lack of electronic pixels, thus providing superior see-through image quality. However, this design requires UV illumination to activate the photochromic mate-rial, which traditionally requires multiple SLMs, resulting in a larger form factor for the system. This paper presents a compact photochromic occlusion-capable OST design using multilayer, wavelength-dependent holographic optical lenses (HOLs). Our approach employs a single digital mi-cromirror display (DMD) to form both the occlusion mask with UV light and a virtual image with visible light in a time-multiplexed man-ner. We demonstrate our proof-of-concept system on a bench-top setup and assess the appearance and contrasts of the displayed image. We also suggest potential improvements for current prototypes to encourage the community to explore this occlusion approach.",
"fno": "481500a237",
"keywords": [
"Visualization",
"Optical Diffraction",
"Three Dimensional Displays",
"Optical Design",
"Prototypes",
"Holography",
"User Interfaces",
"Human Centered Computing Human Computer Interaction HCI Interaction Paradigms Mixed Augmented Reality",
"Human Centered Computing Communication Hardware",
"Interfaces And Storage Displays And Imagers"
],
"authors": [
{
"affiliation": "Trinity College,Dublin",
"fullName": "Chun-Wei Ooi",
"givenName": "Chun-Wei",
"surname": "Ooi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "The University of Tokyo",
"fullName": "Yuichi Hiroi",
"givenName": "Yuichi",
"surname": "Hiroi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "The University of Tokyo",
"fullName": "Yuta Itoh",
"givenName": "Yuta",
"surname": "Itoh",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2023-03-01T00:00:00",
"pubType": "proceedings",
"pages": "237-242",
"year": "2023",
"issn": null,
"isbn": "979-8-3503-4815-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "481500a226",
"articleId": "1MNgVbw2hc4",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "481500a243",
"articleId": "1MNgyZ3pLFe",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ismar/2012/4660/0/06402574",
"title": "Occlusion capable optical see-through head-mounted display using freeform optics",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2012/06402574/12OmNBEpnEt",
"parentPublication": {
"id": "proceedings/ismar/2012/4660/0",
"title": "2012 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/11/08007218",
"title": "Occlusion Leak Compensation for Optical See-Through Displays Using a Single-Layer Transmissive Spatial Light Modulator",
"doi": null,
"abstractUrl": "/journal/tg/2017/11/08007218/13rRUxcbnHi",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/05/08676155",
"title": "Varifocal Occlusion for Optical See-Through Head-Mounted Displays using a Slide Occlusion Mask",
"doi": null,
"abstractUrl": "/journal/tg/2019/05/08676155/18LFfGhc49i",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2022/9617/0/961700a350",
"title": "Exploring Presence, Avatar Embodiment, and Body Perception with a Holographic Augmented Reality Mirror",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2022/961700a350/1CJcn3q3J5K",
"parentPublication": {
"id": "proceedings/vr/2022/9617/0",
"title": "2022 IEEE on Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a800",
"title": "Add-on Occlusion: An External Module for Optical See-through Augmented Reality Displays to Support Mutual Occlusion",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a800/1CJeADcapNK",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/05/10050791",
"title": "Add-on Occlusion: Turning Off-the-Shelf Optical See-through Head-mounted Displays Occlusion-capable",
"doi": null,
"abstractUrl": "/journal/tg/2023/05/10050791/1L039oS5wDm",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/11/08827571",
"title": "Varifocal Occlusion-Capable Optical See-through Augmented Reality Display based on Focus-tunable Optics",
"doi": null,
"abstractUrl": "/journal/tg/2019/11/08827571/1dgvaPxmhbi",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/05/08998139",
"title": "Factored Occlusion: Single Spatial Light Modulator Occlusion-capable Optical See-through Augmented Reality Display",
"doi": null,
"abstractUrl": "/journal/tg/2020/05/08998139/1hrXe0Hbv0I",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/12/09416829",
"title": "Design of a Pupil-Matched Occlusion-Capable Optical See-Through Wearable Display",
"doi": null,
"abstractUrl": "/journal/tg/2022/12/09416829/1t8VUXSYL2E",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2021/0158/0/015800a422",
"title": "Blending Shadows: Casting Shadows in Virtual and Real using Occlusion-Capable Augmented Reality Near-Eye Displays",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2021/015800a422/1yeD2Kh0vxS",
"parentPublication": {
"id": "proceedings/ismar/2021/0158/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNqJ8taH",
"title": "Proceedings Workshop on Motion and Video Computing (MOTION 2002)",
"acronym": "motion",
"groupId": "1001751",
"volume": "0",
"displayVolume": "0",
"year": "2002",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNAGw13Q",
"doi": "10.1109/MOTION.2002.1182224",
"title": "Comparative Study of Coarse Head Pose Estimation",
"normalizedTitle": "Comparative Study of Coarse Head Pose Estimation",
"abstract": "For many practical applications, it is sufficient to estimate coarse head infer gaze direction. Indeed for any application in which the camera is situated unobstrusively in an overhead corner, the only possible inference is coarse pose because of the limitations of the quality and resolution of the incoming data. However, the vast majority of research in head pose estimation deals with tracking full rigid body motion (6 degrees of freedom) for a limited range of motion (typically +/-45 degrees out-of-plane) and relatively high resolution data (usally 64x64 or more.) In this paper, we review the smaller body of research on coarse pose estimation. This work involves image-based learning, estimation of a wide range of pose, and is capable of real-time performance for low-resolution imagery. We evaluate two coarse pose estimation schemes, based on (1) a probabilstic model approach and (2) a neural network approach. We compare the results of the two techniques for varying resolution, head localization accuracy and required pose accuracy. We conclude with details for the implementation specifications for resolution and localization accuracy depending on system accuracy requirements.",
"abstracts": [
{
"abstractType": "Regular",
"content": "For many practical applications, it is sufficient to estimate coarse head infer gaze direction. Indeed for any application in which the camera is situated unobstrusively in an overhead corner, the only possible inference is coarse pose because of the limitations of the quality and resolution of the incoming data. However, the vast majority of research in head pose estimation deals with tracking full rigid body motion (6 degrees of freedom) for a limited range of motion (typically +/-45 degrees out-of-plane) and relatively high resolution data (usally 64x64 or more.) In this paper, we review the smaller body of research on coarse pose estimation. This work involves image-based learning, estimation of a wide range of pose, and is capable of real-time performance for low-resolution imagery. We evaluate two coarse pose estimation schemes, based on (1) a probabilstic model approach and (2) a neural network approach. We compare the results of the two techniques for varying resolution, head localization accuracy and required pose accuracy. We conclude with details for the implementation specifications for resolution and localization accuracy depending on system accuracy requirements.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "For many practical applications, it is sufficient to estimate coarse head infer gaze direction. Indeed for any application in which the camera is situated unobstrusively in an overhead corner, the only possible inference is coarse pose because of the limitations of the quality and resolution of the incoming data. However, the vast majority of research in head pose estimation deals with tracking full rigid body motion (6 degrees of freedom) for a limited range of motion (typically +/-45 degrees out-of-plane) and relatively high resolution data (usally 64x64 or more.) In this paper, we review the smaller body of research on coarse pose estimation. This work involves image-based learning, estimation of a wide range of pose, and is capable of real-time performance for low-resolution imagery. We evaluate two coarse pose estimation schemes, based on (1) a probabilstic model approach and (2) a neural network approach. We compare the results of the two techniques for varying resolution, head localization accuracy and required pose accuracy. We conclude with details for the implementation specifications for resolution and localization accuracy depending on system accuracy requirements.",
"fno": "18600125",
"keywords": [
"Head Pose Estimation",
"Gaze Direction Estimation",
"Head Tracking",
"Face Tracking"
],
"authors": [
{
"affiliation": "IBM T.J. Watson Research Center",
"fullName": "Lisa M. Brown",
"givenName": "Lisa M.",
"surname": "Brown",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "IBM T.J. Watson Research Center",
"fullName": "Ying-Li Tran",
"givenName": "Ying-Li",
"surname": "Tran",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "motion",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2002-12-01T00:00:00",
"pubType": "proceedings",
"pages": "125",
"year": "2002",
"issn": null,
"isbn": "0-7695-1860-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "18600118",
"articleId": "12OmNyKrHd4",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "18600133",
"articleId": "12OmNAJ4pda",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icme/2012/4711/0/4711a794",
"title": "3D Head Pose Estimation Based on Scene Flow and Generic Head Model",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2012/4711a794/12OmNqGitTB",
"parentPublication": {
"id": "proceedings/icme/2012/4711/0",
"title": "2012 IEEE International Conference on Multimedia and Expo",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/crv/2012/4683/0/4683a125",
"title": "Coarse Head Pose Estimation using Image Abstraction",
"doi": null,
"abstractUrl": "/proceedings-article/crv/2012/4683a125/12OmNwE9ORM",
"parentPublication": {
"id": "proceedings/crv/2012/4683/0",
"title": "2012 Ninth Conference on Computer and Robot Vision",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2010/4109/0/4109d870",
"title": "Visual Gaze Estimation by Joint Head and Eye Information",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2010/4109d870/12OmNyRg4Cq",
"parentPublication": {
"id": "proceedings/icpr/2010/4109/0",
"title": "Pattern Recognition, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/amfg/2003/2010/0/20100092",
"title": "Absolute Head Pose Estimation From Overhead Wide-Angle Cameras",
"doi": null,
"abstractUrl": "/proceedings-article/amfg/2003/20100092/12OmNyen1y9",
"parentPublication": {
"id": "proceedings/amfg/2003/2010/0",
"title": "2003 IEEE International Workshop on Analysis and Modeling of Faces and Gestures",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fg/2000/0580/0/05800183",
"title": "Wide-Range, Person- and Illumination-Insensitive Head Orientation Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/fg/2000/05800183/12OmNyuyac9",
"parentPublication": {
"id": "proceedings/fg/2000/0580/0",
"title": "Proceedings Fourth IEEE International Conference on Automatic Face and Gesture Recognition (Cat. No. PR00580)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2016/10/07346504",
"title": "Social Grouping for Multi-Target Tracking and Head Pose Estimation in Video",
"doi": null,
"abstractUrl": "/journal/tp/2016/10/07346504/13rRUxly9fd",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2018/1737/0/08486490",
"title": "FI-CAP: Robust Framework to Benchmark Head Pose Estimation in Challenging Environments",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2018/08486490/14jQfOLF2bC",
"parentPublication": {
"id": "proceedings/icme/2018/1737/0",
"title": "2018 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2018/6100/0/610000c237",
"title": "Light-Weight Head Pose Invariant Gaze Tracking",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2018/610000c237/17D45WXIkI8",
"parentPublication": {
"id": "proceedings/cvprw/2018/6100/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fg/2021/3176/0/09666992",
"title": "Relative Pose Consistency for Semi-Supervised Head Pose Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/fg/2021/09666992/1A6BGyUQ4yk",
"parentPublication": {
"id": "proceedings/fg/2021/3176/0",
"title": "2021 16th IEEE International Conference on Automatic Face and Gesture Recognition (FG 2021)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2021/1298/0/129800a109",
"title": "Comparing Head and AR Glasses Pose Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2021/129800a109/1yeQMONGc9y",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2021/1298/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNAYXWAO",
"title": "Proceedings International Test Conference 1992",
"acronym": "test",
"groupId": "1000753",
"volume": "0",
"displayVolume": "0",
"year": "1992",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNxXCGIT",
"doi": "10.1109/TEST.1992.527806",
"title": "A VXI driver-sensor instrument wlth large tester architecture",
"normalizedTitle": "A VXI driver-sensor instrument wlth large tester architecture",
"abstract": "A digital driver-sensor instrument for use in VXI instrumentation systems is described, with an architecture derivedfrom that of large, powerful digital board test systems. The feature requirements for a VXI digital instrument are presented and compared to those of traditional digital testers. Finally, the instrument's structure and design, which fulfll these requirements, are discussed.",
"abstracts": [
{
"abstractType": "Regular",
"content": "A digital driver-sensor instrument for use in VXI instrumentation systems is described, with an architecture derivedfrom that of large, powerful digital board test systems. The feature requirements for a VXI digital instrument are presented and compared to those of traditional digital testers. Finally, the instrument's structure and design, which fulfll these requirements, are discussed.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "A digital driver-sensor instrument for use in VXI instrumentation systems is described, with an architecture derivedfrom that of large, powerful digital board test systems. The feature requirements for a VXI digital instrument are presented and compared to those of traditional digital testers. Finally, the instrument's structure and design, which fulfll these requirements, are discussed.",
"fno": "00527806",
"keywords": [
"Instruments",
"System Testing",
"Timing",
"Power Generation",
"Test Pattern Generators",
"Manufacturing",
"Performance Evaluation",
"Control Systems",
"Telecommunication Control",
"Sensor Phenomena And Characterization"
],
"authors": [
{
"affiliation": null,
"fullName": "M.L. Fichtenbaum",
"givenName": "M.L.",
"surname": "Fichtenbaum",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "R.J. Muller",
"givenName": "R.J.",
"surname": "Muller",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "test",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "1992-01-01T00:00:00",
"pubType": "proceedings",
"pages": "76",
"year": "1992",
"issn": "1089-3539",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "00527805",
"articleId": "12OmNB6UIcD",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "00527807",
"articleId": "12OmNxxvAJM",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/etc/1993/3360/0/00246589",
"title": "MixTest: a mixed signal extension to the HP82000",
"doi": null,
"abstractUrl": "/proceedings-article/etc/1993/00246589/12OmNAo45Tr",
"parentPublication": {
"id": "proceedings/etc/1993/3360/0",
"title": "Proceedings ETC 93 Third European Test Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icicta/2011/4353/2/05751064",
"title": "Tester on the High-voltage Electrical Insulated Resistor Based on SCM",
"doi": null,
"abstractUrl": "/proceedings-article/icicta/2011/05751064/12OmNAoUT10",
"parentPublication": {
"id": "icicta/2011/4353/2",
"title": "Intelligent Computation Technology and Automation, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isdea/2015/9393/0/9393a940",
"title": "Visibility Automatic Observation Instrument Based on the Visual Technology",
"doi": null,
"abstractUrl": "/proceedings-article/isdea/2015/9393a940/12OmNvjQ8Xx",
"parentPublication": {
"id": "proceedings/isdea/2015/9393/0",
"title": "2015 Sixth International Conference on Intelligent Systems Design and Engineering Applications (ISDEA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icfcse/2011/1562/0/06041636",
"title": "Barrier-Free Electronic Height Instrument",
"doi": null,
"abstractUrl": "/proceedings-article/icfcse/2011/06041636/12OmNxvwoUK",
"parentPublication": {
"id": "proceedings/icfcse/2011/1562/0",
"title": "2011 International Conference on Future Computer Science and Education",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccsee/2012/4647/1/4647a401",
"title": "Gyroscope Test Instrument Rotating Fioor DSP Control System Based on Repetitive Control",
"doi": null,
"abstractUrl": "/proceedings-article/iccsee/2012/4647a401/12OmNyxXlyC",
"parentPublication": {
"id": "proceedings/iccsee/2012/4647/2",
"title": "Computer Science and Electronics Engineering, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icrteect/2017/6266/0/6266a044",
"title": "Portable Weight Measuring Instrument",
"doi": null,
"abstractUrl": "/proceedings-article/icrteect/2017/6266a044/12OmNz6iOFe",
"parentPublication": {
"id": "proceedings/icrteect/2017/6266/0",
"title": "2017 International Conference on Recent Trends in Electrical, Electronics and Computing Technologies (ICRTEECT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ahs/2010/5889/0/05546279",
"title": "iBoard: A highly-capable, high-performance, reconfigurable FPGA-based building block for flight instrument digital electronics",
"doi": null,
"abstractUrl": "/proceedings-article/ahs/2010/05546279/12OmNz6iOf8",
"parentPublication": {
"id": "proceedings/ahs/2010/5889/0",
"title": "2010 NASA/ESA Conference on Adaptive Hardware and Systems (AHS 2010)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/05/08658185",
"title": "Implementation and Evaluation of a 50 kHz, 28μs Motion-to-Pose Latency Head Tracking Instrument",
"doi": null,
"abstractUrl": "/journal/tg/2019/05/08658185/187ZsHB2Pwk",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aiam/2021/1732/0/173200a535",
"title": "Research and Application of Instrument Reading Recognition Algorithm Based on Deep Learning",
"doi": null,
"abstractUrl": "/proceedings-article/aiam/2021/173200a535/1BzTUbkT7y0",
"parentPublication": {
"id": "proceedings/aiam/2021/1732/0",
"title": "2021 3rd International Conference on Artificial Intelligence and Advanced Manufacture (AIAM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccst/2021/4254/0/425400a330",
"title": "Method for Creation of Original Music by the VR Panorama Based Virtual Instrument",
"doi": null,
"abstractUrl": "/proceedings-article/iccst/2021/425400a330/1ziPn2wVnuE",
"parentPublication": {
"id": "proceedings/iccst/2021/4254/0",
"title": "2021 International Conference on Culture-oriented Science & Technology (ICCST)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNzVXNJh",
"title": "2015 IEEE Symposium on 3D User Interfaces (3DUI)",
"acronym": "3dui",
"groupId": "1001623",
"volume": "0",
"displayVolume": "0",
"year": "2015",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNxisQTZ",
"doi": "10.1109/3DUI.2015.7131770",
"title": "ChromaChord: A virtual musical instrument",
"normalizedTitle": "ChromaChord: A virtual musical instrument",
"abstract": "ChromaChord utilizes a combination of the Oculus Rift headset and an attached Leap Motion controller to create an immersive 3D VMI environment. The Oculus Rift provides a three-paneled visual interface, through Unity, that allows the users to access different component windows of the instrument (simply by turning their head), while the Leap Motion Controller allows the user to interact with the visual interface to musical effect (processed and generated in Max/MSP).",
"abstracts": [
{
"abstractType": "Regular",
"content": "ChromaChord utilizes a combination of the Oculus Rift headset and an attached Leap Motion controller to create an immersive 3D VMI environment. The Oculus Rift provides a three-paneled visual interface, through Unity, that allows the users to access different component windows of the instrument (simply by turning their head), while the Leap Motion Controller allows the user to interact with the visual interface to musical effect (processed and generated in Max/MSP).",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "ChromaChord utilizes a combination of the Oculus Rift headset and an attached Leap Motion controller to create an immersive 3D VMI environment. The Oculus Rift provides a three-paneled visual interface, through Unity, that allows the users to access different component windows of the instrument (simply by turning their head), while the Leap Motion Controller allows the user to interact with the visual interface to musical effect (processed and generated in Max/MSP).",
"fno": "07131770",
"keywords": [
"Instruments",
"Modulation",
"Tracking",
"Switches",
"Headphones",
"Three Dimensional Displays",
"Stereo Image Processing",
"Virtual Instruments",
"Immersive VR",
"Virtual Environments",
"Oculus Rift",
"Leap Motion",
"Human Computer Interface"
],
"authors": [
{
"affiliation": "Institute for Digital Intermedia Arts [IDIA Lab] at Ball State University, USA",
"fullName": "John Fillwalk",
"givenName": "John",
"surname": "Fillwalk",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "3dui",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2015-03-01T00:00:00",
"pubType": "proceedings",
"pages": "201-202",
"year": "2015",
"issn": null,
"isbn": "978-1-4673-6886-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07131769",
"articleId": "12OmNBPc8zo",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07131771",
"articleId": "12OmNyTOslO",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icis/2017/5507/0/07960051",
"title": "Double hand-gesture interaction for walk-through in VR environment",
"doi": null,
"abstractUrl": "/proceedings-article/icis/2017/07960051/12OmNB06l56",
"parentPublication": {
"id": "proceedings/icis/2017/5507/0",
"title": "2017 IEEE/ACIS 16th International Conference on Computer and Information Science (ICIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2015/6886/0/07131769",
"title": "Crosscale: A 3D virtual musical instrument interface",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2015/07131769/12OmNBPc8zo",
"parentPublication": {
"id": "proceedings/3dui/2015/6886/0",
"title": "2015 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2017/0457/0/0457c671",
"title": "Position Tracking for Virtual Reality Using Commodity WiFi",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2017/0457c671/12OmNC8MsMz",
"parentPublication": {
"id": "proceedings/cvpr/2017/0457/0",
"title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2014/2871/0/06802102",
"title": "Diplopia: A virtual reality game designed to help amblyopics",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2014/06802102/12OmNqG0T2a",
"parentPublication": {
"id": "proceedings/vr/2014/2871/0",
"title": "2014 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icalt/2017/3870/0/3870a477",
"title": "A Comparison between Oculus Rift and a Low-Cost Smartphone VR Headset: Immersive User Experience and Learning",
"doi": null,
"abstractUrl": "/proceedings-article/icalt/2017/3870a477/12OmNroijg2",
"parentPublication": {
"id": "proceedings/icalt/2017/3870/0",
"title": "2017 IEEE 17th International Conference on Advanced Learning Technologies (ICALT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2014/2871/0/06802099",
"title": "Full body interaction in virtual reality with affordable hardware",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2014/06802099/12OmNy2agQj",
"parentPublication": {
"id": "proceedings/vr/2014/2871/0",
"title": "2014 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/08/08392385",
"title": "Realtime Hand-Object Interaction Using Learned Grasp Space for Virtual Environments",
"doi": null,
"abstractUrl": "/journal/tg/2019/08/08392385/13rRUx0geq3",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797680",
"title": "Coretet: A Dynamic Virtual Musical Instrument for the Twenty-First Century",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797680/1cJ0NOb4Lba",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cw/2019/2297/0/229700a025",
"title": "CPR Virtual Reality Training Simulator for Schools",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2019/229700a025/1fHkmE0X3OM",
"parentPublication": {
"id": "proceedings/cw/2019/2297/0",
"title": "2019 International Conference on Cyberworlds (CW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090493",
"title": "Lingering Effects Associated with Virtual Reality: An Analysis Based on Consumer Discussions Over Time",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090493/1jIxsdXB2mY",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNrNh0vw",
"title": "2014 22nd International Conference on Pattern Recognition (ICPR)",
"acronym": "icpr",
"groupId": "1000545",
"volume": "0",
"displayVolume": "0",
"year": "2014",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNyo1nKa",
"doi": "10.1109/ICPR.2014.327",
"title": "Appearance-Based Gaze Tracking with Free Head Movement",
"normalizedTitle": "Appearance-Based Gaze Tracking with Free Head Movement",
"abstract": "In this work, we develop an appearance-based gaze tracking system allowing user to move their head freely. The main difficulty of the appearance-based gaze tracking method is that the eye appearance is sensitive to head orientation. To overcome the difficulty, we propose a 3-D gaze tracking method combining head pose tracking and appearance-based gaze estimation. We use a random forest approach to model the neighbor structure of the joint head pose and eye appearance space, and efficiently select neighbors from the collected high dimensional data set. Li-optimization is then used to seek for the best solution for regression from the selected neighboring samples. Experiment results shows that it can provide robust binocular gaze tracking results with less constraints but still provides moderate estimation accuracy of gaze estimation.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this work, we develop an appearance-based gaze tracking system allowing user to move their head freely. The main difficulty of the appearance-based gaze tracking method is that the eye appearance is sensitive to head orientation. To overcome the difficulty, we propose a 3-D gaze tracking method combining head pose tracking and appearance-based gaze estimation. We use a random forest approach to model the neighbor structure of the joint head pose and eye appearance space, and efficiently select neighbors from the collected high dimensional data set. Li-optimization is then used to seek for the best solution for regression from the selected neighboring samples. Experiment results shows that it can provide robust binocular gaze tracking results with less constraints but still provides moderate estimation accuracy of gaze estimation.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this work, we develop an appearance-based gaze tracking system allowing user to move their head freely. The main difficulty of the appearance-based gaze tracking method is that the eye appearance is sensitive to head orientation. To overcome the difficulty, we propose a 3-D gaze tracking method combining head pose tracking and appearance-based gaze estimation. We use a random forest approach to model the neighbor structure of the joint head pose and eye appearance space, and efficiently select neighbors from the collected high dimensional data set. Li-optimization is then used to seek for the best solution for regression from the selected neighboring samples. Experiment results shows that it can provide robust binocular gaze tracking results with less constraints but still provides moderate estimation accuracy of gaze estimation.",
"fno": "5209b869",
"keywords": [
"Head",
"Estimation",
"Tracking",
"Training",
"Magnetic Heads",
"Accuracy",
"Cameras"
],
"authors": [
{
"affiliation": null,
"fullName": "Chih-Chuan Lai",
"givenName": "Chih-Chuan",
"surname": "Lai",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Yu-Ting Chen",
"givenName": "Yu-Ting",
"surname": "Chen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Kuan-Wen Chen",
"givenName": "Kuan-Wen",
"surname": "Chen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Shen-Chi Chen",
"givenName": "Shen-Chi",
"surname": "Chen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Sheng-Wen Shih",
"givenName": "Sheng-Wen",
"surname": "Shih",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Yi-Ping Hung",
"givenName": "Yi-Ping",
"surname": "Hung",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2014-08-01T00:00:00",
"pubType": "proceedings",
"pages": "1869-1873",
"year": "2014",
"issn": "1051-4651",
"isbn": "978-1-4799-5209-0",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "5209b863",
"articleId": "12OmNyo1nKX",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "5209b874",
"articleId": "12OmNxXUhN3",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icpr/2012/2216/0/06460306",
"title": "Head pose-free appearance-based gaze sensing via eye image synthesis",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2012/06460306/12OmNrMHOcV",
"parentPublication": {
"id": "proceedings/icpr/2012/2216/0",
"title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2011/0063/0/06130456",
"title": "Appearance-based head pose estimation with scene-specific adaptation",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2011/06130456/12OmNxXl5xs",
"parentPublication": {
"id": "proceedings/iccvw/2011/0063/0",
"title": "2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/crv/2015/1986/0/1986a176",
"title": "Mobile 3D Gaze Tracking Calibration",
"doi": null,
"abstractUrl": "/proceedings-article/crv/2015/1986a176/12OmNzzxusS",
"parentPublication": {
"id": "proceedings/crv/2015/1986/0",
"title": "2015 12th Conference on Computer and Robot Vision (CRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2014/10/06777326",
"title": "Adaptive Linear Regression for Appearance-Based Gaze Estimation",
"doi": null,
"abstractUrl": "/journal/tp/2014/10/06777326/13rRUEgartY",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2018/6100/0/610000c237",
"title": "Light-Weight Head Pose Invariant Gaze Tracking",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2018/610000c237/17D45WXIkI8",
"parentPublication": {
"id": "proceedings/cvprw/2018/6100/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2019/01/08122058",
"title": "MPIIGaze: Real-World Dataset and Deep Appearance-Based Gaze Estimation",
"doi": null,
"abstractUrl": "/journal/tp/2019/01/08122058/17D45WZZ7E5",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2023/01/09706357",
"title": "Towards High Performance Low Complexity Calibration in Appearance Based Gaze Estimation",
"doi": null,
"abstractUrl": "/journal/tp/2023/01/09706357/1AO2a7pgNPO",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2022/5325/0/532500a082",
"title": "Real-time Gaze Tracking with Head-eye Coordination for Head-mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2022/532500a082/1JrQQ8dsLKM",
"parentPublication": {
"id": "proceedings/ismar/2022/5325/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/5555/01/10061572",
"title": "Free-HeadGAN: Neural Talking Head Synthesis with Explicit Gaze Control",
"doi": null,
"abstractUrl": "/journal/tp/5555/01/10061572/1Lk2C6ZD2zC",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2020/6553/0/09093476",
"title": "Learning to Detect Head Movement in Unconstrained Remote Gaze Estimation in the Wild",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2020/09093476/1jPblTmx0s0",
"parentPublication": {
"id": "proceedings/wacv/2020/6553/0",
"title": "2020 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "17D45VtKir6",
"title": "2018 24th International Conference on Pattern Recognition (ICPR)",
"acronym": "icpr",
"groupId": "1000545",
"volume": "0",
"displayVolume": "0",
"year": "2018",
"__typename": "ProceedingType"
},
"article": {
"id": "17D45VUZMZM",
"doi": "10.1109/ICPR.2018.8546089",
"title": "A Benchmark for Full Rotation Head Tracking",
"normalizedTitle": "A Benchmark for Full Rotation Head Tracking",
"abstract": "This paper introduces a new benchmark for 360-degree rotation head tracking, named Full Rotation Head Tracking (FRHT). The benchmark consists of 50 color sequences containing diverse human activities with complicated head motions. Specially, FRHT covers the most challenges of head tracking and focuses on the appearance variations of heads during the 360-degree rotation. It also pays attention to the clutters from the heads of nearby people. Further, we propose a baseline tracker. It guides a selective adaption updating by verifying strategies, thus alleviates error accumulation. Extensive experiments validate the advantages of FRHT in head rotation and similar object clutter.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper introduces a new benchmark for 360-degree rotation head tracking, named Full Rotation Head Tracking (FRHT). The benchmark consists of 50 color sequences containing diverse human activities with complicated head motions. Specially, FRHT covers the most challenges of head tracking and focuses on the appearance variations of heads during the 360-degree rotation. It also pays attention to the clutters from the heads of nearby people. Further, we propose a baseline tracker. It guides a selective adaption updating by verifying strategies, thus alleviates error accumulation. Extensive experiments validate the advantages of FRHT in head rotation and similar object clutter.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper introduces a new benchmark for 360-degree rotation head tracking, named Full Rotation Head Tracking (FRHT). The benchmark consists of 50 color sequences containing diverse human activities with complicated head motions. Specially, FRHT covers the most challenges of head tracking and focuses on the appearance variations of heads during the 360-degree rotation. It also pays attention to the clutters from the heads of nearby people. Further, we propose a baseline tracker. It guides a selective adaption updating by verifying strategies, thus alleviates error accumulation. Extensive experiments validate the advantages of FRHT in head rotation and similar object clutter.",
"fno": "08546089",
"keywords": [
"Image Colour Analysis",
"Image Sequences",
"Object Tracking",
"Object Clutter",
"Error Accumulation",
"Selective Adaption Updating",
"Baseline Tracker",
"Full Rotation Head Tracking",
"Color Sequences",
"Head Rotation",
"Complicated Head Motions",
"Diverse Human Activities",
"FRHT",
"Frequency Modulation",
"Benchmark Testing",
"Intellectual Property",
"Clutter",
"Head",
"Target Tracking"
],
"authors": [
{
"affiliation": "“Key Lab of Intelligent Information Processing of Chinese Academy of Sciences(CAS), Institute of Computing Technology, CAS, Bejing, China",
"fullName": "Yulin Li",
"givenName": "Yulin",
"surname": "Li",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "“Key Lab of Intelligent Information Processing of Chinese Academy of Sciences(CAS), Institute of Computing Technology, CAS, Bejing, China",
"fullName": "Bingpeng Ma",
"givenName": "Bingpeng",
"surname": "Ma",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "“Key Lab of Intelligent Information Processing of Chinese Academy of Sciences(CAS), Institute of Computing Technology, CAS, Bejing, China",
"fullName": "Hong Chong",
"givenName": "Hong",
"surname": "Chong",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "“Key Lab of Intelligent Information Processing of Chinese Academy of Sciences(CAS), Institute of Computing Technology, CAS, Bejing, China",
"fullName": "Xilin Chen",
"givenName": "Xilin",
"surname": "Chen",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2018-08-01T00:00:00",
"pubType": "proceedings",
"pages": "2106-2111",
"year": "2018",
"issn": "1051-4651",
"isbn": "978-1-5386-3788-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08546208",
"articleId": "17D45W1Oa5L",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08546179",
"articleId": "17D45WaTknU",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/fg/2002/1602/0/16020089",
"title": "Head Tracking by Active Particle Filtering",
"doi": null,
"abstractUrl": "/proceedings-article/fg/2002/16020089/12OmNBLdKN7",
"parentPublication": {
"id": "proceedings/fg/2002/1602/0",
"title": "Proceedings of Fifth IEEE International Conference on Automatic Face Gesture Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2008/2047/0/04476614",
"title": "Poster: Effects of Head Tracking and Stereo on Non-Isomorphic 3D Rotation",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2008/04476614/12OmNqEAT7R",
"parentPublication": {
"id": "proceedings/3dui/2008/2047/0",
"title": "2008 IEEE Symposium on 3D User Interfaces",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fg/2002/1602/0/16020390",
"title": "Fast Stereo-Based Head Tracking for Interactive Environments",
"doi": null,
"abstractUrl": "/proceedings-article/fg/2002/16020390/12OmNvDZEQU",
"parentPublication": {
"id": "proceedings/fg/2002/1602/0",
"title": "Proceedings of Fifth IEEE International Conference on Automatic Face Gesture Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2005/9331/0/01521566",
"title": "Head Tracking Using Particle Filter with Intensity Gradient and Color Histogram",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2005/01521566/12OmNvsm6zi",
"parentPublication": {
"id": "proceedings/icme/2005/9331/0",
"title": "2005 IEEE International Conference on Multimedia and Expo",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2017/6647/0/07892227",
"title": "Guided head rotation and amplified head rotation: Evaluating semi-natural travel and viewing techniques in virtual reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892227/12OmNwseEYz",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2013/4795/0/06549373",
"title": "Integrating head and full-body tracking for embodiment in virtual characters",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2013/06549373/12OmNx0RIVC",
"parentPublication": {
"id": "proceedings/vr/2013/4795/0",
"title": "2013 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ssiai/2000/0595/0/05950192",
"title": "Head Segmentation and Head Orientation in 3D Space for Pose Estimation of Multiple People",
"doi": null,
"abstractUrl": "/proceedings-article/ssiai/2000/05950192/12OmNyv7m7n",
"parentPublication": {
"id": "proceedings/ssiai/2000/0595/0",
"title": "Image Analysis and Interpretation, IEEE Southwest Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ism/2021/3734/0/373400a043",
"title": "Head Rotation Model for Virtual Reality System Level Simulations",
"doi": null,
"abstractUrl": "/proceedings-article/ism/2021/373400a043/1A3j4PzAcHS",
"parentPublication": {
"id": "proceedings/ism/2021/3734/0",
"title": "2021 IEEE International Symposium on Multimedia (ISM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09736631",
"title": "On Rotation Gains Within and Beyond Perceptual Limitations for Seated VR",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09736631/1BN1UtLinTi",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/itme/2021/0679/0/067900a081",
"title": "Reinspecting Classification and Regression in the Sibling Head for Visual Tracking",
"doi": null,
"abstractUrl": "/proceedings-article/itme/2021/067900a081/1CATqxreKd2",
"parentPublication": {
"id": "proceedings/itme/2021/0679/0",
"title": "2021 11th International Conference on Information Technology in Medicine and Education (ITME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1CAThh2drSE",
"title": "2021 11th International Conference on Information Technology in Medicine and Education (ITME)",
"acronym": "itme",
"groupId": "1002567",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1CATqxreKd2",
"doi": "10.1109/ITME53901.2021.00027",
"title": "Reinspecting Classification and Regression in the Sibling Head for Visual Tracking",
"normalizedTitle": "Reinspecting Classification and Regression in the Sibling Head for Visual Tracking",
"abstract": "The sibling head has been widely used in Siamese-based trackers, however, the structure of the sibling head is the same between classification and regression tasks, which limits the ability of the tracker to obtain more robust and accurate prediction. To solve this issue, we reinspect the network structure of tracking-head for classification and regression tasks, since recognizing the target category needs translation invariant feature while the position-sensitive information facilitates target bounding-box regression task. Further, we propose a differenti-ated tracking-head network, named SiamDTH, by exploiting the feature response module (FRM) and the differentiated sibling head (DSH) to alleviate misalignments between classification and regression task domains. Extensive experiments on visual tracking benchmarks including VOT2019 and OTB100 demon-strate that SiamDTH achieves state-of-the-art performance with a considerable real-time speed. Our source code is available at: https://github.com/x10312/SiamDTH.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The sibling head has been widely used in Siamese-based trackers, however, the structure of the sibling head is the same between classification and regression tasks, which limits the ability of the tracker to obtain more robust and accurate prediction. To solve this issue, we reinspect the network structure of tracking-head for classification and regression tasks, since recognizing the target category needs translation invariant feature while the position-sensitive information facilitates target bounding-box regression task. Further, we propose a differenti-ated tracking-head network, named SiamDTH, by exploiting the feature response module (FRM) and the differentiated sibling head (DSH) to alleviate misalignments between classification and regression task domains. Extensive experiments on visual tracking benchmarks including VOT2019 and OTB100 demon-strate that SiamDTH achieves state-of-the-art performance with a considerable real-time speed. Our source code is available at: https://github.com/x10312/SiamDTH.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The sibling head has been widely used in Siamese-based trackers, however, the structure of the sibling head is the same between classification and regression tasks, which limits the ability of the tracker to obtain more robust and accurate prediction. To solve this issue, we reinspect the network structure of tracking-head for classification and regression tasks, since recognizing the target category needs translation invariant feature while the position-sensitive information facilitates target bounding-box regression task. Further, we propose a differenti-ated tracking-head network, named SiamDTH, by exploiting the feature response module (FRM) and the differentiated sibling head (DSH) to alleviate misalignments between classification and regression task domains. Extensive experiments on visual tracking benchmarks including VOT2019 and OTB100 demon-strate that SiamDTH achieves state-of-the-art performance with a considerable real-time speed. Our source code is available at: https://github.com/x10312/SiamDTH.",
"fno": "067900a081",
"keywords": [
"Feature Extraction",
"Image Classification",
"Image Representation",
"Object Detection",
"Object Tracking",
"Regression Analysis",
"Siamese Based Trackers",
"Robust Prediction",
"Network Structure",
"Position Sensitive Information Facilitates Target Bounding Box Regression Task",
"Tracking Head Network",
"Differentiated Sibling Head",
"Regression Task Domains",
"Visual Tracking Benchmarks",
"DSH",
"VOT 2019",
"OTB 100",
"Siam DTH",
"Translation Invariant Feature",
"Visualization",
"Target Tracking",
"Head",
"Codes",
"Target Recognition",
"Education",
"Benchmark Testing",
"Siamese Network",
"Feature Aggregation",
"Task Do Main",
"Sibling Head"
],
"authors": [
{
"affiliation": "Qingdao University,School of Computer Science and Technology,Qingdao,China",
"fullName": "Luming Li",
"givenName": "Luming",
"surname": "Li",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Qingdao University,School of Computer Science and Technology,Qingdao,China",
"fullName": "Xiaowei Zhang",
"givenName": "Xiaowei",
"surname": "Zhang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Qingdao School for the Blind,Qingdao,China",
"fullName": "Xiaohong Sun",
"givenName": "Xiaohong",
"surname": "Sun",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Shandong Normal University,School of Information Science and Engineering,Jinan,China",
"fullName": "Hong Liu",
"givenName": "Hong",
"surname": "Liu",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "itme",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-11-01T00:00:00",
"pubType": "proceedings",
"pages": "81-85",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-0679-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "067900a076",
"articleId": "1CATij0M5wc",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "067900a086",
"articleId": "1CATlDUEjLO",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cvpr/1999/0149/1/01491604",
"title": "Fast, Reliable Head Tracking under Varying Illumination",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/1999/01491604/12OmNAlNiLz",
"parentPublication": {
"id": "proceedings/cvpr/1999/0149/2",
"title": "Proceedings. 1999 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (Cat. No PR00149)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2016/0842/0/07460028",
"title": "The benefits of rotational head tracking",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2016/07460028/12OmNqC2uYu",
"parentPublication": {
"id": "proceedings/3dui/2016/0842/0",
"title": "2016 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2002/1492/0/14920235",
"title": "Unique Shared-Aperture Display with Head or Target Tracking",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2002/14920235/12OmNx7G622",
"parentPublication": {
"id": "proceedings/vr/2002/1492/0",
"title": "Proceedings IEEE Virtual Reality 2002",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2014/5209/0/5209b869",
"title": "Appearance-Based Gaze Tracking with Free Head Movement",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2014/5209b869/12OmNyo1nKa",
"parentPublication": {
"id": "proceedings/icpr/2014/5209/0",
"title": "2014 22nd International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fg/2002/1602/0/16020255",
"title": "Model-Based Head Pose Tracking With Stereovision",
"doi": null,
"abstractUrl": "/proceedings-article/fg/2002/16020255/12OmNz5JBXs",
"parentPublication": {
"id": "proceedings/fg/2002/1602/0",
"title": "Proceedings of Fifth IEEE International Conference on Automatic Face Gesture Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/smartcomp/2014/5711/0/07043851",
"title": "A people counting method based on head detection and tracking",
"doi": null,
"abstractUrl": "/proceedings-article/smartcomp/2014/07043851/12OmNzcPAhe",
"parentPublication": {
"id": "proceedings/smartcomp/2014/5711/0",
"title": "2014 International Conference on Smart Computing (SMARTCOMP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2016/10/07346504",
"title": "Social Grouping for Multi-Target Tracking and Head Pose Estimation in Video",
"doi": null,
"abstractUrl": "/journal/tp/2016/10/07346504/13rRUxly9fd",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2018/3788/0/08546089",
"title": "A Benchmark for Full Rotation Head Tracking",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2018/08546089/17D45VUZMZM",
"parentPublication": {
"id": "proceedings/icpr/2018/3788/0",
"title": "2018 24th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800h181",
"title": "Probabilistic Regression for Visual Tracking",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800h181/1m3nIYYhH4A",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icaa/2021/3730/0/373000a165",
"title": "Two Stages for Visual Object Tracking",
"doi": null,
"abstractUrl": "/proceedings-article/icaa/2021/373000a165/1zL1IK338RO",
"parentPublication": {
"id": "proceedings/icaa/2021/3730/0",
"title": "2021 International Conference on Intelligent Computing, Automation and Applications (ICAA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNBUAvV0",
"title": "2013 Fourth International Conference on Digital Manufacturing & Automation (ICDMA)",
"acronym": "icdma",
"groupId": "1800272",
"volume": "0",
"displayVolume": "0",
"year": "2013",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNBEpnAQ",
"doi": "10.1109/ICDMA.2013.52",
"title": "Analysis of Terrain Roughness Based on Statistics",
"normalizedTitle": "Analysis of Terrain Roughness Based on Statistics",
"abstract": "A method based on statistics was proposed. A distance laser sensor equipment was used to obtain digital surfaces of three kinds of rough terrains, which were prepared with three particle sizes of quartz. The results of analyzing the discrete data of digital surfaces indicated that there was a good relationships between average root mean-squared height, average terrain energy and average particle size dav, which meant that terrain energy can be as a standard to evaluate the roughness of a terrain. Double logarithmic auto power spectrum density curves of rough terrains were drawn and the linear equations of double logarithmic auto power spectrum density of curves were given. The slopes of fitting linear equations lg(b) had a increasing relation with the roughness of terrain, which indicated that the positions of fitting straight lines of double logarithmic auto power spectrum density curve can also evaluated the roughness of a terrain.",
"abstracts": [
{
"abstractType": "Regular",
"content": "A method based on statistics was proposed. A distance laser sensor equipment was used to obtain digital surfaces of three kinds of rough terrains, which were prepared with three particle sizes of quartz. The results of analyzing the discrete data of digital surfaces indicated that there was a good relationships between average root mean-squared height, average terrain energy and average particle size dav, which meant that terrain energy can be as a standard to evaluate the roughness of a terrain. Double logarithmic auto power spectrum density curves of rough terrains were drawn and the linear equations of double logarithmic auto power spectrum density of curves were given. The slopes of fitting linear equations lg(b) had a increasing relation with the roughness of terrain, which indicated that the positions of fitting straight lines of double logarithmic auto power spectrum density curve can also evaluated the roughness of a terrain.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "A method based on statistics was proposed. A distance laser sensor equipment was used to obtain digital surfaces of three kinds of rough terrains, which were prepared with three particle sizes of quartz. The results of analyzing the discrete data of digital surfaces indicated that there was a good relationships between average root mean-squared height, average terrain energy and average particle size dav, which meant that terrain energy can be as a standard to evaluate the roughness of a terrain. Double logarithmic auto power spectrum density curves of rough terrains were drawn and the linear equations of double logarithmic auto power spectrum density of curves were given. The slopes of fitting linear equations lg(b) had a increasing relation with the roughness of terrain, which indicated that the positions of fitting straight lines of double logarithmic auto power spectrum density curve can also evaluated the roughness of a terrain.",
"fno": "5016a224",
"keywords": [
"Rough Surfaces",
"Surface Roughness",
"Fitting",
"Soil Measurements",
"Surface Treatment",
"Lasers",
"Measurement By Laser Beam",
"Statistics",
"Transportation Engineering",
"Terrain Roughness",
"Distance Laser Sensor"
],
"authors": [
{
"affiliation": null,
"fullName": "Dong-zhang Xiao",
"givenName": "Dong-zhang",
"surname": "Xiao",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Qiao-Li Jian",
"givenName": "Qiao-Li",
"surname": "Jian",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Meng-zou",
"givenName": null,
"surname": "Meng-zou",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icdma",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2013-06-01T00:00:00",
"pubType": "proceedings",
"pages": "224-226",
"year": "2013",
"issn": null,
"isbn": "978-0-7695-5016-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "5016a219",
"articleId": "12OmNyUFfTf",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "5016a227",
"articleId": "12OmNy2Jt1a",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/wmso/2008/3484/0/3484a326",
"title": "Study on the Surface Roughness and Surface Shape Simulation Based on STEP-NC Turning",
"doi": null,
"abstractUrl": "/proceedings-article/wmso/2008/3484a326/12OmNB9t6vx",
"parentPublication": {
"id": "proceedings/wmso/2008/3484/0",
"title": "Modelling, Simulation and Optimization, International Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ihmsc/2010/4151/2/4151b298",
"title": "Surface Roughness Measure Based on Average Texture Cycle",
"doi": null,
"abstractUrl": "/proceedings-article/ihmsc/2010/4151b298/12OmNxE2mME",
"parentPublication": {
"id": "proceedings/ihmsc/2010/4151/2",
"title": "Intelligent Human-Machine Systems and Cybernetics, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icicta/2015/7644/0/7644a534",
"title": "Lunar Terrain Auto Identification Based on DEM Topographic Factor and Texture Feature Analysis",
"doi": null,
"abstractUrl": "/proceedings-article/icicta/2015/7644a534/12OmNxGAL2w",
"parentPublication": {
"id": "proceedings/icicta/2015/7644/0",
"title": "2015 8th International Conference on Intelligent Computation Technology and Automation (ICICTA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/1993/3880/0/00340963",
"title": "Fractal surface reconstruction for modeling natural terrain",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/1993/00340963/12OmNy3RRJb",
"parentPublication": {
"id": "proceedings/cvpr/1993/3880/0",
"title": "Proceedings of IEEE Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccnea/2017/3981/0/3981a138",
"title": "Research on Fault Diagnosis Technology of CNC Machine Tool Based on Machining Surface Roughness",
"doi": null,
"abstractUrl": "/proceedings-article/iccnea/2017/3981a138/12OmNyen1vo",
"parentPublication": {
"id": "proceedings/iccnea/2017/3981/0",
"title": "2017 International Conference on Computer Network, Electronic and Automation (ICCNEA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdma/2012/4772/0/4772a218",
"title": "Analysis of the Curved Surface on Surface Roughness Measurement Using Dichromatic Speckle Pattens",
"doi": null,
"abstractUrl": "/proceedings-article/icdma/2012/4772a218/12OmNz2kqrR",
"parentPublication": {
"id": "proceedings/icdma/2012/4772/0",
"title": "2012 Third International Conference on Digital Manufacturing & Automation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icnc/2009/3736/1/3736a521",
"title": "Intelligent Modeling and Predicting Surface Roughness in End Milling",
"doi": null,
"abstractUrl": "/proceedings-article/icnc/2009/3736a521/12OmNzZEArR",
"parentPublication": {
"id": "proceedings/icnc/2009/3736/4",
"title": "2009 Fifth International Conference on Natural Computation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/th/2011/02/tth2011020122",
"title": "Roughness Perception in Virtual Textures",
"doi": null,
"abstractUrl": "/journal/th/2011/02/tth2011020122/13rRUxYINfp",
"parentPublication": {
"id": "trans/th",
"title": "IEEE Transactions on Haptics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmcce/2019/4689/0/468900b062",
"title": "Study on Terrain Heterogeneity and Its Influence in \"Sanjiangyuan\" Area",
"doi": null,
"abstractUrl": "/proceedings-article/icmcce/2019/468900b062/1h0FjJbnqNy",
"parentPublication": {
"id": "proceedings/icmcce/2019/4689/0",
"title": "2019 4th International Conference on Mechanical, Control and Computer Engineering (ICMCCE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmtma/2020/7081/0/708100a012",
"title": "Analysis of Static Characteristics of Hydrodynamic Bearing with Different Surface Roughness",
"doi": null,
"abstractUrl": "/proceedings-article/icmtma/2020/708100a012/1iERIawDD5C",
"parentPublication": {
"id": "proceedings/icmtma/2020/7081/0",
"title": "2020 12th International Conference on Measuring Technology and Mechatronics Automation (ICMTMA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNwp74rq",
"title": "Proceedings of IEEE Conference on Computer Vision and Pattern Recognition",
"acronym": "cvpr",
"groupId": "1000147",
"volume": "0",
"displayVolume": "0",
"year": "1993",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNCdBDR7",
"doi": "10.1109/CVPR.1993.341164",
"title": "Roughness and shape of specular lobe surfaces using photometric sampling method",
"normalizedTitle": "Roughness and shape of specular lobe surfaces using photometric sampling method",
"abstract": "An algorithm is proposed to determine surface orientation and roughness for specular lobe dominant surfaces under photometric sampling. From the image sequence, surface reflectance and orientation are obtained by determining the parameters of the reflectance model. The validity of this approach is demonstrated by applying it to real specular lobe dominant surfaces and examining orientation errors.",
"abstracts": [
{
"abstractType": "Regular",
"content": "An algorithm is proposed to determine surface orientation and roughness for specular lobe dominant surfaces under photometric sampling. From the image sequence, surface reflectance and orientation are obtained by determining the parameters of the reflectance model. The validity of this approach is demonstrated by applying it to real specular lobe dominant surfaces and examining orientation errors.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "An algorithm is proposed to determine surface orientation and roughness for specular lobe dominant surfaces under photometric sampling. From the image sequence, surface reflectance and orientation are obtained by determining the parameters of the reflectance model. The validity of this approach is demonstrated by applying it to real specular lobe dominant surfaces and examining orientation errors.",
"fno": "00341164",
"keywords": [
"Image Sequences",
"Reflectivity",
"Surface Topography",
"Photometry",
"Shape",
"Specular Lobe Surfaces",
"Photometric Sampling",
"Surface Orientation",
"Roughness",
"Image Sequence",
"Surface Reflectance",
"Orientation Errors",
"Shape",
"Rough Surfaces",
"Surface Roughness",
"Photometry",
"Sampling Methods",
"Light Sources",
"Equations",
"Brightness",
"Reflectivity",
"Face Detection"
],
"authors": [
{
"affiliation": "Sch. of Comput. Sci., Carnegie Mellon Univ., Pittsburgh, PA, USA",
"fullName": "T. Kiuchi",
"givenName": "T.",
"surname": "Kiuchi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Sch. of Comput. Sci., Carnegie Mellon Univ., Pittsburgh, PA, USA",
"fullName": "K. Ikeuchi",
"givenName": "K.",
"surname": "Ikeuchi",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "1993-01-01T00:00:00",
"pubType": "proceedings",
"pages": "765-766",
"year": "1993",
"issn": "1063-6919",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "00341163",
"articleId": "12OmNzwpU3S",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "00341165",
"articleId": "12OmNx6PiFK",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/robot/1989/1938/0/00099963",
"title": "Shape and reflectance from an image sequence generated using extended sources",
"doi": null,
"abstractUrl": "/proceedings-article/robot/1989/00099963/12OmNAq3hRr",
"parentPublication": {
"id": "proceedings/robot/1989/1938/0",
"title": "1989 IEEE International Conference on Robotics and Automation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/1990/2057/0/00139483",
"title": "Determining reflectance parameters using range and brightness images",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/1990/00139483/12OmNrMZpwA",
"parentPublication": {
"id": "proceedings/iccv/1990/2057/0",
"title": "Proceedings Third International Conference on Computer Vision",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2009/3992/0/05206498",
"title": "A unified model of specular and diffuse reflectance for rough, glossy surfaces",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2009/05206498/12OmNvAiShy",
"parentPublication": {
"id": "proceedings/cvpr/2009/3992/0",
"title": "2009 IEEE Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icip/1994/6952/2/00413545",
"title": "Shape from shading for non-Lambertian surfaces",
"doi": null,
"abstractUrl": "/proceedings-article/icip/1994/00413545/12OmNvsDHHP",
"parentPublication": {
"id": "proceedings/icip/1994/6952/2",
"title": "Proceedings of 1st International Conference on Image Processing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/1992/2855/0/00223149",
"title": "Extracting the shape and roughness of specular lobe objects using four light photometric stereo",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/1992/00223149/12OmNwoxSc1",
"parentPublication": {
"id": "proceedings/cvpr/1992/2855/0",
"title": "Proceedings 1992 IEEE Computer Society Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/acv/1992/2840/0/00240318",
"title": "Shape recovery methods for visual inspection",
"doi": null,
"abstractUrl": "/proceedings-article/acv/1992/00240318/12OmNxRF6VK",
"parentPublication": {
"id": "proceedings/acv/1992/2840/0",
"title": "Proceedings IEEE Workshop on Applications of Computer Vision",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/robot/1992/2720/0/00220132",
"title": "Inspecting specular lobe objects using four light sources",
"doi": null,
"abstractUrl": "/proceedings-article/robot/1992/00220132/12OmNzd7bWl",
"parentPublication": {
"id": "proceedings/robot/1992/2720/0",
"title": "Proceedings 1992 IEEE International Conference on Robotics and Automation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2001/1143/1/00937522",
"title": "New perspectives on geometric reflection theory from rough surfaces",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2001/00937522/12OmNzmclV4",
"parentPublication": {
"id": "proceedings/iccv/2001/1143/1",
"title": "Computer Vision, IEEE International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/1993/3880/0/00341163",
"title": "Diffuse reflectance from rough surfaces",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/1993/00341163/12OmNzwpU3S",
"parentPublication": {
"id": "proceedings/cvpr/1993/3880/0",
"title": "Proceedings of IEEE Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/1991/07/i0611",
"title": "Surface Reflection: Physical and Geometrical Perspectives",
"doi": null,
"abstractUrl": "/journal/tp/1991/07/i0611/13rRUxDqS9g",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNzvQI2D",
"title": "2015 International Conference on Computer Application Technologies (CCATS)",
"acronym": "ccats",
"groupId": "1809704",
"volume": "0",
"displayVolume": "0",
"year": "2015",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNvT2pgA",
"doi": "10.1109/CCATS.2015.30",
"title": "Surface Roughness Measurement Application Using Multi-frame Techniques",
"normalizedTitle": "Surface Roughness Measurement Application Using Multi-frame Techniques",
"abstract": "This paper presents a computer application of the surface roughness measurement of highly smooth surfaces. The Phase Shifting Interferometry technique which is a non-contact technique is applied for the roughness measurement. Our optic-based measurement system utilizes a 0.5 mW He-Ne laser source with the wavelength of 632.8 nm. Fringes from the measurement system were recorded using a high precision camera and were analyzed by our programs to produce the surface roughness measurement. This technique is a simple technique which gives accurate results. It is a four-frame algorithm giving similar results to the more complex five-frame one with less processing time.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper presents a computer application of the surface roughness measurement of highly smooth surfaces. The Phase Shifting Interferometry technique which is a non-contact technique is applied for the roughness measurement. Our optic-based measurement system utilizes a 0.5 mW He-Ne laser source with the wavelength of 632.8 nm. Fringes from the measurement system were recorded using a high precision camera and were analyzed by our programs to produce the surface roughness measurement. This technique is a simple technique which gives accurate results. It is a four-frame algorithm giving similar results to the more complex five-frame one with less processing time.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper presents a computer application of the surface roughness measurement of highly smooth surfaces. The Phase Shifting Interferometry technique which is a non-contact technique is applied for the roughness measurement. Our optic-based measurement system utilizes a 0.5 mW He-Ne laser source with the wavelength of 632.8 nm. Fringes from the measurement system were recorded using a high precision camera and were analyzed by our programs to produce the surface roughness measurement. This technique is a simple technique which gives accurate results. It is a four-frame algorithm giving similar results to the more complex five-frame one with less processing time.",
"fno": "8211a086",
"keywords": [
"Optical Surface Waves",
"Rough Surfaces",
"Surface Roughness",
"Mirrors",
"Surface Waves",
"Surface Treatment",
"Phase Shifting Interferometry",
"Surface Roughness Measurement",
"Phase Shifting Interferometry",
"Image Processing",
"Four Frame Algorithm"
],
"authors": [
{
"affiliation": null,
"fullName": "Tanaporn Leelawattananon",
"givenName": "Tanaporn",
"surname": "Leelawattananon",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Warawoot Thowladda",
"givenName": "Warawoot",
"surname": "Thowladda",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Suphamit Chittayasothorn",
"givenName": "Suphamit",
"surname": "Chittayasothorn",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ccats",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2015-08-01T00:00:00",
"pubType": "proceedings",
"pages": "86-91",
"year": "2015",
"issn": null,
"isbn": "978-1-4673-8211-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "8211a080",
"articleId": "12OmNqJq4CY",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "8211a092",
"articleId": "12OmNwFRpaW",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icoip/2010/4252/1/4252a623",
"title": "Influence of the Curved Surface on Surface Roughness Measurement Using Dichromatic Speckle Pattens",
"doi": null,
"abstractUrl": "/proceedings-article/icoip/2010/4252a623/12OmNAoUTj0",
"parentPublication": {
"id": "proceedings/icoip/2010/4252/2",
"title": "Optoelectronics and Image Processing, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wmso/2008/3484/0/3484a326",
"title": "Study on the Surface Roughness and Surface Shape Simulation Based on STEP-NC Turning",
"doi": null,
"abstractUrl": "/proceedings-article/wmso/2008/3484a326/12OmNB9t6vx",
"parentPublication": {
"id": "proceedings/wmso/2008/3484/0",
"title": "Modelling, Simulation and Optimization, International Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccis/2010/4270/0/4270a824",
"title": "The In-process and Real-Time Roughness Measuring System Design for Free-Form Surface",
"doi": null,
"abstractUrl": "/proceedings-article/iccis/2010/4270a824/12OmNxETanT",
"parentPublication": {
"id": "proceedings/iccis/2010/4270/0",
"title": "2010 International Conference on Computational and Information Sciences",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmtma/2009/3583/1/3583a210",
"title": "Development of Non-contact Surface Roughness Measurement in Last Decades",
"doi": null,
"abstractUrl": "/proceedings-article/icmtma/2009/3583a210/12OmNy2rS2r",
"parentPublication": {
"id": "proceedings/icmtma/2009/3583/3",
"title": "2009 International Conference on Measuring Technology and Mechatronics Automation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccnea/2017/3981/0/3981a138",
"title": "Research on Fault Diagnosis Technology of CNC Machine Tool Based on Machining Surface Roughness",
"doi": null,
"abstractUrl": "/proceedings-article/iccnea/2017/3981a138/12OmNyen1vo",
"parentPublication": {
"id": "proceedings/iccnea/2017/3981/0",
"title": "2017 International Conference on Computer Network, Electronic and Automation (ICCNEA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdma/2012/4772/0/4772a218",
"title": "Analysis of the Curved Surface on Surface Roughness Measurement Using Dichromatic Speckle Pattens",
"doi": null,
"abstractUrl": "/proceedings-article/icdma/2012/4772a218/12OmNz2kqrR",
"parentPublication": {
"id": "proceedings/icdma/2012/4772/0",
"title": "2012 Third International Conference on Digital Manufacturing & Automation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/date/2009/3781/0/05090842",
"title": "New simulation methodology of 3D surface roughness loss for interconnects modeling",
"doi": null,
"abstractUrl": "/proceedings-article/date/2009/05090842/12OmNzC5T1P",
"parentPublication": {
"id": "proceedings/date/2009/3781/0",
"title": "2009 Design, Automation & Test in Europe Conference & Exhibition (DATE'09)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icnc/2009/3736/1/3736a521",
"title": "Intelligent Modeling and Predicting Surface Roughness in End Milling",
"doi": null,
"abstractUrl": "/proceedings-article/icnc/2009/3736a521/12OmNzZEArR",
"parentPublication": {
"id": "proceedings/icnc/2009/3736/4",
"title": "2009 Fifth International Conference on Natural Computation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icedme/2020/8145/0/09122232",
"title": "Effect of Surface Roughness on Thermal Contact Resistance of Fixed Interface in Thermal Measurement of Electron Device",
"doi": null,
"abstractUrl": "/proceedings-article/icedme/2020/09122232/1kRSyHsM0HS",
"parentPublication": {
"id": "proceedings/icedme/2020/8145/0",
"title": "2020 3rd International Conference on Electron Device and Mechanical Engineering (ICEDME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmcce/2020/2314/0/231400c041",
"title": "A new Surface roughness measurement method based on image mosaic of template matching algorithm",
"doi": null,
"abstractUrl": "/proceedings-article/icmcce/2020/231400c041/1tzyQuwoo4E",
"parentPublication": {
"id": "proceedings/icmcce/2020/2314/0",
"title": "2020 5th International Conference on Mechanical, Control and Computer Engineering (ICMCCE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNApcuag",
"title": "IEEE Haptics Symposium 2008",
"acronym": "haptics",
"groupId": "1000312",
"volume": "0",
"displayVolume": "0",
"year": "2008",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNy87QB3",
"doi": "10.1109/HAPTICS.2008.4479977",
"title": "A plate tuning fork shaped tactile display using elastic waves",
"normalizedTitle": "A plate tuning fork shaped tactile display using elastic waves",
"abstract": "Through the present study, the tactile display for representing surface roughness by using a Langevin-type piezoelectric ultrasonic transducer is developed. We draw reference from the tuning fork and the ancient Chinese Chime-bell in order to enhance the vibration amplitude and make the modal figures of the device adequately complex. A U-shape aluminum plate is utilized as the touch interface on which several rib-type grooves are processed. A bolt clamped Langevin-type piezoelectric ultrasonic transducer is jointed to the bottom of the fork plate. Two sets of piezoelectric elements are adopted to excite the different modes respectively. The FEM analysis shows that a large number of vibration modes with near frequency are produced on the U-shape plate. Then a set of preliminary experiments on sensory evaluation is carried out to confirm that the output stimulations with fingertip's movement are effective to realize an artificial roughness tactile sensation.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Through the present study, the tactile display for representing surface roughness by using a Langevin-type piezoelectric ultrasonic transducer is developed. We draw reference from the tuning fork and the ancient Chinese Chime-bell in order to enhance the vibration amplitude and make the modal figures of the device adequately complex. A U-shape aluminum plate is utilized as the touch interface on which several rib-type grooves are processed. A bolt clamped Langevin-type piezoelectric ultrasonic transducer is jointed to the bottom of the fork plate. Two sets of piezoelectric elements are adopted to excite the different modes respectively. The FEM analysis shows that a large number of vibration modes with near frequency are produced on the U-shape plate. Then a set of preliminary experiments on sensory evaluation is carried out to confirm that the output stimulations with fingertip's movement are effective to realize an artificial roughness tactile sensation.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Through the present study, the tactile display for representing surface roughness by using a Langevin-type piezoelectric ultrasonic transducer is developed. We draw reference from the tuning fork and the ancient Chinese Chime-bell in order to enhance the vibration amplitude and make the modal figures of the device adequately complex. A U-shape aluminum plate is utilized as the touch interface on which several rib-type grooves are processed. A bolt clamped Langevin-type piezoelectric ultrasonic transducer is jointed to the bottom of the fork plate. Two sets of piezoelectric elements are adopted to excite the different modes respectively. The FEM analysis shows that a large number of vibration modes with near frequency are produced on the U-shape plate. Then a set of preliminary experiments on sensory evaluation is carried out to confirm that the output stimulations with fingertip's movement are effective to realize an artificial roughness tactile sensation.",
"fno": "04479977",
"keywords": [
"Haptic Interfaces",
"Piezoelectric Transducers",
"Ultrasonic Transducers",
"Vibrations",
"Virtual Reality",
"Plate Tuning Fork Shaped Tactile Display",
"Elastic Waves",
"Surface Roughness",
"Ancient Chinese Chime Bell",
"Vibration Amplitude",
"Modal Figure",
"U Shape Aluminum Plate",
"Touch Interface",
"Rib Type Grooves",
"Bolt Clamped Langevin Type Piezoelectric Ultrasonic Transducer",
"Piezoelectric Elements",
"FEM Analysis",
"Vibration Modes",
"Sensory Evaluation",
"Fingertip Movement",
"Artificial Roughness Tactile Sensation",
"Vibrations",
"Displays",
"Ultrasonic Transducers",
"Piezoelectric Transducers",
"Frequency",
"Rough Surfaces",
"Surface Roughness",
"Aluminum",
"Fasteners",
"Virtual Reality",
"Tactile Display",
"Elastic Waves",
"Ultrasonic Vibration",
"Texture Presentation",
"Langevin Type Piezoelectric Transducer",
"H 5 2 Information Interfaces And Presentation User Interfaces Haptic I O",
"H 5 1 Information Interfaces And Presentation Multimedia Information Systems Artificial Augmented And Virtual Realities"
],
"authors": [
{
"affiliation": "School of Mechanical & Electronic Engineering and Automation, Shanghai University, Shanghai, China, 149 Yan Chang Road, Shanghai 200072, China, Email: eastward@sh163.net",
"fullName": "Chaodong Li",
"givenName": "Chaodong",
"surname": "Li",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "School of Mechanical & Electronic Engineering and Automation, Shanghai University, Shanghai, China, 149 Yan Chang Road, Shanghai 200072, China",
"fullName": "Hua Yao",
"givenName": "Hua",
"surname": "Yao",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "School of Mechanical & Electronic Engineering and Automation, Shanghai University, Shanghai, China, 149 Yan Chang Road, Shanghai 200072, China",
"fullName": "Jingjing Xu",
"givenName": "Jingjing",
"surname": "Xu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "School of Mechanical & Electronic Engineering and Automation, Shanghai University, Shanghai, China, 149 Yan Chang Road, Shanghai 200072, China",
"fullName": "Yanyan Zhang",
"givenName": "Yanyan",
"surname": "Zhang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "School of Mechanical & Electronic Engineering and Automation, Shanghai University, Shanghai, China, 149 Yan Chang Road, Shanghai 200072, China",
"fullName": "Boqian Kuang",
"givenName": "Boqian",
"surname": "Kuang",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "haptics",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2008-03-01T00:00:00",
"pubType": "proceedings",
"pages": "375-376",
"year": "2008",
"issn": "2324-7347",
"isbn": "978-1-4244-2005-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "04479960",
"articleId": "12OmNyv7md7",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "04479962",
"articleId": "12OmNySXEVF",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icmtma/2009/3583/2/3583b045",
"title": "Design and Fabrication of a Novel PZT Films Based Piezoelectric Micromachined Ultrasonic Transducers",
"doi": null,
"abstractUrl": "/proceedings-article/icmtma/2009/3583b045/12OmNBSBkil",
"parentPublication": {
"id": "proceedings/icmtma/2009/3583/2",
"title": "2009 International Conference on Measuring Technology and Mechatronics Automation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccce/2014/7635/0/7635a036",
"title": "Parameter Optimization for Piezoelectric Micro-energy Harvesting System",
"doi": null,
"abstractUrl": "/proceedings-article/iccce/2014/7635a036/12OmNBZHihh",
"parentPublication": {
"id": "proceedings/iccce/2014/7635/0",
"title": "2014 International Conference on Computer & Communication Engineering (ICCCE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iciev/2013/0400/0/06572549",
"title": "Generation of usable electric power from available random sound energy",
"doi": null,
"abstractUrl": "/proceedings-article/iciev/2013/06572549/12OmNCwUmw7",
"parentPublication": {
"id": "proceedings/iciev/2013/0400/0",
"title": "2013 2nd International Conference on Informatics, Electronics and Vision (ICIEV 2013)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmtma/2011/4296/3/4296f004",
"title": "Theoretical Design and Experiment of a Plate Type Multi-degree-of-freedom Piezoelectric Motor",
"doi": null,
"abstractUrl": "/proceedings-article/icmtma/2011/4296f004/12OmNvjyxLg",
"parentPublication": {
"id": "proceedings/icmtma/2011/4296/3",
"title": "2011 Third International Conference on Measuring Technology and Mechatronics Automation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cmpeur/1989/1940/0/00093367",
"title": "Nonimpact printers as low end hardcopy printout devices",
"doi": null,
"abstractUrl": "/proceedings-article/cmpeur/1989/00093367/12OmNwDACAK",
"parentPublication": {
"id": "proceedings/cmpeur/1989/1940/0",
"title": "COMPEURO 89 Proceedings VLSI and Computer Peripherals.",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccce/2016/2427/0/2427a204",
"title": "Design and Analysis of a Buck-Boost Converter Circuit for Piezoelectric Energy Harvesting System",
"doi": null,
"abstractUrl": "/proceedings-article/iccce/2016/2427a204/12OmNy4r3SN",
"parentPublication": {
"id": "proceedings/iccce/2016/2427/0",
"title": "2016 International Conference on Computer and Communication Engineering (ICCCE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wcmeim/2021/2172/0/217200a062",
"title": "Active Vibration Control of Thin Plate Milling Using Piezoelectric Actuator",
"doi": null,
"abstractUrl": "/proceedings-article/wcmeim/2021/217200a062/1ANLDTny8la",
"parentPublication": {
"id": "proceedings/wcmeim/2021/2172/0",
"title": "2021 4th World Conference on Mechanical Engineering and Intelligent Manufacturing (WCMEIM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icedme/2020/8145/0/09122061",
"title": "Design and experimental study of concave longitudinal vibration piezoelectric ultrasonic transducer",
"doi": null,
"abstractUrl": "/proceedings-article/icedme/2020/09122061/1kRSKgsFMhG",
"parentPublication": {
"id": "proceedings/icedme/2020/8145/0",
"title": "2020 3rd International Conference on Electron Device and Mechanical Engineering (ICEDME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wcmeim/2020/4109/0/410900a802",
"title": "Design and simulation analysis of ultrasonic extrusion transducer",
"doi": null,
"abstractUrl": "/proceedings-article/wcmeim/2020/410900a802/1t2mHuS7a6Y",
"parentPublication": {
"id": "proceedings/wcmeim/2020/4109/0",
"title": "2020 3rd World Conference on Mechanical Engineering and Intelligent Manufacturing (WCMEIM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNAndiq8",
"title": "2017 International Conference on Computer Network, Electronic and Automation (ICCNEA)",
"acronym": "iccnea",
"groupId": "1823164",
"volume": "0",
"displayVolume": "0",
"year": "2017",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNyen1vo",
"doi": "10.1109/ICCNEA.2017.66",
"title": "Research on Fault Diagnosis Technology of CNC Machine Tool Based on Machining Surface Roughness",
"normalizedTitle": "Research on Fault Diagnosis Technology of CNC Machine Tool Based on Machining Surface Roughness",
"abstract": "This paper studied the relationship between the spindle fault and the roughness characteristics, by surface roughness of machining. Spindle common fault is divided into the spindle system is not balanced, the spindle system is not right, the spindle system has a transverse crack and the spindle system rolling bearing failure. The characteristic amount of the machining surface is extracted by CCD laser speckle surface roughness measurement technique. Machine fault information and rough surface relationship were established through the adaptive network-based fuzzy inference system (ANFIS), to achieve the machine tool spindle fault diagnosis. The results indicate that the roughness characteristic can accurately diagnose the machine tool spindle fault and can be an effective method to study the spindle fault of the machine tool.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper studied the relationship between the spindle fault and the roughness characteristics, by surface roughness of machining. Spindle common fault is divided into the spindle system is not balanced, the spindle system is not right, the spindle system has a transverse crack and the spindle system rolling bearing failure. The characteristic amount of the machining surface is extracted by CCD laser speckle surface roughness measurement technique. Machine fault information and rough surface relationship were established through the adaptive network-based fuzzy inference system (ANFIS), to achieve the machine tool spindle fault diagnosis. The results indicate that the roughness characteristic can accurately diagnose the machine tool spindle fault and can be an effective method to study the spindle fault of the machine tool.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper studied the relationship between the spindle fault and the roughness characteristics, by surface roughness of machining. Spindle common fault is divided into the spindle system is not balanced, the spindle system is not right, the spindle system has a transverse crack and the spindle system rolling bearing failure. The characteristic amount of the machining surface is extracted by CCD laser speckle surface roughness measurement technique. Machine fault information and rough surface relationship were established through the adaptive network-based fuzzy inference system (ANFIS), to achieve the machine tool spindle fault diagnosis. The results indicate that the roughness characteristic can accurately diagnose the machine tool spindle fault and can be an effective method to study the spindle fault of the machine tool.",
"fno": "3981a138",
"keywords": [
"Computerised Numerical Control",
"Cracks",
"Fault Diagnosis",
"Fuzzy Reasoning",
"Machine Tool Spindles",
"Machine Tools",
"Machining",
"Mechanical Engineering Computing",
"Rolling Bearings",
"Speckle",
"Surface Roughness",
"Surface Topography Measurement",
"Vibrations",
"Fault Diagnosis Technology",
"CNC Machine Tool",
"Roughness Characteristic",
"Spindle Common Fault",
"Spindle System Rolling Bearing Failure",
"CCD Laser Speckle Surface Roughness Measurement Technique",
"Machine Fault Information",
"Rough Surface Relationship",
"Machine Tool Spindle Fault Diagnosis",
"Adaptive Network Based Fuzzy Inference System",
"Machining Surface Roughness",
"Transverse Crack",
"ANFIS",
"Rough Surfaces",
"Surface Roughness",
"Surface Topography",
"Surface Texture",
"Surface Treatment",
"Vibrations",
"Fault Diagnosis",
"The Spindle Fault",
"Roughness Characteristics",
"CCD",
"ANFIS",
"Machining"
],
"authors": [
{
"affiliation": null,
"fullName": "Zhou Guang-Wen",
"givenName": "Zhou",
"surname": "Guang-Wen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Mao Chun-Yu",
"givenName": "Mao",
"surname": "Chun-Yu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Tian Mei",
"givenName": "Tian",
"surname": "Mei",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Sun Yan-Hong",
"givenName": "Sun",
"surname": "Yan-Hong",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iccnea",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2017-09-01T00:00:00",
"pubType": "proceedings",
"pages": "138-142",
"year": "2017",
"issn": null,
"isbn": "978-1-5386-3981-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "3981a133",
"articleId": "12OmNyuPL5C",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "3981a143",
"articleId": "12OmNyGbIjP",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iscsic/2017/2941/0/2941a172",
"title": "Influences of Cutting Speed on Surface Roughness during Machining of Chromium Molybdenum Steel with Ceramic Insert Cutting Tool",
"doi": null,
"abstractUrl": "/proceedings-article/iscsic/2017/2941a172/12OmNBhpS7e",
"parentPublication": {
"id": "proceedings/iscsic/2017/2941/0",
"title": "2017 International Symposium on Computer Science and Intelligent Controls (ISCSIC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cerma/2008/3320/0/3320a033",
"title": "Development and Application of an Intelligent System to Predict and Optimize the Surface Roughness of 1018 and 4140 Steel",
"doi": null,
"abstractUrl": "/proceedings-article/cerma/2008/3320a033/12OmNBpVQ1A",
"parentPublication": {
"id": "proceedings/cerma/2008/3320/0",
"title": "Electronics, Robotics and Automotive Mechanics Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ams/2009/3648/0/3648a035",
"title": "Review of ANN Technique for Modeling Surface Roughness Performance Measure in Machining Process",
"doi": null,
"abstractUrl": "/proceedings-article/ams/2009/3648a035/12OmNx0RIPU",
"parentPublication": {
"id": "proceedings/ams/2009/3648/0",
"title": "Asia International Conference on Modelling & Simulation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdma/2012/4772/0/4772a218",
"title": "Analysis of the Curved Surface on Surface Roughness Measurement Using Dichromatic Speckle Pattens",
"doi": null,
"abstractUrl": "/proceedings-article/icdma/2012/4772a218/12OmNz2kqrR",
"parentPublication": {
"id": "proceedings/icdma/2012/4772/0",
"title": "2012 Third International Conference on Digital Manufacturing & Automation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/acsat/2015/0423/0/07478715",
"title": "Analyzing and Modeling the Influence of Workpiece Thickness on Geometry of Slot Machining Wire EDMs",
"doi": null,
"abstractUrl": "/proceedings-article/acsat/2015/07478715/12OmNzAohQu",
"parentPublication": {
"id": "proceedings/acsat/2015/0423/0",
"title": "2015 4th International Conference on Advanced Computer Science Applications and Technologies (ACSAT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icnc/2009/3736/1/3736a521",
"title": "Intelligent Modeling and Predicting Surface Roughness in End Milling",
"doi": null,
"abstractUrl": "/proceedings-article/icnc/2009/3736a521/12OmNzZEArR",
"parentPublication": {
"id": "proceedings/icnc/2009/3736/4",
"title": "2009 Fifth International Conference on Natural Computation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icisce/2018/5500/0/550000a791",
"title": "Influence of Random Road Surface Roughness to Servo System",
"doi": null,
"abstractUrl": "/proceedings-article/icisce/2018/550000a791/17D45Xq6dAW",
"parentPublication": {
"id": "proceedings/icisce/2018/5500/0",
"title": "2018 5th International Conference on Information Science and Control Engineering (ICISCE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icitbs/2019/1307/0/130700a122",
"title": "Process Analysis and Parameter Optimization of Five Axis NC Machine for Machining Complex Curved Surface Impellers",
"doi": null,
"abstractUrl": "/proceedings-article/icitbs/2019/130700a122/18Av0D6MDwA",
"parentPublication": {
"id": "proceedings/icitbs/2019/1307/0",
"title": "2019 International Conference on Intelligent Transportation, Big Data & Smart City (ICITBS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wcmeim/2020/4109/0/410900a283",
"title": "Ultrasonic-assisted micro-hole machining research and parameter optimization experiment",
"doi": null,
"abstractUrl": "/proceedings-article/wcmeim/2020/410900a283/1t2mAcb2hDG",
"parentPublication": {
"id": "proceedings/wcmeim/2020/4109/0",
"title": "2020 3rd World Conference on Mechanical Engineering and Intelligent Manufacturing (WCMEIM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wcmeim/2020/4109/0/410900a161",
"title": "Research on Predicting Machining Surface Roughness Based on Neural Network",
"doi": null,
"abstractUrl": "/proceedings-article/wcmeim/2020/410900a161/1t2mLsKZMVW",
"parentPublication": {
"id": "proceedings/wcmeim/2020/4109/0",
"title": "2020 3rd World Conference on Mechanical Engineering and Intelligent Manufacturing (WCMEIM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNBUAvV0",
"title": "2013 Fourth International Conference on Digital Manufacturing & Automation (ICDMA)",
"acronym": "icdma",
"groupId": "1800272",
"volume": "0",
"displayVolume": "0",
"year": "2013",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNzw8j84",
"doi": "10.1109/ICDMA.2013.135",
"title": "Experimental Research on Application of Nano-Concrete to Reduce Roughness Coefficient",
"normalizedTitle": "Experimental Research on Application of Nano-Concrete to Reduce Roughness Coefficient",
"abstract": "In order to research on the application of nano-concrete to reduce roughness coefficient, and considering that roughness coefficient and calculating it are fundamental issues in hydraulic engineering. Also, analyzing the correct evaluation of flow rate and interaction with other hydraulic parameters such as velocity, shape and type of section, and flow pattern, is one of the most important problems in fluid mechanics, and considering that the possibility of minimizing roughness on designing of hydraulic structures and irrigation networks in order to increase velocity and then the flow rate at different sections, are subjects which have been noted by researches and industrial entrepreneurs since years ago. the author of the article had done a series of experiments, the effects of using silicate nano-particles in the floor coating of the channels have been studied in a hydraulic laboratory flume. Through this experimental research, we could had known that adding Nano-Silicate to the concrete mixture will cause the active SiO2 to mix with the free calcium hydroxide available in the micro holes of the concrete and produce unsolved calcium silicate, and eventually cause the structure of the cement to become more dense and become less penetrable causing the concrete to be more resistant. By using this product, we can produce smooth and homogenous surfaces in the upper surface that can increase the flow rate and velocity of fluid in channels, clarifiers and crests of dams.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In order to research on the application of nano-concrete to reduce roughness coefficient, and considering that roughness coefficient and calculating it are fundamental issues in hydraulic engineering. Also, analyzing the correct evaluation of flow rate and interaction with other hydraulic parameters such as velocity, shape and type of section, and flow pattern, is one of the most important problems in fluid mechanics, and considering that the possibility of minimizing roughness on designing of hydraulic structures and irrigation networks in order to increase velocity and then the flow rate at different sections, are subjects which have been noted by researches and industrial entrepreneurs since years ago. the author of the article had done a series of experiments, the effects of using silicate nano-particles in the floor coating of the channels have been studied in a hydraulic laboratory flume. Through this experimental research, we could had known that adding Nano-Silicate to the concrete mixture will cause the active SiO2 to mix with the free calcium hydroxide available in the micro holes of the concrete and produce unsolved calcium silicate, and eventually cause the structure of the cement to become more dense and become less penetrable causing the concrete to be more resistant. By using this product, we can produce smooth and homogenous surfaces in the upper surface that can increase the flow rate and velocity of fluid in channels, clarifiers and crests of dams.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In order to research on the application of nano-concrete to reduce roughness coefficient, and considering that roughness coefficient and calculating it are fundamental issues in hydraulic engineering. Also, analyzing the correct evaluation of flow rate and interaction with other hydraulic parameters such as velocity, shape and type of section, and flow pattern, is one of the most important problems in fluid mechanics, and considering that the possibility of minimizing roughness on designing of hydraulic structures and irrigation networks in order to increase velocity and then the flow rate at different sections, are subjects which have been noted by researches and industrial entrepreneurs since years ago. the author of the article had done a series of experiments, the effects of using silicate nano-particles in the floor coating of the channels have been studied in a hydraulic laboratory flume. Through this experimental research, we could had known that adding Nano-Silicate to the concrete mixture will cause the active SiO2 to mix with the free calcium hydroxide available in the micro holes of the concrete and produce unsolved calcium silicate, and eventually cause the structure of the cement to become more dense and become less penetrable causing the concrete to be more resistant. By using this product, we can produce smooth and homogenous surfaces in the upper surface that can increase the flow rate and velocity of fluid in channels, clarifiers and crests of dams.",
"fno": "5016a572",
"keywords": [
"Concrete",
"Coatings",
"Floors",
"Rough Surfaces",
"Surface Roughness",
"Laboratories",
"Rroughness Coefficient",
"Experimental Research",
"Nano Concrete"
],
"authors": [
{
"affiliation": null,
"fullName": "Deng Shaoyun",
"givenName": "Deng",
"surname": "Shaoyun",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icdma",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2013-06-01T00:00:00",
"pubType": "proceedings",
"pages": "572-574",
"year": "2013",
"issn": null,
"isbn": "978-0-7695-5016-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "5016a568",
"articleId": "12OmNzVGcNf",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "5016a575",
"articleId": "12OmNvSKNMw",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icdma/2013/5016/0/5016a224",
"title": "Analysis of Terrain Roughness Based on Statistics",
"doi": null,
"abstractUrl": "/proceedings-article/icdma/2013/5016a224/12OmNBEpnAQ",
"parentPublication": {
"id": "proceedings/icdma/2013/5016/0",
"title": "2013 Fourth International Conference on Digital Manufacturing & Automation (ICDMA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ccats/2015/8211/0/8211a086",
"title": "Surface Roughness Measurement Application Using Multi-frame Techniques",
"doi": null,
"abstractUrl": "/proceedings-article/ccats/2015/8211a086/12OmNvT2pgA",
"parentPublication": {
"id": "proceedings/ccats/2015/8211/0",
"title": "2015 International Conference on Computer Application Technologies (CCATS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/haptics/2008/2005/0/04479905",
"title": "The Geometric Model for Perceived Roughness Applies to Virtual Textures",
"doi": null,
"abstractUrl": "/proceedings-article/haptics/2008/04479905/12OmNwt5sjl",
"parentPublication": {
"id": "proceedings/haptics/2008/2005/0",
"title": "IEEE Haptics Symposium 2008",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccnea/2017/3981/0/3981a138",
"title": "Research on Fault Diagnosis Technology of CNC Machine Tool Based on Machining Surface Roughness",
"doi": null,
"abstractUrl": "/proceedings-article/iccnea/2017/3981a138/12OmNyen1vo",
"parentPublication": {
"id": "proceedings/iccnea/2017/3981/0",
"title": "2017 International Conference on Computer Network, Electronic and Automation (ICCNEA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/th/2011/02/tth2011020122",
"title": "Roughness Perception in Virtual Textures",
"doi": null,
"abstractUrl": "/journal/th/2011/02/tth2011020122/13rRUxYINfp",
"parentPublication": {
"id": "trans/th",
"title": "IEEE Transactions on Haptics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/th/2015/02/07060666",
"title": "Surface-Roughness-Based Virtual Textiles: Evaluation Using a Multi-Contactor Display",
"doi": null,
"abstractUrl": "/journal/th/2015/02/07060666/13rRUxly95L",
"parentPublication": {
"id": "trans/th",
"title": "IEEE Transactions on Haptics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icisce/2018/5500/0/550000a791",
"title": "Influence of Random Road Surface Roughness to Servo System",
"doi": null,
"abstractUrl": "/proceedings-article/icisce/2018/550000a791/17D45Xq6dAW",
"parentPublication": {
"id": "proceedings/icisce/2018/5500/0",
"title": "2018 5th International Conference on Information Science and Control Engineering (ICISCE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/05/08642446",
"title": "Modulating Fine Roughness Perception of Vibrotactile Textured Surface using Pseudo-haptic Effect",
"doi": null,
"abstractUrl": "/journal/tg/2019/05/08642446/17PYEjfZjoZ",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccairo/2019/3572/0/357200a213",
"title": "Surface Roughness Optimization of Poly-Jet 3D Printing Using Grey Taguchi Method",
"doi": null,
"abstractUrl": "/proceedings-article/iccairo/2019/357200a213/1iQ31NIk7xC",
"parentPublication": {
"id": "proceedings/iccairo/2019/3572/0",
"title": "2019 International Conference on Control, Artificial Intelligence, Robotics & Optimization (ICCAIRO)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wcmeim/2020/4109/0/410900a161",
"title": "Research on Predicting Machining Surface Roughness Based on Neural Network",
"doi": null,
"abstractUrl": "/proceedings-article/wcmeim/2020/410900a161/1t2mLsKZMVW",
"parentPublication": {
"id": "proceedings/wcmeim/2020/4109/0",
"title": "2020 3rd World Conference on Mechanical Engineering and Intelligent Manufacturing (WCMEIM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNxdDFCM",
"title": "2012 16th International Symposium on Wearable Computers",
"acronym": "iswc",
"groupId": "1000810",
"volume": "0",
"displayVolume": "0",
"year": "2012",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNxwncll",
"doi": "10.1109/ISWC.2012.28",
"title": "Huffman Base-4 Text Entry Glove (H4 TEG)",
"normalizedTitle": "Huffman Base-4 Text Entry Glove (H4 TEG)",
"abstract": "We designed and evaluated a Huffman base-4 Text Entry Glove (H4 TEG). H4 TEG uses pinches between the thumb and fingers on the user's right hand. Characters and commands use base-4 Huffman codes for efficient input. In a longitudinal study, participants reached 14.0 wpm with error rates",
"abstracts": [
{
"abstractType": "Regular",
"content": "We designed and evaluated a Huffman base-4 Text Entry Glove (H4 TEG). H4 TEG uses pinches between the thumb and fingers on the user's right hand. Characters and commands use base-4 Huffman codes for efficient input. In a longitudinal study, participants reached 14.0 wpm with error rates",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We designed and evaluated a Huffman base-4 Text Entry Glove (H4 TEG). H4 TEG uses pinches between the thumb and fingers on the user's right hand. Characters and commands use base-4 Huffman codes for efficient input. In a longitudinal study, participants reached 14.0 wpm with error rates",
"fno": "4697a041",
"keywords": [
"Thumb",
"Keyboards",
"Error Analysis",
"Indexes",
"Visualization",
"Presses",
"Design",
"Input Glove",
"Text Input",
"Experimentation"
],
"authors": [
{
"affiliation": null,
"fullName": "I. Scott MacKenzie",
"givenName": "I. Scott",
"surname": "MacKenzie",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Bartosz Bajer",
"givenName": "Bartosz",
"surname": "Bajer",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Melanie Baljko",
"givenName": "Melanie",
"surname": "Baljko",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iswc",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2012-06-01T00:00:00",
"pubType": "proceedings",
"pages": "41-47",
"year": "2012",
"issn": "1550-4816",
"isbn": "978-0-7695-4697-1",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "4697a037",
"articleId": "12OmNAYXWJZ",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "4697a048",
"articleId": "12OmNxwncuJ",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vlhcc/2017/0443/0/08103481",
"title": "Text entry using five to seven physical keys",
"doi": null,
"abstractUrl": "/proceedings-article/vlhcc/2017/08103481/17D45XDIXRA",
"parentPublication": {
"id": "proceedings/vlhcc/2017/0443/0",
"title": "2017 IEEE Symposium on Visual Languages and Human-Centric Computing (VL/HCC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "17D45VtKipK",
"title": "2017 IEEE Symposium on Visual Languages and Human-Centric Computing (VL/HCC)",
"acronym": "vlhcc",
"groupId": "1001007",
"volume": "0",
"displayVolume": "0",
"year": "2017",
"__typename": "ProceedingType"
},
"article": {
"id": "17D45XDIXRA",
"doi": "10.1109/VLHCC.2017.8103481",
"title": "Text entry using five to seven physical keys",
"normalizedTitle": "Text entry using five to seven physical keys",
"abstract": "We designed, implemented, and evaluated a small physical keyboard, composed of four to five keys with a variety of mappings for the letters, along with one or two navigational function keys operated by the thumb. The small size allows it to be used for smartwatch text entry, and can be mounted on various body parts as a wearable keyboard. It is primarily used as an ambiguous keyboard, akin to T9, although a multi-tap mode is also present. Our keyboard layouts include Alphabetic, Collapsed QWERTY, Mnemonic (where letters are grouped based on their shapes), and Optimized (which minimizes the number of conflicts). In a between-users study, participants achieved an average of 15.4 words per minute (WPM) across all layouts, with one user with the Collapsed QWERTY layout reaching a top speed of 30.6 WPM after 4 hours of practice. User feedback was generally favorable.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We designed, implemented, and evaluated a small physical keyboard, composed of four to five keys with a variety of mappings for the letters, along with one or two navigational function keys operated by the thumb. The small size allows it to be used for smartwatch text entry, and can be mounted on various body parts as a wearable keyboard. It is primarily used as an ambiguous keyboard, akin to T9, although a multi-tap mode is also present. Our keyboard layouts include Alphabetic, Collapsed QWERTY, Mnemonic (where letters are grouped based on their shapes), and Optimized (which minimizes the number of conflicts). In a between-users study, participants achieved an average of 15.4 words per minute (WPM) across all layouts, with one user with the Collapsed QWERTY layout reaching a top speed of 30.6 WPM after 4 hours of practice. User feedback was generally favorable.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We designed, implemented, and evaluated a small physical keyboard, composed of four to five keys with a variety of mappings for the letters, along with one or two navigational function keys operated by the thumb. The small size allows it to be used for smartwatch text entry, and can be mounted on various body parts as a wearable keyboard. It is primarily used as an ambiguous keyboard, akin to T9, although a multi-tap mode is also present. Our keyboard layouts include Alphabetic, Collapsed QWERTY, Mnemonic (where letters are grouped based on their shapes), and Optimized (which minimizes the number of conflicts). In a between-users study, participants achieved an average of 15.4 words per minute (WPM) across all layouts, with one user with the Collapsed QWERTY layout reaching a top speed of 30.6 WPM after 4 hours of practice. User feedback was generally favorable.",
"fno": "08103481",
"keywords": [
"Layout",
"Keyboards",
"Thumb",
"Presses",
"Mobile Communication",
"Shape",
"Text Entry",
"Mobile Input",
"One Handed Keyboard",
"Interaction Techniques",
"Handheld Device",
"Smart Watch"
],
"authors": [
{
"affiliation": "Carnegie Mellon University, Pittsburgh, PA, USA",
"fullName": "Elliot Lockerman",
"givenName": "Elliot",
"surname": "Lockerman",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Carnegie Mellon University, Pittsburgh, PA, USA",
"fullName": "Shuobi Wu",
"givenName": "Shuobi",
"surname": "Wu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Facebook, Inc., Menlo Park, CA, USA",
"fullName": "Ariel Rao",
"givenName": "Ariel",
"surname": "Rao",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Carnegie Mellon University, Pittsburgh, PA, USA",
"fullName": "Jarret Lin",
"givenName": "Jarret",
"surname": "Lin",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Carnegie Mellon University, Pittsburgh, PA, USA",
"fullName": "Neil Bantoc",
"givenName": "Neil",
"surname": "Bantoc",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Carnegie Mellon University, Pittsburgh, PA, USA",
"fullName": "Brad A. Myers",
"givenName": "Brad A.",
"surname": "Myers",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vlhcc",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2017-10-01T00:00:00",
"pubType": "proceedings",
"pages": "291-295",
"year": "2017",
"issn": "1943-6106",
"isbn": "978-1-5386-0443-4",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08103480",
"articleId": "17D45WB0qby",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08103482",
"articleId": "17D45XzbnJs",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icpads/2014/7615/0/07097812",
"title": "Virtual keyboard for head mounted display-based wearable devices",
"doi": null,
"abstractUrl": "/proceedings-article/icpads/2014/07097812/12OmNqzu6VX",
"parentPublication": {
"id": "proceedings/icpads/2014/7615/0",
"title": "2014 20th IEEE International Conference on Parallel and Distributed Systems (ICPADS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ictai/2016/4459/0/4459a165",
"title": "Modifying Keyboard Layout to Reduce Finger-Travel Distance",
"doi": null,
"abstractUrl": "/proceedings-article/ictai/2016/4459a165/12OmNvwC5up",
"parentPublication": {
"id": "proceedings/ictai/2016/4459/0",
"title": "2016 IEEE 28th International Conference on Tools with Artificial Intelligence (ICTAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iswc/2012/4697/0/4697a041",
"title": "Huffman Base-4 Text Entry Glove (H4 TEG)",
"doi": null,
"abstractUrl": "/proceedings-article/iswc/2012/4697a041/12OmNxwncll",
"parentPublication": {
"id": "proceedings/iswc/2012/4697/0",
"title": "2012 16th International Symposium on Wearable Computers",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/11/08456570",
"title": "PizzaText: Text Entry for Virtual Reality Systems Using Dual Thumbsticks",
"doi": null,
"abstractUrl": "/journal/tg/2018/11/08456570/14M3DYGRu3o",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/05/08642443",
"title": "RingText: Dwell-free and hands-free Text Entry for Mobile Head-Mounted Displays using Head Motions",
"doi": null,
"abstractUrl": "/journal/tg/2019/05/08642443/17PYEjrlgBP",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tm/5555/01/09737726",
"title": "MyoKey: Inertial Motion Sensing and Gesture-based QWERTY Keyboard for Extended Realities",
"doi": null,
"abstractUrl": "/journal/tm/5555/01/09737726/1BQlEBR0ceY",
"parentPublication": {
"id": "trans/tm",
"title": "IEEE Transactions on Mobile Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a646",
"title": "A Pinch-based Text Entry Method for Head-mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a646/1CJeVfhmmkg",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/11/09874256",
"title": "Efficient Flower Text Entry in Virtual Reality",
"doi": null,
"abstractUrl": "/journal/tg/2022/11/09874256/1GjwONKhl84",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icghit/2019/0627/0/062700a055",
"title": "Wireless Text Entry and Mouse System for the Handicapped",
"doi": null,
"abstractUrl": "/proceedings-article/icghit/2019/062700a055/1e5ZfgVovLi",
"parentPublication": {
"id": "proceedings/icghit/2019/0627/0",
"title": "2019 International Conference on Green and Human Information Technology (ICGHIT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2020/5608/0/09089533",
"title": "HiPad: Text entry for Head-Mounted Displays Using Circular Touchpad",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2020/09089533/1jIx7JtSOTC",
"parentPublication": {
"id": "proceedings/vr/2020/5608/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1cI6akLvAuQ",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2019",
"__typename": "ProceedingType"
},
"article": {
"id": "1cJ196OGdJm",
"doi": "10.1109/VR.2019.8797740",
"title": "Towards Utilizing Touch-sensitive Physical Keyboards for Text Entry in Virtual Reality",
"normalizedTitle": "Towards Utilizing Touch-sensitive Physical Keyboards for Text Entry in Virtual Reality",
"abstract": "Text entry is a challenge for Virtual Reality (VR) applications. In the context of immersive VR Head-Mounted Displays, text entry has been investigated for standard physical keyboards as well as for various hand representations. Specifically, prior work has indicated that minimalistic fingertip visualizations are an efficient hand representation. However, they typically require external tracking systems. Touch-sensitive physical keyboards allow for on-surface interaction, with sensing integrated into the keyboard itself. However, they have not been thoroughly investigated within VR. Our work brings together the domains of VR text entry and touch-sensitive physical keyboards. Specifically, we propose to utilize touch-sensitive physical keyboards for text entry as an alternative sensing mechanism for tracking user's fingertips and study its performance in a preliminary user study.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Text entry is a challenge for Virtual Reality (VR) applications. In the context of immersive VR Head-Mounted Displays, text entry has been investigated for standard physical keyboards as well as for various hand representations. Specifically, prior work has indicated that minimalistic fingertip visualizations are an efficient hand representation. However, they typically require external tracking systems. Touch-sensitive physical keyboards allow for on-surface interaction, with sensing integrated into the keyboard itself. However, they have not been thoroughly investigated within VR. Our work brings together the domains of VR text entry and touch-sensitive physical keyboards. Specifically, we propose to utilize touch-sensitive physical keyboards for text entry as an alternative sensing mechanism for tracking user's fingertips and study its performance in a preliminary user study.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Text entry is a challenge for Virtual Reality (VR) applications. In the context of immersive VR Head-Mounted Displays, text entry has been investigated for standard physical keyboards as well as for various hand representations. Specifically, prior work has indicated that minimalistic fingertip visualizations are an efficient hand representation. However, they typically require external tracking systems. Touch-sensitive physical keyboards allow for on-surface interaction, with sensing integrated into the keyboard itself. However, they have not been thoroughly investigated within VR. Our work brings together the domains of VR text entry and touch-sensitive physical keyboards. Specifically, we propose to utilize touch-sensitive physical keyboards for text entry as an alternative sensing mechanism for tracking user's fingertips and study its performance in a preliminary user study.",
"fno": "08797740",
"keywords": [
"Augmented Reality",
"Helmet Mounted Displays",
"Keyboards",
"Touch Sensitive Screens",
"Touch Sensitive Physical Keyboards",
"VR Text Entry",
"Virtual Reality Applications",
"Immersive VR Head Mounted Displays",
"Hand Representation",
"Keyboards",
"Sensors",
"Visualization",
"Pins",
"Resists",
"Microcontrollers",
"Standards",
"H 5 2 User Interfaces Input Devices And Strategies"
],
"authors": [
{
"affiliation": "Coburg University of Applied Sciences and Arts, Germany",
"fullName": "Alexander Otte",
"givenName": "Alexander",
"surname": "Otte",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Coburg University of Applied Sciences and Arts, Germany",
"fullName": "Tim Menzner",
"givenName": "Tim",
"surname": "Menzner",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Coburg University of Applied Sciences and Arts, Germany",
"fullName": "Travis Gesslein",
"givenName": "Travis",
"surname": "Gesslein",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Coburg University of Applied Sciences and Arts, Germany",
"fullName": "Philipp Gagel",
"givenName": "Philipp",
"surname": "Gagel",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Coburg University of Applied Sciences and Arts, Germany",
"fullName": "Daniel Schneider",
"givenName": "Daniel",
"surname": "Schneider",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Coburg University of Applied Sciences and Arts, Germany",
"fullName": "Jens Grubert",
"givenName": "Jens",
"surname": "Grubert",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2019-03-01T00:00:00",
"pubType": "proceedings",
"pages": "1729-1732",
"year": "2019",
"issn": null,
"isbn": "978-1-7281-1377-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08798105",
"articleId": "1cJ0Qs2rZCg",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08798189",
"articleId": "1cJ0GcCwwO4",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2018/3365/0/08446059",
"title": "Text Entry in Immersive Head-Mounted Display-Based Virtual Reality Using Standard Keyboards",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446059/13bd1eSlysI",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446250",
"title": "Effects of Hand Representations for Typing in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446250/13bd1eTtWYT",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a694",
"title": "From 2D to 3D: Facilitating Single-Finger Mid-Air Typing on Virtual Keyboards with Probabilistic Touch Modeling",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a694/1CJf9WRhN84",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/05/10049665",
"title": "Text Input for Non-Stationary XR Workspaces: Investigating Tap and Word-Gesture Keyboards in Virtual and Augmented Reality",
"doi": null,
"abstractUrl": "/journal/tg/2023/05/10049665/1KYooqYQbF6",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aivr/2022/5725/0/572500a140",
"title": "Direct Interaction Word-Gesture Text Input in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/aivr/2022/572500a140/1KmF8k8WXi8",
"parentPublication": {
"id": "proceedings/aivr/2022/5725/0",
"title": "2022 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797754",
"title": "A Capacitive-sensing Physical Keyboard for VR Text Entry",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797754/1cJ1cJDgPXq",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/11/08794572",
"title": "ReconViguRation: Reconfiguring Physical Keyboards in Virtual Reality",
"doi": null,
"abstractUrl": "/journal/tg/2019/11/08794572/1dXEHv0aKMo",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2019/4765/0/476500a387",
"title": "Evaluating Text Entry in Virtual Reality using a Touch-sensitive Physical Keyboard",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2019/476500a387/1gyslQzq07K",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2019/4765/0",
"title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2020/5608/0/09089533",
"title": "HiPad: Text entry for Head-Mounted Displays Using Circular Touchpad",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2020/09089533/1jIx7JtSOTC",
"parentPublication": {
"id": "proceedings/vr/2020/5608/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1e5Ze7LbDUs",
"title": "2019 International Conference on Green and Human Information Technology (ICGHIT)",
"acronym": "icghit",
"groupId": "1833564",
"volume": "0",
"displayVolume": "0",
"year": "2019",
"__typename": "ProceedingType"
},
"article": {
"id": "1e5ZfgVovLi",
"doi": "10.1109/ICGHIT.2019.00020",
"title": "Wireless Text Entry and Mouse System for the Handicapped",
"normalizedTitle": "Wireless Text Entry and Mouse System for the Handicapped",
"abstract": "Text entry system can be found commonly in modern computer and mobile devices such as smartphone and tablet. However, most of these systems are not easily operable by the handicapped; the system either has small area size of each key or requires major hand movement, such as in the case of the common \"QWERTY\" keyboard that contains over a hundred keys. All of these present systems are less than ideal for the handicapped to use as input device. In this paper, a new wireless text entry system is proposed to help the handicapped people to input text efficiently with minimum hand movement. From the design of the prototype, it is smaller than the traditional keyboard, portable, lightweight, user friendly, and long operating hours with built-in mouse function. Practically, it is tested and proven that only one hand is needed to hold the equipment and one finger to interact as well as to input the text. The algorithm used is: Press-Drag-Release algorithm, namely the user just need to presses on the infrared board, followed by finger dragging around the board for letter selection, and lastly, releasing of finger for input the letter. From the result of experiments, the proposed system was tested and proven portable (7.5cm × 5.5cm × 5.5cm), lightweight (100 grams excluding battery), user-friendly with many built-in functions (Esc, Copy, Cut, Paste, Ctrl-A, and Mouse), and long operating hours (29.4 hours operating time with 10,000 mAH battery). For Words Per Minute (WPM), the experiment shows that it can achieve as high as 5 to 10 WPM depends on the skillfulness of the user.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Text entry system can be found commonly in modern computer and mobile devices such as smartphone and tablet. However, most of these systems are not easily operable by the handicapped; the system either has small area size of each key or requires major hand movement, such as in the case of the common \"QWERTY\" keyboard that contains over a hundred keys. All of these present systems are less than ideal for the handicapped to use as input device. In this paper, a new wireless text entry system is proposed to help the handicapped people to input text efficiently with minimum hand movement. From the design of the prototype, it is smaller than the traditional keyboard, portable, lightweight, user friendly, and long operating hours with built-in mouse function. Practically, it is tested and proven that only one hand is needed to hold the equipment and one finger to interact as well as to input the text. The algorithm used is: Press-Drag-Release algorithm, namely the user just need to presses on the infrared board, followed by finger dragging around the board for letter selection, and lastly, releasing of finger for input the letter. From the result of experiments, the proposed system was tested and proven portable (7.5cm × 5.5cm × 5.5cm), lightweight (100 grams excluding battery), user-friendly with many built-in functions (Esc, Copy, Cut, Paste, Ctrl-A, and Mouse), and long operating hours (29.4 hours operating time with 10,000 mAH battery). For Words Per Minute (WPM), the experiment shows that it can achieve as high as 5 to 10 WPM depends on the skillfulness of the user.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Text entry system can be found commonly in modern computer and mobile devices such as smartphone and tablet. However, most of these systems are not easily operable by the handicapped; the system either has small area size of each key or requires major hand movement, such as in the case of the common \"QWERTY\" keyboard that contains over a hundred keys. All of these present systems are less than ideal for the handicapped to use as input device. In this paper, a new wireless text entry system is proposed to help the handicapped people to input text efficiently with minimum hand movement. From the design of the prototype, it is smaller than the traditional keyboard, portable, lightweight, user friendly, and long operating hours with built-in mouse function. Practically, it is tested and proven that only one hand is needed to hold the equipment and one finger to interact as well as to input the text. The algorithm used is: Press-Drag-Release algorithm, namely the user just need to presses on the infrared board, followed by finger dragging around the board for letter selection, and lastly, releasing of finger for input the letter. From the result of experiments, the proposed system was tested and proven portable (7.5cm × 5.5cm × 5.5cm), lightweight (100 grams excluding battery), user-friendly with many built-in functions (Esc, Copy, Cut, Paste, Ctrl-A, and Mouse), and long operating hours (29.4 hours operating time with 10,000 mAH battery). For Words Per Minute (WPM), the experiment shows that it can achieve as high as 5 to 10 WPM depends on the skillfulness of the user.",
"fno": "062700a055",
"keywords": [
"Handicapped Aids",
"Keyboards",
"Modern Computer",
"Mobile Devices",
"Smartphone Tablet",
"Input Device",
"Wireless Text Entry System",
"Handicapped People",
"Input Text",
"Minimum Hand Movement",
"Portable User Friendly",
"Lightweight User Friendly",
"Mouse Function",
"Size 7 5 Cm",
"Size 5 5 Cm",
"Time 29 4 Hour",
"Mice",
"Keyboards",
"Graphical User Interfaces",
"Receivers",
"Transmitters",
"Batteries",
"Hardware",
"Text Entry Method",
"Handicapped Aids",
"Human Computer Interaction",
"Efficient Text Entry Model"
],
"authors": [
{
"affiliation": "University Tunku Abdul Rahman",
"fullName": "Chun Hou Cheah",
"givenName": "Chun Hou",
"surname": "Cheah",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University Tunku Abdul Rahman",
"fullName": "Wai Lum Chooi",
"givenName": "Wai Lum",
"surname": "Chooi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University Tunku Abdul Rahman",
"fullName": "Heng Yew Lee",
"givenName": "Heng Yew",
"surname": "Lee",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University Tunku Abdul Rahman",
"fullName": "Shen Khang Teoh",
"givenName": "Shen Khang",
"surname": "Teoh",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University Tunku Abdul Rahman",
"fullName": "Chun Farn Leong",
"givenName": "Chun Farn",
"surname": "Leong",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icghit",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2019-01-01T00:00:00",
"pubType": "proceedings",
"pages": "55-59",
"year": "2019",
"issn": null,
"isbn": "978-1-7281-0627-4",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "062700a049",
"articleId": "1e5ZftMaxSo",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "062700a060",
"articleId": "1e5ZfDlZnZm",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iswc/2004/2186/0/21860094",
"title": "Expert Chording Text Entry on the Twiddler One-Handed Keyboard",
"doi": null,
"abstractUrl": "/proceedings-article/iswc/2004/21860094/12OmNBqv2p0",
"parentPublication": {
"id": "proceedings/iswc/2004/2186/0",
"title": "Eighth International Symposium on Wearable Computers",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iih-msp/2009/3762/0/3762a583",
"title": "A Fast Text-Based Communication System for Handicapped Aphasiacs",
"doi": null,
"abstractUrl": "/proceedings-article/iih-msp/2009/3762a583/12OmNC36tOF",
"parentPublication": {
"id": "proceedings/iih-msp/2009/3762/0",
"title": "Intelligent Information Hiding and Multimedia Signal Processing, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icicic/2007/2882/0/28820299",
"title": "Static Finger Language Recognition for Handicapped Aphasiacs",
"doi": null,
"abstractUrl": "/proceedings-article/icicic/2007/28820299/12OmNs59K41",
"parentPublication": {
"id": "proceedings/icicic/2007/2882/0",
"title": "2007 Second International Conference on Innovative Computing, Information and Control",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dexa/2005/2424/0/24240891",
"title": "Four-Key Text Entry Augmented with Color Blinking Feedback for Print-Handicapped People with Ocular Pathology",
"doi": null,
"abstractUrl": "/proceedings-article/dexa/2005/24240891/12OmNvjQ95C",
"parentPublication": {
"id": "proceedings/dexa/2005/2424/0",
"title": "16th International Workshop on Database and Expert Systems Applications (DEXA'05)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iswc/2012/4697/0/4697a041",
"title": "Huffman Base-4 Text Entry Glove (H4 TEG)",
"doi": null,
"abstractUrl": "/proceedings-article/iswc/2012/4697a041/12OmNxwncll",
"parentPublication": {
"id": "proceedings/iswc/2012/4697/0",
"title": "2012 16th International Symposium on Wearable Computers",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/11/08456570",
"title": "PizzaText: Text Entry for Virtual Reality Systems Using Dual Thumbsticks",
"doi": null,
"abstractUrl": "/journal/tg/2018/11/08456570/14M3DYGRu3o",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vlhcc/2017/0443/0/08103481",
"title": "Text entry using five to seven physical keys",
"doi": null,
"abstractUrl": "/proceedings-article/vlhcc/2017/08103481/17D45XDIXRA",
"parentPublication": {
"id": "proceedings/vlhcc/2017/0443/0",
"title": "2017 IEEE Symposium on Visual Languages and Human-Centric Computing (VL/HCC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a694",
"title": "From 2D to 3D: Facilitating Single-Finger Mid-Air Typing on Virtual Keyboards with Probabilistic Touch Modeling",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a694/1CJf9WRhN84",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/11/09874256",
"title": "Efficient Flower Text Entry in Virtual Reality",
"doi": null,
"abstractUrl": "/journal/tg/2022/11/09874256/1GjwONKhl84",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2020/5608/0/09089533",
"title": "HiPad: Text entry for Head-Mounted Displays Using Circular Touchpad",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2020/09089533/1jIx7JtSOTC",
"parentPublication": {
"id": "proceedings/vr/2020/5608/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1pystLSz19C",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"acronym": "ismar",
"groupId": "1000465",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1pysyrYBX5C",
"doi": "10.1109/ISMAR50242.2020.00061",
"title": "Exploration of Hands-free Text Entry Techniques For Virtual Reality",
"normalizedTitle": "Exploration of Hands-free Text Entry Techniques For Virtual Reality",
"abstract": "Text entry is a common activity in virtual reality (VR) systems. There is a limited number of available hands-free techniques, which allow users to carry out text entry when users’ hands are busy such as holding items or hand-based devices are not available. The most used hands-free text entry technique is DwellType, where a user selects a letter by dwelling over it for a specific period. However, its performance is limited due to the fixed dwell time for each character selection. In this paper, we explore two other hands-free text entry mechanisms in VR: BlinkType and NeckType, which leverage users’ eye blinks and neck’s forward and backward movements to select letters. With a user study, we compare the performance of the two techniques with DwellType. Results show that users can achieve an average text entry rate of 13.47, 11.18 and 11.65 words per minute with BlinkType, NeckType, and DwellType, respectively. Users’ subjective feedback shows BlinkType as the preferred technique for text entry in VR.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Text entry is a common activity in virtual reality (VR) systems. There is a limited number of available hands-free techniques, which allow users to carry out text entry when users’ hands are busy such as holding items or hand-based devices are not available. The most used hands-free text entry technique is DwellType, where a user selects a letter by dwelling over it for a specific period. However, its performance is limited due to the fixed dwell time for each character selection. In this paper, we explore two other hands-free text entry mechanisms in VR: BlinkType and NeckType, which leverage users’ eye blinks and neck’s forward and backward movements to select letters. With a user study, we compare the performance of the two techniques with DwellType. Results show that users can achieve an average text entry rate of 13.47, 11.18 and 11.65 words per minute with BlinkType, NeckType, and DwellType, respectively. Users’ subjective feedback shows BlinkType as the preferred technique for text entry in VR.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Text entry is a common activity in virtual reality (VR) systems. There is a limited number of available hands-free techniques, which allow users to carry out text entry when users’ hands are busy such as holding items or hand-based devices are not available. The most used hands-free text entry technique is DwellType, where a user selects a letter by dwelling over it for a specific period. However, its performance is limited due to the fixed dwell time for each character selection. In this paper, we explore two other hands-free text entry mechanisms in VR: BlinkType and NeckType, which leverage users’ eye blinks and neck’s forward and backward movements to select letters. With a user study, we compare the performance of the two techniques with DwellType. Results show that users can achieve an average text entry rate of 13.47, 11.18 and 11.65 words per minute with BlinkType, NeckType, and DwellType, respectively. Users’ subjective feedback shows BlinkType as the preferred technique for text entry in VR.",
"fno": "850800a344",
"keywords": [
"Keyboards",
"Text Analysis",
"User Interfaces",
"Virtual Reality",
"Hands Free Text Entry Technique",
"VR",
"Text Entry Rate",
"Virtual Reality Systems",
"Hand Based Devices",
"Character Selection",
"Users Eye Blinks",
"Performance Evaluation",
"Head Mounted Displays",
"Resists",
"Gaze Tracking",
"Indexes",
"Augmented Reality",
"Virtual Reality",
"Text Entry",
"Dwelling",
"Eye Blinking",
"Neck Type",
"Head Mounted Display",
"Human Centered Computing",
"Human Computer Interaction HCI",
"Interaction Paradigms",
"Virtual Reality",
"Human Centered Computing",
"Human Computer Interaction HCI",
"Interaction Techniques",
"Text Input"
],
"authors": [
{
"affiliation": "Xi’an Jiaotong-Liverpool University",
"fullName": "Xueshi Lu",
"givenName": "Xueshi",
"surname": "Lu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "The University of Melbourne",
"fullName": "Difeng Yu",
"givenName": "Difeng",
"surname": "Yu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Xi’an Jiaotong-Liverpool University",
"fullName": "Hai-Ning Liang",
"givenName": "Hai-Ning",
"surname": "Liang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Xi’an Jiaotong-Liverpool University",
"fullName": "Wenge Xu",
"givenName": "Wenge",
"surname": "Xu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Xi’an Jiaotong-Liverpool University",
"fullName": "Yuzheng Chen",
"givenName": "Yuzheng",
"surname": "Chen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Xi’an Jiaotong-Liverpool University",
"fullName": "Xiang Li",
"givenName": "Xiang",
"surname": "Li",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of British Columbia - Okanagan",
"fullName": "Khalad Hasan",
"givenName": "Khalad",
"surname": "Hasan",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-11-01T00:00:00",
"pubType": "proceedings",
"pages": "344-349",
"year": "2020",
"issn": "1554-7868",
"isbn": "978-1-7281-8508-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "850800a332",
"articleId": "1pysxWDVgS4",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "850800a350",
"articleId": "1pysyvL4CwU",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2018/3365/0/08446259",
"title": "Hands-Free Interaction for Augmented Reality in Vascular Interventions",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446259/13bd1gQYgEU",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/05/08642443",
"title": "RingText: Dwell-free and hands-free Text Entry for Mobile Head-Mounted Displays using Head Motions",
"doi": null,
"abstractUrl": "/journal/tg/2019/05/08642443/17PYEjrlgBP",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a646",
"title": "A Pinch-based Text Entry Method for Head-mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a646/1CJeVfhmmkg",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2022/5325/0/532500a074",
"title": "An Exploration of Hands-free Text Selection for Virtual Reality Head-Mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2022/532500a074/1JrRaeV82L6",
"parentPublication": {
"id": "proceedings/ismar/2022/5325/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/05/10049695",
"title": "CrowbarLimbs: A Fatigue-Reducing Virtual Reality Text Entry Metaphor",
"doi": null,
"abstractUrl": "/journal/tg/2023/05/10049695/1KYowtn3pok",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/07/08723303",
"title": "Errata to “RingText: Dwell-Free and Hands-Free Text Entry for Mobile Head-Mounted Displays Using Head Motions” [May 19 1991-2001]",
"doi": null,
"abstractUrl": "/journal/tg/2019/07/08723303/1aqzjJfQFCU",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797901",
"title": "DepthText: Leveraging Head Movements towards the Depth Dimension for Hands-free Text Entry in Mobile Virtual Reality Systems",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797901/1cJ13BSrOkU",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797740",
"title": "Towards Utilizing Touch-sensitive Physical Keyboards for Text Entry in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797740/1cJ196OGdJm",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2019/4765/0/476500a387",
"title": "Evaluating Text Entry in Virtual Reality using a Touch-sensitive Physical Keyboard",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2019/476500a387/1gyslQzq07K",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2019/4765/0",
"title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2021/0158/0/015800a118",
"title": "Exploring Head-based Mode-Switching in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2021/015800a118/1yeD1RhEseY",
"parentPublication": {
"id": "proceedings/ismar/2021/0158/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNC1GueH",
"title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)",
"acronym": "icpr",
"groupId": "1000545",
"volume": "0",
"displayVolume": "0",
"year": "2012",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNrMHOcV",
"doi": "",
"title": "Head pose-free appearance-based gaze sensing via eye image synthesis",
"normalizedTitle": "Head pose-free appearance-based gaze sensing via eye image synthesis",
"abstract": "This paper addresses the problem of estimating human gaze from eye appearance under free head motion. Allowing head motion remains challenging because eye appearance changes significantly for different head poses, and thus new head poses require new training images. To avoid repetitive training, we propose to produce synthetic training images for varying head poses. First, we model pixel displacements between head-moving eye images as 1D pixel flows, and then produce such flows to synthesize new training images from the original training images captured under a fixed default head pose. Specifically, we produce all the required 1D flows by using only four additionally captured images. Our method was successfully tested with extensive experiments to demonstrate its effectiveness.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper addresses the problem of estimating human gaze from eye appearance under free head motion. Allowing head motion remains challenging because eye appearance changes significantly for different head poses, and thus new head poses require new training images. To avoid repetitive training, we propose to produce synthetic training images for varying head poses. First, we model pixel displacements between head-moving eye images as 1D pixel flows, and then produce such flows to synthesize new training images from the original training images captured under a fixed default head pose. Specifically, we produce all the required 1D flows by using only four additionally captured images. Our method was successfully tested with extensive experiments to demonstrate its effectiveness.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper addresses the problem of estimating human gaze from eye appearance under free head motion. Allowing head motion remains challenging because eye appearance changes significantly for different head poses, and thus new head poses require new training images. To avoid repetitive training, we propose to produce synthetic training images for varying head poses. First, we model pixel displacements between head-moving eye images as 1D pixel flows, and then produce such flows to synthesize new training images from the original training images captured under a fixed default head pose. Specifically, we produce all the required 1D flows by using only four additionally captured images. Our method was successfully tested with extensive experiments to demonstrate its effectiveness.",
"fno": "06460306",
"keywords": [
"Eye",
"Pose Estimation",
"Head Pose Free Appearance Based Gaze Sensing",
"Eye Image Synthesis",
"Human Gaze Estimation",
"Eye Appearance",
"Free Head Motion",
"Synthetic Training Images",
"Pixel Displacements",
"Head Moving Eye Images",
"1 D Pixel Flows",
"Head",
"Training",
"Cameras",
"Estimation",
"Accuracy",
"Image Generation",
"Feature Extraction"
],
"authors": [
{
"affiliation": "Institute of Industrial Science, the University of Tokyo, Japan",
"fullName": "Feng Lu",
"givenName": "Feng",
"surname": "Lu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Institute of Industrial Science, the University of Tokyo, Japan",
"fullName": "Yusuke Sugano",
"givenName": "Yusuke",
"surname": "Sugano",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Institute of Industrial Science, the University of Tokyo, Japan",
"fullName": "Takahiro Okabe",
"givenName": "Takahiro",
"surname": "Okabe",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Institute of Industrial Science, the University of Tokyo, Japan",
"fullName": "Yoichi Sato",
"givenName": "Yoichi",
"surname": "Sato",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2012-11-01T00:00:00",
"pubType": "proceedings",
"pages": "1008-1011",
"year": "2012",
"issn": "1051-4651",
"isbn": "978-1-4673-2216-4",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "06460305",
"articleId": "12OmNxX3uHB",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "06460307",
"articleId": "12OmNz2kqdi",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cvpr/2014/5118/0/5118b821",
"title": "Learning-by-Synthesis for Appearance-Based 3D Gaze Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2014/5118b821/12OmNCbU31L",
"parentPublication": {
"id": "proceedings/cvpr/2014/5118/0",
"title": "2014 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vs-games/2011/4419/0/4419a186",
"title": "The Importance of Eye Gaze and Head Pose to Estimating Levels of Attention",
"doi": null,
"abstractUrl": "/proceedings-article/vs-games/2011/4419a186/12OmNqyDjtb",
"parentPublication": {
"id": "proceedings/vs-games/2011/4419/0",
"title": "Games and Virtual Worlds for Serious Applications, Conference in",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2015/8391/0/8391d756",
"title": "Rendering of Eyes for Eye-Shape Registration and Gaze Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2015/8391d756/12OmNrGsDou",
"parentPublication": {
"id": "proceedings/iccv/2015/8391/0",
"title": "2015 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2009/3992/0/05206640",
"title": "Robustifying eye center localization by head pose cues",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2009/05206640/12OmNwM6A3V",
"parentPublication": {
"id": "proceedings/cvpr/2009/3992/0",
"title": "2009 IEEE Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2010/4109/0/4109d870",
"title": "Visual Gaze Estimation by Joint Head and Eye Information",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2010/4109d870/12OmNyRg4Cq",
"parentPublication": {
"id": "proceedings/icpr/2010/4109/0",
"title": "Pattern Recognition, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2014/5209/0/5209b869",
"title": "Appearance-Based Gaze Tracking with Free Head Movement",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2014/5209b869/12OmNyo1nKa",
"parentPublication": {
"id": "proceedings/icpr/2014/5209/0",
"title": "2014 22nd International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacvw/2017/4941/0/07912208",
"title": "Gaze Estimation Based on Eyeball-Head Dynamics",
"doi": null,
"abstractUrl": "/proceedings-article/wacvw/2017/07912208/12OmNzWfoVQ",
"parentPublication": {
"id": "proceedings/wacvw/2017/4941/0",
"title": "2017 IEEE Winter Applications of Computer Vision Workshops (WACVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2014/10/06777326",
"title": "Adaptive Linear Regression for Appearance-Based Gaze Estimation",
"doi": null,
"abstractUrl": "/journal/tp/2014/10/06777326/13rRUEgartY",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2019/01/08122058",
"title": "MPIIGaze: Real-World Dataset and Deep Appearance-Based Gaze Estimation",
"doi": null,
"abstractUrl": "/journal/tp/2019/01/08122058/17D45WZZ7E5",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/acpr/2017/3354/0/3354a935",
"title": "A Deep Learning Approach to Appearance-Based Gaze Estimation under Head Pose Variations",
"doi": null,
"abstractUrl": "/proceedings-article/acpr/2017/3354a935/17D45XacGif",
"parentPublication": {
"id": "proceedings/acpr/2017/3354/0",
"title": "2017 4th IAPR Asian Conference on Pattern Recognition (ACPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNAY79oS",
"title": "2014 IEEE International Conference on Multimedia and Expo (ICME)",
"acronym": "icme",
"groupId": "1000477",
"volume": "0",
"displayVolume": "0",
"year": "2014",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNvjyxUU",
"doi": "10.1109/ICME.2014.6890322",
"title": "Realtime gaze estimation with online calibration",
"normalizedTitle": "Realtime gaze estimation with online calibration",
"abstract": "For an eye gaze estimation system, calibration is an unavoidable procedure to determine certain person-specific parameters, either explicitly or implicitly. Although several offline implicit calibration methods have been proposed to ease the calibration burden, the calibration procedure is still cumbersome and the gaze estimation accuracy needs further improvement. In this paper, we propose a novel 3D gaze estimation system with online calibration. The proposed system uses a new 3D model-based gaze estimation method with a single consumer camera (Kinect). Unlike previous gaze estimation methods using explicit offline calibration with fixed number of calibration points or implicit calibration, our approach constantly improves person-specific eye parameters through online calibration, which enables the system to adapt gradually to a new user. The experimental results and the human-computer interaction (HCI) application show that the proposed system can work in realtime with superior gaze estimation accuracy (< 2°) and minimal calibration burden.",
"abstracts": [
{
"abstractType": "Regular",
"content": "For an eye gaze estimation system, calibration is an unavoidable procedure to determine certain person-specific parameters, either explicitly or implicitly. Although several offline implicit calibration methods have been proposed to ease the calibration burden, the calibration procedure is still cumbersome and the gaze estimation accuracy needs further improvement. In this paper, we propose a novel 3D gaze estimation system with online calibration. The proposed system uses a new 3D model-based gaze estimation method with a single consumer camera (Kinect). Unlike previous gaze estimation methods using explicit offline calibration with fixed number of calibration points or implicit calibration, our approach constantly improves person-specific eye parameters through online calibration, which enables the system to adapt gradually to a new user. The experimental results and the human-computer interaction (HCI) application show that the proposed system can work in realtime with superior gaze estimation accuracy (< 2°) and minimal calibration burden.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "For an eye gaze estimation system, calibration is an unavoidable procedure to determine certain person-specific parameters, either explicitly or implicitly. Although several offline implicit calibration methods have been proposed to ease the calibration burden, the calibration procedure is still cumbersome and the gaze estimation accuracy needs further improvement. In this paper, we propose a novel 3D gaze estimation system with online calibration. The proposed system uses a new 3D model-based gaze estimation method with a single consumer camera (Kinect). Unlike previous gaze estimation methods using explicit offline calibration with fixed number of calibration points or implicit calibration, our approach constantly improves person-specific eye parameters through online calibration, which enables the system to adapt gradually to a new user. The experimental results and the human-computer interaction (HCI) application show that the proposed system can work in realtime with superior gaze estimation accuracy (< 2°) and minimal calibration burden.",
"fno": "06890322",
"keywords": [
"Calibration",
"Three Dimensional Displays",
"Estimation",
"Iris",
"Head",
"Solid Modeling",
"Cameras",
"HCI",
"Gaze Estimation",
"3 D Model Based",
"Gaze Direction",
"Online Calibration"
],
"authors": [
{
"affiliation": "Zhejiang University, Hangzhou, 310027, P.R. China",
"fullName": "Li Sun",
"givenName": "Li",
"surname": "Sun",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Zhejiang University, Hangzhou, 310027, P.R. China",
"fullName": "Mingli Song",
"givenName": "Mingli",
"surname": "Song",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Microsoft Research, Redmond, WA 98052",
"fullName": "Zicheng Liu",
"givenName": "Zicheng",
"surname": "Liu",
"__typename": "ArticleAuthorType"
},
{
          "affiliation": "University of Washington, Seattle, WA 98195",
"fullName": "Ming-Ting Sun",
"givenName": "Ming-Ting",
"surname": "Sun",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icme",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2014-07-01T00:00:00",
"pubType": "proceedings",
"pages": "1-6",
"year": "2014",
"issn": null,
"isbn": "978-1-4799-4761-4",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "06890321",
"articleId": "12OmNvjgWv5",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "06890323",
"articleId": "12OmNAlvI6F",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cvpr/2011/0394/0/05995675",
"title": "Probabilistic gaze estimation without active personal calibration",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2011/05995675/12OmNC8MsAV",
"parentPublication": {
"id": "proceedings/cvpr/2011/0394/0",
"title": "CVPR 2011",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2008/2174/0/04761343",
"title": "3D gaze estimation with a single camera without IR illumination",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2008/04761343/12OmNvvLi4R",
"parentPublication": {
"id": "proceedings/icpr/2008/2174/0",
"title": "ICPR 2008 19th International Conference on Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fg/2011/9140/0/05771469",
"title": "Constraint-based gaze estimation without active calibration",
"doi": null,
"abstractUrl": "/proceedings-article/fg/2011/05771469/12OmNwdbVch",
"parentPublication": {
"id": "proceedings/fg/2011/9140/0",
"title": "Face and Gesture 2011",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cw/2015/9403/0/9403a351",
"title": "Gaze Estimation Using Human Joint Rotation Angel",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2015/9403a351/12OmNx57HJj",
"parentPublication": {
"id": "proceedings/cw/2015/9403/0",
"title": "2015 International Conference on Cyberworlds (CW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2014/4308/0/4308a606",
"title": "Eye-Model-Based Gaze Estimation by RGB-D Camera",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2014/4308a606/12OmNyqiaTI",
"parentPublication": {
"id": "proceedings/cvprw/2014/4308/0",
"title": "2014 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2015/6683/0/6683a642",
"title": "Towards Convenient Calibration for Cross-Ratio Based Gaze Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2015/6683a642/12OmNzE54Hh",
"parentPublication": {
"id": "proceedings/wacv/2015/6683/0",
"title": "2015 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/crv/2015/1986/0/1986a176",
"title": "Mobile 3D Gaze Tracking Calibration",
"doi": null,
"abstractUrl": "/proceedings-article/crv/2015/1986a176/12OmNzzxusS",
"parentPublication": {
"id": "proceedings/crv/2015/1986/0",
"title": "2015 12th Conference on Computer and Robot Vision (CRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/mu/2014/04/mmu2014040028",
"title": "Real-Time Gaze Estimation with Online Calibration",
"doi": null,
"abstractUrl": "/magazine/mu/2014/04/mmu2014040028/13rRUx0geby",
"parentPublication": {
"id": "mags/mu",
"title": "IEEE MultiMedia",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2021/03/08920005",
"title": "A Differential Approach for Gaze Estimation",
"doi": null,
"abstractUrl": "/journal/tp/2021/03/08920005/1fsFnejO2IM",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2020/6553/0/09093419",
"title": "Offset Calibration for Appearance-Based Gaze Estimation via Gaze Decomposition",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2020/09093419/1jPbibCw0gw",
"parentPublication": {
"id": "proceedings/wacv/2020/6553/0",
"title": "2020 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNy314bx",
"title": "2017 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"acronym": "wacv",
"groupId": "1000040",
"volume": "0",
"displayVolume": "0",
"year": "2017",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNvlxJrb",
"doi": "10.1109/WACV.2017.101",
"title": "A Statistical Approach to Continuous Self-Calibrating Eye Gaze Tracking for Head-Mounted Virtual Reality Systems",
"normalizedTitle": "A Statistical Approach to Continuous Self-Calibrating Eye Gaze Tracking for Head-Mounted Virtual Reality Systems",
"abstract": "We present a novel, automatic eye gaze tracking scheme inspired by smooth pursuit eye motion while playing mobile games or watching virtual reality contents. Our algorithm continuously calibrates an eye tracking system for a head mounted display. This eliminates the need for an explicit calibration step and automatically compensates for small movements of the headset with respect to the head. The algorithm finds correspondences between corneal motion and screen space motion, and uses these to generate Gaussian Process Regression models. A combination of those models provides a continuous mapping from corneal position to screen space position. Accuracy is nearly as good as achieved with an explicit calibration step.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We present a novel, automatic eye gaze tracking scheme inspired by smooth pursuit eye motion while playing mobile games or watching virtual reality contents. Our algorithm continuously calibrates an eye tracking system for a head mounted display. This eliminates the need for an explicit calibration step and automatically compensates for small movements of the headset with respect to the head. The algorithm finds correspondences between corneal motion and screen space motion, and uses these to generate Gaussian Process Regression models. A combination of those models provides a continuous mapping from corneal position to screen space position. Accuracy is nearly as good as achieved with an explicit calibration step.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We present a novel, automatic eye gaze tracking scheme inspired by smooth pursuit eye motion while playing mobile games or watching virtual reality contents. Our algorithm continuously calibrates an eye tracking system for a head mounted display. This eliminates the need for an explicit calibration step and automatically compensates for small movements of the headset with respect to the head. The algorithm finds correspondences between corneal motion and screen space motion, and uses these to generate Gaussian Process Regression models. A combination of those models provides a continuous mapping from corneal position to screen space position. Accuracy is nearly as good as achieved with an explicit calibration step.",
"fno": "07926684",
"keywords": [
"Computer Vision",
"Gaussian Processes",
"Gaze Tracking",
"Helmet Mounted Displays",
"Regression Analysis",
"Statistical Analysis",
"Virtual Reality",
"Statistical Approach",
"Self Calibrating Eye Gaze Tracking",
"Head Mounted Virtual Reality System",
"Automatic Eye Gaze Tracking Scheme",
"Eye Motion",
"Gaussian Process Regression Model",
"Cameras",
"Calibration",
"Gaze Tracking",
"Solid Modeling",
"Resists",
"Prototypes",
"Headphones"
],
"authors": [
{
"affiliation": null,
"fullName": "Subarna Tripathi",
"givenName": "Subarna",
"surname": "Tripathi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Brian Guenter",
"givenName": "Brian",
"surname": "Guenter",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "wacv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2017-03-01T00:00:00",
"pubType": "proceedings",
"pages": "862-870",
"year": "2017",
"issn": null,
"isbn": "978-1-5090-4822-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07926683",
"articleId": "12OmNBSSV7W",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07926685",
"articleId": "12OmNvSbBIZ",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/fg/2018/2335/0/233501a789",
"title": "Human Computer Interaction with Head Pose, Eye Gaze and Body Gestures",
"doi": null,
"abstractUrl": "/proceedings-article/fg/2018/233501a789/12OmNASILS4",
"parentPublication": {
"id": "proceedings/fg/2018/2335/0",
"title": "2018 13th IEEE International Conference on Automatic Face & Gesture Recognition (FG 2018)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2011/0394/0/05995675",
"title": "Probabilistic gaze estimation without active personal calibration",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2011/05995675/12OmNC8MsAV",
"parentPublication": {
"id": "proceedings/cvpr/2011/0394/0",
"title": "CVPR 2011",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2017/1032/0/1032b003",
"title": "Real Time Eye Gaze Tracking with 3D Deformable Eye-Face Model",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2017/1032b003/12OmNwNeYAV",
"parentPublication": {
"id": "proceedings/iccv/2017/1032/0",
"title": "2017 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2003/1900/2/190020451",
"title": "Eye Gaze Tracking Using an Active Stereo Head",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2003/190020451/12OmNxRWI2Y",
"parentPublication": {
"id": "proceedings/cvpr/2003/1900/2",
"title": "2003 IEEE Computer Society Conference on Computer Vision and Pattern Recognition, 2003. Proceedings.",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ism/2018/6857/0/08643332",
"title": "Open framework for error-compensated gaze data collection with eye tracking glasses",
"doi": null,
"abstractUrl": "/proceedings-article/ism/2018/08643332/17QjJdei3Y0",
"parentPublication": {
"id": "proceedings/ism/2018/6857/0",
"title": "2018 IEEE International Symposium on Multimedia (ISM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2022/0915/0/091500d937",
"title": "Event-Based Kilohertz Eye Tracking using Coded Differential Lighting",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2022/091500d937/1B13uiL4IUM",
"parentPublication": {
"id": "proceedings/wacv/2022/0915/0",
"title": "2022 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2022/5325/0/532500a082",
"title": "Real-time Gaze Tracking with Head-eye Coordination for Head-mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2022/532500a082/1JrQQ8dsLKM",
"parentPublication": {
"id": "proceedings/ismar/2022/5325/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090461",
"title": "Front Camera Eye Tracking For Mobile VR",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090461/1jIxzvZw4YU",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2021/8808/0/09412066",
"title": "Detection and Correspondence Matching of Corneal Reflections for Eye Tracking Using Deep Learning",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2021/09412066/1tmjH1aA4dG",
"parentPublication": {
"id": "proceedings/icpr/2021/8808/0",
"title": "2020 25th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNyKJiaV",
"title": "Pattern Recognition, International Conference on",
"acronym": "icpr",
"groupId": "1000545",
"volume": "0",
"displayVolume": "0",
"year": "2010",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNyRg4Cq",
"doi": "10.1109/ICPR.2010.1160",
"title": "Visual Gaze Estimation by Joint Head and Eye Information",
"normalizedTitle": "Visual Gaze Estimation by Joint Head and Eye Information",
"abstract": "In this paper, we present an unconstrained visual gaze estimation system. The proposed method extracts the visual field of view of a person looking at a target scene in order to estimate the approximate location of interest (visual gaze). The novelty of the system is the joint use of head pose and eye location information to fine tune the visual gaze estimated by the head pose only, so that the system can be used in multiple scenarios. The improvements obtained by the proposed approach are validated using the Boston University head pose dataset, on which the standard deviation of the joint visual gaze estimation improved by 61:06% horizontally and 52:23% vertically with respect to the gaze estimation obtained by the head pose only. A user study shows the potential of the proposed system.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this paper, we present an unconstrained visual gaze estimation system. The proposed method extracts the visual field of view of a person looking at a target scene in order to estimate the approximate location of interest (visual gaze). The novelty of the system is the joint use of head pose and eye location information to fine tune the visual gaze estimated by the head pose only, so that the system can be used in multiple scenarios. The improvements obtained by the proposed approach are validated using the Boston University head pose dataset, on which the standard deviation of the joint visual gaze estimation improved by 61:06% horizontally and 52:23% vertically with respect to the gaze estimation obtained by the head pose only. A user study shows the potential of the proposed system.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this paper, we present an unconstrained visual gaze estimation system. The proposed method extracts the visual field of view of a person looking at a target scene in order to estimate the approximate location of interest (visual gaze). The novelty of the system is the joint use of head pose and eye location information to fine tune the visual gaze estimated by the head pose only, so that the system can be used in multiple scenarios. The improvements obtained by the proposed approach are validated using the Boston University head pose dataset, on which the standard deviation of the joint visual gaze estimation improved by 61:06% horizontally and 52:23% vertically with respect to the gaze estimation obtained by the head pose only. A user study shows the potential of the proposed system.",
"fno": "4109d870",
"keywords": [
"Gaze Estimation",
"Head Pose Estimation",
"Eye Localization"
],
"authors": [
{
"affiliation": null,
"fullName": "Roberto Valenti",
"givenName": "Roberto",
"surname": "Valenti",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Adel Lablack",
"givenName": "Adel",
"surname": "Lablack",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Nicu Sebe",
"givenName": "Nicu",
"surname": "Sebe",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Chabane Djeraba",
"givenName": "Chabane",
"surname": "Djeraba",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Theo Gevers",
"givenName": "Theo",
"surname": "Gevers",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2010-08-01T00:00:00",
"pubType": "proceedings",
"pages": "3870-3873",
"year": "2010",
"issn": "1051-4651",
"isbn": "978-0-7695-4109-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "4109d866",
"articleId": "12OmNwHhoPu",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "4109d874",
"articleId": "12OmNwoPtoy",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/gcis/2009/3571/2/3571b133",
"title": "Key Techniques of Eye Gaze Tracking Based on Pupil Corneal Reflection",
"doi": null,
"abstractUrl": "/proceedings-article/gcis/2009/3571b133/12OmNA0vo1q",
"parentPublication": {
"id": "proceedings/gcis/2009/3571/2",
"title": "2009 WRI Global Congress on Intelligent Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fg/2018/2335/0/233501a789",
"title": "Human Computer Interaction with Head Pose, Eye Gaze and Body Gestures",
"doi": null,
"abstractUrl": "/proceedings-article/fg/2018/233501a789/12OmNASILS4",
"parentPublication": {
"id": "proceedings/fg/2018/2335/0",
"title": "2018 13th IEEE International Conference on Automatic Face & Gesture Recognition (FG 2018)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vs-games/2011/4419/0/4419a186",
"title": "The Importance of Eye Gaze and Head Pose to Estimating Levels of Attention",
"doi": null,
"abstractUrl": "/proceedings-article/vs-games/2011/4419a186/12OmNqyDjtb",
"parentPublication": {
"id": "proceedings/vs-games/2011/4419/0",
"title": "Games and Virtual Worlds for Serious Applications, Conference in",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icnc/2009/3736/3/3736c617",
"title": "Eye Gaze Calculation Based on Nonlinear Polynomial and Generalized Regression Neural Network",
"doi": null,
"abstractUrl": "/proceedings-article/icnc/2009/3736c617/12OmNwD1pNV",
"parentPublication": {
"id": "proceedings/icnc/2009/3736/3",
"title": "2009 Fifth International Conference on Natural Computation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ihmsc/2010/4151/1/4151a300",
"title": "A Novel Simple 2D Model of Eye Gaze Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/ihmsc/2010/4151a300/12OmNzQR1nK",
"parentPublication": {
"id": "proceedings/ihmsc/2010/4151/1",
"title": "Intelligent Human-Machine Systems and Cybernetics, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/ex/2016/03/mex2016030049",
"title": "Driver Gaze Region Estimation without Use of Eye Movement",
"doi": null,
"abstractUrl": "/magazine/ex/2016/03/mex2016030049/13rRUwInv0D",
"parentPublication": {
"id": "mags/ex",
"title": "IEEE Intelligent Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2018/6100/0/610000c237",
"title": "Light-Weight Head Pose Invariant Gaze Tracking",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2018/610000c237/17D45WXIkI8",
"parentPublication": {
"id": "proceedings/cvprw/2018/6100/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600c182",
"title": "Dynamic 3D Gaze from Afar: Deep Gaze Estimation from Temporal Eye-Head-Body Coordination",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600c182/1H1mDm1L85i",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2023/9346/0/934600a467",
"title": "CUDA-GHR: Controllable Unsupervised Domain Adaptation for Gaze and Head Redirection",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2023/934600a467/1KxVMFU7lkI",
"parentPublication": {
"id": "proceedings/wacv/2023/9346/0",
"title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNASrawz",
"title": "2009 IEEE Virtual Reality Conference",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2009",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNzC5T34",
"doi": "10.1109/VR.2009.4811014",
"title": "Natural Eye Motion Synthesis by Modeling Gaze-Head Coupling",
"normalizedTitle": "Natural Eye Motion Synthesis by Modeling Gaze-Head Coupling",
"abstract": "Due to the intrinsic subtlety and dynamics of eye movements, automated generation of natural and engaging eye motion has been a challenging task for decades. In this paper we present an effective technique to synthesize natural eye gazes given a head motion sequence as input, by statistically modeling the innate coupling between gazes and head movements. We first simultaneously recorded head motions and eye gazes of human subjects, using a novel hybrid data acquisition solution consisting of an optical motion capture system and off-the-shelf video cameras. Then, we statistically learn gaze-head coupling patterns using a dynamic coupled component analysis model. Finally, given a head motion sequence as input, we can synthesize its corresponding natural eye gazes based on the constructed gaze-head coupling model. Through comparative user studies and evaluations, we found that comparing with the state of the art algorithms in eye motion synthesis, our approach is more effective to generate natural gazes correlated with given head motions. We also showed the effectiveness of our approach for gaze simulation in two-party conversations.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Due to the intrinsic subtlety and dynamics of eye movements, automated generation of natural and engaging eye motion has been a challenging task for decades. In this paper we present an effective technique to synthesize natural eye gazes given a head motion sequence as input, by statistically modeling the innate coupling between gazes and head movements. We first simultaneously recorded head motions and eye gazes of human subjects, using a novel hybrid data acquisition solution consisting of an optical motion capture system and off-the-shelf video cameras. Then, we statistically learn gaze-head coupling patterns using a dynamic coupled component analysis model. Finally, given a head motion sequence as input, we can synthesize its corresponding natural eye gazes based on the constructed gaze-head coupling model. Through comparative user studies and evaluations, we found that comparing with the state of the art algorithms in eye motion synthesis, our approach is more effective to generate natural gazes correlated with given head motions. We also showed the effectiveness of our approach for gaze simulation in two-party conversations.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Due to the intrinsic subtlety and dynamics of eye movements, automated generation of natural and engaging eye motion has been a challenging task for decades. In this paper we present an effective technique to synthesize natural eye gazes given a head motion sequence as input, by statistically modeling the innate coupling between gazes and head movements. We first simultaneously recorded head motions and eye gazes of human subjects, using a novel hybrid data acquisition solution consisting of an optical motion capture system and off-the-shelf video cameras. Then, we statistically learn gaze-head coupling patterns using a dynamic coupled component analysis model. Finally, given a head motion sequence as input, we can synthesize its corresponding natural eye gazes based on the constructed gaze-head coupling model. Through comparative user studies and evaluations, we found that comparing with the state of the art algorithms in eye motion synthesis, our approach is more effective to generate natural gazes correlated with given head motions. We also showed the effectiveness of our approach for gaze simulation in two-party conversations.",
"fno": "04811014",
"keywords": [
"Avatars",
"Image Motion Analysis",
"Image Sequences",
"Statistical Analysis",
"Natural Eye Motion Synthesis",
"Gaze Head Coupling",
"Eye Movements",
"Head Motion Sequence",
"Hybrid Data Acquisition Solution",
"Optical Motion Capture System",
"Off The Shelf Video Cameras",
"Dynamic Coupled Component Analysis Model",
"Magnetic Heads",
"Computer Graphics",
"Optical Recording",
"Cameras",
"Coupled Mode Analysis",
"Avatars",
"Humans",
"Data Acquisition",
"Facial Animation",
"Virtual Reality",
"Gaze Head Coupling",
"Eye Motion",
"Facial Animation",
"Digital Avatars",
"I 3 7 Computer Graphics Three Dimensional Graphics And Realism Animation",
"H 5 2 Information Interfaces And Presentation User Interfaces Graphical User Interfaces GUI"
],
"authors": [
{
"affiliation": "Computer Graphics and Interactive Media Lab, Department of Computer Science University of Houston, Houston, TX e-mail: xiaohan@cs.uh.edu",
"fullName": "Xiaohan Ma",
"givenName": "Xiaohan",
"surname": "Ma",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Computer Graphics and Interactive Media Lab, Department of Computer Science University of Houston, Houston, TX e-mail: zdeng@cs.uh.edu",
"fullName": "Zhigang Deng",
"givenName": "Zhigang",
"surname": "Deng",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2009-03-01T00:00:00",
"pubType": "proceedings",
"pages": "143-150",
"year": "2009",
"issn": "1087-8270",
"isbn": "978-1-4244-3943-0",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "04811012",
"articleId": "12OmNy3iFtV",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "04811015",
"articleId": "12OmNqFa5ps",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cbms/1991/2164/0/00128995",
"title": "Non-invasive eye-gaze position detecting method used on man/machine interface for the disabled",
"doi": null,
"abstractUrl": "/proceedings-article/cbms/1991/00128995/12OmNCgJe2T",
"parentPublication": {
"id": "proceedings/cbms/1991/2164/0",
"title": "Computer-Based Medical Systems - Proceedings of the Fourth Annual IEEE Symposium",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2012/2216/0/06460306",
"title": "Head pose-free appearance-based gaze sensing via eye image synthesis",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2012/06460306/12OmNrMHOcV",
"parentPublication": {
"id": "proceedings/icpr/2012/2216/0",
"title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2003/1900/2/190020451",
"title": "Eye Gaze Tracking Using an Active Stereo Head",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2003/190020451/12OmNxRWI2Y",
"parentPublication": {
"id": "proceedings/cvpr/2003/1900/2",
"title": "2003 IEEE Computer Society Conference on Computer Vision and Pattern Recognition, 2003. Proceedings.",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2014/5209/0/5209b869",
"title": "Appearance-Based Gaze Tracking with Free Head Movement",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2014/5209b869/12OmNyo1nKa",
"parentPublication": {
"id": "proceedings/icpr/2014/5209/0",
"title": "2014 22nd International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ds-rt/2006/2697/0/26970070",
"title": "Comparison of head gaze and head and eye gaze within an immersive environment",
"doi": null,
"abstractUrl": "/proceedings-article/ds-rt/2006/26970070/12OmNzFdt6h",
"parentPublication": {
"id": "proceedings/ds-rt/2006/2697/0",
"title": "Distributed Simulation and Real Time Applications, IEEE/ACM International Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/11/ttg2012111902",
"title": "Live Speech Driven Head-and-Eye Motion Generators",
"doi": null,
"abstractUrl": "/journal/tg/2012/11/ttg2012111902/13rRUyv53Fo",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vs-games/2018/7123/0/08493406",
"title": "A Model for Eye and Head Motion for Virtual Agents",
"doi": null,
"abstractUrl": "/proceedings-article/vs-games/2018/08493406/14tNJoD4Uxi",
"parentPublication": {
"id": "proceedings/vs-games/2018/7123/0",
"title": "2018 10th International Conference on Virtual Worlds and Games for Serious Applications (VS-Games)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2022/5325/0/532500a082",
"title": "Real-time Gaze Tracking with Head-eye Coordination for Head-mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2022/532500a082/1JrQQ8dsLKM",
"parentPublication": {
"id": "proceedings/ismar/2022/5325/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797852",
"title": "Perception of Volumetric Characters' Eye-Gaze Direction in Head-Mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797852/1cJ0UskDCRa",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1H1gVMlkl32",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"acronym": "cvpr",
"groupId": "1000147",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1H1mDm1L85i",
"doi": "10.1109/CVPR52688.2022.00223",
"title": "Dynamic 3D Gaze from Afar: Deep Gaze Estimation from Temporal Eye-Head-Body Coordination",
"normalizedTitle": "Dynamic 3D Gaze from Afar: Deep Gaze Estimation from Temporal Eye-Head-Body Coordination",
"abstract": "We introduce a novel method and dataset for 3D gaze estimation of a freely moving person from a distance, typically in surveillance views. Eyes cannot be clearly seen in such cases due to occlusion and lacking resolution. Existing gaze estimation methods suffer or fall back to approximating gaze with head pose as they primarily rely on clear, close-up views of the eyes. Our key idea is to instead leverage the intrinsic gaze, head, and body coordination of people. Our method formulates gaze estimation as Bayesian prediction given temporal estimates of head and body orientations which can be reliably estimated from a far. We model the head and body orientation likelihoods and the conditional prior of gaze direction on those with separate neural networks which are then cascaded to output the 3D gaze direction. We introduce an extensive new dataset that consists of surveillance videos annotated with 3D gaze directions captured in 5 indoor and outdoor scenes. Experimental results on this and other datasets validate the accuracy of our method and demonstrate that gaze can be accurately estimated from a typical surveillance distance even when the person's face is not visible to the camera.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We introduce a novel method and dataset for 3D gaze estimation of a freely moving person from a distance, typically in surveillance views. Eyes cannot be clearly seen in such cases due to occlusion and lacking resolution. Existing gaze estimation methods suffer or fall back to approximating gaze with head pose as they primarily rely on clear, close-up views of the eyes. Our key idea is to instead leverage the intrinsic gaze, head, and body coordination of people. Our method formulates gaze estimation as Bayesian prediction given temporal estimates of head and body orientations which can be reliably estimated from a far. We model the head and body orientation likelihoods and the conditional prior of gaze direction on those with separate neural networks which are then cascaded to output the 3D gaze direction. We introduce an extensive new dataset that consists of surveillance videos annotated with 3D gaze directions captured in 5 indoor and outdoor scenes. Experimental results on this and other datasets validate the accuracy of our method and demonstrate that gaze can be accurately estimated from a typical surveillance distance even when the person's face is not visible to the camera.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We introduce a novel method and dataset for 3D gaze estimation of a freely moving person from a distance, typically in surveillance views. Eyes cannot be clearly seen in such cases due to occlusion and lacking resolution. Existing gaze estimation methods suffer or fall back to approximating gaze with head pose as they primarily rely on clear, close-up views of the eyes. Our key idea is to instead leverage the intrinsic gaze, head, and body coordination of people. Our method formulates gaze estimation as Bayesian prediction given temporal estimates of head and body orientations which can be reliably estimated from a far. We model the head and body orientation likelihoods and the conditional prior of gaze direction on those with separate neural networks which are then cascaded to output the 3D gaze direction. We introduce an extensive new dataset that consists of surveillance videos annotated with 3D gaze directions captured in 5 indoor and outdoor scenes. Experimental results on this and other datasets validate the accuracy of our method and demonstrate that gaze can be accurately estimated from a typical surveillance distance even when the person's face is not visible to the camera.",
"fno": "694600c182",
"keywords": [
"Cameras",
"Computer Vision",
"Eye",
"Face Recognition",
"Feature Extraction",
"Image Motion Analysis",
"Image Sequences",
"Neural Nets",
"Object Detection",
"Object Tracking",
"Pose Estimation",
"Robot Vision",
"Surveillance",
"Target Tracking",
"Video Signal Processing",
"Video Surveillance",
"Deep Gaze Estimation",
"Temporal Eye Head Body Coordination",
"Gaze Estimation Methods Suffer",
"Intrinsic Gaze",
"Method Formulates Gaze",
"Temporal Estimates",
"Body Orientations",
"Body Orientation Likelihoods",
"Gaze Direction",
"Solid Modeling",
"Computer Vision",
"Three Dimensional Displays",
"Surveillance",
"Face Recognition",
"Neural Networks",
"Estimation"
],
"authors": [
{
"affiliation": "Graduate School of Informatics, Kyoto University",
"fullName": "Soma Nonaka",
"givenName": "Soma",
"surname": "Nonaka",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Graduate School of Informatics, Kyoto University",
"fullName": "Shohei Nobuhara",
"givenName": "Shohei",
"surname": "Nobuhara",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Graduate School of Informatics, Kyoto University",
"fullName": "Ko Nishino",
"givenName": "Ko",
"surname": "Nishino",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-06-01T00:00:00",
"pubType": "proceedings",
"pages": "2182-2191",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-6946-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [
{
"id": "1H1mDhI5oqs",
"name": "pcvpr202269460-09879695s1-mm_694600c182.zip",
"size": "13.8 MB",
"location": "https://www.computer.org/csdl/api/v1/extra/pcvpr202269460-09879695s1-mm_694600c182.zip",
"__typename": "WebExtraType"
}
],
"adjacentArticles": {
"previous": {
"fno": "694600c171",
"articleId": "1H1khxjXUnm",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "694600c192",
"articleId": "1H1iR0NRBAc",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/fg/2018/2335/0/233501a789",
"title": "Human Computer Interaction with Head Pose, Eye Gaze and Body Gestures",
"doi": null,
"abstractUrl": "/proceedings-article/fg/2018/233501a789/12OmNASILS4",
"parentPublication": {
"id": "proceedings/fg/2018/2335/0",
"title": "2018 13th IEEE International Conference on Automatic Face & Gesture Recognition (FG 2018)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/avss/2011/0844/0/06027285",
"title": "Gaze and body pose estimation from a distance",
"doi": null,
"abstractUrl": "/proceedings-article/avss/2011/06027285/12OmNvSKNZj",
"parentPublication": {
"id": "proceedings/avss/2011/0844/0",
"title": "2011 8th IEEE International Conference on Advanced Video and Signal Based Surveillance (AVSS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2011/0529/0/05981691",
"title": "Using eye gaze, head pose, and facial expression for personalized non-player character interaction",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2011/05981691/12OmNvT2oTP",
"parentPublication": {
"id": "proceedings/cvprw/2011/0529/0",
"title": "CVPR 2011 WORKSHOPS",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vs-games/2010/6331/0/05460171",
"title": "Animating Gaze Shifts for Virtual Characters Based on Head Movement Propensity",
"doi": null,
"abstractUrl": "/proceedings-article/vs-games/2010/05460171/12OmNyKa67Q",
"parentPublication": {
"id": "proceedings/vs-games/2010/6331/0",
"title": "2010 2nd International Conference on Games and Virtual Worlds for Serious Applications (VS-GAMES 2010)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacvw/2017/4941/0/07912208",
"title": "Gaze Estimation Based on Eyeball-Head Dynamics",
"doi": null,
"abstractUrl": "/proceedings-article/wacvw/2017/07912208/12OmNzWfoVQ",
"parentPublication": {
"id": "proceedings/wacvw/2017/4941/0",
"title": "2017 IEEE Winter Applications of Computer Vision Workshops (WACVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2016/1437/0/1437a792",
"title": "Person-Independent 3D Gaze Estimation Using Face Frontalization",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2016/1437a792/12OmNzYwbWh",
"parentPublication": {
"id": "proceedings/cvprw/2016/1437/0",
"title": "2016 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/acpr/2017/3354/0/3354a935",
"title": "A Deep Learning Approach to Appearance-Based Gaze Estimation under Head Pose Variations",
"doi": null,
"abstractUrl": "/proceedings-article/acpr/2017/3354a935/17D45XacGif",
"parentPublication": {
"id": "proceedings/acpr/2017/3354/0",
"title": "2017 4th IAPR Asian Conference on Pattern Recognition (ACPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/05/08643434",
"title": "SGaze: A Data-Driven Eye-Head Coordination Model for Realtime Gaze Prediction",
"doi": null,
"abstractUrl": "/journal/tg/2019/05/08643434/18K0lRIKi7m",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2022/5325/0/532500a082",
"title": "Real-time Gaze Tracking with Head-eye Coordination for Head-mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2022/532500a082/1JrQQ8dsLKM",
"parentPublication": {
"id": "proceedings/ismar/2022/5325/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797852",
"title": "Perception of Volumetric Characters' Eye-Gaze Direction in Head-Mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797852/1cJ0UskDCRa",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNxV4itF",
"title": "2017 IEEE Virtual Reality (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2017",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNwNeYB5",
"doi": "10.1109/VR.2017.7892346",
"title": "Transfer of a skilled motor learning task between virtual and conventional environments",
"normalizedTitle": "Transfer of a skilled motor learning task between virtual and conventional environments",
"abstract": "Immersive, head-mounted virtual reality (HMD-VR) can be a potentially useful tool for motor rehabilitation. However, it is unclear whether the motor skills learned in HMD-VR transfer to the non-virtual world and vice-versa. Here we used a well-established test of skilled motor learning, the Sequential Visual Isometric Pinch Task (SVIPT), to train individuals in either an HMD-VR or conventional training (CT) environment. Participants were then tested in both environments. Our results show that participants who train in the CT environment have an improvement in motor performance when they transfer to the HMD-VR environment. In contrast, participants who train in the HMD-VR environment show a decrease in skill level when transferring to the CT environment. This has implications for how training in HMD-VR and CT may affect performance in different environments.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Immersive, head-mounted virtual reality (HMD-VR) can be a potentially useful tool for motor rehabilitation. However, it is unclear whether the motor skills learned in HMD-VR transfer to the non-virtual world and vice-versa. Here we used a well-established test of skilled motor learning, the Sequential Visual Isometric Pinch Task (SVIPT), to train individuals in either an HMD-VR or conventional training (CT) environment. Participants were then tested in both environments. Our results show that participants who train in the CT environment have an improvement in motor performance when they transfer to the HMD-VR environment. In contrast, participants who train in the HMD-VR environment show a decrease in skill level when transferring to the CT environment. This has implications for how training in HMD-VR and CT may affect performance in different environments.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Immersive, head-mounted virtual reality (HMD-VR) can be a potentially useful tool for motor rehabilitation. However, it is unclear whether the motor skills learned in HMD-VR transfer to the non-virtual world and vice-versa. Here we used a well-established test of skilled motor learning, the Sequential Visual Isometric Pinch Task (SVIPT), to train individuals in either an HMD-VR or conventional training (CT) environment. Participants were then tested in both environments. Our results show that participants who train in the CT environment have an improvement in motor performance when they transfer to the HMD-VR environment. In contrast, participants who train in the HMD-VR environment show a decrease in skill level when transferring to the CT environment. This has implications for how training in HMD-VR and CT may affect performance in different environments.",
"fno": "07892346",
"keywords": [
"Training",
"Logic Gates",
"Virtual Reality",
"Computed Tomography",
"Electroencephalography",
"Visualization",
"Indexes",
"Virtual Reality",
"Skilled Motor Learning",
"Transfer"
],
"authors": [
{
"affiliation": "University of Southern California, Los Angeles, California, United States of America",
"fullName": "Julia Anglin",
"givenName": "Julia",
"surname": "Anglin",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Southern California, Los Angeles, California, United States of America",
"fullName": "David Saldana",
"givenName": "David",
"surname": "Saldana",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Southern California, Los Angeles, California, United States of America",
"fullName": "Allie Schmiesing",
"givenName": "Allie",
"surname": "Schmiesing",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Southern California, Los Angeles, California, United States of America",
"fullName": "Sook-Lei Liew",
"givenName": "Sook-Lei",
"surname": "Liew",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2017-01-01T00:00:00",
"pubType": "proceedings",
"pages": "401-402",
"year": "2017",
"issn": "2375-5334",
"isbn": "978-1-5090-6647-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07892345",
"articleId": "12OmNzd7bOL",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07892347",
"articleId": "12OmNvD8RA7",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2017/6647/0/07892338",
"title": "REINVENT: A low-cost, virtual reality brain-computer interface for severe stroke upper limb motor recovery",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892338/12OmNxWui8h",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icis/2018/5892/0/08466538",
"title": "Design of a VR-Based Upper Limb Gross Motor and Fine Motor Task Platform for Post-Stroke Survivors",
"doi": null,
"abstractUrl": "/proceedings-article/icis/2018/08466538/13JkraH3ZF6",
"parentPublication": {
"id": "proceedings/icis/2018/5892/0",
"title": "2018 IEEE/ACIS 17th International Conference on Computer and Information Science (ICIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446068",
"title": "Neurophysiology of Visual-Motor Learning During a Simulated Marksmanship Task in Immersive Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446068/13bd1fZBGbG",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446466",
"title": "Predicting Performance During a Dynamic Target Acquisition Task in Immersive Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446466/13bd1fdV4lE",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/05/08642297",
"title": "Immersive Virtual Colonoscopy",
"doi": null,
"abstractUrl": "/journal/tg/2019/05/08642297/17PYElZaeVr",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pacificvis/2022/2335/0/233500a081",
"title": "Virtual Inspection of Additively Manufactured Parts",
"doi": null,
"abstractUrl": "/proceedings-article/pacificvis/2022/233500a081/1E2whJdBsOI",
"parentPublication": {
"id": "proceedings/pacificvis/2022/2335/0",
"title": "2022 IEEE 15th Pacific Visualization Symposium (PacificVis)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2023/4815/0/481500a592",
"title": "A Haptic Stimulation-Based Training Method to Improve the Quality of Motor Imagery EEG Signal in VR",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2023/481500a592/1MNgVlvp10Q",
"parentPublication": {
"id": "proceedings/vr/2023/4815/0",
"title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798369",
"title": "Brain Activity in Virtual Reality: Assessing Signal Quality of High-Resolution EEG While Using Head-Mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798369/1cJ18Pncw9y",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aivr/2019/5604/0/560400a163",
"title": "Deep Learning on VR-Induced Attention",
"doi": null,
"abstractUrl": "/proceedings-article/aivr/2019/560400a163/1grOm5JWBY4",
"parentPublication": {
"id": "proceedings/aivr/2019/5604/0",
"title": "2019 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2020/8508/0/850800a260",
"title": "Influence of hand visualization on tool-based motor skills training in an immersive VR simulator",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2020/850800a260/1pyswAXnugM",
"parentPublication": {
"id": "proceedings/ismar/2020/8508/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNx7ouTZ",
"title": "Biomedical Visualization",
"acronym": "biomedvis",
"groupId": "1002317",
"volume": "0",
"displayVolume": "0",
"year": "1995",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNy49sFe",
"doi": "10.1109/BIOVIS.1995.528702",
"title": "3D virtual colonoscopy",
"normalizedTitle": "3D virtual colonoscopy",
"abstract": "The authors present here a method called 3D virtual colonoscopy, which is an alternative method to existing procedures of imaging the mucosal surface of the colon. Using 3D reconstruction of helical CT data and volume visualization techniques, the authors generate images of the inner surface of the colon as if the viewer's eyes were inside the colon. They also create interactive flythroughs and off-line automatically-produced animations through the inside of the colon. The visualization is accomplished with VolVis, which is a comprehensive system for interactive volume visualization. The authors are specifically interested in visualizing colonic polyps larger than one cm since these have a high probability of containing carcinoma. The authors present testing results of their method as applied to two plastic pipe simulations and to the Visible Human data set.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The authors present here a method called 3D virtual colonoscopy, which is an alternative method to existing procedures of imaging the mucosal surface of the colon. Using 3D reconstruction of helical CT data and volume visualization techniques, the authors generate images of the inner surface of the colon as if the viewer's eyes were inside the colon. They also create interactive flythroughs and off-line automatically-produced animations through the inside of the colon. The visualization is accomplished with VolVis, which is a comprehensive system for interactive volume visualization. The authors are specifically interested in visualizing colonic polyps larger than one cm since these have a high probability of containing carcinoma. The authors present testing results of their method as applied to two plastic pipe simulations and to the Visible Human data set.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The authors present here a method called 3D virtual colonoscopy, which is an alternative method to existing procedures of imaging the mucosal surface of the colon. Using 3D reconstruction of helical CT data and volume visualization techniques, the authors generate images of the inner surface of the colon as if the viewer's eyes were inside the colon. They also create interactive flythroughs and off-line automatically-produced animations through the inside of the colon. The visualization is accomplished with VolVis, which is a comprehensive system for interactive volume visualization. The authors are specifically interested in visualizing colonic polyps larger than one cm since these have a high probability of containing carcinoma. The authors present testing results of their method as applied to two plastic pipe simulations and to the Visible Human data set.",
"fno": "71980026",
"keywords": [
"Image Reconstruction Medical Image Processing Computerised Tomography 3 D Virtual Colonoscopy Colon Mucosal Surface Imaging 3 D Reconstruction Helical CT Data Volume Visualization Colon Inner Surface Images Generation Medical Diagnostic Imaging Interactive Flythroughs Off Line Automatically Produced Animations Vol Vis Colonic Polyps Carcinoma Plastic Pipe Simulations Visible Human Data Set 1 Cm"
],
"authors": [
{
"affiliation": "Dept. of Comput. Sci., State Univ. of New York, Stony Brook, NY, USA",
"fullName": "Lichan Hong",
"givenName": "Lichan",
"surname": "Hong",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Dept. of Comput. Sci., State Univ. of New York, Stony Brook, NY, USA",
"fullName": "A. Kaufman",
"givenName": "A.",
"surname": "Kaufman",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Dept. of Comput. Sci., State Univ. of New York, Stony Brook, NY, USA",
"fullName": "Yi-Chih Wei",
"givenName": "Yi-Chih",
"surname": "Wei",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Dept. of Comput. Sci., State Univ. of New York, Stony Brook, NY, USA",
"fullName": "A. Viswambharan",
"givenName": "A.",
"surname": "Viswambharan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Dept. of Comput. Sci., State Univ. of New York, Stony Brook, NY, USA",
"fullName": "M. Wax",
"givenName": "M.",
"surname": "Wax",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Dept. of Comput. Sci., State Univ. of New York, Stony Brook, NY, USA",
"fullName": "Zhengrong Liang",
"givenName": "Zhengrong",
"surname": "Liang",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "biomedvis",
"isOpenAccess": false,
"showRecommendedArticles": false,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "1995-10-01T00:00:00",
"pubType": "proceedings",
"pages": "26",
"year": "1995",
"issn": null,
"isbn": "0-8186-7198-X",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "71980018",
"articleId": "12OmNx3Zjjf",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "71980033",
"articleId": "12OmNzayNjg",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNx8wTfS",
"title": "2014 IEEE VIS International Workshop on 3DVis (3DVis)",
"acronym": "3dvis",
"groupId": "1807724",
"volume": "0",
"displayVolume": "0",
"year": "2014",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNylKB4e",
"doi": "10.1109/3DVis.2014.7160105",
"title": "Benefits of 3D immersion for virtual colonoscopy",
"normalizedTitle": "Benefits of 3D immersion for virtual colonoscopy",
"abstract": "Virtual Colonoscopy (VC) is a non-invasive clinical procedure that detects colon cancer in humans. VC seeks to supplement and improve the compliance rates for diagnosed patients, since the traditional optical colonoscopy is more painful, and less effective for cancer detection. In this paper, we discuss the benefits of using a 3D immersive user interface for VC. We discuss various design choices that we can make for such a design, leveraging the effects of the various combinations of virtual reality (VR) system components, as in previous VR empirical studies.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Virtual Colonoscopy (VC) is a non-invasive clinical procedure that detects colon cancer in humans. VC seeks to supplement and improve the compliance rates for diagnosed patients, since the traditional optical colonoscopy is more painful, and less effective for cancer detection. In this paper, we discuss the benefits of using a 3D immersive user interface for VC. We discuss various design choices that we can make for such a design, leveraging the effects of the various combinations of virtual reality (VR) system components, as in previous VR empirical studies.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Virtual Colonoscopy (VC) is a non-invasive clinical procedure that detects colon cancer in humans. VC seeks to supplement and improve the compliance rates for diagnosed patients, since the traditional optical colonoscopy is more painful, and less effective for cancer detection. In this paper, we discuss the benefits of using a 3D immersive user interface for VC. We discuss various design choices that we can make for such a design, leveraging the effects of the various combinations of virtual reality (VR) system components, as in previous VR empirical studies.",
"fno": "07160105",
"keywords": [
"Colon",
"Virtual Colonoscopy",
"Three Dimensional Displays",
"Visualization",
"Data Visualization",
"Shape",
"Navigation",
"Display Fidelity",
"Immersion",
"Virtual Reality",
"3 D Visualization",
"Virtual Colonoscopy",
"3 D Interaction",
"System Fidelity"
],
"authors": [
{
"affiliation": "Department of Computer Science, Stony Brook University, USA",
"fullName": "Koosha Mirhosseini",
"givenName": "Koosha",
"surname": "Mirhosseini",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Department of Computer Science, Stony Brook University, USA",
"fullName": "Qi Sun",
"givenName": "Qi",
"surname": "Sun",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Department of Computer Science, Stony Brook University, USA",
"fullName": "Krishna C. Gurijala",
"givenName": "Krishna C.",
"surname": "Gurijala",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Department of Computer Science, Stony Brook University, USA",
"fullName": "Bireswar Laha",
"givenName": "Bireswar",
"surname": "Laha",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Department of Computer Science, Stony Brook University, USA",
"fullName": "Arie E. Kaufman",
"givenName": "Arie E.",
"surname": "Kaufman",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "3dvis",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2014-11-01T00:00:00",
"pubType": "proceedings",
"pages": "75-79",
"year": "2014",
"issn": null,
"isbn": "978-1-4799-6826-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07160104",
"articleId": "12OmNxwWoB8",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07160106",
"articleId": "12OmNywxlNC",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cbms/2011/1189/0/05999137",
"title": "Computer-aided detection of retroflexion in colonoscopy",
"doi": null,
"abstractUrl": "/proceedings-article/cbms/2011/05999137/12OmNAm4TK4",
"parentPublication": {
"id": "proceedings/cbms/2011/1189/0",
"title": "2011 24th International Symposium on Computer-Based Medical Systems (CBMS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2004/8603/3/01394625",
"title": "A framework for parsing colonoscopy videos for semantic units",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2004/01394625/12OmNvD8RFG",
"parentPublication": {
"id": "proceedings/icme/2004/8603/3",
"title": "2004 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vast/2016/5661/0/07883508",
"title": "C2A: Crowd consensus analytics for virtual colonoscopy",
"doi": null,
"abstractUrl": "/proceedings-article/vast/2016/07883508/12OmNvTBB47",
"parentPublication": {
"id": "proceedings/vast/2016/5661/0",
"title": "2016 IEEE Conference on Visual Analytics Science and Technology (VAST)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibe/2009/3656/0/05211317",
"title": "3D Reconstruction of Colon Segments from Colonoscopy Images",
"doi": null,
"abstractUrl": "/proceedings-article/bibe/2009/05211317/12OmNxXCGFm",
"parentPublication": {
"id": "proceedings/bibe/2009/3656/0",
"title": "2009 Ninth IEEE International Conference on Bioinformatics and Bioengineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/biomedvis/1995/7198/0/71980026",
"title": "3D virtual colonoscopy",
"doi": null,
"abstractUrl": "/proceedings-article/biomedvis/1995/71980026/12OmNy49sFe",
"parentPublication": {
"id": "proceedings/biomedvis/1995/7198/0",
"title": "Biomedical Visualization",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2006/05/v0885",
"title": "Lines of Curvature for Polyp Detection in Virtual Colonoscopy",
"doi": null,
"abstractUrl": "/journal/tg/2006/05/v0885/13rRUyuegoZ",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aike/2018/9555/0/955500a208",
"title": "Colorectal Segmentation Using Multiple Encoder-Decoder Network in Colonoscopy Images",
"doi": null,
"abstractUrl": "/proceedings-article/aike/2018/955500a208/17D45Wuc39Z",
"parentPublication": {
"id": "proceedings/aike/2018/9555/0",
"title": "2018 IEEE First International Conference on Artificial Intelligence and Knowledge Engineering (AIKE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/05/08642297",
"title": "Immersive Virtual Colonoscopy",
"doi": null,
"abstractUrl": "/journal/tg/2019/05/08642297/17PYElZaeVr",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800e695",
"title": "Augmenting Colonoscopy Using Extended and Directional CycleGAN for Lossy Image Translation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800e695/1m3o8Getc4w",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ewdts/2020/9899/0/09224725",
"title": "Automatic Identification of Appendiceal Orifice on Colonoscopy Images Using Deep Neural Network",
"doi": null,
"abstractUrl": "/proceedings-article/ewdts/2020/09224725/1nWNY45uaac",
"parentPublication": {
"id": "proceedings/ewdts/2020/9899/0",
"title": "2020 IEEE East-West Design & Test Symposium (EWDTS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1jIxhEnA8IE",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"acronym": "vrw",
"groupId": "1836626",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1jIxogcnA3K",
"doi": "10.1109/VRW50115.2020.00225",
"title": "Improving Camera Travel for Immersive Colonography",
"normalizedTitle": "Improving Camera Travel for Immersive Colonography",
"abstract": "Colonography allows radiologists to navigate intricate subject-specific 3D colon images. Typically, travel is performed via Fly-Through or Fly-Over techniques that enable semi-automatic traveling through a constrained, well-defined path. While these techniques have been studied in non-VR desktop environments, their performance is yet not well understood in VR setups. In this paper, we study the effect of both techniques in immersive colonography and introduce the Elevator technique, which maintains a fixed camera orientation throughout navigation. Results suggest Fly-Over was overall the best for lesion detection at the cost of slower procedures, while Fly-Through may offer a more balanced trade-off between speed and effectiveness.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Colonography allows radiologists to navigate intricate subject-specific 3D colon images. Typically, travel is performed via Fly-Through or Fly-Over techniques that enable semi-automatic traveling through a constrained, well-defined path. While these techniques have been studied in non-VR desktop environments, their performance is yet not well understood in VR setups. In this paper, we study the effect of both techniques in immersive colonography and introduce the Elevator technique, which maintains a fixed camera orientation throughout navigation. Results suggest Fly-Over was overall the best for lesion detection at the cost of slower procedures, while Fly-Through may offer a more balanced trade-off between speed and effectiveness.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Colonography allows radiologists to navigate intricate subject-specific 3D colon images. Typically, travel is performed via Fly-Through or Fly-Over techniques that enable semi-automatic traveling through a constrained, well-defined path. While these techniques have been studied in non-VR desktop environments, their performance is yet not well understood in VR setups. In this paper, we study the effect of both techniques in immersive colonography and introduce the Elevator technique, which maintains a fixed camera orientation throughout navigation. Results suggest Fly-Over was overall the best for lesion detection at the cost of slower procedures, while Fly-Through may offer a more balanced trade-off between speed and effectiveness.",
"fno": "09090498",
"keywords": [
"Cameras",
"Colon",
"Elevators",
"Task Analysis",
"Three Dimensional Displays",
"Navigation",
"Colonography"
],
"authors": [
{
"affiliation": "INESC-ID Lisboa",
"fullName": "Soraia F. Paulo",
"givenName": "Soraia F.",
"surname": "Paulo",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Victoria University of Wellington,CMIC",
"fullName": "Daniel Medeiros",
"givenName": "Daniel",
"surname": "Medeiros",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Instituto Superior Técnico,ULisboa",
"fullName": "Pedro B. Borges",
"givenName": "Pedro B.",
"surname": "Borges",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "INESC-ID Lisboa",
"fullName": "Joaquim Jorge",
"givenName": "Joaquim",
"surname": "Jorge",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "INESC-ID Lisboa",
"fullName": "Daniel S. Lopes",
"givenName": "Daniel S.",
"surname": "Lopes",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vrw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-03-01T00:00:00",
"pubType": "proceedings",
"pages": "748-749",
"year": "2020",
"issn": null,
"isbn": "978-1-7281-6532-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "09090668",
"articleId": "1jIxwUJWcGA",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09090662",
"articleId": "1jIxmuXW5Es",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icpr/2008/2174/0/04760969",
"title": "Detection of anatomical landmarks in human colon from computed tomographic colonography images",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2008/04760969/12OmNC8MsH2",
"parentPublication": {
"id": "proceedings/icpr/2008/2174/0",
"title": "ICPR 2008 19th International Conference on Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2017/6647/0/07892295",
"title": "Gauntlet: Travel technique for immersive environments using non-dominant hand",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892295/12OmNCxbXAC",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2008/2174/0/04760992",
"title": "Matching colonic polyps from prone and supine CT colonography scans based on statistical curvature information",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2008/04760992/12OmNqzu6IB",
"parentPublication": {
"id": "proceedings/icpr/2008/2174/0",
"title": "ICPR 2008 19th International Conference on Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2008/2174/0/04760993",
"title": "Thin layer tissue classification for electronic cleansing of CT colonography data",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2008/04760993/12OmNwMFMm0",
"parentPublication": {
"id": "proceedings/icpr/2008/2174/0",
"title": "ICPR 2008 19th International Conference on Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-vis/2005/2766/0/01532806",
"title": "Teniae coli guided navigation and registration for virtual colonoscopy",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/2005/01532806/12OmNzUPpfH",
"parentPublication": {
"id": "proceedings/ieee-vis/2005/2766/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/imvip/2011/0230/0/06167847",
"title": "A Fast and Accurate Method for Automatic Segmentation of Colons at CT Colonography Based on Colon Geometrical Features",
"doi": null,
"abstractUrl": "/proceedings-article/imvip/2011/06167847/12OmNzYeAUw",
"parentPublication": {
"id": "proceedings/imvip/2011/0230/0",
"title": "2011 Irish Machine Vision and Image Processing Conference (IMVIP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/05/08642297",
"title": "Immersive Virtual Colonoscopy",
"doi": null,
"abstractUrl": "/journal/tg/2019/05/08642297/17PYElZaeVr",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a238",
"title": "Comparing Teleportation Methods for Travel in Everyday Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a238/1CJdYyJV76E",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iscv/2022/9558/0/09806115",
"title": "A 2-stages feature selection framework for colon cancer classification using SVM",
"doi": null,
"abstractUrl": "/proceedings-article/iscv/2022/09806115/1EBWtiiFbsQ",
"parentPublication": {
"id": "proceedings/iscv/2022/9558/0",
"title": "2022 International Conference on Intelligent Systems and Computer Vision (ISCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800e695",
"title": "Augmenting Colonoscopy Using Extended and Directional CycleGAN for Lossy Image Translation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800e695/1m3o8Getc4w",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1m3n9N02qgE",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"acronym": "cvpr",
"groupId": "1000147",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1m3o8Getc4w",
"doi": "10.1109/CVPR42600.2020.00475",
"title": "Augmenting Colonoscopy Using Extended and Directional CycleGAN for Lossy Image Translation",
"normalizedTitle": "Augmenting Colonoscopy Using Extended and Directional CycleGAN for Lossy Image Translation",
"abstract": "Colorectal cancer screening modalities, such as optical colonoscopy (OC) and virtual colonoscopy (VC), are critical for diagnosing and ultimately removing polyps (precursors for colon cancer). The non-invasive VC is normally used to inspect a 3D reconstructed colon (from computed tomography scans) for polyps and if found, the OC procedure is performed to physically traverse the colon via endoscope and remove these polyps. In this paper, we present a deep learning framework, Extended and Directional CycleGAN, for lossy unpaired image-to-image translation between OC and VC to augment OC video sequences with scale-consistent depth information from VC and VC with patient-specific textures, color and specular highlights from OC (e.g. for realistic polyp synthesis). Both OC and VC contain structural information, but it is obscured in OC by additional patient-specific texture and specular highlights, hence making the translation from OC to VC lossy. The existing CycleGAN approaches do not handle lossy transformations. To address this shortcoming, we introduce an extended cycle consistency loss, which compares the geometric structures from OC in the VC domain. This loss removes the need for the CycleGAN to embed OC information in the VC domain. To handle a stronger removal of the textures and lighting, a Directional Discriminator is introduced to differentiate the direction of translation (by creating paired information for the discriminator), as opposed to the standard CycleGAN which is direction-agnostic. Combining the extended cycle consistency loss and the Directional Discriminator, we show state-of-the-art results on scale-consistent depth inference for phantom, textured VC and for real polyp and normal colon video sequences. We also present results for realistic pendunculated and flat polyp synthesis from bumps introduced in 3D VC models.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Colorectal cancer screening modalities, such as optical colonoscopy (OC) and virtual colonoscopy (VC), are critical for diagnosing and ultimately removing polyps (precursors for colon cancer). The non-invasive VC is normally used to inspect a 3D reconstructed colon (from computed tomography scans) for polyps and if found, the OC procedure is performed to physically traverse the colon via endoscope and remove these polyps. In this paper, we present a deep learning framework, Extended and Directional CycleGAN, for lossy unpaired image-to-image translation between OC and VC to augment OC video sequences with scale-consistent depth information from VC and VC with patient-specific textures, color and specular highlights from OC (e.g. for realistic polyp synthesis). Both OC and VC contain structural information, but it is obscured in OC by additional patient-specific texture and specular highlights, hence making the translation from OC to VC lossy. The existing CycleGAN approaches do not handle lossy transformations. To address this shortcoming, we introduce an extended cycle consistency loss, which compares the geometric structures from OC in the VC domain. This loss removes the need for the CycleGAN to embed OC information in the VC domain. To handle a stronger removal of the textures and lighting, a Directional Discriminator is introduced to differentiate the direction of translation (by creating paired information for the discriminator), as opposed to the standard CycleGAN which is direction-agnostic. Combining the extended cycle consistency loss and the Directional Discriminator, we show state-of-the-art results on scale-consistent depth inference for phantom, textured VC and for real polyp and normal colon video sequences. We also present results for realistic pendunculated and flat polyp synthesis from bumps introduced in 3D VC models.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Colorectal cancer screening modalities, such as optical colonoscopy (OC) and virtual colonoscopy (VC), are critical for diagnosing and ultimately removing polyps (precursors for colon cancer). The non-invasive VC is normally used to inspect a 3D reconstructed colon (from computed tomography scans) for polyps and if found, the OC procedure is performed to physically traverse the colon via endoscope and remove these polyps. In this paper, we present a deep learning framework, Extended and Directional CycleGAN, for lossy unpaired image-to-image translation between OC and VC to augment OC video sequences with scale-consistent depth information from VC and VC with patient-specific textures, color and specular highlights from OC (e.g. for realistic polyp synthesis). Both OC and VC contain structural information, but it is obscured in OC by additional patient-specific texture and specular highlights, hence making the translation from OC to VC lossy. The existing CycleGAN approaches do not handle lossy transformations. To address this shortcoming, we introduce an extended cycle consistency loss, which compares the geometric structures from OC in the VC domain. This loss removes the need for the CycleGAN to embed OC information in the VC domain. To handle a stronger removal of the textures and lighting, a Directional Discriminator is introduced to differentiate the direction of translation (by creating paired information for the discriminator), as opposed to the standard CycleGAN which is direction-agnostic. Combining the extended cycle consistency loss and the Directional Discriminator, we show state-of-the-art results on scale-consistent depth inference for phantom, textured VC and for real polyp and normal colon video sequences. We also present results for realistic pendunculated and flat polyp synthesis from bumps introduced in 3D VC models.",
"fno": "716800e695",
"keywords": [
"Biological Organs",
"Biomedical Optical Imaging",
"Cancer",
"Endoscopes",
"Image Reconstruction",
"Image Texture",
"Learning Artificial Intelligence",
"Medical Image Processing",
"Phantoms",
"Virtual Reality",
"Endoscope",
"Computed Tomography Scans",
"Deep Learning Framework",
"Patient Specific Textures",
"Optical Colonoscopy",
"Directional Discriminator",
"Extended Cycle GAN",
"Directional Cycle GAN",
"Patient Specific Texture",
"Textured Virtual Colonoscopy",
"Colon Video Sequences",
"Flat Polyp Synthesis",
"Realistic Pendunculated Polyp Synthesis",
"Extended Cycle Consistency Loss",
"Scale Consistent Depth Information",
"OC Video Sequences",
"3 D Reconstructed Colon",
"Colorectal Cancer Screening Modalities",
"Lossy Image Translation",
"Colon",
"Three Dimensional Displays",
"Gallium Nitride",
"Image Reconstruction",
"Cancer",
"Endoscopes",
"Machine Learning"
],
"authors": [
{
"affiliation": "Stony Brook University",
"fullName": "Shawn Mathew",
"givenName": "Shawn",
"surname": "Mathew",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Memorial Sloan Kettering Cancer Center",
"fullName": "Saad Nadeem",
"givenName": "Saad",
"surname": "Nadeem",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Stony Brook University",
"fullName": "Sruti Kumari",
"givenName": "Sruti",
"surname": "Kumari",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Stony Brook University",
"fullName": "Arie Kaufman",
"givenName": "Arie",
"surname": "Kaufman",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-06-01T00:00:00",
"pubType": "proceedings",
"pages": "4695-4704",
"year": "2020",
"issn": null,
"isbn": "978-1-7281-7168-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "716800e685",
"articleId": "1m3ngsvU9zO",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "716800e705",
"articleId": "1m3nfPabQBi",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ieee-vis/2005/2766/0/27660036",
"title": "Teniae Coli Guided Navigation and Registration for Virtual Colonoscopy",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/2005/27660036/12OmNAq3hQF",
"parentPublication": {
"id": "proceedings/ieee-vis/2005/2766/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icraect/2017/6701/0/6701a005",
"title": "Principal Curvature Based Polyp Detection in Wireless Capsule Endoscopy Images",
"doi": null,
"abstractUrl": "/proceedings-article/icraect/2017/6701a005/12OmNwGZNGI",
"parentPublication": {
"id": "proceedings/icraect/2017/6701/0",
"title": "2017 International Conference on Recent Advances in Electronics and Communication Technology (ICRAECT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibe/2009/3656/0/05211317",
"title": "3D Reconstruction of Colon Segments from Colonoscopy Images",
"doi": null,
"abstractUrl": "/proceedings-article/bibe/2009/05211317/12OmNxXCGFm",
"parentPublication": {
"id": "proceedings/bibe/2009/3656/0",
"title": "2009 Ninth IEEE International Conference on Bioinformatics and Bioengineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/biomedvis/1995/7198/0/71980026",
"title": "3D virtual colonoscopy",
"doi": null,
"abstractUrl": "/proceedings-article/biomedvis/1995/71980026/12OmNy49sFe",
"parentPublication": {
"id": "proceedings/biomedvis/1995/7198/0",
"title": "Biomedical Visualization",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dvis/2014/6826/0/07160105",
"title": "Benefits of 3D immersion for virtual colonoscopy",
"doi": null,
"abstractUrl": "/proceedings-article/3dvis/2014/07160105/12OmNylKB4e",
"parentPublication": {
"id": "proceedings/3dvis/2014/6826/0",
"title": "2014 IEEE VIS International Workshop on 3DVis (3DVis)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-vis/2005/2766/0/01532806",
"title": "Teniae coli guided navigation and registration for virtual colonoscopy",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/2005/01532806/12OmNzUPpfH",
"parentPublication": {
"id": "proceedings/ieee-vis/2005/2766/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2006/05/v0861",
"title": "A Pipeline for Computer Aided Polyp Detection",
"doi": null,
"abstractUrl": "/journal/tg/2006/05/v0861/13rRUIJuxpp",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/05/08642297",
"title": "Immersive Virtual Colonoscopy",
"doi": null,
"abstractUrl": "/journal/tg/2019/05/08642297/17PYElZaeVr",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/csci/2018/1360/0/136000b460",
"title": "Voice Conversion Using Conditional CycleGAN",
"doi": null,
"abstractUrl": "/proceedings-article/csci/2018/136000b460/1gjRCuY0w6c",
"parentPublication": {
"id": "proceedings/csci/2018/1360/0",
"title": "2018 International Conference on Computational Science and Computational Intelligence (CSCI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/csci/2019/5584/0/558400b012",
"title": "Segmentation and Volumetric Analysis of Colon Wall for Detection of Flat Polyp Candidates via CT Colonography",
"doi": null,
"abstractUrl": "/proceedings-article/csci/2019/558400b012/1jdDVcUA31m",
"parentPublication": {
"id": "proceedings/csci/2019/5584/0",
"title": "2019 International Conference on Computational Science and Computational Intelligence (CSCI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1CJbEwHHqEg",
"title": "2022 IEEE on Conference Virtual Reality and 3D User Interfaces (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1CJbHdnVzd6",
"doi": "10.1109/VR51125.2022.00086",
"title": "Optimal Pose Guided Redirected Walking with Pose Score Precomputation",
"normalizedTitle": "Optimal Pose Guided Redirected Walking with Pose Score Precomputation",
"abstract": "Redirected walking (RDW) aims to reduce the collisions in the physical space for VR applications. However, most of the previous RDW methods do not consider future possibilities of collisions after imperceptibly redirecting users. In this paper, we combine the subtle RDW methods and reset strategy in our method design and propose a novel solution for RDW that can make better use of physical space and trigger fewer resets. The key idea of our method is to discretize the representation of possible user positions and orientations by a series of standard poses and rate them based on the possibilities of hitting obstacles of their reachable poses. A transfer path algorithm is proposed to measure the accessibility among standard poses and is used to support the calculation of the scores of standard poses. Using our method, the user can be redirected imperceptibly to the optimal pose with the best score among all the reachable poses from the user’s current pose during walking. Experiments demonstrate that our method outperforms state-of-the-art methods in various environment sizes and obstacle layouts.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Redirected walking (RDW) aims to reduce the collisions in the physical space for VR applications. However, most of the previous RDW methods do not consider future possibilities of collisions after imperceptibly redirecting users. In this paper, we combine the subtle RDW methods and reset strategy in our method design and propose a novel solution for RDW that can make better use of physical space and trigger fewer resets. The key idea of our method is to discretize the representation of possible user positions and orientations by a series of standard poses and rate them based on the possibilities of hitting obstacles of their reachable poses. A transfer path algorithm is proposed to measure the accessibility among standard poses and is used to support the calculation of the scores of standard poses. Using our method, the user can be redirected imperceptibly to the optimal pose with the best score among all the reachable poses from the user’s current pose during walking. Experiments demonstrate that our method outperforms state-of-the-art methods in various environment sizes and obstacle layouts.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Redirected walking (RDW) aims to reduce the collisions in the physical space for VR applications. However, most of the previous RDW methods do not consider future possibilities of collisions after imperceptibly redirecting users. In this paper, we combine the subtle RDW methods and reset strategy in our method design and propose a novel solution for RDW that can make better use of physical space and trigger fewer resets. The key idea of our method is to discretize the representation of possible user positions and orientations by a series of standard poses and rate them based on the possibilities of hitting obstacles of their reachable poses. A transfer path algorithm is proposed to measure the accessibility among standard poses and is used to support the calculation of the scores of standard poses. Using our method, the user can be redirected imperceptibly to the optimal pose with the best score among all the reachable poses from the user’s current pose during walking. Experiments demonstrate that our method outperforms state-of-the-art methods in various environment sizes and obstacle layouts.",
"fno": "961700a655",
"keywords": [
"Collision Avoidance",
"Mobile Robots",
"Pose Estimation",
"Virtual Reality",
"Optimal Pose Guided Redirected Walking",
"Pose Score Precomputation",
"Physical Space",
"VR Applications",
"Previous RDW Methods",
"Future Possibilities",
"Subtle RDW Methods",
"Method Design",
"Trigger Fewer Resets",
"Possible User Positions",
"Standard Poses",
"Reachable Poses",
"Legged Locomotion",
"Three Dimensional Displays",
"Design Methodology",
"Current Measurement",
"Conferences",
"Layout",
"Virtual Reality",
"Computing Methodologies",
"Computer Graphics",
"Graphics Systems And Interfaces",
"Virtual Reality"
],
"authors": [
{
"affiliation": "Tsinghua University,BIMSA",
"fullName": "Sen-Zhe Xu",
"givenName": "Sen-Zhe",
"surname": "Xu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Tsinghua University",
"fullName": "Tian Lv",
"givenName": "Tian",
"surname": "Lv",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Tsinghua University",
"fullName": "Guangrong He",
"givenName": "Guangrong",
"surname": "He",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Tsinghua University",
"fullName": "Chia-Hao Chen",
"givenName": "Chia-Hao",
"surname": "Chen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Victoria University of Wellington",
"fullName": "Fang-Lue Zhang",
"givenName": "Fang-Lue",
"surname": "Zhang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Tsinghua University,BNRist",
"fullName": "Song-Hai Zhang",
"givenName": "Song-Hai",
"surname": "Zhang",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-03-01T00:00:00",
"pubType": "proceedings",
"pages": "655-663",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-9617-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [
{
"id": "1CJbH6tARVK",
"name": "pvr202296170-09756797s1-mm_961700a655.zip",
"size": "27.2 MB",
"location": "https://www.computer.org/csdl/api/v1/extra/pvr202296170-09756797s1-mm_961700a655.zip",
"__typename": "WebExtraType"
}
],
"adjacentArticles": {
"previous": {
"fno": "961700a644",
"articleId": "1CJclx5Qacw",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "961700a664",
"articleId": "1CJc6OEy8WA",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2017/6647/0/07892373",
"title": "Application of redirected walking in room-scale VR",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892373/12OmNxG1ySA",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2013/01/mcg2013010006",
"title": "Using Perceptual Illusions for Redirected Walking",
"doi": null,
"abstractUrl": "/magazine/cg/2013/01/mcg2013010006/13rRUB6SpRZ",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/04/ttg201404579",
"title": "Performance of Redirected Walking Algorithms in a Constrained Virtual World",
"doi": null,
"abstractUrl": "/journal/tg/2014/04/ttg201404579/13rRUwjoNx4",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/05/08645818",
"title": "Multi-User Redirected Walking and Resetting Using Artificial Potential Fields",
"doi": null,
"abstractUrl": "/journal/tg/2019/05/08645818/17PYEiVyc2v",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/05/08645699",
"title": "Shrinking Circles: Adaptation to Increased Curvature Gain in Redirected Walking",
"doi": null,
"abstractUrl": "/journal/tg/2019/05/08645699/17PYElBjW00",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a922",
"title": "Robust Redirected Walking in the Wild",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a922/1CJfaCP53nq",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/10049511",
"title": "Redirected Walking On Omnidirectional Treadmill",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/10049511/1KYoAYFd0m4",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2023/4815/0/481500a053",
"title": "Redirected Walking Based on Historical User Walking Data",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2023/481500a053/1MNgUnNG7Ju",
"parentPublication": {
"id": "proceedings/vr/2023/4815/0",
"title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a550",
"title": "Where are you? Influence of Redirected Walking on Audio-Visual Position Estimation of Co-Located Users",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a550/1tnWDmPDtHG",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2021/0158/0/015800a498",
"title": "Redirected Walking using Noisy Galvanic Vestibular Stimulation",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2021/015800a498/1yeCU92Xt5K",
"parentPublication": {
"id": "proceedings/ismar/2021/0158/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1jIx7fmpQ9a",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1jIxfFs8qgo",
"doi": "10.1109/VR46266.2020.00033",
"title": "Dynamic Artificial Potential Fields for Multi-User Redirected Walking",
"normalizedTitle": "Dynamic Artificial Potential Fields for Multi-User Redirected Walking",
"abstract": "Immersive Virtual Reality (VR) systems that combine Head Mounted Displays (HMDs) and a position tracking system support the multiple users or participants to collaborate in the same physical space for a large-scale virtual environment. Because the multiple users sharing the same physical space are in a dynamic state, the key technique of multi-user VR system is how to solve the problem of potential collisions among the users who are moving both virtually and physically. In order to better solve the collision problem caused by such dynamic changes, this work presents a new strategy of multi-user redirected walking using dynamic artificial potential fields, which generates repulsion to ‘push’ users away from obstacles and other users, and uses gravity to ‘attract’ users to an open or unobstructed space. In this method, the users not only get repulsive forces from walls, but also from other users and their future states that are called avatars. At the same time, the users will get gravitational force from the steering target. The target selection considers the size of open space, the distance between the steering target and the boundary of physical space, and the distance between the steering target and the center of the physical space. Therefore, the system can steer users to an open area in the physical space to further reduce collisions. To verify the validity of our method, we developed a software to statistically analyze the influence of different factors, such as the physical space size and the number of users. Data from experiments shows that our method reduces the potential user resets by about 20%.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Immersive Virtual Reality (VR) systems that combine Head Mounted Displays (HMDs) and a position tracking system support the multiple users or participants to collaborate in the same physical space for a large-scale virtual environment. Because the multiple users sharing the same physical space are in a dynamic state, the key technique of multi-user VR system is how to solve the problem of potential collisions among the users who are moving both virtually and physically. In order to better solve the collision problem caused by such dynamic changes, this work presents a new strategy of multi-user redirected walking using dynamic artificial potential fields, which generates repulsion to ‘push’ users away from obstacles and other users, and uses gravity to ‘attract’ users to an open or unobstructed space. In this method, the users not only get repulsive forces from walls, but also from other users and their future states that are called avatars. At the same time, the users will get gravitational force from the steering target. The target selection considers the size of open space, the distance between the steering target and the boundary of physical space, and the distance between the steering target and the center of the physical space. Therefore, the system can steer users to an open area in the physical space to further reduce collisions. To verify the validity of our method, we developed a software to statistically analyze the influence of different factors, such as the physical space size and the number of users. Data from experiments shows that our method reduces the potential user resets by about 20%.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Immersive Virtual Reality (VR) systems that combine Head Mounted Displays (HMDs) and a position tracking system support the multiple users or participants to collaborate in the same physical space for a large-scale virtual environment. Because the multiple users sharing the same physical space are in a dynamic state, the key technique of multi-user VR system is how to solve the problem of potential collisions among the users who are moving both virtually and physically. In order to better solve the collision problem caused by such dynamic changes, this work presents a new strategy of multi-user redirected walking using dynamic artificial potential fields, which generates repulsion to ‘push’ users away from obstacles and other users, and uses gravity to ‘attract’ users to an open or unobstructed space. In this method, the users not only get repulsive forces from walls, but also from other users and their future states that are called avatars. At the same time, the users will get gravitational force from the steering target. The target selection considers the size of open space, the distance between the steering target and the boundary of physical space, and the distance between the steering target and the center of the physical space. Therefore, the system can steer users to an open area in the physical space to further reduce collisions. To verify the validity of our method, we developed a software to statistically analyze the influence of different factors, such as the physical space size and the number of users. Data from experiments shows that our method reduces the potential user resets by about 20%.",
"fno": "09089569",
"keywords": [
"Legged Locomotion",
"Space Vehicles",
"Virtual Environments",
"Force",
"Heuristic Algorithms",
"Prediction Algorithms",
"Redirected Walking",
"Virtual Reality",
"Head Mounted Display",
"Multiple Users",
"Virtual Roaming"
],
"authors": [
{
"affiliation": "Zhejiang University of Technology,China",
"fullName": "Tianyang Dong",
"givenName": "Tianyang",
"surname": "Dong",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Zhejiang University of Technology,China",
"fullName": "Xianwei Chen",
"givenName": "Xianwei",
"surname": "Chen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Zhejiang University of Technology,China",
"fullName": "Yifan Song",
"givenName": "Yifan",
"surname": "Song",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Zhejiang University of Technology,China",
"fullName": "Wenyuan Ying",
"givenName": "Wenyuan",
"surname": "Ying",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Zhejiang University of Technology,China",
"fullName": "Jing Fan",
"givenName": "Jing",
"surname": "Fan",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-03-01T00:00:00",
"pubType": "proceedings",
"pages": "146-154",
"year": "2020",
"issn": null,
"isbn": "978-1-7281-5608-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "09089579",
"articleId": "1jIx7XMm676",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09089532",
"articleId": "1jIx7m6wYKc",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/3dui/2016/0842/0/07460032",
"title": "Automated path prediction for redirected walking using navigation meshes",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2016/07460032/12OmNBKEymO",
"parentPublication": {
"id": "proceedings/3dui/2016/0842/0",
"title": "2016 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2014/2871/0/06802053",
"title": "An enhanced steering algorithm for redirected walking in virtual environments",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2014/06802053/12OmNCbU2Wt",
"parentPublication": {
"id": "proceedings/vr/2014/2871/0",
"title": "2014 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2017/6647/0/07892235",
"title": "An evaluation of strategies for two-user redirected walking in shared physical spaces",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892235/12OmNy87Qwg",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/11/ttg2013111872",
"title": "Optimizing Constrained-Environment Redirected Walking Instructions Using Search Techniques",
"doi": null,
"abstractUrl": "/journal/tg/2013/11/ttg2013111872/13rRUIM2VBH",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/05/08645818",
"title": "Multi-User Redirected Walking and Resetting Using Artificial Potential Fields",
"doi": null,
"abstractUrl": "/journal/tg/2019/05/08645818/17PYEiVyc2v",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798118",
"title": "PReWAP: Predictive Redirected Walking Using Artificial Potential Fields",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798118/1cJ0XGXV02s",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797983",
"title": "A General Reactive Algorithm for Redirected Walking Using Artificial Potential Functions",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797983/1cJ12ULGPzq",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798121",
"title": "Real-time Optimal Planning for Redirected Walking Using Deep Q-Learning",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798121/1cJ17Y60ruM",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797818",
"title": "Effects of Tracking Area Shape and Size on Artificial Potential Field Redirected Walking",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797818/1cJ1htJ7ArK",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2021/1838/0/255600a626",
"title": "Dynamic Density-based Redirected Walking Towards Multi-user Virtual Environments",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2021/255600a626/1tuAz6T7lXG",
"parentPublication": {
"id": "proceedings/vr/2021/1838/0",
"title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1tuAeQeDJja",
"title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1tuAz6T7lXG",
"doi": "10.1109/VR50410.2021.00088",
"title": "Dynamic Density-based Redirected Walking Towards Multi-user Virtual Environments",
"normalizedTitle": "Dynamic Density-based Redirected Walking Towards Multi-user Virtual Environments",
"abstract": "With more attention being paid to large scale virtual environments, the demand for more users to collaborate in the same physical space is growing rapidly. Due to the complex collision problem caused by the limitation of physical space, the multi-user redirected walking methods are devoted to improving the ability of multi-user navigation in large-scale virtual environments by reducing the disturbance of resets. Because the existing multi-user redirected walking methods do not consider the density of users in the physical space, there are more boundary conflicts in the real walking for the multi-user virtual environment. In order to decrease the boundary conflicts, this paper presents a novel method of dynamic density-based redirected walking towards multi-user virtual environments. This method dynamically adjusts the user distribution to a state with high center density and low boundary density through the density force, which is generated by the density difference between standard density and actual density. In our method, the users in high-density areas are guided by a repulsive force away from the central area while the users in low-density areas are guided by the gravitational forces towards the central area. Our method can select a double-density optimal gravitational point as the turning target, so all users can move to the area of minimum density to make better use of the whole physical space. Our method also adopts the artificial potential field (APF) forces to prevent user collisions caused by usergathering. The users are guided to move in the direction of the resultant force vector of density force, gravitational force and APF force. In addition, this paper introduces a matching resetting method to further adjust the density distribution while dealing with user conflicts. 
The results of experiments show that our method successfully reduces the potential conflicts about 30% on average compared with the existing reactive multi-user redirection algorithms. Especially as the number of users increases, our method can avoid more boundary conflicts by using the adjustment of density.",
"abstracts": [
{
"abstractType": "Regular",
"content": "With more attention being paid to large scale virtual environments, the demand for more users to collaborate in the same physical space is growing rapidly. Due to the complex collision problem caused by the limitation of physical space, the multi-user redirected walking methods are devoted to improving the ability of multi-user navigation in large-scale virtual environments by reducing the disturbance of resets. Because the existing multi-user redirected walking methods do not consider the density of users in the physical space, there are more boundary conflicts in the real walking for the multi-user virtual environment. In order to decrease the boundary conflicts, this paper presents a novel method of dynamic density-based redirected walking towards multi-user virtual environments. This method dynamically adjusts the user distribution to a state with high center density and low boundary density through the density force, which is generated by the density difference between standard density and actual density. In our method, the users in high-density areas are guided by a repulsive force away from the central area while the users in low-density areas are guided by the gravitational forces towards the central area. Our method can select a double-density optimal gravitational point as the turning target, so all users can move to the area of minimum density to make better use of the whole physical space. Our method also adopts the artificial potential field (APF) forces to prevent user collisions caused by usergathering. The users are guided to move in the direction of the resultant force vector of density force, gravitational force and APF force. In addition, this paper introduces a matching resetting method to further adjust the density distribution while dealing with user conflicts. 
The results of experiments show that our method successfully reduces the potential conflicts about 30% on average compared with the existing reactive multi-user redirection algorithms. Especially as the number of users increases, our method can avoid more boundary conflicts by using the adjustment of density.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "With more attention being paid to large scale virtual environments, the demand for more users to collaborate in the same physical space is growing rapidly. Due to the complex collision problem caused by the limitation of physical space, the multi-user redirected walking methods are devoted to improving the ability of multi-user navigation in large-scale virtual environments by reducing the disturbance of resets. Because the existing multi-user redirected walking methods do not consider the density of users in the physical space, there are more boundary conflicts in the real walking for the multi-user virtual environment. In order to decrease the boundary conflicts, this paper presents a novel method of dynamic density-based redirected walking towards multi-user virtual environments. This method dynamically adjusts the user distribution to a state with high center density and low boundary density through the density force, which is generated by the density difference between standard density and actual density. In our method, the users in high-density areas are guided by a repulsive force away from the central area while the users in low-density areas are guided by the gravitational forces towards the central area. Our method can select a double-density optimal gravitational point as the turning target, so all users can move to the area of minimum density to make better use of the whole physical space. Our method also adopts the artificial potential field (APF) forces to prevent user collisions caused by usergathering. The users are guided to move in the direction of the resultant force vector of density force, gravitational force and APF force. In addition, this paper introduces a matching resetting method to further adjust the density distribution while dealing with user conflicts. 
The results of experiments show that our method successfully reduces the potential conflicts about 30% on average compared with the existing reactive multi-user redirection algorithms. Especially as the number of users increases, our method can avoid more boundary conflicts by using the adjustment of density.",
"fno": "255600a626",
"keywords": [
"Collision Avoidance",
"Virtual Reality",
"Density Distribution",
"User Conflicts",
"Users Increases",
"Boundary Conflicts",
"Dynamic Density",
"Multiuser Virtual Environment",
"Physical Space",
"Walking Methods",
"Multiuser Navigation",
"Large Scale Virtual Environments",
"User Distribution",
"High Center Density",
"Low Boundary Density",
"Density Force",
"Density Difference",
"High Density Areas",
"Low Density Areas",
"Gravitational Force",
"Double Density Optimal Gravitational Point",
"User Collisions",
"Reactive Multiuser Redirection",
"Artificial Potential Field Forces",
"Legged Locomotion",
"Three Dimensional Displays",
"Navigation",
"Heuristic Algorithms",
"Force",
"Virtual Environments",
"User Interfaces",
"Virtual Reality VR",
"Redirected Walking RDW",
"Multiple Users",
"Artificial Potential Field",
"Collision Avoidance"
],
"authors": [
{
"affiliation": "Zhejiang University of Technology,China",
"fullName": "Tianyang Dong",
"givenName": "Tianyang",
"surname": "Dong",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Zhejiang University of Technology,China",
"fullName": "Yue Shen",
"givenName": "Yue",
"surname": "Shen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Zhejiang University of Technology,China",
"fullName": "Tieqi Gao",
"givenName": "Tieqi",
"surname": "Gao",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Zhejiang University of Technology,China",
"fullName": "Jing Fan",
"givenName": "Jing",
"surname": "Fan",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-03-01T00:00:00",
"pubType": "proceedings",
"pages": "626-634",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-1838-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "255600a616",
"articleId": "1tuAWRhj1Uk",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "255600a635",
"articleId": "1tuAUYLfJyo",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/3dui/2016/0842/0/07460032",
"title": "Automated path prediction for redirected walking using navigation meshes",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2016/07460032/12OmNBKEymO",
"parentPublication": {
"id": "proceedings/3dui/2016/0842/0",
"title": "2016 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2014/2871/0/06802053",
"title": "An enhanced steering algorithm for redirected walking in virtual environments",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2014/06802053/12OmNCbU2Wt",
"parentPublication": {
"id": "proceedings/vr/2014/2871/0",
"title": "2014 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2016/0836/0/07504745",
"title": "Acoustic redirected walking with auditory cues by means of wave field synthesis",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2016/07504745/12OmNxYtu4K",
"parentPublication": {
"id": "proceedings/vr/2016/0836/0",
"title": "2016 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/11/ttg2013111872",
"title": "Optimizing Constrained-Environment Redirected Walking Instructions Using Search Techniques",
"doi": null,
"abstractUrl": "/journal/tg/2013/11/ttg2013111872/13rRUIM2VBH",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/05/08645818",
"title": "Multi-User Redirected Walking and Resetting Using Artificial Potential Fields",
"doi": null,
"abstractUrl": "/journal/tg/2019/05/08645818/17PYEiVyc2v",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798319",
"title": "Simulation and Evaluation of Three-User Redirected Walking Algorithm in Shared Physical Spaces",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798319/1cJ1aPwr8l2",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2020/5608/0/09089569",
"title": "Dynamic Artificial Potential Fields for Multi-User Redirected Walking",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2020/09089569/1jIxfFs8qgo",
"parentPublication": {
"id": "proceedings/vr/2020/5608/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090595",
"title": "Reactive Alignment of Virtual and Physical Environments Using Redirected Walking",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090595/1jIxm1j8B2w",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ec/2022/02/09364750",
"title": "Multi-Technique Redirected Walking Method",
"doi": null,
"abstractUrl": "/journal/ec/2022/02/09364750/1rxdpzgvsxG",
"parentPublication": {
"id": "trans/ec",
"title": "IEEE Transactions on Emerging Topics in Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/05/09382909",
"title": "ARC: Alignment-based Redirection Controller for Redirected Walking in Complex Environments",
"doi": null,
"abstractUrl": "/journal/tg/2021/05/09382909/1saZt58Vwf6",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNzw8jgZ",
"title": "2011 IEEE Virtual Reality (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2011",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNC8MsBR",
"doi": "10.1109/VR.2011.5759454",
"title": "Velocity-dependent dynamic curvature gain for redirected walking",
"normalizedTitle": "Velocity-dependent dynamic curvature gain for redirected walking",
"abstract": "The aim of Redirected Walking (RDW) is to redirect a person along their path of travel in a Virtual Environment (VE) in order to increase the virtual space that can be explored in a given tracked area. Among other techniques, the user is redirected on a curved real-world path while visually walking straight in the VE (curvature gain). In this paper, we describe two experiments we conducted to test and extend RDW techniques. In Experiment 1, we measured the effect of walking speed on the detection threshold for curvature of the walking path. In a head-mounted display (HMD) VE, we found a decreased sensitivity for curvature for the slowest walking speed. When participants walked at 0.75 m/s, their detection threshold was approximately 0.1m",
"abstracts": [
{
"abstractType": "Regular",
"content": "The aim of Redirected Walking (RDW) is to redirect a person along their path of travel in a Virtual Environment (VE) in order to increase the virtual space that can be explored in a given tracked area. Among other techniques, the user is redirected on a curved real-world path while visually walking straight in the VE (curvature gain). In this paper, we describe two experiments we conducted to test and extend RDW techniques. In Experiment 1, we measured the effect of walking speed on the detection threshold for curvature of the walking path. In a head-mounted display (HMD) VE, we found a decreased sensitivity for curvature for the slowest walking speed. When participants walked at 0.75 m/s, their detection threshold was approximately 0.1m",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The aim of Redirected Walking (RDW) is to redirect a person along their path of travel in a Virtual Environment (VE) in order to increase the virtual space that can be explored in a given tracked area. Among other techniques, the user is redirected on a curved real-world path while visually walking straight in the VE (curvature gain). In this paper, we describe two experiments we conducted to test and extend RDW techniques. In Experiment 1, we measured the effect of walking speed on the detection threshold for curvature of the walking path. In a head-mounted display (HMD) VE, we found a decreased sensitivity for curvature for the slowest walking speed. When participants walked at 0.75 m/s, their detection threshold was approximately 0.1m",
"fno": "05759454",
"keywords": [
"Mean Walked Distance",
"Velocity Dependent Dynamic Curvature Gain",
"Redirected Walking",
"Virtual Environment",
"Head Mounted Display",
"Virtual City Model",
"Dynamic Gain Controller",
"Avatar Controller",
"Virtual Space"
],
"authors": [
{
"affiliation": "Max Planck Inst. for Biol. Cybern., Reutlingen Univ., Reutlingen, Germany",
"fullName": "C T Neth",
"givenName": "C T",
"surname": "Neth",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Max Planck Inst. for Biol. Cybern., Tu¨bingen, Germany",
"fullName": "J L Souman",
"givenName": "J L",
"surname": "Souman",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Max Planck Inst. for Biol. Cybern., Tu¨bingen, Germany",
"fullName": "D Engel",
"givenName": "D",
"surname": "Engel",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Reutlingen Univ., Reutlingen, Germany",
"fullName": "U Kloos",
"givenName": "U",
"surname": "Kloos",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Dept. of Brain & Cognitive Eng., Korea Univ., Seoul, South Korea",
"fullName": "Heinrich H Bu¨lthoff",
"givenName": "Heinrich H",
"surname": "Bu¨lthoff",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Max Planck Inst. for Biol. Cybern., Tu¨bingen, Germany",
"fullName": "B J Mohler",
"givenName": "B J",
"surname": "Mohler",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2011-03-01T00:00:00",
"pubType": "proceedings",
"pages": "151-158",
"year": "2011",
"issn": null,
"isbn": "978-1-4577-0039-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "05759453",
"articleId": "12OmNqNosaO",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "05759455",
"articleId": "12OmNwFid4R",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2017/6647/0/07892279",
"title": "Curvature gains in redirected walking: A closer look",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892279/12OmNBEGYJE",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2017/6647/0/07892373",
"title": "Application of redirected walking in room-scale VR",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892373/12OmNxG1ySA",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446479",
"title": "Adopting the Roll Manipulation for Redirected Walking",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446479/13bd1eSlys4",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446263",
"title": "Mobius Walker: Pitch and Roll Redirected Walking",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446263/13bd1gJ1v07",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446587",
"title": "Do Textures and Global Illumination Influence the Perception of Redirected Walking Based on Translational Gain?",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446587/13bd1gJ1v0m",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446225",
"title": "Effect of Environment Size on Curvature Redirected Walking Thresholds",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446225/13bd1sx4Zt8",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/07/06200791",
"title": "Velocity-Dependent Dynamic Curvature Gain for Redirected Walking",
"doi": null,
"abstractUrl": "/journal/tg/2012/07/06200791/13rRUyuNswW",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2018/7459/0/745900a115",
"title": "Rethinking Redirected Walking: On the Use of Curvature Gains Beyond Perceptual Limitations and Revisiting Bending Gains",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2018/745900a115/17D45WK5AlG",
"parentPublication": {
"id": "proceedings/ismar/2018/7459/0",
"title": "2018 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/05/08645699",
"title": "Shrinking Circles: Adaptation to Increased Curvature Gain in Redirected Walking",
"doi": null,
"abstractUrl": "/journal/tg/2019/05/08645699/17PYElBjW00",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/11/09523890",
"title": "Redirected Walking using Continuous Curvature Manipulation",
"doi": null,
"abstractUrl": "/journal/tg/2021/11/09523890/1wpqBpgOKUE",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1cI6akLvAuQ",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2019",
"__typename": "ProceedingType"
},
"article": {
"id": "1cJ0KBrAUYE",
"doi": "10.1109/VR.2019.8798231",
"title": "The Effect of Hanger Reflex on Virtual Reality Redirected Walking",
"normalizedTitle": "The Effect of Hanger Reflex on Virtual Reality Redirected Walking",
"abstract": "One of the major challenges in virtual reality (VR) is to create a perception of a large virtual space within a limited physical space. Here, we explore the effect of haptic-based navigation by Hanger Reflex (HR) on the perception in redirected walking (RDW) with visual manipulation. Seven individuals walked along a straight path in VR while, unbeknown to them, the visual scene was rotated with a curvature gain of π/36, forcing them to walk in a circular path in real space. HR rotation (in the left, right, and neutral direction) was induced by a wearable haptic device during each of the walking trials, and they reported their perceived walking direction and effort to walk along the path on visual analog scale. Experiment results showed that HR can influence the perception in RDW, but the effects may be complex and therefore require further investigation.",
"abstracts": [
{
"abstractType": "Regular",
"content": "One of the major challenges in virtual reality (VR) is to create a perception of a large virtual space within a limited physical space. Here, we explore the effect of haptic-based navigation by Hanger Reflex (HR) on the perception in redirected walking (RDW) with visual manipulation. Seven individuals walked along a straight path in VR while, unbeknown to them, the visual scene was rotated with a curvature gain of π/36, forcing them to walk in a circular path in real space. HR rotation (in the left, right, and neutral direction) was induced by a wearable haptic device during each of the walking trials, and they reported their perceived walking direction and effort to walk along the path on visual analog scale. Experiment results showed that HR can influence the perception in RDW, but the effects may be complex and therefore require further investigation.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "One of the major challenges in virtual reality (VR) is to create a perception of a large virtual space within a limited physical space. Here, we explore the effect of haptic-based navigation by Hanger Reflex (HR) on the perception in redirected walking (RDW) with visual manipulation. Seven individuals walked along a straight path in VR while, unbeknown to them, the visual scene was rotated with a curvature gain of π/36, forcing them to walk in a circular path in real space. HR rotation (in the left, right, and neutral direction) was induced by a wearable haptic device during each of the walking trials, and they reported their perceived walking direction and effort to walk along the path on visual analog scale. Experiment results showed that HR can influence the perception in RDW, but the effects may be complex and therefore require further investigation.",
"fno": "08798231",
"keywords": [
"Haptic Interfaces",
"Virtual Reality",
"Curvature Gain",
"Visual Scene",
"Visual Manipulation",
"Haptic Based Navigation",
"VR",
"Virtual Reality Redirected Walking",
"Hanger Reflex",
"RDW",
"Visual Analog Scale",
"Perceived Walking Direction",
"Wearable Haptic Device",
"Neutral Direction",
"Legged Locomotion",
"Visualization",
"Haptic Interfaces",
"Trajectory",
"Virtual Reality",
"Resists",
"Face",
"Virtual Reality",
"Redirected Walking",
"Hanger Reflex",
"Visual Manipulation",
"Human Centered Computing X 007 E User Studies",
"Human Centered Computing X 007 E Virtual Reality",
"Human Centered Computing X 007 E Haptic Devices"
],
"authors": [
{
"affiliation": "School of Integrative and Global Majors, University of Tsukuba, Japan",
"fullName": "Chun Xie",
"givenName": "Chun",
"surname": "Xie",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "School of Integrative and Global Majors, University of Tsukuba, Japan",
"fullName": "Chun Kwang Tan",
"givenName": "Chun Kwang",
"surname": "Tan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "School of Integrative and Global Majors, University of Tsukuba, Japan",
"fullName": "Taisei Sugiyama",
"givenName": "Taisei",
"surname": "Sugiyama",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2019-03-01T00:00:00",
"pubType": "proceedings",
"pages": "1243-1244",
"year": "2019",
"issn": null,
"isbn": "978-1-7281-1377-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08797791",
"articleId": "1cJ12HLy2ac",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08798050",
"articleId": "1cJ1c2miivC",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2017/6647/0/07892279",
"title": "Curvature gains in redirected walking: A closer look",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892279/12OmNBEGYJE",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446263",
"title": "Mobius Walker: Pitch and Roll Redirected Walking",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446263/13bd1gJ1v07",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446225",
"title": "Effect of Environment Size on Curvature Redirected Walking Thresholds",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446225/13bd1sx4Zt8",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2018/7459/0/745900a115",
"title": "Rethinking Redirected Walking: On the Use of Curvature Gains Beyond Perceptual Limitations and Revisiting Bending Gains",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2018/745900a115/17D45WK5AlG",
"parentPublication": {
"id": "proceedings/ismar/2018/7459/0",
"title": "2018 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/05/08645699",
"title": "Shrinking Circles: Adaptation to Increased Curvature Gain in Redirected Walking",
"doi": null,
"abstractUrl": "/journal/tg/2019/05/08645699/17PYElBjW00",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09961901",
"title": "Transferable Virtual-Physical Environmental Alignment with Redirected Walking",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09961901/1IxvZ4KZbri",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/10049511",
"title": "Redirected Walking On Omnidirectional Treadmill",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/10049511/1KYoAYFd0m4",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2023/4815/0/481500a053",
"title": "Redirected Walking Based on Historical User Walking Data",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2023/481500a053/1MNgUnNG7Ju",
"parentPublication": {
"id": "proceedings/vr/2023/4815/0",
"title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2021/0158/0/015800a498",
"title": "Redirected Walking using Noisy Galvanic Vestibular Stimulation",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2021/015800a498/1yeCU92Xt5K",
"parentPublication": {
"id": "proceedings/ismar/2021/0158/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2021/0158/0/015800a184",
"title": "A Reinforcement Learning Approach to Redirected Walking with Passive Haptic Feedback",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2021/015800a184/1yeCXhKVTXy",
"parentPublication": {
"id": "proceedings/ismar/2021/0158/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1jIxhEnA8IE",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"acronym": "vrw",
"groupId": "1836626",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1jIxqcIwi64",
"doi": "10.1109/VRW50115.2020.00032",
"title": "The Influence of Full-Body Representation on Translation and Curvature Gain",
"normalizedTitle": "The Influence of Full-Body Representation on Translation and Curvature Gain",
"abstract": "Redirected Walking (RDW) techniques allow users to navigate immersive virtual environments much larger than the available tracking space by natural walking. Whereas several approaches exist, numerous RDW techniques operate by applying gains of different types to the user’s viewport. These gains must remain undetected by the user in order for a RDW technique to support plausible navigation within a virtual environment. The present paper explores the relationship between detection thresholds of redirection gains and the presence of a self-avatar within the virtual environment. In four psychophysical experiments we estimated the thresholds of curvature and translation gain with and without a virtual body. The goal was to evaluate whether a full-body representation has an impact on the detection thresholds of these gains. The results indicate that although the presence of a virtual body does not significantly affect the detectability of these gains, it supports users with the illusion of easier detection. We discuss the possibility of a future combination of full-body representations and redirected walking and if these findings influence the implementation of large virtual environments with immersive virtual body representation.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Redirected Walking (RDW) techniques allow users to navigate immersive virtual environments much larger than the available tracking space by natural walking. Whereas several approaches exist, numerous RDW techniques operate by applying gains of different types to the user’s viewport. These gains must remain undetected by the user in order for a RDW technique to support plausible navigation within a virtual environment. The present paper explores the relationship between detection thresholds of redirection gains and the presence of a self-avatar within the virtual environment. In four psychophysical experiments we estimated the thresholds of curvature and translation gain with and without a virtual body. The goal was to evaluate whether a full-body representation has an impact on the detection thresholds of these gains. The results indicate that although the presence of a virtual body does not significantly affect the detectability of these gains, it supports users with the illusion of easier detection. We discuss the possibility of a future combination of full-body representations and redirected walking and if these findings influence the implementation of large virtual environments with immersive virtual body representation.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Redirected Walking (RDW) techniques allow users to navigate immersive virtual environments much larger than the available tracking space by natural walking. Whereas several approaches exist, numerous RDW techniques operate by applying gains of different types to the user’s viewport. These gains must remain undetected by the user in order for a RDW technique to support plausible navigation within a virtual environment. The present paper explores the relationship between detection thresholds of redirection gains and the presence of a self-avatar within the virtual environment. In four psychophysical experiments we estimated the thresholds of curvature and translation gain with and without a virtual body. The goal was to evaluate whether a full-body representation has an impact on the detection thresholds of these gains. The results indicate that although the presence of a virtual body does not significantly affect the detectability of these gains, it supports users with the illusion of easier detection. We discuss the possibility of a future combination of full-body representations and redirected walking and if these findings influence the implementation of large virtual environments with immersive virtual body representation.",
"fno": "09090671",
"keywords": [
"Legged Locomotion",
"Avatars",
"Task Analysis",
"Tracking",
"Virtual Environments",
"Visualization",
"Foot",
"Redirected Walking",
"Body Representation",
"Curvature Gain",
"Translation Gain"
],
"authors": [
{
"affiliation": "Vienna University of Technology",
"fullName": "Dennis Reimer",
"givenName": "Dennis",
"surname": "Reimer",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Hamburg",
"fullName": "Eike Langbehn",
"givenName": "Eike",
"surname": "Langbehn",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Vienna University of Technology",
"fullName": "Hannes Kaufmann",
"givenName": "Hannes",
"surname": "Kaufmann",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Ravensburg-Weingarten University",
"fullName": "Daniel Scherzer",
"givenName": "Daniel",
"surname": "Scherzer",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vrw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-03-01T00:00:00",
"pubType": "proceedings",
"pages": "154-159",
"year": "2020",
"issn": null,
"isbn": "978-1-7281-6532-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "09090505",
"articleId": "1jIxoIQrIVq",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09090583",
"articleId": "1jIxAIXTd3q",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2017/6647/0/07892279",
"title": "Curvature gains in redirected walking: A closer look",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892279/12OmNBEGYJE",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446479",
"title": "Adopting the Roll Manipulation for Redirected Walking",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446479/13bd1eSlys4",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446216",
"title": "I Can See on My Feet While Walking: Sensitivity to Translation Gains with Visible Feet",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446216/13bd1gJ1v0k",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446225",
"title": "Effect of Environment Size on Curvature Redirected Walking Thresholds",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446225/13bd1sx4Zt8",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2018/7459/0/745900a115",
"title": "Rethinking Redirected Walking: On the Use of Curvature Gains Beyond Perceptual Limitations and Revisiting Bending Gains",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2018/745900a115/17D45WK5AlG",
"parentPublication": {
"id": "proceedings/ismar/2018/7459/0",
"title": "2018 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/05/08645699",
"title": "Shrinking Circles: Adaptation to Increased Curvature Gain in Redirected Walking",
"doi": null,
"abstractUrl": "/journal/tg/2019/05/08645699/17PYElBjW00",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a708",
"title": "Relationship Between the Sensory Processing Patterns and the Detection Threshold of Curvature Gain",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a708/1CJdg0YyFSo",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09893374",
"title": "A Segmented Redirection Mapping Method for Roadmaps of Large Constrained Virtual Environments",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09893374/1GGLIh8KmSA",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798345",
"title": "Investigation of Visual Self-Representation for a Walking-in-Place Navigation System in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798345/1cJ1hpkUgHS",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090453",
"title": "Perception of Walking Self-body Avatar Enhances Virtual-walking Sensation",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090453/1jIxoojmMy4",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNyPQ4vC",
"title": "Virtual Reality Conference, IEEE",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2000",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNAY79hZ",
"doi": "10.1109/VR.2000.840486",
"title": "Enhancing Fish Tank VR",
"normalizedTitle": "Enhancing Fish Tank VR",
"abstract": "Fish tank VR systems provide head coupled perspective projected stereo images on a display device of limited dimensions that resides at a fixed location. Therefore, fish tank VR systems provide only a limited virtual workspace. As a result, such systems are less suited for displaying virtual worlds that extend beyond the available workspace and depth perception problems arise when displaying objects (virtually) located on the edge of the workspace in between the viewer and the display screen.In this paper we present two techniques to reduce this disadvantage: cadre viewing and amplified head rotations. The first aims to eliminate the problems in depth perception for objects with negative parallax touching the screen surround. Subjective observations from an informal user study indicate a reduction of confusion in depth perception. The second provides a transparent navigation technique to allow users to view larger portions of the virtual world without the need for an additional input device to navigate. A user study shows it performs equally well when compared to a technique based on the use of an additional spatial input device.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Fish tank VR systems provide head coupled perspective projected stereo images on a display device of limited dimensions that resides at a fixed location. Therefore, fish tank VR systems provide only a limited virtual workspace. As a result, such systems are less suited for displaying virtual worlds that extend beyond the available workspace and depth perception problems arise when displaying objects (virtually) located on the edge of the workspace in between the viewer and the display screen.In this paper we present two techniques to reduce this disadvantage: cadre viewing and amplified head rotations. The first aims to eliminate the problems in depth perception for objects with negative parallax touching the screen surround. Subjective observations from an informal user study indicate a reduction of confusion in depth perception. The second provides a transparent navigation technique to allow users to view larger portions of the virtual world without the need for an additional input device to navigate. A user study shows it performs equally well when compared to a technique based on the use of an additional spatial input device.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Fish tank VR systems provide head coupled perspective projected stereo images on a display device of limited dimensions that resides at a fixed location. Therefore, fish tank VR systems provide only a limited virtual workspace. As a result, such systems are less suited for displaying virtual worlds that extend beyond the available workspace and depth perception problems arise when displaying objects (virtually) located on the edge of the workspace in between the viewer and the display screen.In this paper we present two techniques to reduce this disadvantage: cadre viewing and amplified head rotations. The first aims to eliminate the problems in depth perception for objects with negative parallax touching the screen surround. Subjective observations from an informal user study indicate a reduction of confusion in depth perception. The second provides a transparent navigation technique to allow users to view larger portions of the virtual world without the need for an additional input device to navigate. A user study shows it performs equally well when compared to a technique based on the use of an additional spatial input device.",
"fno": "04780091",
"keywords": [
"Fish Tank VR",
"Stereo Computer Graphics",
"Depth Perception",
"Navigation"
],
"authors": [
{
"affiliation": "CWI",
"fullName": "Jurriaan D. Mulder",
"givenName": "Jurriaan D.",
"surname": "Mulder",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "CWI",
"fullName": "Robert Van Liere",
"givenName": "Robert",
"surname": "Van Liere",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": false,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2000-03-01T00:00:00",
"pubType": "proceedings",
"pages": "91",
"year": "2000",
"issn": "1087-8270",
"isbn": "0-7695-0478-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "04780083",
"articleId": "12OmNzVoBut",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "04780099",
"articleId": "12OmNAlvI9D",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNxV4itF",
"title": "2017 IEEE Virtual Reality (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2017",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNC2OSOD",
"doi": "10.1109/VR.2017.7892376",
"title": "3DPS: An auto-calibrated three-dimensional perspective-corrected spherical display",
"normalizedTitle": "3DPS: An auto-calibrated three-dimensional perspective-corrected spherical display",
"abstract": "We describe an auto-calibrated 3D perspective-corrected spherical display that uses multiple rear projected pico-projectors. The display system is auto-calibrated via 3D reconstruction of each projected pixel on the display using a single inexpensive camera. With the automatic calibration, the multiple-projector system supports a seamless blended imagery on the spherical screen. Furthermore, we incorporate head tracking with the display to present 3D content with motion parallax by rendering perspective-corrected images based on the viewpoint. To show the effectiveness of this design, we implemented a view-dependent application that allows walk-around visualization from all angles for a single head-tracked user. We also implemented a view-independent application that supports a wall-papered rendering for multi-user viewing. Thus, both view-dependent 3D VR content and spherical 2D content, such as a globe, can be easily experienced with this display.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We describe an auto-calibrated 3D perspective-corrected spherical display that uses multiple rear projected pico-projectors. The display system is auto-calibrated via 3D reconstruction of each projected pixel on the display using a single inexpensive camera. With the automatic calibration, the multiple-projector system supports a seamless blended imagery on the spherical screen. Furthermore, we incorporate head tracking with the display to present 3D content with motion parallax by rendering perspective-corrected images based on the viewpoint. To show the effectiveness of this design, we implemented a view-dependent application that allows walk-around visualization from all angles for a single head-tracked user. We also implemented a view-independent application that supports a wall-papered rendering for multi-user viewing. Thus, both view-dependent 3D VR content and spherical 2D content, such as a globe, can be easily experienced with this display.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We describe an auto-calibrated 3D perspective-corrected spherical display that uses multiple rear projected pico-projectors. The display system is auto-calibrated via 3D reconstruction of each projected pixel on the display using a single inexpensive camera. With the automatic calibration, the multiple-projector system supports a seamless blended imagery on the spherical screen. Furthermore, we incorporate head tracking with the display to present 3D content with motion parallax by rendering perspective-corrected images based on the viewpoint. To show the effectiveness of this design, we implemented a view-dependent application that allows walk-around visualization from all angles for a single head-tracked user. We also implemented a view-independent application that supports a wall-papered rendering for multi-user viewing. Thus, both view-dependent 3D VR content and spherical 2D content, such as a globe, can be easily experienced with this display.",
"fno": "07892376",
"keywords": [
"Three Dimensional Displays",
"Calibration",
"Visualization",
"Cameras",
"Rendering Computer Graphics",
"Fish",
"Virtual Reality",
"H 5 1 Multimedia Information Systems Artificial Augmented And Virtual Realities"
],
"authors": [
{
"affiliation": "Dept. of Electrical and Computer Engineering, University of British Columbia, BC, Canada",
"fullName": "Qian Zhou",
"givenName": "Qian",
"surname": "Zhou",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Dept. of Electrical and Computer Engineering, University of British Columbia, BC, Canada",
"fullName": "Kai Wu",
"givenName": "Kai",
"surname": "Wu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Dept. of Electrical and Computer Engineering, University of British Columbia, BC, Canada",
"fullName": "Gregor Miller",
"givenName": "Gregor",
"surname": "Miller",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Dept. of Computer Science, University of Saskatchewan, SK, Canada",
"fullName": "Ian Stavness",
"givenName": "Ian",
"surname": "Stavness",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Dept. of Electrical and Computer Engineering, University of British Columbia, BC, Canada",
"fullName": "Sidney Fels",
"givenName": "Sidney",
"surname": "Fels",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2017-01-01T00:00:00",
"pubType": "proceedings",
"pages": "455-456",
"year": "2017",
"issn": "2375-5334",
"isbn": "978-1-5090-6647-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07892375",
"articleId": "12OmNy5hRo2",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07892377",
"articleId": "12OmNznkJU6",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/wacv/2017/4822/0/07926707",
"title": "Automatic Calibration of a Multiple-Projector Spherical Fish Tank VR Display",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2017/07926707/12OmNAoDhTe",
"parentPublication": {
"id": "proceedings/wacv/2017/4822/0",
"title": "2017 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvrv/2015/7673/0/7673a318",
"title": "Real-Time 3D Video Acquisition and Auto-Stereoscopic Display End-to-End Algorithm Based on Tiled Multi-projectors",
"doi": null,
"abstractUrl": "/proceedings-article/icvrv/2015/7673a318/12OmNBO3Kkf",
"parentPublication": {
"id": "proceedings/icvrv/2015/7673/0",
"title": "2015 International Conference on Virtual Reality and Visualization (ICVRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2015/1727/0/07223343",
"title": "Portable-Spheree: A portable 3D perspective-corrected interactive spherical scalable display",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2015/07223343/12OmNC8dgaB",
"parentPublication": {
"id": "proceedings/vr/2015/1727/0",
"title": "2015 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-vis/2005/2766/0/01532846",
"title": "A handheld flexible display system",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/2005/01532846/12OmNCbkQBW",
"parentPublication": {
"id": "proceedings/ieee-vis/2005/2766/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2014/5209/0/5209b675",
"title": "Sensor-Display Registration for 3D Physical User Interaction Using a Flat-Panel Display",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2014/5209b675/12OmNy3iFk7",
"parentPublication": {
"id": "proceedings/icpr/2014/5209/0",
"title": "2014 22nd International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2020/01/08478369",
"title": "Semi-Calibrated Photometric Stereo",
"doi": null,
"abstractUrl": "/journal/tp/2020/01/08478369/141AnpAbeCh",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/05/08642347",
"title": "An Evaluation of Depth and Size Perception on a Spherical Fish Tank Virtual Reality Display",
"doi": null,
"abstractUrl": "/journal/tg/2019/05/08642347/17PYEjbrJk7",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2018/7592/0/08699170",
"title": "A Low-Latency, High-Precision Handheld Perspective Corrected Display",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699170/19F1O4Ukqhq",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2018/7592/0",
"title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798063",
"title": "I Got Your Point: An Investigation of Pointing Cues in a Spherical Fish Tank Virtual Reality Display",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798063/1cJ1boUCpNK",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvidl/2020/9481/0/948100a064",
"title": "Considering Spherical Refraction in Visual Ocean Gas Release Quantification",
"doi": null,
"abstractUrl": "/proceedings-article/cvidl/2020/948100a064/1pbe5LaFrbO",
"parentPublication": {
"id": "proceedings/cvidl/2020/9481/0",
"title": "2020 International Conference on Computer Vision, Image and Deep Learning (CVIDL)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNvAiSpZ",
"title": "2015 IEEE Virtual Reality (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2015",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNC8dgaB",
"doi": "10.1109/VR.2015.7223343",
"title": "Portable-Spheree: A portable 3D perspective-corrected interactive spherical scalable display",
"normalizedTitle": "Portable-Spheree: A portable 3D perspective-corrected interactive spherical scalable display",
"abstract": "In this poster we present Portable-Spheree, an interactive spherical rear-projected 3D-content-display that provides perspective-corrected views according to the user's head position, to provide parallax, shading and occlusion depth cues. Portable-Spheree is an evolution of the Spheree and it is developed in a smaller form factor, using more projectors and a dark-translucent screen with increased contrast. We present some preliminary results of this new configuration as well as applications with spatial interaction that might benefit from this new form factor.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this poster we present Portable-Spheree, an interactive spherical rear-projected 3D-content-display that provides perspective-corrected views according to the user's head position, to provide parallax, shading and occlusion depth cues. Portable-Spheree is an evolution of the Spheree and it is developed in a smaller form factor, using more projectors and a dark-translucent screen with increased contrast. We present some preliminary results of this new configuration as well as applications with spatial interaction that might benefit from this new form factor.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this poster we present Portable-Spheree, an interactive spherical rear-projected 3D-content-display that provides perspective-corrected views according to the user's head position, to provide parallax, shading and occlusion depth cues. Portable-Spheree is an evolution of the Spheree and it is developed in a smaller form factor, using more projectors and a dark-translucent screen with increased contrast. We present some preliminary results of this new configuration as well as applications with spatial interaction that might benefit from this new form factor.",
"fno": "07223343",
"keywords": [
"Three Dimensional Displays",
"Cameras",
"Solid Modeling",
"Rendering Computer Graphics",
"Calibration",
"Head",
"Mirrors"
],
"authors": [
{
"affiliation": "U of São Paulo",
"fullName": "M. Cabral",
"givenName": "M.",
"surname": "Cabral",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Fed. U of ABC",
"fullName": "F. Ferreira",
"givenName": "F.",
"surname": "Ferreira",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "U of São Paulo",
"fullName": "O. Belloc",
"givenName": "O.",
"surname": "Belloc",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Fed. U of ABC",
"fullName": "G. Miller",
"givenName": "G.",
"surname": "Miller",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Fed. U of ABC",
"fullName": "C. Kurashima",
"givenName": "C.",
"surname": "Kurashima",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "U of São Paulo",
"fullName": "R. Lopes",
"givenName": "R.",
"surname": "Lopes",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "U of Saskatchewan",
"fullName": "I. Stavness",
"givenName": "I.",
"surname": "Stavness",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Fed. U of São Carlos",
"fullName": "J. Anacleto",
"givenName": "J.",
"surname": "Anacleto",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "U of British Columbia",
"fullName": "S. Fels",
"givenName": "S.",
"surname": "Fels",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "U of São Paulo",
"fullName": "M. Zuffo",
"givenName": "M.",
"surname": "Zuffo",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2015-03-01T00:00:00",
"pubType": "proceedings",
"pages": "157-158",
"year": "2015",
"issn": null,
"isbn": "978-1-4799-1727-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07223342",
"articleId": "12OmNB8TU5g",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07223344",
"articleId": "12OmNy2agQe",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/haptics/2006/0226/0/02260048",
"title": "Portable Haptic Display for Large Immersive Virtual Environments",
"doi": null,
"abstractUrl": "/proceedings-article/haptics/2006/02260048/12OmNA0dMUo",
"parentPublication": {
"id": "proceedings/haptics/2006/0226/0",
"title": "2006 14th Symposium on Haptic Interfaces for Virtual Environment and Teleoperator Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icig/2004/2244/0/01410480",
"title": "A survey of multi-projector tiled display wall construction",
"doi": null,
"abstractUrl": "/proceedings-article/icig/2004/01410480/12OmNAWH9up",
"parentPublication": {
"id": "proceedings/icig/2004/2244/0",
"title": "Proceedings. Third International Conference on Image and Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2017/6647/0/07892376",
"title": "3DPS: An auto-calibrated three-dimensional perspective-corrected spherical display",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892376/12OmNC2OSOD",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvmp/2009/3893/0/3893a118",
"title": "A Portable Projector Extended for Object-Centered Real-Time Interactions",
"doi": null,
"abstractUrl": "/proceedings-article/cvmp/2009/3893a118/12OmNC8MsyT",
"parentPublication": {
"id": "proceedings/cvmp/2009/3893/0",
"title": "2009 Conference for Visual Media Production",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-vis/2005/2766/0/01532846",
"title": "A handheld flexible display system",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/2005/01532846/12OmNCbkQBW",
"parentPublication": {
"id": "proceedings/ieee-vis/2005/2766/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-vis/2005/2766/0/27660075",
"title": "A Handheld Flexible Display System",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/2005/27660075/12OmNqI04RI",
"parentPublication": {
"id": "proceedings/ieee-vis/2005/2766/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2010/6846/0/05444711",
"title": "Comparison of multimodal interactions in perspective-corrected multi-display environment",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2010/05444711/12OmNrIrPqL",
"parentPublication": {
"id": "proceedings/3dui/2010/6846/0",
"title": "2010 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2015/1727/0/07223326",
"title": "Touch sensing on non-parametric rear-projection surfaces: A physical-virtual head for hands-on healthcare training",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2015/07223326/12OmNvT2oVf",
"parentPublication": {
"id": "proceedings/vr/2015/1727/0",
"title": "2015 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2008/01/ttg2008010084",
"title": "Toward the Light Field Display: Autostereoscopic Rendering via a Cluster of Projectors",
"doi": null,
"abstractUrl": "/journal/tg/2008/01/ttg2008010084/13rRUwI5TXu",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2018/7592/0/08699170",
"title": "A Low-Latency, High-Precision Handheld Perspective Corrected Display",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699170/19F1O4Ukqhq",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2018/7592/0",
"title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNs0kyru",
"title": "2007 IEEE Virtual Reality Conference",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2007",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNvkplbf",
"doi": "10.1109/VR.2007.352460",
"title": "Interscopic User Interface Concepts for Fish Tank Virtual Reality Systems",
"normalizedTitle": "Interscopic User Interface Concepts for Fish Tank Virtual Reality Systems",
"abstract": "In this paper we introduce new user interface concepts for fish tank virtual reality (VR) systems based on autostereoscopic (AS) display technologies. Such AS displays allow to view stereoscopic content without requiring special glasses. Unfortunately, until now simultaneous monoscopic and stereoscopic display was not possible. Hence prior work on fish tank VR systems focussed either on 2D or 3D interactions. In this paper we introduce so called interscopic interaction concepts providing an improved working experience, which enable great potentials in terms of the interaction between 2D elements, which may be displayed either in monoscopic or stereoscopic, e.g., GUI items, and the 3D virtual environment usually displayed stereoscopically. We present a framework which is based on a software layer between the operating system and its graphical user interface supporting the display of both mono- as well as stereoscopic content in arbitrary regions of an autostereoscopic display. The proposed concepts open up new vistas for the interaction in environments where essential parts of the GUI are displayed monoscopically and other parts are rendered stereoscopically. We address some essential issues of such fish tank VR systems and introduce intuitive interaction concepts which we have realized",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this paper we introduce new user interface concepts for fish tank virtual reality (VR) systems based on autostereoscopic (AS) display technologies. Such AS displays allow to view stereoscopic content without requiring special glasses. Unfortunately, until now simultaneous monoscopic and stereoscopic display was not possible. Hence prior work on fish tank VR systems focussed either on 2D or 3D interactions. In this paper we introduce so called interscopic interaction concepts providing an improved working experience, which enable great potentials in terms of the interaction between 2D elements, which may be displayed either in monoscopic or stereoscopic, e.g., GUI items, and the 3D virtual environment usually displayed stereoscopically. We present a framework which is based on a software layer between the operating system and its graphical user interface supporting the display of both mono- as well as stereoscopic content in arbitrary regions of an autostereoscopic display. The proposed concepts open up new vistas for the interaction in environments where essential parts of the GUI are displayed monoscopically and other parts are rendered stereoscopically. We address some essential issues of such fish tank VR systems and introduce intuitive interaction concepts which we have realized",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this paper we introduce new user interface concepts for fish tank virtual reality (VR) systems based on autostereoscopic (AS) display technologies. Such AS displays allow to view stereoscopic content without requiring special glasses. Unfortunately, until now simultaneous monoscopic and stereoscopic display was not possible. Hence prior work on fish tank VR systems focussed either on 2D or 3D interactions. In this paper we introduce so called interscopic interaction concepts providing an improved working experience, which enable great potentials in terms of the interaction between 2D elements, which may be displayed either in monoscopic or stereoscopic, e.g., GUI items, and the 3D virtual environment usually displayed stereoscopically. We present a framework which is based on a software layer between the operating system and its graphical user interface supporting the display of both mono- as well as stereoscopic content in arbitrary regions of an autostereoscopic display. The proposed concepts open up new vistas for the interaction in environments where essential parts of the GUI are displayed monoscopically and other parts are rendered stereoscopically. We address some essential issues of such fish tank VR systems and introduce intuitive interaction concepts which we have realized",
"fno": "04161002",
"keywords": [
"Graphical User Interfaces",
"Operating Systems Computers",
"Stereo Image Processing",
"Three Dimensional Displays",
"Virtual Reality",
"Interscopic User Interface Concepts",
"Fish Tank Virtual Reality Systems",
"Autostereoscopic Display Technology",
"Monoscopic Display",
"Stereoscopic Display",
"Interscopic Interaction",
"Virtual Environment",
"Operating System",
"Graphical User Interface",
"User Interfaces",
"Marine Animals",
"Virtual Reality",
"Graphical User Interfaces",
"Computer Displays",
"Mice",
"Visualization",
"Glass",
"Virtual Environment",
"Costs",
"Fish Tank VR",
"Autostereoscopic Displays",
"Interscopic User Interfaces",
"H 5 1 Information Interfaces And Presentation Multimedia Information Systems Artificial",
"Augmented And Virtual Realities",
"H 5 2 Information Interfaces And Presentation User Interfaces Graphical User Interfaces GUI",
"Interaction Styles"
],
"authors": [
{
"affiliation": "Visualization and Computer Graphics (VisCG) Research Group, Department of Computer Science, Westälische Wilhelms-Universität Münster. e-mail: fsteini@math.uni-muenster.de",
"fullName": "Frank Stenicke",
"givenName": "Frank",
"surname": "Stenicke",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Visualization and Computer Graphics (VisCG) Research Group, Department of Computer Science, Westälische Wilhelms-Universität Münster. e-mail: ropinski@math.uni-muenster.de",
"fullName": "Timo Ropinski",
"givenName": "Timo",
"surname": "Ropinski",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Visualization and Computer Graphics (VisCG) Research Group, Department of Computer Science, Westälische Wilhelms-Universität Münster. e-mail: g_brud01@math.uni-muenster.de",
"fullName": "Gerd Bruder",
"givenName": "Gerd",
"surname": "Bruder",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Visualization and Computer Graphics (VisCG) Research Group, Department of Computer Science, Westälische Wilhelms-Universität Münster. e-mail: khh@math.uni-muenster.de",
"fullName": "Klaus Hinrichs",
"givenName": "Klaus",
"surname": "Hinrichs",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2007-03-01T00:00:00",
"pubType": "proceedings",
"pages": "27-34",
"year": "2007",
"issn": "1087-8270",
"isbn": "1-4244-0905-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "04161001",
"articleId": "12OmNxwWoq6",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "04160987",
"articleId": "12OmNxI0KxE",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2000/0478/0/04780091",
"title": "Enhancing Fish Tank VR",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2000/04780091/12OmNAY79hZ",
"parentPublication": {
"id": "proceedings/vr/2000/0478/0",
"title": "Virtual Reality Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2017/4822/0/07926707",
"title": "Automatic Calibration of a Multiple-Projector Spherical Fish Tank VR Display",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2017/07926707/12OmNAoDhTe",
"parentPublication": {
"id": "proceedings/wacv/2017/4822/0",
"title": "2017 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hcc/2001/7198/0/00995254",
"title": "How to teach a fish to swim [virtual fish tank]",
"doi": null,
"abstractUrl": "/proceedings-article/hcc/2001/00995254/12OmNC8uRyj",
"parentPublication": {
"id": "proceedings/hcc/2001/7198/0",
"title": "Proceedings of HCC 2001. IEEE Symposium on Human-Centric Computing Languages and Environments",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismarw/2016/3740/0/07836500",
"title": "A Tangible Volume for Portable 3D Interaction",
"doi": null,
"abstractUrl": "/proceedings-article/ismarw/2016/07836500/12OmNCm7BGU",
"parentPublication": {
"id": "proceedings/ismarw/2016/3740/0",
"title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cgi/2001/1007/0/10070325",
"title": "What Do You Think You're Doing? Measuring Perception in Fish Tank Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/cgi/2001/10070325/12OmNx0A7HB",
"parentPublication": {
"id": "proceedings/cgi/2001/1007/0",
"title": "Proceedings. Computer Graphics International 2001",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cgi/1997/7825/0/78250039",
"title": "Integrating Flying and Fish Tank Metaphors with Cyclopean Scale",
"doi": null,
"abstractUrl": "/proceedings-article/cgi/1997/78250039/12OmNxdVgJO",
"parentPublication": {
"id": "proceedings/cgi/1997/7825/0",
"title": "Computer Graphics International Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrais/1995/7084/0/70840094",
"title": "A vision-based head tracker for fish tank virtual reality-VR without head gear",
"doi": null,
"abstractUrl": "/proceedings-article/vrais/1995/70840094/12OmNzmclTQ",
"parentPublication": {
"id": "proceedings/vrais/1995/7084/0",
"title": "Virtual Reality Annual International Symposium",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/05/08642347",
"title": "An Evaluation of Depth and Size Perception on a Spherical Fish Tank Virtual Reality Display",
"doi": null,
"abstractUrl": "/journal/tg/2019/05/08642347/17PYEjbrJk7",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798063",
"title": "I Got Your Point: An Investigation of Pointing Cues in a Spherical Fish Tank Virtual Reality Display",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798063/1cJ1boUCpNK",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090626",
"title": "A Low-Cost Approach to Fish Tank Virtual Reality with Semi-Automatic Calibration Support",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090626/1jIxkDnMXPG",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNrNh0vw",
"title": "2014 22nd International Conference on Pattern Recognition (ICPR)",
"acronym": "icpr",
"groupId": "1000545",
"volume": "0",
"displayVolume": "0",
"year": "2014",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNy3iFk7",
"doi": "10.1109/ICPR.2014.295",
"title": "Sensor-Display Registration for 3D Physical User Interaction Using a Flat-Panel Display",
"normalizedTitle": "Sensor-Display Registration for 3D Physical User Interaction Using a Flat-Panel Display",
"abstract": "We present a method to register a camera-type sensor and a flat panel display to each other to build a 3D physical user interface. In the system, the sensor can not observe the display directly, which makes the registration non-trivial. We develop an auxiliary device containing a camera which can see the display and a plane pattern which can be seen by the sensor to make a transformation loop between sensors and the display. By using the transformation loop, the relative pose between the sensor and the display can be linearly estimated without any user interaction. Experiments show that the proposed method is accurate enough to build a 3D physical user interface using a commercial display and a low-cost sensor.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We present a method to register a camera-type sensor and a flat panel display to each other to build a 3D physical user interface. In the system, the sensor can not observe the display directly, which makes the registration non-trivial. We develop an auxiliary device containing a camera which can see the display and a plane pattern which can be seen by the sensor to make a transformation loop between sensors and the display. By using the transformation loop, the relative pose between the sensor and the display can be linearly estimated without any user interaction. Experiments show that the proposed method is accurate enough to build a 3D physical user interface using a commercial display and a low-cost sensor.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We present a method to register a camera-type sensor and a flat panel display to each other to build a 3D physical user interface. In the system, the sensor can not observe the display directly, which makes the registration non-trivial. We develop an auxiliary device containing a camera which can see the display and a plane pattern which can be seen by the sensor to make a transformation loop between sensors and the display. By using the transformation loop, the relative pose between the sensor and the display can be linearly estimated without any user interaction. Experiments show that the proposed method is accurate enough to build a 3D physical user interface using a commercial display and a low-cost sensor.",
"fno": "5209b675",
"keywords": [
"Cameras",
"Robot Sensing Systems",
"Three Dimensional Displays",
"Optimization",
"Estimation",
"Calibration",
"Rendering Computer Graphics"
],
"authors": [
{
"affiliation": null,
"fullName": "Jun-Sik Kim",
"givenName": "Jun-Sik",
"surname": "Kim",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Jung-Min Park",
"givenName": "Jung-Min",
"surname": "Park",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2014-08-01T00:00:00",
"pubType": "proceedings",
"pages": "1675-1680",
"year": "2014",
"issn": "1051-4651",
"isbn": "978-1-4799-5209-0",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "5209b669",
"articleId": "12OmNybfqZC",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "5209b681",
"articleId": "12OmNBBzoi9",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icmens/2005/2398/0/23980077",
"title": "Impact of Nanotechnology in Flat Panel Display Industry",
"doi": null,
"abstractUrl": "/proceedings-article/icmens/2005/23980077/12OmNAlNiSX",
"parentPublication": {
"id": "proceedings/icmens/2005/2398/0",
"title": "Proceedings. 2005 International Conference on MEMS, NANO and Smart Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2008/1971/0/04480768",
"title": "Automultiscopic display by revolving flat-panel displays",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2008/04480768/12OmNAolGTH",
"parentPublication": {
"id": "proceedings/vr/2008/1971/0",
"title": "IEEE Virtual Reality 2008",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2017/6647/0/07892376",
"title": "3DPS: An auto-calibrated three-dimensional perspective-corrected spherical display",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892376/12OmNC2OSOD",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icip/1998/8821/1/882110005",
"title": "Video Processing for Flat Panel Display",
"doi": null,
"abstractUrl": "/proceedings-article/icip/1998/882110005/12OmNC8dgbs",
"parentPublication": {
"id": "proceedings/icip/1998/8821/1",
"title": "Proceedings of IPCIP'98 International Conference on Image Processing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2016/0842/0/07460047",
"title": "Evaluation of user-centric optical see-through head-mounted display calibration using a leap motion controller",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2016/07460047/12OmNrJRPdz",
"parentPublication": {
"id": "proceedings/3dui/2016/0842/0",
"title": "2016 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-vis/2001/7200/0/7200yang",
"title": "PixelFlex: A Reconfigurable Multi-Projector Display System",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/2001/7200yang/12OmNvnOwwY",
"parentPublication": {
"id": "proceedings/ieee-vis/2001/7200/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2008/01/ttg2008010084",
"title": "Toward the Light Field Display: Autostereoscopic Rendering via a Cluster of Projectors",
"doi": null,
"abstractUrl": "/journal/tg/2008/01/ttg2008010084/13rRUwI5TXu",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2007/06/v1368",
"title": "Registration Techniques for Using Imperfect and Par tially Calibrated Devices in Planar Multi-Projector Displays",
"doi": null,
"abstractUrl": "/journal/tg/2007/06/v1368/13rRUwInvyp",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/1988/02/mcg1988020071",
"title": "FIDS-A Flat-Panel Interactive Display System",
"doi": null,
"abstractUrl": "/magazine/cg/1988/02/mcg1988020071/13rRUxEhFuP",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2021/07/09416801",
"title": "Designing Display Pixel Layouts for Under-Panel Cameras",
"doi": null,
"abstractUrl": "/journal/tp/2021/07/09416801/1t8VREDi7Cg",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1cI6akLvAuQ",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2019",
"__typename": "ProceedingType"
},
"article": {
"id": "1cJ16r0nRSM",
"doi": "10.1109/VR.2019.8798362",
"title": "Match the Cube: Investigation of the Head-coupled Input with a Spherical Fish Tank Virtual Reality Display",
"normalizedTitle": "Match the Cube: Investigation of the Head-coupled Input with a Spherical Fish Tank Virtual Reality Display",
"abstract": "Fish Tank Virtual Reality (FTVR) displays create a compelling 3D effect with the motion parallax cue using the head-coupled perspective. While the head-coupled viewpoint control provides natural visuomotor coupling, the motion parallax cue has been found to be underutilized with minimal head motion detected when manual input becomes available to users. We investigate whether users can effectively use head-coupling in conjunction with manual input in a mental rotation task involving inspection and comparison of a pair of 3D cubes. We found that participants managed to incorporate the head-coupled viewpoint control with the manual touch input in the task. They used the touch input as the primary input and the head as the secondary input with the input ratio of 4.2:1. The combined input approach appears to be sequential with only 8.63% duration when the head and manual input are co-activated. The result of this study provides insights for designing head-coupled interactions in many 3D interactive applications.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Fish Tank Virtual Reality (FTVR) displays create a compelling 3D effect with the motion parallax cue using the head-coupled perspective. While the head-coupled viewpoint control provides natural visuomotor coupling, the motion parallax cue has been found to be underutilized with minimal head motion detected when manual input becomes available to users. We investigate whether users can effectively use head-coupling in conjunction with manual input in a mental rotation task involving inspection and comparison of a pair of 3D cubes. We found that participants managed to incorporate the head-coupled viewpoint control with the manual touch input in the task. They used the touch input as the primary input and the head as the secondary input with the input ratio of 4.2:1. The combined input approach appears to be sequential with only 8.63% duration when the head and manual input are co-activated. The result of this study provides insights for designing head-coupled interactions in many 3D interactive applications.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Fish Tank Virtual Reality (FTVR) displays create a compelling 3D effect with the motion parallax cue using the head-coupled perspective. While the head-coupled viewpoint control provides natural visuomotor coupling, the motion parallax cue has been found to be underutilized with minimal head motion detected when manual input becomes available to users. We investigate whether users can effectively use head-coupling in conjunction with manual input in a mental rotation task involving inspection and comparison of a pair of 3D cubes. We found that participants managed to incorporate the head-coupled viewpoint control with the manual touch input in the task. They used the touch input as the primary input and the head as the secondary input with the input ratio of 4.2:1. The combined input approach appears to be sequential with only 8.63% duration when the head and manual input are co-activated. The result of this study provides insights for designing head-coupled interactions in many 3D interactive applications.",
"fno": "08798362",
"keywords": [
"Computer Displays",
"Helmet Mounted Displays",
"Human Computer Interaction",
"Image Matching",
"Shape Recognition",
"Virtual Reality",
"Head Coupled Interactions",
"Head Coupled Input",
"Spherical Fish Tank Virtual Reality Display",
"Motion Parallax Cue",
"Head Coupled Viewpoint Control",
"Natural Visuomotor Coupling",
"Minimal Head Motion",
"Manual Touch Input",
"Cube Matching",
"FTVR Displays",
"Mental Rotation Task",
"Three Dimensional Displays",
"Task Analysis",
"Manuals",
"Training",
"Head",
"Virtual Reality",
"Two Dimensional Displays",
"Human Centered Computing",
"Human Computer Interaction",
"Interaction Design",
"Empirical Studies In Interaction Design"
],
"authors": [
{
"affiliation": "University of British Columbia, BC, Canada",
"fullName": "Qian Zhou",
"givenName": "Qian",
"surname": "Zhou",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of British Columbia, BC, Canada",
"fullName": "Fan Wu",
"givenName": "Fan",
"surname": "Wu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Saskatchewan, SK, Canada",
"fullName": "Ian Stavness",
"givenName": "Ian",
"surname": "Stavness",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of British Columbia, BC, Canada",
"fullName": "Sidney Fels",
"givenName": "Sidney",
"surname": "Fels",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2019-03-01T00:00:00",
"pubType": "proceedings",
"pages": "1281-1282",
"year": "2019",
"issn": null,
"isbn": "978-1-7281-1377-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08797715",
"articleId": "1cJ1bkM12dq",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08797735",
"articleId": "1cJ0VXaBytq",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2000/0478/0/04780091",
"title": "Enhancing Fish Tank VR",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2000/04780091/12OmNAY79hZ",
"parentPublication": {
"id": "proceedings/vr/2000/0478/0",
"title": "Virtual Reality Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vl/1994/6660/0/00363621",
"title": "Viewing a graph in a virtual reality display is three times as good as a 2D diagram",
"doi": null,
"abstractUrl": "/proceedings-article/vl/1994/00363621/12OmNAkWvKo",
"parentPublication": {
"id": "proceedings/vl/1994/6660/0",
"title": "Proceedings of 1994 IEEE Symposium on Visual Languages",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2017/4822/0/07926707",
"title": "Automatic Calibration of a Multiple-Projector Spherical Fish Tank VR Display",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2017/07926707/12OmNAoDhTe",
"parentPublication": {
"id": "proceedings/wacv/2017/4822/0",
"title": "2017 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2015/1727/0/07223343",
"title": "Portable-Spheree: A portable 3D perspective-corrected interactive spherical scalable display",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2015/07223343/12OmNC8dgaB",
"parentPublication": {
"id": "proceedings/vr/2015/1727/0",
"title": "2015 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2016/7258/0/07552858",
"title": "Depth augmented stereo panorama for cinematic virtual reality with head-motion parallax",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2016/07552858/12OmNs0TKW6",
"parentPublication": {
"id": "proceedings/icme/2016/7258/0",
"title": "2016 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2011/0063/0/06130278",
"title": "MoPaCo: High telepresence video communication system using motion parallax with monocular camera",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2011/06130278/12OmNvT2oLu",
"parentPublication": {
"id": "proceedings/iccvw/2011/0063/0",
"title": "2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismarw/2016/3740/0/07836518",
"title": "AR Tabletop Interface using a Head-Mounted Projector",
"doi": null,
"abstractUrl": "/proceedings-article/ismarw/2016/07836518/12OmNyoiYW4",
"parentPublication": {
"id": "proceedings/ismarw/2016/3740/0",
"title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/05/08642347",
"title": "An Evaluation of Depth and Size Perception on a Spherical Fish Tank Virtual Reality Display",
"doi": null,
"abstractUrl": "/journal/tg/2019/05/08642347/17PYEjbrJk7",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797905",
"title": "Investigating Spherical Fish Tank Virtual Reality Displays for Establishing Realistic Eye-Contact",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797905/1cJ0PcNhP1K",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798063",
"title": "I Got Your Point: An Investigation of Pointing Cues in a Spherical Fish Tank Virtual Reality Display",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798063/1cJ1boUCpNK",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.