data
dict
{ "proceeding": { "id": "12OmNzTppA7", "title": "2017 International Symposium on Ubiquitous Virtual Reality (ISUVR)", "acronym": "isuvr", "groupId": "1002009", "volume": "0", "displayVolume": "0", "year": "2017", "__typename": "ProceedingType" }, "article": { "id": "12OmNAkWvFD", "doi": "10.1109/ISUVR.2017.13", "title": "Estimating Gaze Depth Using Multi-Layer Perceptron", "normalizedTitle": "Estimating Gaze Depth Using Multi-Layer Perceptron", "abstract": "In this paper we describe a new method for determining gaze depth in a head mounted eye-tracker. Eye-trackers are being incorporated into head mounted displays (HMDs), and eye-gaze is being used for interaction in Virtual and Augmented Reality. For some interaction methods, it is important to accurately measure the x-and y-direction of the eye-gaze and especially the focal depth information. Generally, eye tracking technology has a high accuracy in x-and y-directions, but not in depth. We used a binocular gaze tracker with two eye cameras, and the gaze vector was input to an MLP neural network for training and estimation. For the performance evaluation, data was obtained from 13 people gazing at fixed points at distances from 1m to 5m. The gaze classification into fixed distances produced an average classification error of nearly 10%, and an average error distance of 0.42m. This is sufficient for some Augmented Reality applications, but more research is needed to provide an estimate of a user's gaze moving in continuous space.", "abstracts": [ { "abstractType": "Regular", "content": "In this paper we describe a new method for determining gaze depth in a head mounted eye-tracker. Eye-trackers are being incorporated into head mounted displays (HMDs), and eye-gaze is being used for interaction in Virtual and Augmented Reality. For some interaction methods, it is important to accurately measure the x-and y-direction of the eye-gaze and especially the focal depth information. 
Generally, eye tracking technology has a high accuracy in x-and y-directions, but not in depth. We used a binocular gaze tracker with two eye cameras, and the gaze vector was input to an MLP neural network for training and estimation. For the performance evaluation, data was obtained from 13 people gazing at fixed points at distances from 1m to 5m. The gaze classification into fixed distances produced an average classification error of nearly 10%, and an average error distance of 0.42m. This is sufficient for some Augmented Reality applications, but more research is needed to provide an estimate of a user's gaze moving in continuous space.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In this paper we describe a new method for determining gaze depth in a head mounted eye-tracker. Eye-trackers are being incorporated into head mounted displays (HMDs), and eye-gaze is being used for interaction in Virtual and Augmented Reality. For some interaction methods, it is important to accurately measure the x-and y-direction of the eye-gaze and especially the focal depth information. Generally, eye tracking technology has a high accuracy in x-and y-directions, but not in depth. We used a binocular gaze tracker with two eye cameras, and the gaze vector was input to an MLP neural network for training and estimation. For the performance evaluation, data was obtained from 13 people gazing at fixed points at distances from 1m to 5m. The gaze classification into fixed distances produced an average classification error of nearly 10%, and an average error distance of 0.42m. 
This is sufficient for some Augmented Reality applications, but more research is needed to provide an estimate of a user's gaze moving in continuous space.", "fno": "3091a026", "keywords": [ "Three Dimensional Displays", "Meters", "Cameras", "Training", "Error Analysis", "Resists", "Calibration", "Eye Gaze", "3 D Gaze", "Machine Learning", "Augmented Reality", "Head Mounted Display" ], "authors": [ { "affiliation": null, "fullName": "Youngho Lee", "givenName": "Youngho", "surname": "Lee", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Choonsung Shin", "givenName": "Choonsung", "surname": "Shin", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Alexander Plopski", "givenName": "Alexander", "surname": "Plopski", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Yuta Itoh", "givenName": "Yuta", "surname": "Itoh", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Thammathip Piumsomboon", "givenName": "Thammathip", "surname": "Piumsomboon", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Arindam Dey", "givenName": "Arindam", "surname": "Dey", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Gun Lee", "givenName": "Gun", "surname": "Lee", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Seungwon Kim", "givenName": "Seungwon", "surname": "Kim", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Mark Billinghurst", "givenName": "Mark", "surname": "Billinghurst", "__typename": "ArticleAuthorType" } ], "idPrefix": "isuvr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2017-06-01T00:00:00", "pubType": "proceedings", "pages": "26-29", "year": "2017", "issn": null, "isbn": "978-1-5386-3091-4", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "3091a024", 
"articleId": "12OmNzlUKdk", "__typename": "AdjacentArticleType" }, "next": { "fno": "3091a030", "articleId": "12OmNyKJiAV", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icpr/2008/2174/0/04761019", "title": "Gaze tracking by Binocular Vision and LBP features", "doi": null, "abstractUrl": "/proceedings-article/icpr/2008/04761019/12OmNqBtiNI", "parentPublication": { "id": "proceedings/icpr/2008/2174/0", "title": "ICPR 2008 19th International Conference on Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/fg/2013/5545/0/06553735", "title": "Combining first-person and third-person gaze for attention recognition", "doi": null, "abstractUrl": "/proceedings-article/fg/2013/06553735/12OmNvEhg0x", "parentPublication": { "id": "proceedings/fg/2013/5545/0", "title": "2013 10th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (FG)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2014/4761/0/06890322", "title": "Realtime gaze estimation with online calibration", "doi": null, "abstractUrl": "/proceedings-article/icme/2014/06890322/12OmNvjyxUU", "parentPublication": { "id": "proceedings/icme/2014/4761/0", "title": "2014 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2017/4822/0/07926684", "title": "A Statistical Approach to Continuous Self-Calibrating Eye Gaze Tracking for Head-Mounted Virtual Reality Systems", "doi": null, "abstractUrl": "/proceedings-article/wacv/2017/07926684/12OmNvlxJrb", "parentPublication": { "id": "proceedings/wacv/2017/4822/0", "title": "2017 IEEE Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/iccvw/2015/9711/0/5720a788", "title": "Depth Compensation Model for Gaze Estimation in Sport Analysis", "doi": null, "abstractUrl": "/proceedings-article/iccvw/2015/5720a788/12OmNz2C1or", "parentPublication": { "id": "proceedings/iccvw/2015/9711/0", "title": "2015 IEEE International Conference on Computer Vision Workshop (ICCVW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/crv/2015/1986/0/1986a176", "title": "Mobile 3D Gaze Tracking Calibration", "doi": null, "abstractUrl": "/proceedings-article/crv/2015/1986a176/12OmNzzxusS", "parentPublication": { "id": "proceedings/crv/2015/1986/0", "title": "2015 12th Conference on Computer and Robot Vision (CRV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/11/09874254", "title": "Gaze-Vergence-Controlled See-Through Vision in Augmented Reality", "doi": null, "abstractUrl": "/journal/tg/2022/11/09874254/1GjwOCjuXkY", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2022/5325/0/532500a082", "title": "Real-time Gaze Tracking with Head-eye Coordination for Head-mounted Displays", "doi": null, "abstractUrl": "/proceedings-article/ismar/2022/532500a082/1JrQQ8dsLKM", "parentPublication": { "id": "proceedings/ismar/2022/5325/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/01/08818661", "title": "Realtime and Accurate 3D Eye Gaze Capture with DCNN-Based Iris and Pupil Segmentation", "doi": null, "abstractUrl": "/journal/tg/2021/01/08818661/1cRBtd0YTN6", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, 
"__typename": "RecommendedArticleType" }, { "id": "trans/ta/2022/02/09028119", "title": "Are You Really Looking at Me? A Feature-Extraction Framework for Estimating Interpersonal Eye Gaze From Conventional Video", "doi": null, "abstractUrl": "/journal/ta/2022/02/09028119/1i3AKComau4", "parentPublication": { "id": "trans/ta", "title": "IEEE Transactions on Affective Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNyjLoRw", "title": "2014 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "acronym": "ismar", "groupId": "1000465", "volume": "0", "displayVolume": "0", "year": "2014", "__typename": "ProceedingType" }, "article": { "id": "12OmNB0nWd6", "doi": "10.1109/ISMAR.2014.6948442", "title": "[Poster] HMD Video see though AR with unfixed cameras vergence", "normalizedTitle": "[Poster] HMD Video see though AR with unfixed cameras vergence", "abstract": "Stereoscopic video see though AR systems permit accurate marker video based registration. To guarantee accurate registration, cameras are normally rigidly blocked while the user could require changing their vergence. We propose a solution working with lightweight hardware that, without the need for a new calibration of the cameras relative pose after each vergence adjustment, guarantees registration accuracy using pre-determined calibration data.", "abstracts": [ { "abstractType": "Regular", "content": "Stereoscopic video see though AR systems permit accurate marker video based registration. To guarantee accurate registration, cameras are normally rigidly blocked while the user could require changing their vergence. We propose a solution working with lightweight hardware that, without the need for a new calibration of the cameras relative pose after each vergence adjustment, guarantees registration accuracy using pre-determined calibration data.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Stereoscopic video see though AR systems permit accurate marker video based registration. To guarantee accurate registration, cameras are normally rigidly blocked while the user could require changing their vergence. 
We propose a solution working with lightweight hardware that, without the need for a new calibration of the cameras relative pose after each vergence adjustment, guarantees registration accuracy using pre-determined calibration data.", "fno": "06948442", "keywords": [ "Cameras", "Calibration", "Stereo Image Processing", "Fasteners", "Surgery", "Augmented Reality", "Hardware", "Vergence", "HMD", "Video See Though" ], "authors": [ { "affiliation": "EndoCAS Center, University of Pisa", "fullName": "Vincenzo Ferrari", "givenName": "Vincenzo", "surname": "Ferrari", "__typename": "ArticleAuthorType" }, { "affiliation": "EndoCAS Center, University of Pisa", "fullName": "Fabrizio Cutolo", "givenName": "Fabrizio", "surname": "Cutolo", "__typename": "ArticleAuthorType" }, { "affiliation": "EndoCAS Center, University of Pisa", "fullName": "Emanuele Maria Calabro", "givenName": "Emanuele Maria", "surname": "Calabro", "__typename": "ArticleAuthorType" }, { "affiliation": "EndoCAS Center, University of Pisa", "fullName": "Mauro Ferrari", "givenName": "Mauro", "surname": "Ferrari", "__typename": "ArticleAuthorType" } ], "idPrefix": "ismar", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2014-09-01T00:00:00", "pubType": "proceedings", "pages": "265-266", "year": "2014", "issn": null, "isbn": "978-1-4799-6184-9", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "06948441", "articleId": "12OmNvEyR95", "__typename": "AdjacentArticleType" }, "next": { "fno": "06948443", "articleId": "12OmNviHK8p", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/ismar/2014/6184/0/06948446", "title": "[Poster] Non-parametric camera-based calibration of optical see-through glasses for augmented reality applications", "doi": null, "abstractUrl": "/proceedings-article/ismar/2014/06948446/12OmNASILJd", 
"parentPublication": { "id": "proceedings/ismar/2014/6184/0", "title": "2014 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cw/2016/2303/0/2303a033", "title": "Non-parametric Camera-Based Calibration of Optical See-Through Glasses for AR Applications", "doi": null, "abstractUrl": "/proceedings-article/cw/2016/2303a033/12OmNASrawO", "parentPublication": { "id": "proceedings/cw/2016/2303/0", "title": "2016 International Conference on Cyberworlds (CW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/1993/3870/0/00378176", "title": "Relative depth from vergence micromovements", "doi": null, "abstractUrl": "/proceedings-article/iccv/1993/00378176/12OmNrkBwyx", "parentPublication": { "id": "proceedings/iccv/1993/3870/0", "title": "1993 (4th) International Conference on Computer Vision", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iwar/1999/0359/0/03590075", "title": "A Method for Calibrating See-Through Head-Mounted Displays for AR", "doi": null, "abstractUrl": "/proceedings-article/iwar/1999/03590075/12OmNxTVU20", "parentPublication": { "id": "proceedings/iwar/1999/0359/0", "title": "Augmented Reality, International Workshop on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/1990/2057/0/00139576", "title": "Active surface reconstruction by integrating focus, vergence, stereo, and camera calibration", "doi": null, "abstractUrl": "/proceedings-article/iccv/1990/00139576/12OmNym2c6Y", "parentPublication": { "id": "proceedings/iccv/1990/2057/0", "title": "Proceedings Third International Conference on Computer Vision", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2018/3365/0/08446429", "title": "Impact of Alignment Point 
Distance Distribution on SPAAM Calibration of Optical See-Through Head-Mounted Displays", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08446429/13bd1gCd7Sz", "parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/1993/10/i1007", "title": "Active Stereo: Integrating Disparity, Vergence, Focus, Aperture and Calibration for Surface Estimation", "doi": null, "abstractUrl": "/journal/tp/1993/10/i1007/13rRUxAASUc", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2022/8402/0/840200a702", "title": "The Virtual-Augmented Reality Simulator: Evaluating OST-HMD AR calibration algorithms in VR", "doi": null, "abstractUrl": "/proceedings-article/vrw/2022/840200a702/1CJe0D4B6b6", "parentPublication": { "id": "proceedings/vrw/2022/8402/0", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/11/09874254", "title": "Gaze-Vergence-Controlled See-Through Vision in Augmented Reality", "doi": null, "abstractUrl": "/journal/tg/2022/11/09874254/1GjwOCjuXkY", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2020/8508/0/850800a320", "title": "Improved vergence and accommodation via Purkinje Image tracking with multiple cameras for AR glasses", "doi": null, "abstractUrl": "/proceedings-article/ismar/2020/850800a320/1pysxaykIAo", "parentPublication": { "id": "proceedings/ismar/2020/8508/0", "title": "2020 IEEE 
International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNwseER5", "title": "1993 (4th) International Conference on Computer Vision", "acronym": "iccv", "groupId": "1000149", "volume": "0", "displayVolume": "0", "year": "1993", "__typename": "ProceedingType" }, "article": { "id": "12OmNrkBwyx", "doi": "10.1109/ICCV.1993.378176", "title": "Relative depth from vergence micromovements", "normalizedTitle": "Relative depth from vergence micromovements", "abstract": "Relative depth information can be obtained using extremely fine vergence movements called vergence micromovements about the fixation point with almost no computation and without knowledge of camera parameters. The vergence micromovements approach uses a continuous vergence angle control with simultaneous computation of the local correspondence response of elements with the same relative position in the left and right images. After a complete micromovement cycle a dense relative depth map of the object on the field of view is computed. The relative depth information is stable with respect to the angle of gaze for an initial fixation point slightly far from the midpoint of the interocular line. Experimental results from physiology and psychophysics suggest that the approach is biologically plausible.<>", "abstracts": [ { "abstractType": "Regular", "content": "Relative depth information can be obtained using extremely fine vergence movements called vergence micromovements about the fixation point with almost no computation and without knowledge of camera parameters. The vergence micromovements approach uses a continuous vergence angle control with simultaneous computation of the local correspondence response of elements with the same relative position in the left and right images. After a complete micromovement cycle a dense relative depth map of the object on the field of view is computed. 
The relative depth information is stable with respect to the angle of gaze for an initial fixation point slightly far from the midpoint of the interocular line. Experimental results from physiology and psychophysics suggest that the approach is biologically plausible.<>", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Relative depth information can be obtained using extremely fine vergence movements called vergence micromovements about the fixation point with almost no computation and without knowledge of camera parameters. The vergence micromovements approach uses a continuous vergence angle control with simultaneous computation of the local correspondence response of elements with the same relative position in the left and right images. After a complete micromovement cycle a dense relative depth map of the object on the field of view is computed. The relative depth information is stable with respect to the angle of gaze for an initial fixation point slightly far from the midpoint of the interocular line. Experimental results from physiology and psychophysics suggest that the approach is biologically plausible.", "fno": "00378176", "keywords": [ "Computer Vision", "Motion Estimation", "Relative Depth Information", "Vergence Micromovements", "Fixation Point", "Camera Parameters", "Continuous Vergence Angle Control", "Simultaneous Computation", "Local Correspondence Response", "Dense Relative Depth Map", "Physiology", "Cameras", "Computer Vision", "Biology Computing", "Stereo Vision", "Optical Sensors", "Laboratories", "Psychology", "Calibration", "Equations", "Joining Processes" ], "authors": [ { "affiliation": "Comput. Vision & Active Perception Lab., R. Inst. of Technol., Stockholm, Sweden", "fullName": "A. 
Francisco", "givenName": "A.", "surname": "Francisco", "__typename": "ArticleAuthorType" } ], "idPrefix": "iccv", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "1993-01-01T00:00:00", "pubType": "proceedings", "pages": "481,482,483,484,485,486", "year": "1993", "issn": null, "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "00378175", "articleId": "12OmNBpVQ4k", "__typename": "AdjacentArticleType" }, "next": { "fno": "00378177", "articleId": "12OmNzt0Is1", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/crv/2007/2786/0/27860363", "title": "Version and vergence control of a stereo camera head by fitting the movement into the Hering?s law", "doi": null, "abstractUrl": "/proceedings-article/crv/2007/27860363/12OmNAGNCfy", "parentPublication": { "id": "proceedings/crv/2007/2786/0", "title": "Fourth Canadian Conference on Computer and Robot Vision (CRV '07)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2014/6184/0/06948442", "title": "[Poster] HMD Video see though AR with unfixed cameras vergence", "doi": null, "abstractUrl": "/proceedings-article/ismar/2014/06948442/12OmNB0nWd6", "parentPublication": { "id": "proceedings/ismar/2014/6184/0", "title": "2014 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/1993/3870/0/00378189", "title": "Robust vergence with concurrent detection of occlusion and specular highlights", "doi": null, "abstractUrl": "/proceedings-article/iccv/1993/00378189/12OmNBSjIVt", "parentPublication": { "id": "proceedings/iccv/1993/3870/0", "title": "1993 (4th) International Conference on Computer Vision", "__typename": "ParentPublication" }, 
"__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/1993/3870/0/00378186", "title": "Dynamic fixation [active vision]", "doi": null, "abstractUrl": "/proceedings-article/iccv/1993/00378186/12OmNBTawxC", "parentPublication": { "id": "proceedings/iccv/1993/3870/0", "title": "1993 (4th) International Conference on Computer Vision", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iai/1994/6250/0/00336672", "title": "Vergence control using a hierarchical image structure", "doi": null, "abstractUrl": "/proceedings-article/iai/1994/00336672/12OmNqBtj2A", "parentPublication": { "id": "proceedings/iai/1994/6250/0", "title": "Proceedings of the IEEE Southwest Symposium on Image Analysis and Interpretation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/1990/2057/0/00139576", "title": "Active surface reconstruction by integrating focus, vergence, stereo, and camera calibration", "doi": null, "abstractUrl": "/proceedings-article/iccv/1990/00139576/12OmNym2c6Y", "parentPublication": { "id": "proceedings/iccv/1990/2057/0", "title": "Proceedings Third International Conference on Computer Vision", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/1995/12/i1213", "title": "Performance Analysis of Stereo, Vergence, and Focus as Depth Cues for Active Vision", "doi": null, "abstractUrl": "/journal/tp/1995/12/i1213/13rRUwcS1DZ", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/1993/10/i1007", "title": "Active Stereo: Integrating Disparity, Vergence, Focus, Aperture and Calibration for Surface Estimation", "doi": null, "abstractUrl": "/journal/tp/1993/10/i1007/13rRUxAASUc", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern 
Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2022/8402/0/840200a548", "title": "Control with Vergence Eye Movement in Augmented Reality See-Through Vision", "doi": null, "abstractUrl": "/proceedings-article/vrw/2022/840200a548/1CJenT7ps08", "parentPublication": { "id": "proceedings/vrw/2022/8402/0", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/11/09874254", "title": "Gaze-Vergence-Controlled See-Through Vision in Augmented Reality", "doi": null, "abstractUrl": "/journal/tg/2022/11/09874254/1GjwOCjuXkY", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNyaXPPU", "title": "2015 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)", "acronym": "icmew", "groupId": "1801805", "volume": "0", "displayVolume": "0", "year": "2015", "__typename": "ProceedingType" }, "article": { "id": "12OmNzkMlMY", "doi": "10.1109/ICMEW.2015.7169860", "title": "An Augmented-Reality night vision enhancement application for see-through glasses", "normalizedTitle": "An Augmented-Reality night vision enhancement application for see-through glasses", "abstract": "Nyctalopia, millions of people all over the world suffering from which, brings much trouble to the patients. The available night vision systems are poor in user experience but cost much. In this paper we design and implement an Augmented-Reality night vision enhancement application for see-through glasses. According to our model proposed for the night blindness, fast and efficient algorithms are used for both night vision enhancement and display calibration. The processed images are much brighter and aligned to the real world. The results in different daily life scenes are presented, indicating huge convenience the application will bring to the nyctalopia patients.", "abstracts": [ { "abstractType": "Regular", "content": "Nyctalopia, millions of people all over the world suffering from which, brings much trouble to the patients. The available night vision systems are poor in user experience but cost much. In this paper we design and implement an Augmented-Reality night vision enhancement application for see-through glasses. According to our model proposed for the night blindness, fast and efficient algorithms are used for both night vision enhancement and display calibration. The processed images are much brighter and aligned to the real world. 
The results in different daily life scenes are presented, indicating huge convenience the application will bring to the nyctalopia patients.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Nyctalopia, millions of people all over the world suffering from which, brings much trouble to the patients. The available night vision systems are poor in user experience but cost much. In this paper we design and implement an Augmented-Reality night vision enhancement application for see-through glasses. According to our model proposed for the night blindness, fast and efficient algorithms are used for both night vision enhancement and display calibration. The processed images are much brighter and aligned to the real world. The results in different daily life scenes are presented, indicating huge convenience the application will bring to the nyctalopia patients.", "fno": "07169860", "keywords": [ "Night Vision", "Cameras", "Glass", "Calibration", "Distortion", "Blindness", "Augmented Reality", "Night Vision Enhancement", "Augmented Reality", "Nyctalopia", "Seethrough Glasses" ], "authors": [ { "affiliation": "Institute of Image Communication and Information Processing, Shanghai Jiao Tong University, China", "fullName": "Chunjia Hu", "givenName": null, "surname": "Chunjia Hu", "__typename": "ArticleAuthorType" }, { "affiliation": "Institute of Image Communication and Information Processing, Shanghai Jiao Tong University, China", "fullName": "Guangtao Zhai", "givenName": null, "surname": "Guangtao Zhai", "__typename": "ArticleAuthorType" }, { "affiliation": "Institute of Image Communication and Information Processing, Shanghai Jiao Tong University, China", "fullName": "Duo Li", "givenName": null, "surname": "Duo Li", "__typename": "ArticleAuthorType" } ], "idPrefix": "icmew", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2015-06-01T00:00:00", "pubType": "proceedings", "pages": "1-6", "year": "2015", 
"issn": null, "isbn": "978-1-4799-7079-7", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "07169859", "articleId": "12OmNxWui9c", "__typename": "AdjacentArticleType" }, "next": { "fno": "07169861", "articleId": "12OmNx7ov4N", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/vspets/2005/9424/0/01570912", "title": "Illumination and motion-based video enhancement for night surveillance", "doi": null, "abstractUrl": "/proceedings-article/vspets/2005/01570912/12OmNA0vnSz", "parentPublication": { "id": "proceedings/vspets/2005/9424/0", "title": "Proceedings. 2nd Joint IEEE International Workshop on Visual Surveillance and Performance Evaluation of Tracking and Surveillance (VS-PETS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2010/7029/0/05543571", "title": "Color contrast enhancement for visually impaired people", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2010/05543571/12OmNBf94Z6", "parentPublication": { "id": "proceedings/cvprw/2010/7029/0", "title": "2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition - Workshops", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ncvpripg/2011/4599/0/4599a196", "title": "Efficient Color Transfer Method Based on Colormap Clustering for Night Vision Applications", "doi": null, "abstractUrl": "/proceedings-article/ncvpripg/2011/4599a196/12OmNxG1yHa", "parentPublication": { "id": "proceedings/ncvpripg/2011/4599/0", "title": "Computer Vision, Pattern Recognition, Image Processing and Graphics, National Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmip/2017/5954/0/5954a151", "title": "Night Image Enhancement Using Selective Filters", "doi": null, "abstractUrl": 
"/proceedings-article/icmip/2017/5954a151/12OmNyY4rte", "parentPublication": { "id": "proceedings/icmip/2017/5954/0", "title": "2017 2nd International Conference on Multimedia and Image Processing (ICMIP)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdma/2013/5016/0/5016a823", "title": "Night Vision Image Contrast Enhancement Base on Adaptive Dynamic Histogram", "doi": null, "abstractUrl": "/proceedings-article/icdma/2013/5016a823/12OmNyrIaAa", "parentPublication": { "id": "proceedings/icdma/2013/5016/0", "title": "2013 Fourth International Conference on Digital Manufacturing & Automation (ICDMA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/avss/2013/0703/0/06636666", "title": "Study on algorithm for panoramic image basing on high sensitivity and high resolution panoramic surveillance camera", "doi": null, "abstractUrl": "/proceedings-article/avss/2013/06636666/12OmNzl3WUA", "parentPublication": { "id": "proceedings/avss/2013/0703/0", "title": "2013 10th IEEE International Conference on Advanced Video and Signal Based Surveillance (AVSS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cbms/1997/7928/0/79280028", "title": "Studies on vision enhancement with optoelectronical devices", "doi": null, "abstractUrl": "/proceedings-article/cbms/1997/79280028/12OmNzmtWvS", "parentPublication": { "id": "proceedings/cbms/1997/7928/0", "title": "Proceedings of the 26th IEEE International Symposium on Computer-Based Medical Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2012/01/ttp2012010094", "title": "Objective Assessment of Multiresolution Image Fusion Algorithms for Context Enhancement in Night Vision: A Comparative Study", "doi": null, "abstractUrl": "/journal/tp/2012/01/ttp2012010094/13rRUILLkwt", "parentPublication": { "id": "trans/tp", "title": 
"IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2018/6100/0/610000b234", "title": "IR2VI: Enhanced Night Environmental Perception by Unsupervised Thermal Image Translation", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2018/610000b234/17D45WwsQ4T", "parentPublication": { "id": "proceedings/cvprw/2018/6100/0", "title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cyberc/2022/3154/0/315400a226", "title": "Research on Multiple Targets Pedestrian Reidentification with Night Scene Image Enhancement", "doi": null, "abstractUrl": "/proceedings-article/cyberc/2022/315400a226/1M66nndgCtO", "parentPublication": { "id": "proceedings/cyberc/2022/3154/0", "title": "2022 International Conference on Cyber-Enabled Distributed Computing and Knowledge Discovery (CyberC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1CJcAaH6aYg", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "acronym": "vrw", "groupId": "1836626", "volume": "0", "displayVolume": "0", "year": "2022", "__typename": "ProceedingType" }, "article": { "id": "1CJenT7ps08", "doi": "10.1109/VRW55335.2022.00125", "title": "Control with Vergence Eye Movement in Augmented Reality See-Through Vision", "normalizedTitle": "Control with Vergence Eye Movement in Augmented Reality See-Through Vision", "abstract": "Augmented Reality (AR) see-through vision has become a recent research focus since it enables the user to see through a wall and see the occluded objects. Most existing works only used common modalities to control the display for see-through vision, e.g., button clicking and speech control. However, we use visual system to observe see-through vision. Using an addition interaction channel will distract the user and degrade the user experience. In this paper, we propose a novel interaction method using vergence eye movement for controlling see-through vision in AR. Specifically, we first customize eye cameras and design gaze depth estimation method for Microsoft HoloLens 2. With our algorithm, fixation depth can be computed from the vergence, and used to manage the see-through vision. We also propose two control techniques of gaze vergence. The experimental results show that the gaze depth estimation method is efficient. The difference cannot be found between these two modalities in terms of completion time and the number of successes.", "abstracts": [ { "abstractType": "Regular", "content": "Augmented Reality (AR) see-through vision has become a recent research focus since it enables the user to see through a wall and see the occluded objects. Most existing works only used common modalities to control the display for see-through vision, e.g., button clicking and speech control. 
However, we use visual system to observe see-through vision. Using an addition interaction channel will distract the user and degrade the user experience. In this paper, we propose a novel interaction method using vergence eye movement for controlling see-through vision in AR. Specifically, we first customize eye cameras and design gaze depth estimation method for Microsoft HoloLens 2. With our algorithm, fixation depth can be computed from the vergence, and used to manage the see-through vision. We also propose two control techniques of gaze vergence. The experimental results show that the gaze depth estimation method is efficient. The difference cannot be found between these two modalities in terms of completion time and the number of successes.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Augmented Reality (AR) see-through vision has become a recent research focus since it enables the user to see through a wall and see the occluded objects. Most existing works only used common modalities to control the display for see-through vision, e.g., button clicking and speech control. However, we use visual system to observe see-through vision. Using an addition interaction channel will distract the user and degrade the user experience. In this paper, we propose a novel interaction method using vergence eye movement for controlling see-through vision in AR. Specifically, we first customize eye cameras and design gaze depth estimation method for Microsoft HoloLens 2. With our algorithm, fixation depth can be computed from the vergence, and used to manage the see-through vision. We also propose two control techniques of gaze vergence. The experimental results show that the gaze depth estimation method is efficient. 
The difference cannot be found between these two modalities in terms of completion time and the number of successes.", "fno": "840200a548", "keywords": [ "Augmented Reality", "Computer Vision", "Eye", "Vergence Eye Movement", "Augmented Reality", "Recent Research Focus", "Occluded Objects", "Common Modalities", "Button Clicking", "Speech Control", "Visual System", "Addition Interaction Channel", "User Experience", "Novel Interaction Method", "Eye Cameras", "Design Gaze Depth Estimation Method", "Fixation Depth", "Control Techniques", "Gaze Vergence", "Human Computer Interaction", "Three Dimensional Displays", "Conferences", "Estimation", "Visual Systems", "Cameras", "User Experience", "Augmented Reality", "See Through Vision", "Vergence Eye Movement", "Human Computer Interaction HCI" ], "authors": [ { "affiliation": "State Key Laboratory of VR Technology and Systems, School of Computer Science and Engineering, Beihang University", "fullName": "Zhimin Wang", "givenName": "Zhimin", "surname": "Wang", "__typename": "ArticleAuthorType" }, { "affiliation": "State Key Laboratory of VR Technology and Systems, School of Computer Science and Engineering, Beihang University", "fullName": "Yuxin Zhao", "givenName": "Yuxin", "surname": "Zhao", "__typename": "ArticleAuthorType" }, { "affiliation": "State Key Laboratory of VR Technology and Systems, School of Computer Science and Engineering, Beihang University", "fullName": "Feng Lu", "givenName": "Feng", "surname": "Lu", "__typename": "ArticleAuthorType" } ], "idPrefix": "vrw", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2022-03-01T00:00:00", "pubType": "proceedings", "pages": "548-549", "year": "2022", "issn": null, "isbn": "978-1-6654-8402-2", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "840200a546", "articleId": "1CJfsrBSunS", "__typename": "AdjacentArticleType" }, "next": { "fno": 
"840200a550", "articleId": "1CJdPv2AGJi", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/crv/2007/2786/0/27860363", "title": "Version and vergence control of a stereo camera head by fitting the movement into the Hering?s law", "doi": null, "abstractUrl": "/proceedings-article/crv/2007/27860363/12OmNAGNCfy", "parentPublication": { "id": "proceedings/crv/2007/2786/0", "title": "Fourth Canadian Conference on Computer and Robot Vision (CRV '07)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sibgrapi/1999/0481/0/04810171", "title": "Keeping an Eye for HCI", "doi": null, "abstractUrl": "/proceedings-article/sibgrapi/1999/04810171/12OmNAoUT4O", "parentPublication": { "id": "proceedings/sibgrapi/1999/0481/0", "title": "XII Brazilian Symposium on Computer Graphics and Image Processing (Cat. No.PR00481)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2014/6184/0/06948442", "title": "[Poster] HMD Video see though AR with unfixed cameras vergence", "doi": null, "abstractUrl": "/proceedings-article/ismar/2014/06948442/12OmNB0nWd6", "parentPublication": { "id": "proceedings/ismar/2014/6184/0", "title": "2014 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/1994/5825/0/00323879", "title": "Accurate vergence control in complex scenes", "doi": null, "abstractUrl": "/proceedings-article/cvpr/1994/00323879/12OmNBVrjq1", "parentPublication": { "id": "proceedings/cvpr/1994/5825/0", "title": "Proceedings of IEEE Conference on Computer Vision and Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dui/2007/0907/0/04142848", "title": "Cascading Hand and Eye Movement for Augmented Reality 
Videoconferencing", "doi": null, "abstractUrl": "/proceedings-article/3dui/2007/04142848/12OmNyrIatw", "parentPublication": { "id": "proceedings/3dui/2007/0907/0", "title": "2007 IEEE Symposium on 3D User Interfaces", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2017/11/08007333", "title": "Cognitive Cost of Using Augmented Reality Displays", "doi": null, "abstractUrl": "/journal/tg/2017/11/08007333/13rRUygT7fg", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/11/09874254", "title": "Gaze-Vergence-Controlled See-Through Vision in Augmented Reality", "doi": null, "abstractUrl": "/journal/tg/2022/11/09874254/1GjwOCjuXkY", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/11/08794584", "title": "Towards a Switchable AR/VR Near-eye Display with Accommodation-Vergence and Eyeglass Prescription Support", "doi": null, "abstractUrl": "/journal/tg/2019/11/08794584/1dNHlOrNW5W", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2020/5608/0/09089433", "title": "Glanceable AR: Evaluating Information Access Methods for Head-Worn Augmented Reality", "doi": null, "abstractUrl": "/proceedings-article/vr/2020/09089433/1jIxf3ZEs0w", "parentPublication": { "id": "proceedings/vr/2020/5608/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2021/0158/0/015800z016", "title": "Keynote Speaker: User Experience Considerations for 
Everyday Augmented Reality", "doi": null, "abstractUrl": "/proceedings-article/ismar/2021/015800z016/1yeCV2T6UAE", "parentPublication": { "id": "proceedings/ismar/2021/0158/0", "title": "2021 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1cI6akLvAuQ", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "acronym": "vr", "groupId": "1000791", "volume": "0", "displayVolume": "0", "year": "2019", "__typename": "ProceedingType" }, "article": { "id": "1cJ0T4CUJTq", "doi": "10.1109/VR.2019.8798273", "title": "Required Accuracy of Gaze Tracking for Varifocal Displays", "normalizedTitle": "Required Accuracy of Gaze Tracking for Varifocal Displays", "abstract": "Varifocal displays are a practical method to solve vergence-accommodation conflict in near-eye displays for both virtual and augmented reality, but they are reliant on knowing the user's focal state. One approach for detecting the focal state is to use the link between vergence and accommodation and employ binocular gaze tracking to determine the depth of the fixation point; consequently, the focal depth is also known. In order to ensure the virtual image is in focus, the display must be set to a depth which causes no negative perceptual or physiological effects to the viewer, which indicates error bounds for the calculation of fixation point. I analyze the required gaze tracker accuracy to ensure the display focus is set within the viewer's depth of field, zone of comfort, and zone of clear single binocular vision. My findings indicate that for the median adult using an augmented reality varifocal display, gaze tracking accuracy must be better than 0.541&#x00B0;. In addition, I discuss eye tracking approaches presented in the literature to determine their ability to meet the specified requirements.", "abstracts": [ { "abstractType": "Regular", "content": "Varifocal displays are a practical method to solve vergence-accommodation conflict in near-eye displays for both virtual and augmented reality, but they are reliant on knowing the user's focal state. 
One approach for detecting the focal state is to use the link between vergence and accommodation and employ binocular gaze tracking to determine the depth of the fixation point; consequently, the focal depth is also known. In order to ensure the virtual image is in focus, the display must be set to a depth which causes no negative perceptual or physiological effects to the viewer, which indicates error bounds for the calculation of fixation point. I analyze the required gaze tracker accuracy to ensure the display focus is set within the viewer's depth of field, zone of comfort, and zone of clear single binocular vision. My findings indicate that for the median adult using an augmented reality varifocal display, gaze tracking accuracy must be better than 0.541&#x00B0;. In addition, I discuss eye tracking approaches presented in the literature to determine their ability to meet the specified requirements.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Varifocal displays are a practical method to solve vergence-accommodation conflict in near-eye displays for both virtual and augmented reality, but they are reliant on knowing the user's focal state. One approach for detecting the focal state is to use the link between vergence and accommodation and employ binocular gaze tracking to determine the depth of the fixation point; consequently, the focal depth is also known. In order to ensure the virtual image is in focus, the display must be set to a depth which causes no negative perceptual or physiological effects to the viewer, which indicates error bounds for the calculation of fixation point. I analyze the required gaze tracker accuracy to ensure the display focus is set within the viewer's depth of field, zone of comfort, and zone of clear single binocular vision. My findings indicate that for the median adult using an augmented reality varifocal display, gaze tracking accuracy must be better than 0.541°. 
In addition, I discuss eye tracking approaches presented in the literature to determine their ability to meet the specified requirements.", "fno": "08798273", "keywords": [ "Augmented Reality", "Eye", "Gaze Tracking", "Three Dimensional Displays", "Visual Perception", "Varifocal Displays", "Near Eye Displays", "Virtual Reality", "Focal State", "Fixation Point", "Focal Depth", "Virtual Image", "Negative Perceptual Effects", "Physiological Effects", "Clear Single Binocular Vision", "Augmented Reality Varifocal Display", "Gaze Tracking Accuracy", "Eye Tracking Approaches", "Gaze Tracker Accuracy", "Binocular Gaze Tracking", "Gaze Tracking", "Three Dimensional Displays", "Hardware", "Optical Imaging", "Optical Variables Measurement", "Augmented Reality", "Optical Sensors", "Human Centered Computing", "Human Computer Interaction HCI", "Interaction Devices Hardware", "Hardware Validation", "Functional Verification Hardware", "Functional Verification", "Coverage Metrics Hardware", "Robustness Human Centered Computing", "Interaction Devices", "Displays And Imagers" ], "authors": [ { "affiliation": "UNC-Chapel Hill", "fullName": "David Dunn", "givenName": "David", "surname": "Dunn", "__typename": "ArticleAuthorType" } ], "idPrefix": "vr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2019-03-01T00:00:00", "pubType": "proceedings", "pages": "1838-1842", "year": "2019", "issn": null, "isbn": "978-1-7281-1377-7", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "08797758", "articleId": "1cJ10RDnKzS", "__typename": "AdjacentArticleType" }, "next": { "fno": "08798030", "articleId": "1cJ1dsOkvw4", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icat/2007/3056/0/30560280", "title": "Interaction Without Gesture or Speech -- A Gaze Controlled AR System", "doi": null, 
"abstractUrl": "/proceedings-article/icat/2007/30560280/12OmNCcKQtv", "parentPublication": { "id": "proceedings/icat/2007/3056/0", "title": "17th International Conference on Artificial Reality and Telexistence (ICAT 2007)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wkdd/2009/3543/0/3543a594", "title": "Research on Eye-gaze Tracking Network Generated by Augmented Reality Application", "doi": null, "abstractUrl": "/proceedings-article/wkdd/2009/3543a594/12OmNzl3WVn", "parentPublication": { "id": "proceedings/wkdd/2009/3543/0", "title": "2009 Second International Workshop on Knowledge Discovery and Data Mining. WKDD 2009", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2017/04/07829412", "title": "Wide Field Of View Varifocal Near-Eye Display Using See-Through Deformable Membrane Mirrors", "doi": null, "abstractUrl": "/journal/tg/2017/04/07829412/13rRUwcS1D1", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2016/07/07226865", "title": "Resolving the Vergence-Accommodation Conflict in Head-Mounted Displays", "doi": null, "abstractUrl": "/journal/tg/2016/07/07226865/13rRUxASuhD", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/02/08462792", "title": "The Effect of Focal Distance, Age, and Brightness on Near-Field Augmented Reality Depth Matching", "doi": null, "abstractUrl": "/journal/tg/2020/02/08462792/13w3loWnQPK", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/05/08676155", "title": 
"Varifocal Occlusion for Optical See-Through Head-Mounted Displays using a Slide Occlusion Mask", "doi": null, "abstractUrl": "/journal/tg/2019/05/08676155/18LFfGhc49i", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2022/9617/0/961700a746", "title": "Metameric Varifocal Holograms", "doi": null, "abstractUrl": "/proceedings-article/vr/2022/961700a746/1CJcc750PQI", "parentPublication": { "id": "proceedings/vr/2022/9617/0", "title": "2022 IEEE on Conference Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/11/09874254", "title": "Gaze-Vergence-Controlled See-Through Vision in Augmented Reality", "doi": null, "abstractUrl": "/journal/tg/2022/11/09874254/1GjwOCjuXkY", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2022/5325/0/532500a787", "title": "VRDoc: Gaze-based Interactions for VR Reading Experience", "doi": null, "abstractUrl": "/proceedings-article/ismar/2022/532500a787/1JrRgFp6G2s", "parentPublication": { "id": "proceedings/ismar/2022/5325/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/05/08998348", "title": "Effects of Depth Information on Visual Target Identification Task Performance in Shared Gaze Environments", "doi": null, "abstractUrl": "/journal/tg/2020/05/08998348/1hrXedrZXos", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1tnWwqMuCzu", "title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "acronym": "vrw", "groupId": "1836626", "volume": "0", "displayVolume": "0", "year": "2021", "__typename": "ProceedingType" }, "article": { "id": "1tnXfbb0lFK", "doi": "10.1109/VRW52623.2021.00142", "title": "Subtle Gaze Guidance for 360&#x00B0; Content by Gradual Brightness Modulation and Termination of Modulation by Gaze Approaching", "normalizedTitle": "Subtle Gaze Guidance for 360° Content by Gradual Brightness Modulation and Termination of Modulation by Gaze Approaching", "abstract": "On VR, users do not always see the specific contents that the creators want them to focus on. For the creators to provide users with the immersive experience they intend, it is necessary to naturally guide the user's gaze to relevant spots in the virtual space. In this paper, we propose a subtle gaze guidance method for 360&#x00B0; content combining two techniques; gradual brightness modulation and termination of modulation by gaze approaching the guidance area. The experimental results show that our method significantly contributes to a more natural and less disturbing viewing experience while maintaining relatively high guidance performance.", "abstracts": [ { "abstractType": "Regular", "content": "On VR, users do not always see the specific contents that the creators want them to focus on. For the creators to provide users with the immersive experience they intend, it is necessary to naturally guide the user's gaze to relevant spots in the virtual space. In this paper, we propose a subtle gaze guidance method for 360&#x00B0; content combining two techniques; gradual brightness modulation and termination of modulation by gaze approaching the guidance area. 
The experimental results show that our method significantly contributes to a more natural and less disturbing viewing experience while maintaining relatively high guidance performance.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "On VR, users do not always see the specific contents that the creators want them to focus on. For the creators to provide users with the immersive experience they intend, it is necessary to naturally guide the user's gaze to relevant spots in the virtual space. In this paper, we propose a subtle gaze guidance method for 360° content combining two techniques; gradual brightness modulation and termination of modulation by gaze approaching the guidance area. The experimental results show that our method significantly contributes to a more natural and less disturbing viewing experience while maintaining relatively high guidance performance.", "fno": "405700a520", "keywords": [ "User Interfaces", "Virtual Reality", "Gradual Brightness Modulation", "Gaze Approaching", "Specific Contents", "Immersive Experience", "Virtual Space", "Subtle Gaze Guidance Method", "Guidance Area", "Natural Viewing Experience", "Three Dimensional Displays", "Conferences", "Brightness", "Modulation", "Immersive Experience", "Color", "User Interfaces", "Human Centered Computing", "Human Computer Interaction HCI", "Interaction Paradigms", "Virtual Reality", "Computing Methodologies", "Computer Graphics", "Graphics Systems And Interfaces", "Perception" ], "authors": [ { "affiliation": "Nara Institute of Science and Technology", "fullName": "Masatoshi Yokomi", "givenName": "Masatoshi", "surname": "Yokomi", "__typename": "ArticleAuthorType" }, { "affiliation": "Nara Institute of Science and Technology", "fullName": "Naoya Isoyama", "givenName": "Naoya", "surname": "Isoyama", "__typename": "ArticleAuthorType" }, { "affiliation": "Nara Institute of Science and Technology", "fullName": "Nobuchika Sakata", "givenName": "Nobuchika", "surname": "Sakata", 
"__typename": "ArticleAuthorType" }, { "affiliation": "Nara Institute of Science and Technology", "fullName": "Kiyoshi Kiyokawa", "givenName": "Kiyoshi", "surname": "Kiyokawa", "__typename": "ArticleAuthorType" } ], "idPrefix": "vrw", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2021-03-01T00:00:00", "pubType": "proceedings", "pages": "520-521", "year": "2021", "issn": null, "isbn": "978-1-6654-4057-8", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "405700a518", "articleId": "1tnWMo8e5Ve", "__typename": "AdjacentArticleType" }, "next": { "fno": "405700a522", "articleId": "1tnXaVk09jy", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/vr/2015/1727/0/07223361", "title": "AR-SSVEP for brain-machine interface: Estimating user's gaze in head-mounted display with USB camera", "doi": null, "abstractUrl": "/proceedings-article/vr/2015/07223361/12OmNwtEEzT", "parentPublication": { "id": "proceedings/vr/2015/1727/0", "title": "2015 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2018/3365/0/08446215", "title": "Gaze Guidance in Immersive Environments", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08446215/13bd1gJ1v0y", "parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/th/2015/02/07061490", "title": "Material Roughness Modulation via Electrotactile Augmentation", "doi": null, "abstractUrl": "/journal/th/2015/02/07061490/13rRUwI5U82", "parentPublication": { "id": "trans/th", "title": "IEEE Transactions on Haptics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { 
"id": "proceedings/vrw/2022/8402/0/840200a562", "title": "Gaze Capture based Considerate Behaviour Control of Virtual Guiding Agent", "doi": null, "abstractUrl": "/proceedings-article/vrw/2022/840200a562/1CJfoWhFCXm", "parentPublication": { "id": "proceedings/vrw/2022/8402/0", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2023/4815/0/481500a620", "title": "Exploring Enhancements towards Gaze Oriented Parallel Views in Immersive Tasks", "doi": null, "abstractUrl": "/proceedings-article/vr/2023/481500a620/1MNgG4plx6w", "parentPublication": { "id": "proceedings/vr/2023/4815/0", "title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2023/4815/0/481500a022", "title": "Exploring 3D Interaction with Gaze Guidance in Augmented Reality", "doi": null, "abstractUrl": "/proceedings-article/vr/2023/481500a022/1MNgYOBne5W", "parentPublication": { "id": "proceedings/vr/2023/4815/0", "title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2019/4765/0/476500a243", "title": "Improving Color Discrimination for Color Vision Deficiency (CVD) with Temporal-Domain Modulation", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2019/476500a243/1gysnyqF7Fu", "parentPublication": { "id": "proceedings/ismar-adjunct/2019/4765/0", "title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2020/6532/0/09090600", "title": "Subtle Gaze Direction with Asymmetric Field-of-View Modulation in Headworn Virtual Reality", "doi": 
null, "abstractUrl": "/proceedings-article/vrw/2020/09090600/1jIxwQO6LXa", "parentPublication": { "id": "proceedings/vrw/2020/6532/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2021/4057/0/405700a462", "title": "GazeTance Guidance: Gaze and Distance-Based Content Presentation for Virtual Museum", "doi": null, "abstractUrl": "/proceedings-article/vrw/2021/405700a462/1tnWZyFmWpG", "parentPublication": { "id": "proceedings/vrw/2021/4057/0", "title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2021/1298/0/129800a310", "title": "Subtle Attention Guidance for Real Walking in Virtual Environments", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2021/129800a310/1yeQK0BEj8Q", "parentPublication": { "id": "proceedings/ismar-adjunct/2021/1298/0", "title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1BmEezmpGrm", "title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)", "acronym": "iccv", "groupId": "1000149", "volume": "0", "displayVolume": "0", "year": "2021", "__typename": "ProceedingType" }, "article": { "id": "1BmEBA2TORW", "doi": "10.1109/ICCV48922.2021.00580", "title": "Mip-NeRF: A Multiscale Representation for Anti-Aliasing Neural Radiance Fields", "normalizedTitle": "Mip-NeRF: A Multiscale Representation for Anti-Aliasing Neural Radiance Fields", "abstract": "The rendering procedure used by neural radiance fields (NeRF) samples a scene with a single ray per pixel and may therefore produce renderings that are excessively blurred or aliased when training or testing images observe scene content at different resolutions. The straightforward solution of supersampling by rendering with multiple rays per pixel is impractical for NeRF, because rendering each ray requires querying a multilayer perceptron hundreds of times. Our solution, which we call \"mip-NeRF\" (&#x00E0; la \"mipmap\"), extends NeRF to represent the scene at a continuously-valued scale. By efficiently rendering anti-aliased conical frustums instead of rays, mip-NeRF reduces objectionable aliasing artifacts and significantly improves NeRF&#x2019;s ability to represent fine details, while also being 7% faster than NeRF and half the size. Compared to NeRF, mip-NeRF reduces average error rates by 17% on the dataset presented with NeRF and by 60% on a challenging multiscale variant of that dataset that we present. 
Mip-NeRF is also able to match the accuracy of a brute-force supersampled NeRF on our multiscale dataset while being 22&#x00D7; faster.", "abstracts": [ { "abstractType": "Regular", "content": "The rendering procedure used by neural radiance fields (NeRF) samples a scene with a single ray per pixel and may therefore produce renderings that are excessively blurred or aliased when training or testing images observe scene content at different resolutions. The straightforward solution of supersampling by rendering with multiple rays per pixel is impractical for NeRF, because rendering each ray requires querying a multilayer perceptron hundreds of times. Our solution, which we call \"mip-NeRF\" (&#x00E0; la \"mipmap\"), extends NeRF to represent the scene at a continuously-valued scale. By efficiently rendering anti-aliased conical frustums instead of rays, mip-NeRF reduces objectionable aliasing artifacts and significantly improves NeRF&#x2019;s ability to represent fine details, while also being 7% faster than NeRF and half the size. Compared to NeRF, mip-NeRF reduces average error rates by 17% on the dataset presented with NeRF and by 60% on a challenging multiscale variant of that dataset that we present. Mip-NeRF is also able to match the accuracy of a brute-force supersampled NeRF on our multiscale dataset while being 22&#x00D7; faster.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "The rendering procedure used by neural radiance fields (NeRF) samples a scene with a single ray per pixel and may therefore produce renderings that are excessively blurred or aliased when training or testing images observe scene content at different resolutions. The straightforward solution of supersampling by rendering with multiple rays per pixel is impractical for NeRF, because rendering each ray requires querying a multilayer perceptron hundreds of times. 
Our solution, which we call \"mip-NeRF\" (à la \"mipmap\"), extends NeRF to represent the scene at a continuously-valued scale. By efficiently rendering anti-aliased conical frustums instead of rays, mip-NeRF reduces objectionable aliasing artifacts and significantly improves NeRF’s ability to represent fine details, while also being 7% faster than NeRF and half the size. Compared to NeRF, mip-NeRF reduces average error rates by 17% on the dataset presented with NeRF and by 60% on a challenging multiscale variant of that dataset that we present. Mip-NeRF is also able to match the accuracy of a brute-force supersampled NeRF on our multiscale dataset while being 22× faster.", "fno": "281200f835", "keywords": [ "Training", "Computer Vision", "Image Resolution", "Error Analysis", "Computational Modeling", "Neural Networks", "Multilayer Perceptrons", "Stereo", "3 D From Multiview And Other Sensors", "Low Level And Physics Based Vision" ], "authors": [ { "affiliation": "Google", "fullName": "Jonathan T. Barron", "givenName": "Jonathan T.", "surname": "Barron", "__typename": "ArticleAuthorType" }, { "affiliation": "Google", "fullName": "Ben Mildenhall", "givenName": "Ben", "surname": "Mildenhall", "__typename": "ArticleAuthorType" }, { "affiliation": "UC Berkeley", "fullName": "Matthew Tancik", "givenName": "Matthew", "surname": "Tancik", "__typename": "ArticleAuthorType" }, { "affiliation": "Google", "fullName": "Peter Hedman", "givenName": "Peter", "surname": "Hedman", "__typename": "ArticleAuthorType" }, { "affiliation": "Google", "fullName": "Ricardo Martin-Brualla", "givenName": "Ricardo", "surname": "Martin-Brualla", "__typename": "ArticleAuthorType" }, { "affiliation": "Google", "fullName": "Pratul P. 
Srinivasan", "givenName": "Pratul P.", "surname": "Srinivasan", "__typename": "ArticleAuthorType" } ], "idPrefix": "iccv", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2021-10-01T00:00:00", "pubType": "proceedings", "pages": "5835-5844", "year": "2021", "issn": null, "isbn": "978-1-6654-2812-5", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "281200f826", "articleId": "1BmEiCkWfU4", "__typename": "AdjacentArticleType" }, "next": { "fno": "281200f845", "articleId": "1BmL0KETWzm", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/iccv/2021/2812/0/281200f855", "title": "Baking Neural Radiance Fields for Real-Time View Synthesis", "doi": null, "abstractUrl": "/proceedings-article/iccv/2021/281200f855/1BmKZYWAuWY", "parentPublication": { "id": "proceedings/iccv/2021/2812/0", "title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/11/09872532", "title": "FoV-NeRF: Foveated Neural Radiance Fields for Virtual Reality", "doi": null, "abstractUrl": "/journal/tg/2022/11/09872532/1GhRVOIAS4g", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600s8332", "title": "NeRF-Editing: Geometry Editing of Neural Radiance Fields", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600s8332/1H0Nn4Xgsne", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/cvpr/2022/6946/0/694600f460", "title": "Mip-NeRF 360: Unbounded Anti-Aliased Neural Radiance Fields", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600f460/1H0OphoghaM", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600p5170", "title": "Aug-NeRF: Training Stronger Neural Radiance Fields with Triple-Level Physically-Grounded Augmentations", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600p5170/1H1jhjLRpRu", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600f481", "title": "Ref-NeRF: Structured View-Dependent Appearance for Neural Radiance Fields", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600f481/1H1jnh582jK", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600m2851", "title": "Deblur-NeRF: Neural Radiance Fields from Blurry Images", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600m2851/1H1kFc1BMLS", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600s8377", "title": "HDR-NeRF: High Dynamic Range Neural Radiance Fields", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600s8377/1H1kSeZPinK", 
"parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600f428", "title": "Point-NeRF: Point-based Neural Radiance Fields", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600f428/1H1mrGLgvra", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2021/4509/0/450900k0313", "title": "D-NeRF: Neural Radiance Fields for Dynamic Scenes", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2021/450900k0313/1yeLrBwGgik", "parentPublication": { "id": "proceedings/cvpr/2021/4509/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1BmEezmpGrm", "title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)", "acronym": "iccv", "groupId": "1000149", "volume": "0", "displayVolume": "0", "year": "2021", "__typename": "ProceedingType" }, "article": { "id": "1BmEtIfeMZW", "doi": "10.1109/ICCV48922.2021.00570", "title": "PlenOctrees for Real-time Rendering of Neural Radiance Fields", "normalizedTitle": "PlenOctrees for Real-time Rendering of Neural Radiance Fields", "abstract": "We introduce a method to render Neural Radiance Fields (NeRFs) in real time using PlenOctrees, an octree-based 3D representation which supports view-dependent effects. Our method can render 800&#x00D7;800 images at more than 150 FPS, which is over 3000 times faster than conventional NeRFs. We do so without sacrificing quality while preserving the ability of NeRFs to perform free-viewpoint rendering of scenes with arbitrary geometry and view-dependent effects. Real-time performance is achieved by pre-tabulating the NeRF into a PlenOctree. In order to preserve view-dependent effects such as specularities, we factorize the appearance via closed-form spherical basis functions. Specifically, we show that it is possible to train NeRFs to predict a spherical harmonic representation of radiance, removing the viewing direction as an input to the neural network. Furthermore, we show that PlenOctrees can be directly optimized to further minimize the reconstruction loss, which leads to equal or better quality compared to competing methods. Moreover, this octree optimization step can be used to reduce the training time, as we no longer need to wait for the NeRF training to converge fully. Our real-time neural rendering approach may potentially enable new applications such as 6-DOF industrial and product visualizations, as well as next generation AR/VR systems. 
PlenOctrees are amenable to in-browser rendering as well; please visit the project page for the interactive online demo, as well as video and code: https://alexyu.net/plenoctrees.", "abstracts": [ { "abstractType": "Regular", "content": "We introduce a method to render Neural Radiance Fields (NeRFs) in real time using PlenOctrees, an octree-based 3D representation which supports view-dependent effects. Our method can render 800&#x00D7;800 images at more than 150 FPS, which is over 3000 times faster than conventional NeRFs. We do so without sacrificing quality while preserving the ability of NeRFs to perform free-viewpoint rendering of scenes with arbitrary geometry and view-dependent effects. Real-time performance is achieved by pre-tabulating the NeRF into a PlenOctree. In order to preserve view-dependent effects such as specularities, we factorize the appearance via closed-form spherical basis functions. Specifically, we show that it is possible to train NeRFs to predict a spherical harmonic representation of radiance, removing the viewing direction as an input to the neural network. Furthermore, we show that PlenOctrees can be directly optimized to further minimize the reconstruction loss, which leads to equal or better quality compared to competing methods. Moreover, this octree optimization step can be used to reduce the training time, as we no longer need to wait for the NeRF training to converge fully. Our real-time neural rendering approach may potentially enable new applications such as 6-DOF industrial and product visualizations, as well as next generation AR/VR systems. 
PlenOctrees are amenable to in-browser rendering as well; please visit the project page for the interactive online demo, as well as video and code: https://alexyu.net/plenoctrees.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We introduce a method to render Neural Radiance Fields (NeRFs) in real time using PlenOctrees, an octree-based 3D representation which supports view-dependent effects. Our method can render 800×800 images at more than 150 FPS, which is over 3000 times faster than conventional NeRFs. We do so without sacrificing quality while preserving the ability of NeRFs to perform free-viewpoint rendering of scenes with arbitrary geometry and view-dependent effects. Real-time performance is achieved by pre-tabulating the NeRF into a PlenOctree. In order to preserve view-dependent effects such as specularities, we factorize the appearance via closed-form spherical basis functions. Specifically, we show that it is possible to train NeRFs to predict a spherical harmonic representation of radiance, removing the viewing direction as an input to the neural network. Furthermore, we show that PlenOctrees can be directly optimized to further minimize the reconstruction loss, which leads to equal or better quality compared to competing methods. Moreover, this octree optimization step can be used to reduce the training time, as we no longer need to wait for the NeRF training to converge fully. Our real-time neural rendering approach may potentially enable new applications such as 6-DOF industrial and product visualizations, as well as next generation AR/VR systems. 
PlenOctrees are amenable to in-browser rendering as well; please visit the project page for the interactive online demo, as well as video and code: https://alexyu.net/plenoctrees.", "fno": "281200f732", "keywords": [ "Training", "Visualization", "Three Dimensional Displays", "Octrees", "Neural Networks", "Streaming Media", "Rendering Computer Graphics", "Stereo", "3 D From Multiview And Other Sensors", "Vision Applications And Systems" ], "authors": [ { "affiliation": "UC Berkeley", "fullName": "Alex Yu", "givenName": "Alex", "surname": "Yu", "__typename": "ArticleAuthorType" }, { "affiliation": "UC Berkeley", "fullName": "Ruilong Li", "givenName": "Ruilong", "surname": "Li", "__typename": "ArticleAuthorType" }, { "affiliation": "UC Berkeley", "fullName": "Matthew Tancik", "givenName": "Matthew", "surname": "Tancik", "__typename": "ArticleAuthorType" }, { "affiliation": "UC Berkeley", "fullName": "Hao Li", "givenName": "Hao", "surname": "Li", "__typename": "ArticleAuthorType" }, { "affiliation": "UC Berkeley", "fullName": "Ren Ng", "givenName": "Ren", "surname": "Ng", "__typename": "ArticleAuthorType" }, { "affiliation": "UC Berkeley", "fullName": "Angjoo Kanazawa", "givenName": "Angjoo", "surname": "Kanazawa", "__typename": "ArticleAuthorType" } ], "idPrefix": "iccv", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2021-10-01T00:00:00", "pubType": "proceedings", "pages": "5732-5741", "year": "2021", "issn": null, "isbn": "978-1-6654-2812-5", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "281200f721", "articleId": "1BmJaz9pbig", "__typename": "AdjacentArticleType" }, "next": { "fno": "281200f742", "articleId": "1BmFoeDN55e", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/iccv/2021/2812/0/281200o4315", "title": "KiloNeRF: Speeding up Neural Radiance Fields 
with Thousands of Tiny MLPs", "doi": null, "abstractUrl": "/proceedings-article/iccv/2021/281200o4315/1BmFPB7IKHK", "parentPublication": { "id": "proceedings/iccv/2021/2812/0", "title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2021/2812/0/281200f855", "title": "Baking Neural Radiance Fields for Real-Time View Synthesis", "doi": null, "abstractUrl": "/proceedings-article/iccv/2021/281200f855/1BmKZYWAuWY", "parentPublication": { "id": "proceedings/iccv/2021/2812/0", "title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600m2892", "title": "EfficientNeRF - Efficient Neural Radiance Fields", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600m2892/1H0OvIHTU7S", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600i238", "title": "Block-NeRF: Scalable Large Scene Neural View Synthesis", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600i238/1H1hVQ0jgBy", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600n3514", "title": "Fourier PlenOctrees for Dynamic Radiance Field Rendering in Real-time", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600n3514/1H1m9gTxNYc", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600f428", "title": "Point-NeRF: Point-based Neural Radiance Fields", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600f428/1H1mrGLgvra", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600s8388", "title": "NeRFReN: Neural Radiance Fields with Reflections", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600s8388/1H1nhdo3vFe", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2023/9346/0/934600a795", "title": "Beyond RGB: Scene-Property Synthesis with Neural Radiance Fields", "doi": null, "abstractUrl": "/proceedings-article/wacv/2023/934600a795/1KxVhi7yhR6", "parentPublication": { "id": "proceedings/wacv/2023/9346/0", "title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2023/9346/0/934600a816", "title": "ScanNeRF: a Scalable Benchmark for Neural Radiance Fields", "doi": null, "abstractUrl": "/proceedings-article/wacv/2023/934600a816/1L8qzxMLiDu", "parentPublication": { "id": "proceedings/wacv/2023/9346/0", "title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2021/4509/0/450900k0313", "title": "D-NeRF: Neural Radiance Fields for Dynamic Scenes", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2021/450900k0313/1yeLrBwGgik", 
"parentPublication": { "id": "proceedings/cvpr/2021/4509/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1H1gVMlkl32", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "acronym": "cvpr", "groupId": "1000147", "volume": "0", "displayVolume": "0", "year": "2022", "__typename": "ProceedingType" }, "article": { "id": "1H0Nn4Xgsne", "doi": "10.1109/CVPR52688.2022.01781", "title": "NeRF-Editing: Geometry Editing of Neural Radiance Fields", "normalizedTitle": "NeRF-Editing: Geometry Editing of Neural Radiance Fields", "abstract": "Implicit neural rendering, especially Neural Radiance Field (NeRF), has shown great potential in novel view synthesis of a scene. However, current NeRF-based methods cannot enable users to perform user-controlled shape deformation in the scene. While existing works have proposed some approaches to modify the radiance field according to the user&#x0027;s constraints, the modification is limited to color editing or object translation and rotation. In this paper, we propose a method that allows users to perform controllable shape deformation on the implicit representation of the scene, and synthesizes the novel view images of the edited scene without re-training the network. Specifically, we establish a correspondence between the extracted explicit mesh representation and the implicit neural representation of the target scene. Users can first utilize well-developed mesh-based deformation methods to deform the mesh representation of the scene. Our method then utilizes user edits from the mesh representation to bend the camera rays by introducing a tetrahedra mesh as a proxy, obtaining the rendering results of the edited scene. Extensive experiments demonstrate that our framework can achieve ideal editing results not only on synthetic data, but also on real scenes captured by users.", "abstracts": [ { "abstractType": "Regular", "content": "Implicit neural rendering, especially Neural Radiance Field (NeRF), has shown great potential in novel view synthesis of a scene. 
However, current NeRF-based methods cannot enable users to perform user-controlled shape deformation in the scene. While existing works have proposed some approaches to modify the radiance field according to the user&#x0027;s constraints, the modification is limited to color editing or object translation and rotation. In this paper, we propose a method that allows users to perform controllable shape deformation on the implicit representation of the scene, and synthesizes the novel view images of the edited scene without re-training the network. Specifically, we establish a correspondence between the extracted explicit mesh representation and the implicit neural representation of the target scene. Users can first utilize well-developed mesh-based deformation methods to deform the mesh representation of the scene. Our method then utilizes user edits from the mesh representation to bend the camera rays by introducing a tetrahedra mesh as a proxy, obtaining the rendering results of the edited scene. Extensive experiments demonstrate that our framework can achieve ideal editing results not only on synthetic data, but also on real scenes captured by users.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Implicit neural rendering, especially Neural Radiance Field (NeRF), has shown great potential in novel view synthesis of a scene. However, current NeRF-based methods cannot enable users to perform user-controlled shape deformation in the scene. While existing works have proposed some approaches to modify the radiance field according to the user's constraints, the modification is limited to color editing or object translation and rotation. In this paper, we propose a method that allows users to perform controllable shape deformation on the implicit representation of the scene, and synthesizes the novel view images of the edited scene without re-training the network. 
Specifically, we establish a correspondence between the extracted explicit mesh representation and the implicit neural representation of the target scene. Users can first utilize well-developed mesh-based deformation methods to deform the mesh representation of the scene. Our method then utilizes user edits from the mesh representation to bend the camera rays by introducing a tetrahedra mesh as a proxy, obtaining the rendering results of the edited scene. Extensive experiments demonstrate that our framework can achieve ideal editing results not only on synthetic data, but also on real scenes captured by users.", "fno": "694600s8332", "keywords": [ "Data Visualisation", "Geometry", "Mesh Generation", "Rendering Computer Graphics", "Solid Modelling", "Mesh Based Deformation Methods", "User Edits", "Edited Scene", "Ideal Editing Results", "Ne RF Editing", "Geometry Editing", "Neural Radiance Fields", "Implicit Neural Rendering", "Neural Radiance Field", "View Synthesis", "Current Ne RF Based Methods", "User Controlled Shape Deformation", "Controllable Shape Deformation", "Implicit Representation", "View Images", "Extracted Explicit Mesh Representation", "Implicit Neural Representation", "Target Scene", "Geometry", "Deep Learning", "Computer Vision", "Shape", "Image Color Analysis", "Neural Networks", "Rendering Computer Graphics" ], "authors": [ { "affiliation": "Institute of Computing Technology, Chinese Academy of Sciences,Beijing Key Laboratory of Mobile Computing and Pervasive Device", "fullName": "Yu-Jie Yuan", "givenName": "Yu-Jie", "surname": "Yuan", "__typename": "ArticleAuthorType" }, { "affiliation": "Institute of Computing Technology, Chinese Academy of Sciences,Beijing Key Laboratory of Mobile Computing and Pervasive Device", "fullName": "Yang-Tian Sun", "givenName": "Yang-Tian", "surname": "Sun", "__typename": "ArticleAuthorType" }, { "affiliation": "School of Computer Science & Informatics, Cardiff University", "fullName": "Yu-Kun Lai", "givenName": 
"Yu-Kun", "surname": "Lai", "__typename": "ArticleAuthorType" }, { "affiliation": "Alibaba Group", "fullName": "Yuewen Ma", "givenName": "Yuewen", "surname": "Ma", "__typename": "ArticleAuthorType" }, { "affiliation": "Alibaba Group", "fullName": "Rongfei Jia", "givenName": "Rongfei", "surname": "Jia", "__typename": "ArticleAuthorType" }, { "affiliation": "Institute of Computing Technology, Chinese Academy of Sciences,Beijing Key Laboratory of Mobile Computing and Pervasive Device", "fullName": "Lin Gao", "givenName": "Lin", "surname": "Gao", "__typename": "ArticleAuthorType" } ], "idPrefix": "cvpr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2022-06-01T00:00:00", "pubType": "proceedings", "pages": "18332-18343", "year": "2022", "issn": null, "isbn": "978-1-6654-6946-3", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [ { "id": "1H0Nn0Q54re", "name": "pcvpr202269460-09879133s1-mm_694600s8332.zip", "size": "19 MB", "location": "https://www.computer.org/csdl/api/v1/extra/pcvpr202269460-09879133s1-mm_694600s8332.zip", "__typename": "WebExtraType" } ], "adjacentArticles": { "previous": { "fno": "694600s8321", "articleId": "1H0L3Z762gU", "__typename": "AdjacentArticleType" }, "next": { "fno": "694600s8344", "articleId": "1H0L95fYXbW", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/iccv/2021/2812/0/281200f845", "title": "Nerfies: Deformable Neural Radiance Fields", "doi": null, "abstractUrl": "/proceedings-article/iccv/2021/281200f845/1BmL0KETWzm", "parentPublication": { "id": "proceedings/iccv/2021/2812/0", "title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2021/2812/0/281200f753", "title": "Editing Conditional Radiance Fields", "doi": null, "abstractUrl": 
"/proceedings-article/iccv/2021/281200f753/1BmLkbx0k6c", "parentPublication": { "id": "proceedings/iccv/2021/2812/0", "title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/11/09872532", "title": "FoV-NeRF: Foveated Neural Radiance Fields for Virtual Reality", "doi": null, "abstractUrl": "/journal/tg/2022/11/09872532/1GhRVOIAS4g", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600f460", "title": "Mip-NeRF 360: Unbounded Anti-Aliased Neural Radiance Fields", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600f460/1H0OphoghaM", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600f481", "title": "Ref-NeRF: Structured View-Dependent Appearance for Neural Radiance Fields", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600f481/1H1jnh582jK", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600f428", "title": "Point-NeRF: Point-based Neural Radiance Fields", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600f428/1H1mrGLgvra", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/cvpr/2022/6946/0/694600d825", "title": "CLIP-NeRF: Text-and-Image Driven Manipulation of Neural Radiance Fields", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600d825/1H1muC7wD0Q", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2023/9346/0/934600a795", "title": "Beyond RGB: Scene-Property Synthesis with Neural Radiance Fields", "doi": null, "abstractUrl": "/proceedings-article/wacv/2023/934600a795/1KxVhi7yhR6", "parentPublication": { "id": "proceedings/wacv/2023/9346/0", "title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2021/4509/0/450900h206", "title": "NeRF in the Wild: Neural Radiance Fields for Unconstrained Photo Collections", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2021/450900h206/1yeLpJjmuwE", "parentPublication": { "id": "proceedings/cvpr/2021/4509/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2021/4509/0/450900k0313", "title": "D-NeRF: Neural Radiance Fields for Dynamic Scenes", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2021/450900k0313/1yeLrBwGgik", "parentPublication": { "id": "proceedings/cvpr/2021/4509/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1H1gVMlkl32", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "acronym": "cvpr", "groupId": "1000147", "volume": "0", "displayVolume": "0", "year": "2022", "__typename": "ProceedingType" }, "article": { "id": "1H0OphoghaM", "doi": "10.1109/CVPR52688.2022.00539", "title": "Mip-NeRF 360: Unbounded Anti-Aliased Neural Radiance Fields", "normalizedTitle": "Mip-NeRF 360: Unbounded Anti-Aliased Neural Radiance Fields", "abstract": "Though neural radiance fields (NeRF) have demon-strated impressive view synthesis results on objects and small bounded regions of space, they struggle on &#x201C;un-bounded&#x201D; scenes, where the camera may point in any di-rection and content may exist at any distance. In this set-ting, existing NeRF-like models often produce blurry or low-resolution renderings (due to the unbalanced detail and scale of nearby and distant objects), are slow to train, and may exhibit artifacts due to the inherent ambiguity of the task of reconstructing a large scene from a small set of images. We present an extension of mip-NeRF (a NeRF variant that addresses sampling and aliasing) that uses a non-linear scene parameterization, online distillation, and a novel distortion-based regularizer to overcome the chal-lenges presented by unbounded scenes. Our model, which we dub &#x201C;mip-NeRF 360&#x201D; as we target scenes in which the camera rotates 360 degrees around a point, reduces mean-squared error by 57&#x0025; compared to mip-NeRF, and is able to produce realistic synthesized views and detailed depth maps for highly intricate, unbounded real-world scenes.", "abstracts": [ { "abstractType": "Regular", "content": "Though neural radiance fields (NeRF) have demon-strated impressive view synthesis results on objects and small bounded regions of space, they struggle on &#x201C;un-bounded&#x201D; scenes, where the camera may point in any di-rection and content may exist at any distance. 
In this set-ting, existing NeRF-like models often produce blurry or low-resolution renderings (due to the unbalanced detail and scale of nearby and distant objects), are slow to train, and may exhibit artifacts due to the inherent ambiguity of the task of reconstructing a large scene from a small set of images. We present an extension of mip-NeRF (a NeRF variant that addresses sampling and aliasing) that uses a non-linear scene parameterization, online distillation, and a novel distortion-based regularizer to overcome the chal-lenges presented by unbounded scenes. Our model, which we dub &#x201C;mip-NeRF 360&#x201D; as we target scenes in which the camera rotates 360 degrees around a point, reduces mean-squared error by 57&#x0025; compared to mip-NeRF, and is able to produce realistic synthesized views and detailed depth maps for highly intricate, unbounded real-world scenes.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Though neural radiance fields (NeRF) have demon-strated impressive view synthesis results on objects and small bounded regions of space, they struggle on “un-bounded” scenes, where the camera may point in any di-rection and content may exist at any distance. In this set-ting, existing NeRF-like models often produce blurry or low-resolution renderings (due to the unbalanced detail and scale of nearby and distant objects), are slow to train, and may exhibit artifacts due to the inherent ambiguity of the task of reconstructing a large scene from a small set of images. We present an extension of mip-NeRF (a NeRF variant that addresses sampling and aliasing) that uses a non-linear scene parameterization, online distillation, and a novel distortion-based regularizer to overcome the chal-lenges presented by unbounded scenes. 
Our model, which we dub “mip-NeRF 360” as we target scenes in which the camera rotates 360 degrees around a point, reduces mean-squared error by 57% compared to mip-NeRF, and is able to produce realistic synthesized views and detailed depth maps for highly intricate, unbounded real-world scenes.", "fno": "694600f460", "keywords": [ "Cameras", "Image Reconstruction", "Mean Square Error Methods", "Rendering Computer Graphics", "Mip Ne RF", "Unbounded Anti Aliased", "Neural Radiance Fields", "Demon Strated Impressive View Synthesis Results", "Di Rection", "Set Ting", "Ne RF Like Models", "Low Resolution Renderings", "Nearby Objects", "Distant Objects", "Ne RF Variant", "Aliasing", "Nonlinear Scene Parameterization", "Unbounded Scenes", "Real World Scenes", "Computer Vision", "Three Dimensional Displays", "Nonlinear Distortion", "Machine Learning", "Cameras", "Rendering Computer Graphics", "Sensors" ], "authors": [ { "affiliation": "Google", "fullName": "Jonathan T. Barron", "givenName": "Jonathan T.", "surname": "Barron", "__typename": "ArticleAuthorType" }, { "affiliation": "Google", "fullName": "Ben Mildenhall", "givenName": "Ben", "surname": "Mildenhall", "__typename": "ArticleAuthorType" }, { "affiliation": "Google", "fullName": "Dor Verbin", "givenName": "Dor", "surname": "Verbin", "__typename": "ArticleAuthorType" }, { "affiliation": "Google", "fullName": "Pratul P. 
Srinivasan", "givenName": "Pratul P.", "surname": "Srinivasan", "__typename": "ArticleAuthorType" }, { "affiliation": "Google", "fullName": "Peter Hedman", "givenName": "Peter", "surname": "Hedman", "__typename": "ArticleAuthorType" } ], "idPrefix": "cvpr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2022-06-01T00:00:00", "pubType": "proceedings", "pages": "5460-5469", "year": "2022", "issn": null, "isbn": "978-1-6654-6946-3", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [ { "id": "1H0OpdWeJ68", "name": "pcvpr202269460-09878829s1-mm_694600f460.zip", "size": "2.26 MB", "location": "https://www.computer.org/csdl/api/v1/extra/pcvpr202269460-09878829s1-mm_694600f460.zip", "__typename": "WebExtraType" } ], "adjacentArticles": { "previous": { "fno": "694600f449", "articleId": "1H0NNhIDNp6", "__typename": "AdjacentArticleType" }, "next": { "fno": "694600f470", "articleId": "1H1mpdxQEq4", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/iccv/2021/2812/0/281200f835", "title": "Mip-NeRF: A Multiscale Representation for Anti-Aliasing Neural Radiance Fields", "doi": null, "abstractUrl": "/proceedings-article/iccv/2021/281200f835/1BmEBA2TORW", "parentPublication": { "id": "proceedings/iccv/2021/2812/0", "title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2022/8563/0/09859817", "title": "Omni-NeRF: Neural Radiance Field from 360&#x00B0; Image Captures", "doi": null, "abstractUrl": "/proceedings-article/icme/2022/09859817/1G9DIJAkSzK", "parentPublication": { "id": "proceedings/icme/2022/8563/0", "title": "2022 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/11/09872532", 
"title": "FoV-NeRF: Foveated Neural Radiance Fields for Virtual Reality", "doi": null, "abstractUrl": "/journal/tg/2022/11/09872532/1GhRVOIAS4g", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600s8332", "title": "NeRF-Editing: Geometry Editing of Neural Radiance Fields", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600s8332/1H0Nn4Xgsne", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600p5170", "title": "Aug-NeRF: Training Stronger Neural Radiance Fields with Triple-Level Physically-Grounded Augmentations", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600p5170/1H1jhjLRpRu", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600m2851", "title": "Deblur-NeRF: Neural Radiance Fields from Blurry Images", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600m2851/1H1kFc1BMLS", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600f428", "title": "Point-NeRF: Point-based Neural Radiance Fields", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600f428/1H1mrGLgvra", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and 
Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2023/9346/0/934600f755", "title": "X-NeRF: Explicit Neural Radiance Field for Multi-Scene 360<sup>&#x00B0;</sup> Insufficient RGB-D Views", "doi": null, "abstractUrl": "/proceedings-article/wacv/2023/934600f755/1KxV7reNb6E", "parentPublication": { "id": "proceedings/wacv/2023/9346/0", "title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2021/4509/0/450900h206", "title": "NeRF in the Wild: Neural Radiance Fields for Unconstrained Photo Collections", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2021/450900h206/1yeLpJjmuwE", "parentPublication": { "id": "proceedings/cvpr/2021/4509/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2021/4509/0/450900k0313", "title": "D-NeRF: Neural Radiance Fields for Dynamic Scenes", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2021/450900k0313/1yeLrBwGgik", "parentPublication": { "id": "proceedings/cvpr/2021/4509/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1H1gVMlkl32", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "acronym": "cvpr", "groupId": "1000147", "volume": "0", "displayVolume": "0", "year": "2022", "__typename": "ProceedingType" }, "article": { "id": "1H0OvIHTU7S", "doi": "10.1109/CVPR52688.2022.01256", "title": "EfficientNeRF - Efficient Neural Radiance Fields", "normalizedTitle": "EfficientNeRF - Efficient Neural Radiance Fields", "abstract": "Neural Radiance Fields (NeRF) has been wildly applied to various tasks for its high-quality representation of 3D scenes. It takes long per-scene training time and per-image testing time. In this paper, we present EfficientNeRF as an efficient NeRF-based method to represent 3D scene and synthesize novel-view images. Although several ways exist to accelerate the training or testing process, it is still difficult to much reduce time for both phases simultaneously. We analyze the density and weight distribution of the sampled points then propose valid and pivotal sampling at the coarse and fine stage, respectively, to significantly improve sampling efficiency. In addition, we design a novel data structure to cache the whole scene during testing to accelerate the rendering speed. Overall, our method can reduce over 88&#x0025; of training time, reach rendering speed of over 200 FPS, while still achieving competitive accuracy. Experiments prove that our method promotes the practicality of NeRF in the real world and enables many applications. The code is available in https://github.com/dvlabresearch/EfficientNeRF.", "abstracts": [ { "abstractType": "Regular", "content": "Neural Radiance Fields (NeRF) has been wildly applied to various tasks for its high-quality representation of 3D scenes. It takes long per-scene training time and per-image testing time. In this paper, we present EfficientNeRF as an efficient NeRF-based method to represent 3D scene and synthesize novel-view images. 
Although several ways exist to accelerate the training or testing process, it is still difficult to much reduce time for both phases simultaneously. We analyze the density and weight distribution of the sampled points then propose valid and pivotal sampling at the coarse and fine stage, respectively, to significantly improve sampling efficiency. In addition, we design a novel data structure to cache the whole scene during testing to accelerate the rendering speed. Overall, our method can reduce over 88&#x0025; of training time, reach rendering speed of over 200 FPS, while still achieving competitive accuracy. Experiments prove that our method promotes the practicality of NeRF in the real world and enables many applications. The code is available in https://github.com/dvlabresearch/EfficientNeRF.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Neural Radiance Fields (NeRF) has been wildly applied to various tasks for its high-quality representation of 3D scenes. It takes long per-scene training time and per-image testing time. In this paper, we present EfficientNeRF as an efficient NeRF-based method to represent 3D scene and synthesize novel-view images. Although several ways exist to accelerate the training or testing process, it is still difficult to much reduce time for both phases simultaneously. We analyze the density and weight distribution of the sampled points then propose valid and pivotal sampling at the coarse and fine stage, respectively, to significantly improve sampling efficiency. In addition, we design a novel data structure to cache the whole scene during testing to accelerate the rendering speed. Overall, our method can reduce over 88% of training time, reach rendering speed of over 200 FPS, while still achieving competitive accuracy. Experiments prove that our method promotes the practicality of NeRF in the real world and enables many applications. 
The code is available in https://github.com/dvlabresearch/EfficientNeRF.", "fno": "694600m2892", "keywords": [ "Data Structures", "Image Representation", "Learning Artificial Intelligence", "Neural Nets", "Rendering Computer Graphics", "Sampling Methods", "Efficient Ne RF Efficient", "Neural Radiance Fields", "High Quality Representation", "Per Scene Training Time", "Per Image Testing Time", "Efficient Ne RF Based Method", "Novel View Images", "Density", "Weight Distribution", "Sampled Points", "Pivotal Sampling", "Coarse Stage", "Fine Stage", "Sampling Efficiency", "Rendering Speed", "Training", "Computer Vision", "Three Dimensional Displays", "Codes", "Life Estimation", "Rendering Computer Graphics", "Data Structures" ], "authors": [ { "affiliation": "The Chinese University of Hong Kong", "fullName": "Tao Hu", "givenName": "Tao", "surname": "Hu", "__typename": "ArticleAuthorType" }, { "affiliation": "SmartMore", "fullName": "Shu Liu", "givenName": "Shu", "surname": "Liu", "__typename": "ArticleAuthorType" }, { "affiliation": "The Chinese University of Hong Kong", "fullName": "Yilun Chen", "givenName": "Yilun", "surname": "Chen", "__typename": "ArticleAuthorType" }, { "affiliation": "The Chinese University of Hong Kong", "fullName": "Tiancheng Shen", "givenName": "Tiancheng", "surname": "Shen", "__typename": "ArticleAuthorType" }, { "affiliation": "The Chinese University of Hong Kong", "fullName": "Jiaya Jia", "givenName": "Jiaya", "surname": "Jia", "__typename": "ArticleAuthorType" } ], "idPrefix": "cvpr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2022-06-01T00:00:00", "pubType": "proceedings", "pages": "12892-12901", "year": "2022", "issn": null, "isbn": "978-1-6654-6946-3", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [ { "id": "1H0OvEjGAo0", "name": "pcvpr202269460-09879447s1-mm_694600m2892.zip", "size": "8.36 MB", "location": 
"https://www.computer.org/csdl/api/v1/extra/pcvpr202269460-09879447s1-mm_694600m2892.zip", "__typename": "WebExtraType" } ], "adjacentArticles": { "previous": { "fno": "694600m2882", "articleId": "1H0Nhx2wMqQ", "__typename": "AdjacentArticleType" }, "next": { "fno": "694600m2902", "articleId": "1H0MVoVQNBm", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/iccv/2021/2812/0/281200f835", "title": "Mip-NeRF: A Multiscale Representation for Anti-Aliasing Neural Radiance Fields", "doi": null, "abstractUrl": "/proceedings-article/iccv/2021/281200f835/1BmEBA2TORW", "parentPublication": { "id": "proceedings/iccv/2021/2812/0", "title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2021/2812/0/281200f826", "title": "Self-Calibrating Neural Radiance Fields", "doi": null, "abstractUrl": "/proceedings-article/iccv/2021/281200f826/1BmEiCkWfU4", "parentPublication": { "id": "proceedings/iccv/2021/2812/0", "title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2021/2812/0/281200o4315", "title": "KiloNeRF: Speeding up Neural Radiance Fields with Thousands of Tiny MLPs", "doi": null, "abstractUrl": "/proceedings-article/iccv/2021/281200o4315/1BmFPB7IKHK", "parentPublication": { "id": "proceedings/iccv/2021/2812/0", "title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2021/2812/0/281200f721", "title": "BARF: Bundle-Adjusting Neural Radiance Fields", "doi": null, "abstractUrl": "/proceedings-article/iccv/2021/281200f721/1BmJaz9pbig", "parentPublication": { "id": "proceedings/iccv/2021/2812/0", "title": "2021 IEEE/CVF International 
Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600s8332", "title": "NeRF-Editing: Geometry Editing of Neural Radiance Fields", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600s8332/1H0Nn4Xgsne", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600f460", "title": "Mip-NeRF 360: Unbounded Anti-Aliased Neural Radiance Fields", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600f460/1H0OphoghaM", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600f428", "title": "Point-NeRF: Point-based Neural Radiance Fields", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600f428/1H1mrGLgvra", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600s8388", "title": "NeRFReN: Neural Radiance Fields with Reflections", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600s8388/1H1nhdo3vFe", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2022/5670/0/567000a606", "title": "Cross-Spectral Neural Radiance Fields", "doi": null, "abstractUrl": 
"/proceedings-article/3dv/2022/567000a606/1KYsqz6IyGI", "parentPublication": { "id": "proceedings/3dv/2022/5670/0", "title": "2022 International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2023/9346/0/934600a795", "title": "Beyond RGB: Scene-Property Synthesis with Neural Radiance Fields", "doi": null, "abstractUrl": "/proceedings-article/wacv/2023/934600a795/1KxVhi7yhR6", "parentPublication": { "id": "proceedings/wacv/2023/9346/0", "title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1H1gVMlkl32", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "acronym": "cvpr", "groupId": "1000147", "volume": "0", "displayVolume": "0", "year": "2022", "__typename": "ProceedingType" }, "article": { "id": "1H1jhjLRpRu", "doi": "10.1109/CVPR52688.2022.01476", "title": "Aug-NeRF: Training Stronger Neural Radiance Fields with Triple-Level Physically-Grounded Augmentations", "normalizedTitle": "Aug-NeRF: Training Stronger Neural Radiance Fields with Triple-Level Physically-Grounded Augmentations", "abstract": "Neural Radiance Field (NeRF) regresses a neural param-eterized scene by differentially rendering multi-view images with ground-truth supervision. However, when interpolating novel views, NeRF often yields inconsistent and visually non-smooth geometric results, which we consider as a generalization gap between seen and unseen views. Recent advances in convolutional neural networks have demonstrated the promise of advanced robust data augmentations, either random or learned, in enhancing both in-distribution and out-of-distribution generalization. Inspired by that, we propose Augmented NeRF (Aug-NeRF), which for the first time brings the power of robust data augmentations into regular-izing the NeRF training. Particularly, our proposal learns to seamlessly blend worst-case perturbations into three distinct levels of the NeRF pipeline with physical grounds, including (1) the input coordinates, to simulate imprecise camera parameters at image capture; (2) intermediate features, to smoothen the intrinsic feature manifold; and (3) pre-rendering output, to account for the potential degra-dation factors in the multi-view image supervision. Extensive results demonstrate that Aug-NeRF effectively boosts NeRF performance in both novel view synthesis (up to 1.5dB PSNR gain) and underlying geometry reconstruction. 
Fur-thermore, thanks to the implicit smooth prior injected by the triple-level augmentations, Aug-NeRF can even recover scenes from heavily corrupted images, a highly challenging setting untackled before. Our codes are available in https://github.com/VITA-Group/Aug-NeRF.", "abstracts": [ { "abstractType": "Regular", "content": "Neural Radiance Field (NeRF) regresses a neural param-eterized scene by differentially rendering multi-view images with ground-truth supervision. However, when interpolating novel views, NeRF often yields inconsistent and visually non-smooth geometric results, which we consider as a generalization gap between seen and unseen views. Recent advances in convolutional neural networks have demonstrated the promise of advanced robust data augmentations, either random or learned, in enhancing both in-distribution and out-of-distribution generalization. Inspired by that, we propose Augmented NeRF (Aug-NeRF), which for the first time brings the power of robust data augmentations into regular-izing the NeRF training. Particularly, our proposal learns to seamlessly blend worst-case perturbations into three distinct levels of the NeRF pipeline with physical grounds, including (1) the input coordinates, to simulate imprecise camera parameters at image capture; (2) intermediate features, to smoothen the intrinsic feature manifold; and (3) pre-rendering output, to account for the potential degra-dation factors in the multi-view image supervision. Extensive results demonstrate that Aug-NeRF effectively boosts NeRF performance in both novel view synthesis (up to 1.5dB PSNR gain) and underlying geometry reconstruction. Fur-thermore, thanks to the implicit smooth prior injected by the triple-level augmentations, Aug-NeRF can even recover scenes from heavily corrupted images, a highly challenging setting untackled before. 
Our codes are available in https://github.com/VITA-Group/Aug-NeRF.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Neural Radiance Field (NeRF) regresses a neural param-eterized scene by differentially rendering multi-view images with ground-truth supervision. However, when interpolating novel views, NeRF often yields inconsistent and visually non-smooth geometric results, which we consider as a generalization gap between seen and unseen views. Recent advances in convolutional neural networks have demonstrated the promise of advanced robust data augmentations, either random or learned, in enhancing both in-distribution and out-of-distribution generalization. Inspired by that, we propose Augmented NeRF (Aug-NeRF), which for the first time brings the power of robust data augmentations into regular-izing the NeRF training. Particularly, our proposal learns to seamlessly blend worst-case perturbations into three distinct levels of the NeRF pipeline with physical grounds, including (1) the input coordinates, to simulate imprecise camera parameters at image capture; (2) intermediate features, to smoothen the intrinsic feature manifold; and (3) pre-rendering output, to account for the potential degra-dation factors in the multi-view image supervision. Extensive results demonstrate that Aug-NeRF effectively boosts NeRF performance in both novel view synthesis (up to 1.5dB PSNR gain) and underlying geometry reconstruction. Fur-thermore, thanks to the implicit smooth prior injected by the triple-level augmentations, Aug-NeRF can even recover scenes from heavily corrupted images, a highly challenging setting untackled before. 
Our codes are available in https://github.com/VITA-Group/Aug-NeRF.", "fno": "694600p5170", "keywords": [ "Cameras", "Computational Geometry", "Filtering Theory", "Geometry", "Image Processing", "Image Reconstruction", "Interpolation", "Learning Artificial Intelligence", "Neural Nets", "Rendering Computer Graphics", "Stability", "Video Coding", "Training Stronger Neural Radiance Fields", "Triple Level Physically Grounded Augmentations", "Neural Radiance Field", "Neural Param Eterized Scene", "Multiview Images", "Ground Truth Supervision", "Interpolating Novel Views", "Nonsmooth Geometric Results", "Unseen Views", "Convolutional Neural Networks", "Advanced Robust Data Augmentations", "Augmented Ne RF", "Ne RF Training", "Ne RF Pipeline", "Physical Grounds", "Multiview Image Supervision", "Ne RF Performance", "View Synthesis", "Triple Level Augmentations", "Noise Figure 1 5 D B", "Training", "Manifolds", "Interpolation", "Perturbation Methods", "Pipelines", "Image Capture", "Rendering Computer Graphics" ], "authors": [ { "affiliation": "The University of Texas at Austin", "fullName": "Tianlong Chen", "givenName": "Tianlong", "surname": "Chen", "__typename": "ArticleAuthorType" }, { "affiliation": "The University of Texas at Austin", "fullName": "Peihao Wang", "givenName": "Peihao", "surname": "Wang", "__typename": "ArticleAuthorType" }, { "affiliation": "The University of Texas at Austin", "fullName": "Zhiwen Fan", "givenName": "Zhiwen", "surname": "Fan", "__typename": "ArticleAuthorType" }, { "affiliation": "The University of Texas at Austin", "fullName": "Zhangyang Wang", "givenName": "Zhangyang", "surname": "Wang", "__typename": "ArticleAuthorType" } ], "idPrefix": "cvpr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2022-06-01T00:00:00", "pubType": "proceedings", "pages": "15170-15181", "year": "2022", "issn": null, "isbn": "978-1-6654-6946-3", "notes": null, "notesType": null, "__typename": "ArticleType" 
}, "webExtras": [ { "id": "1H1jhee7blm", "name": "pcvpr202269460-09878646s1-mm_694600p5170.zip", "size": "956 kB", "location": "https://www.computer.org/csdl/api/v1/extra/pcvpr202269460-09878646s1-mm_694600p5170.zip", "__typename": "WebExtraType" } ], "adjacentArticles": { "previous": { "fno": "694600p5159", "articleId": "1H1iv8wsVpK", "__typename": "AdjacentArticleType" }, "next": { "fno": "694600p5182", "articleId": "1H0LwsiwnLy", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icme/2022/8563/0/09859817", "title": "Omni-NeRF: Neural Radiance Field from 360&#x00B0; Image Captures", "doi": null, "abstractUrl": "/proceedings-article/icme/2022/09859817/1G9DIJAkSzK", "parentPublication": { "id": "proceedings/icme/2022/8563/0", "title": "2022 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/11/09872532", "title": "FoV-NeRF: Foveated Neural Radiance Fields for Virtual Reality", "doi": null, "abstractUrl": "/journal/tg/2022/11/09872532/1GhRVOIAS4g", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600s8332", "title": "NeRF-Editing: Geometry Editing of Neural Radiance Fields", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600s8332/1H0Nn4Xgsne", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600f460", "title": "Mip-NeRF 360: Unbounded Anti-Aliased Neural Radiance Fields", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600f460/1H0OphoghaM", "parentPublication": 
{ "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600f428", "title": "Point-NeRF: Point-based Neural Radiance Fields", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600f428/1H1mrGLgvra", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600d825", "title": "CLIP-NeRF: Text-and-Image Driven Manipulation of Neural Radiance Fields", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600d825/1H1muC7wD0Q", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2023/9346/0/934600a837", "title": "Ev-NeRF: Event Based Neural Radiance Field", "doi": null, "abstractUrl": "/proceedings-article/wacv/2023/934600a837/1L8qjBjOpoc", "parentPublication": { "id": "proceedings/wacv/2023/9346/0", "title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2021/4509/0/450900h206", "title": "NeRF in the Wild: Neural Radiance Fields for Unconstrained Photo Collections", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2021/450900h206/1yeLpJjmuwE", "parentPublication": { "id": "proceedings/cvpr/2021/4509/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2021/4509/0/450900k0313", 
"title": "D-NeRF: Neural Radiance Fields for Dynamic Scenes", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2021/450900k0313/1yeLrBwGgik", "parentPublication": { "id": "proceedings/cvpr/2021/4509/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2021/2688/0/268800a962", "title": "FiG-NeRF: Figure-Ground Neural Radiance Fields for 3D Object Category Modelling", "doi": null, "abstractUrl": "/proceedings-article/3dv/2021/268800a962/1zWEppEX9NS", "parentPublication": { "id": "proceedings/3dv/2021/2688/0", "title": "2021 International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1H1gVMlkl32", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "acronym": "cvpr", "groupId": "1000147", "volume": "0", "displayVolume": "0", "year": "2022", "__typename": "ProceedingType" }, "article": { "id": "1H1mrGLgvra", "doi": "10.1109/CVPR52688.2022.00536", "title": "Point-NeRF: Point-based Neural Radiance Fields", "normalizedTitle": "Point-NeRF: Point-based Neural Radiance Fields", "abstract": "Volumetric neural rendering methods like NeRF [34] generate high-quality view synthesis results but are optimized per-scene leading to prohibitive reconstruction time. On the other hand, deep multi-view stereo methods can quickly reconstruct scene geometry via direct network inference. Point-NeRF combines the advantages of these two approaches by using neural 3D point clouds, with associated neural features, to model a radiance field. Point-NeRF can be rendered efficiently by aggregating neural point features near scene surfaces, in a ray marching-based rendering pipeline. Moreover, Point-NeRF can be initialized via direct inference of a pre-trained deep network to produce a neural point cloud; this point cloud can be finetuned to surpass the visual quality of NeRF with 30&#x00D7; faster training time. Point-NeRF can be combined with other 3D re-construction methods and handles the errors and outliers in such methods via a novel pruning and growing mechanism.", "abstracts": [ { "abstractType": "Regular", "content": "Volumetric neural rendering methods like NeRF [34] generate high-quality view synthesis results but are optimized per-scene leading to prohibitive reconstruction time. On the other hand, deep multi-view stereo methods can quickly reconstruct scene geometry via direct network inference. Point-NeRF combines the advantages of these two approaches by using neural 3D point clouds, with associated neural features, to model a radiance field. 
Point-NeRF can be rendered efficiently by aggregating neural point features near scene surfaces, in a ray marching-based rendering pipeline. Moreover, Point-NeRF can be initialized via direct inference of a pre-trained deep network to produce a neural point cloud; this point cloud can be finetuned to surpass the visual quality of NeRF with 30&#x00D7; faster training time. Point-NeRF can be combined with other 3D re-construction methods and handles the errors and outliers in such methods via a novel pruning and growing mechanism.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Volumetric neural rendering methods like NeRF [34] generate high-quality view synthesis results but are optimized per-scene leading to prohibitive reconstruction time. On the other hand, deep multi-view stereo methods can quickly reconstruct scene geometry via direct network inference. Point-NeRF combines the advantages of these two approaches by using neural 3D point clouds, with associated neural features, to model a radiance field. Point-NeRF can be rendered efficiently by aggregating neural point features near scene surfaces, in a ray marching-based rendering pipeline. Moreover, Point-NeRF can be initialized via direct inference of a pre-trained deep network to produce a neural point cloud; this point cloud can be finetuned to surpass the visual quality of NeRF with 30× faster training time. 
Point-NeRF can be combined with other 3D re-construction methods and handles the errors and outliers in such methods via a novel pruning and growing mechanism.", "fno": "694600f428", "keywords": [ "Geometry", "Image Reconstruction", "Learning Artificial Intelligence", "Neural Nets", "Rendering Computer Graphics", "Stereo Image Processing", "Point Ne RF", "Point Based Neural Radiance Fields", "Volumetric Neural Rendering Methods", "High Quality View Synthesis Results", "Deep Multiview Stereo Methods", "Neural 3 D Point Clouds", "Associated Neural Features", "Neural Point Features", "Neural Point Cloud", "Point Cloud Compression", "Training", "Surface Reconstruction", "Solid Modeling", "Three Dimensional Displays", "Pipelines", "Rendering Computer Graphics" ], "authors": [ { "affiliation": "University of Southern California", "fullName": "Qiangeng Xu", "givenName": "Qiangeng", "surname": "Xu", "__typename": "ArticleAuthorType" }, { "affiliation": "Adobe Research", "fullName": "Zexiang Xu", "givenName": "Zexiang", "surname": "Xu", "__typename": "ArticleAuthorType" }, { "affiliation": "Adobe Research", "fullName": "Julien Philip", "givenName": "Julien", "surname": "Philip", "__typename": "ArticleAuthorType" }, { "affiliation": "Adobe Research", "fullName": "Sai Bi", "givenName": "Sai", "surname": "Bi", "__typename": "ArticleAuthorType" }, { "affiliation": "Adobe Research", "fullName": "Zhixin Shu", "givenName": "Zhixin", "surname": "Shu", "__typename": "ArticleAuthorType" }, { "affiliation": "Adobe Research", "fullName": "Kalyan Sunkavalli", "givenName": "Kalyan", "surname": "Sunkavalli", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Southern California", "fullName": "Ulrich Neumann", "givenName": "Ulrich", "surname": "Neumann", "__typename": "ArticleAuthorType" } ], "idPrefix": "cvpr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2022-06-01T00:00:00", "pubType": "proceedings", "pages": 
"5428-5438", "year": "2022", "issn": null, "isbn": "978-1-6654-6946-3", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [ { "id": "1H1mrD4LpJe", "name": "pcvpr202269460-09880452s1-mm_694600f428.zip", "size": "5.57 MB", "location": "https://www.computer.org/csdl/api/v1/extra/pcvpr202269460-09880452s1-mm_694600f428.zip", "__typename": "WebExtraType" } ], "adjacentArticles": { "previous": { "fno": "694600f418", "articleId": "1H1mAakqDhm", "__typename": "AdjacentArticleType" }, "next": { "fno": "694600f439", "articleId": "1H1hvFSItCU", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/iccv/2021/2812/0/281200f835", "title": "Mip-NeRF: A Multiscale Representation for Anti-Aliasing Neural Radiance Fields", "doi": null, "abstractUrl": "/proceedings-article/iccv/2021/281200f835/1BmEBA2TORW", "parentPublication": { "id": "proceedings/iccv/2021/2812/0", "title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/11/09872532", "title": "FoV-NeRF: Foveated Neural Radiance Fields for Virtual Reality", "doi": null, "abstractUrl": "/journal/tg/2022/11/09872532/1GhRVOIAS4g", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600s8332", "title": "NeRF-Editing: Geometry Editing of Neural Radiance Fields", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600s8332/1H0Nn4Xgsne", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600f460", "title": "Mip-NeRF 360: 
Unbounded Anti-Aliased Neural Radiance Fields", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600f460/1H0OphoghaM", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600p5170", "title": "Aug-NeRF: Training Stronger Neural Radiance Fields with Triple-Level Physically-Grounded Augmentations", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600p5170/1H1jhjLRpRu", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600f481", "title": "Ref-NeRF: Structured View-Dependent Appearance for Neural Radiance Fields", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600f481/1H1jnh582jK", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600m2851", "title": "Deblur-NeRF: Neural Radiance Fields from Blurry Images", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600m2851/1H1kFc1BMLS", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2023/9346/0/934600f755", "title": "X-NeRF: Explicit Neural Radiance Field for Multi-Scene 360<sup>&#x00B0;</sup> Insufficient RGB-D Views", "doi": null, "abstractUrl": "/proceedings-article/wacv/2023/934600f755/1KxV7reNb6E", "parentPublication": { 
"id": "proceedings/wacv/2023/9346/0", "title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2021/4509/0/450900h206", "title": "NeRF in the Wild: Neural Radiance Fields for Unconstrained Photo Collections", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2021/450900h206/1yeLpJjmuwE", "parentPublication": { "id": "proceedings/cvpr/2021/4509/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2021/4509/0/450900k0313", "title": "D-NeRF: Neural Radiance Fields for Dynamic Scenes", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2021/450900k0313/1yeLrBwGgik", "parentPublication": { "id": "proceedings/cvpr/2021/4509/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1H1gVMlkl32", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "acronym": "cvpr", "groupId": "1000147", "volume": "0", "displayVolume": "0", "year": "2022", "__typename": "ProceedingType" }, "article": { "id": "1H1muC7wD0Q", "doi": "10.1109/CVPR52688.2022.00381", "title": "CLIP-NeRF: Text-and-Image Driven Manipulation of Neural Radiance Fields", "normalizedTitle": "CLIP-NeRF: Text-and-Image Driven Manipulation of Neural Radiance Fields", "abstract": "We present CLIP-NeRF, a multi-modal 3D object manipulation method for neural radiance fields (NeRF). By leveraging the joint language-image embedding space of the recent Contrastive Language-Image Pre-Training (CLIP) model, we propose a unified framework that allows manip-ulating NeRF in a user-friendly way, using either a short text prompt or an exemplar image. Specifically, to combine the novel view synthesis capability of NeRF and the controllable manipulation ability of latent representations from generative models, we introduce a disentangled conditional NeRF architecture that allows individual control over both shape and appearance. This is achieved by performing the shape conditioning via applying a learned deformation field to the positional encoding and deferring color conditioning to the volumetric rendering stage. To bridge this disentangled latent representation to the CLIP embedding, we design two code mappers that take a CLIP embedding as input and update the latent codes to reflect the targeted editing. The mappers are trained with a CLIP-based matching loss to ensure the manipulation accuracy. Furthermore, we propose an inverse optimization method that accurately projects an input image to the latent codes for manipulation to enable editing on real images. 
We evaluate our approach by extensive experiments on a variety of text prompts and exemplar images and also provide an intuitive interface for interactive editing.", "abstracts": [ { "abstractType": "Regular", "content": "We present CLIP-NeRF, a multi-modal 3D object manipulation method for neural radiance fields (NeRF). By leveraging the joint language-image embedding space of the recent Contrastive Language-Image Pre-Training (CLIP) model, we propose a unified framework that allows manip-ulating NeRF in a user-friendly way, using either a short text prompt or an exemplar image. Specifically, to combine the novel view synthesis capability of NeRF and the controllable manipulation ability of latent representations from generative models, we introduce a disentangled conditional NeRF architecture that allows individual control over both shape and appearance. This is achieved by performing the shape conditioning via applying a learned deformation field to the positional encoding and deferring color conditioning to the volumetric rendering stage. To bridge this disentangled latent representation to the CLIP embedding, we design two code mappers that take a CLIP embedding as input and update the latent codes to reflect the targeted editing. The mappers are trained with a CLIP-based matching loss to ensure the manipulation accuracy. Furthermore, we propose an inverse optimization method that accurately projects an input image to the latent codes for manipulation to enable editing on real images. We evaluate our approach by extensive experiments on a variety of text prompts and exemplar images and also provide an intuitive interface for interactive editing.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We present CLIP-NeRF, a multi-modal 3D object manipulation method for neural radiance fields (NeRF). 
By leveraging the joint language-image embedding space of the recent Contrastive Language-Image Pre-Training (CLIP) model, we propose a unified framework that allows manip-ulating NeRF in a user-friendly way, using either a short text prompt or an exemplar image. Specifically, to combine the novel view synthesis capability of NeRF and the controllable manipulation ability of latent representations from generative models, we introduce a disentangled conditional NeRF architecture that allows individual control over both shape and appearance. This is achieved by performing the shape conditioning via applying a learned deformation field to the positional encoding and deferring color conditioning to the volumetric rendering stage. To bridge this disentangled latent representation to the CLIP embedding, we design two code mappers that take a CLIP embedding as input and update the latent codes to reflect the targeted editing. The mappers are trained with a CLIP-based matching loss to ensure the manipulation accuracy. Furthermore, we propose an inverse optimization method that accurately projects an input image to the latent codes for manipulation to enable editing on real images. 
We evaluate our approach by extensive experiments on a variety of text prompts and exemplar images and also provide an intuitive interface for interactive editing.", "fno": "694600d825", "keywords": [ "Human Computer Interaction", "Image Representation", "Learning Artificial Intelligence", "Manipulators", "Medical Image Processing", "Optimisation", "Rendering Computer Graphics", "Solid Modelling", "CLIP Ne RF", "Neural Radiance Fields", "Multimodal 3 D Object Manipulation Method", "Joint Language Image", "Recent Contrastive Language Image Pre Training Model", "Manip Ulating Ne RF", "Short Text Prompt", "Exemplar Image", "View Synthesis Capability", "Controllable Manipulation Ability", "Latent Representations", "Generative Models", "Disentangled Conditional Ne RF Architecture", "Individual Control", "Shape Conditioning", "Learned Deformation Field", "Positional Encoding", "Deferring Color Conditioning", "Disentangled Latent Representation", "CLIP Embedding", "Latent Codes", "CLIP Based", "Manipulation Accuracy", "Input Image", "Text Prompts", "Codes", "Three Dimensional Displays", "Shape", "Image Color Analysis", "Training Data", "Optimization Methods", "Rendering Computer Graphics" ], "authors": [ { "affiliation": "City University of Hong Kong", "fullName": "Can Wang", "givenName": "Can", "surname": "Wang", "__typename": "ArticleAuthorType" }, { "affiliation": "Snap Inc", "fullName": "Menglei Chai", "givenName": "Menglei", "surname": "Chai", "__typename": "ArticleAuthorType" }, { "affiliation": "USC Institute for Creative Technologies", "fullName": "Mingming He", "givenName": "Mingming", "surname": "He", "__typename": "ArticleAuthorType" }, { "affiliation": "Microsoft Cloud AI", "fullName": "Dongdong Chen", "givenName": "Dongdong", "surname": "Chen", "__typename": "ArticleAuthorType" }, { "affiliation": "City University of Hong Kong", "fullName": "Jing Liao", "givenName": "Jing", "surname": "Liao", "__typename": "ArticleAuthorType" } ], "idPrefix": "cvpr", 
"isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2022-06-01T00:00:00", "pubType": "proceedings", "pages": "3825-3834", "year": "2022", "issn": null, "isbn": "978-1-6654-6946-3", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [ { "id": "1H1muykuLjq", "name": "pcvpr202269460-09879953s1-mm_694600d825.zip", "size": "9.25 MB", "location": "https://www.computer.org/csdl/api/v1/extra/pcvpr202269460-09879953s1-mm_694600d825.zip", "__typename": "WebExtraType" } ], "adjacentArticles": { "previous": { "fno": "694600d815", "articleId": "1H0KCZGVSPm", "__typename": "AdjacentArticleType" }, "next": { "fno": "694600d835", "articleId": "1H0Lx9QOs9i", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/iccv/2021/2812/0/281200f753", "title": "Editing Conditional Radiance Fields", "doi": null, "abstractUrl": "/proceedings-article/iccv/2021/281200f753/1BmLkbx0k6c", "parentPublication": { "id": "proceedings/iccv/2021/2812/0", "title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/11/09872532", "title": "FoV-NeRF: Foveated Neural Radiance Fields for Virtual Reality", "doi": null, "abstractUrl": "/journal/tg/2022/11/09872532/1GhRVOIAS4g", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600s8332", "title": "NeRF-Editing: Geometry Editing of Neural Radiance Fields", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600s8332/1H0Nn4Xgsne", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, 
"__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600f460", "title": "Mip-NeRF 360: Unbounded Anti-Aliased Neural Radiance Fields", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600f460/1H0OphoghaM", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600p5170", "title": "Aug-NeRF: Training Stronger Neural Radiance Fields with Triple-Level Physically-Grounded Augmentations", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600p5170/1H1jhjLRpRu", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600f481", "title": "Ref-NeRF: Structured View-Dependent Appearance for Neural Radiance Fields", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600f481/1H1jnh582jK", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600f428", "title": "Point-NeRF: Point-based Neural Radiance Fields", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600f428/1H1mrGLgvra", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2023/9346/0/934600e329", "title": "Control-NeRF: Editable Feature Volumes for Scene Rendering and Manipulation", "doi": null, "abstractUrl": 
"/proceedings-article/wacv/2023/934600e329/1L8qzXVyRlS", "parentPublication": { "id": "proceedings/wacv/2023/9346/0", "title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2021/4509/0/450900h206", "title": "NeRF in the Wild: Neural Radiance Fields for Unconstrained Photo Collections", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2021/450900h206/1yeLpJjmuwE", "parentPublication": { "id": "proceedings/cvpr/2021/4509/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2021/4509/0/450900k0313", "title": "D-NeRF: Neural Radiance Fields for Dynamic Scenes", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2021/450900k0313/1yeLrBwGgik", "parentPublication": { "id": "proceedings/cvpr/2021/4509/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1yeHGyRsuys", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "acronym": "cvpr", "groupId": "1000147", "volume": "0", "displayVolume": "0", "year": "2021", "__typename": "ProceedingType" }, "article": { "id": "1yeLpJjmuwE", "doi": "10.1109/CVPR46437.2021.00713", "title": "NeRF in the Wild: Neural Radiance Fields for Unconstrained Photo Collections", "normalizedTitle": "NeRF in the Wild: Neural Radiance Fields for Unconstrained Photo Collections", "abstract": "We present a learning-based method for synthesizing novel views of complex scenes using only unstructured collections of in-the-wild photographs. We build on Neural Radiance Fields (NeRF), which uses the weights of a multi-layer perceptron to model the density and color of a scene as a function of 3D coordinates. While NeRF works well on images of static subjects captured under controlled settings, it is incapable of modeling many ubiquitous, real-world phenomena in uncontrolled images, such as variable illumination or transient occluders. We introduce a series of extensions to NeRF to address these issues, thereby enabling accurate reconstructions from unstructured image collections taken from the internet. We apply our system, dubbed NeRF-W, to internet photo collections of famous landmarks, and demonstrate temporally consistent novel view renderings that are significantly closer to photorealism than the prior state of the art.", "abstracts": [ { "abstractType": "Regular", "content": "We present a learning-based method for synthesizing novel views of complex scenes using only unstructured collections of in-the-wild photographs. We build on Neural Radiance Fields (NeRF), which uses the weights of a multi-layer perceptron to model the density and color of a scene as a function of 3D coordinates. 
While NeRF works well on images of static subjects captured under controlled settings, it is incapable of modeling many ubiquitous, real-world phenomena in uncontrolled images, such as variable illumination or transient occluders. We introduce a series of extensions to NeRF to address these issues, thereby enabling accurate reconstructions from unstructured image collections taken from the internet. We apply our system, dubbed NeRF-W, to internet photo collections of famous landmarks, and demonstrate temporally consistent novel view renderings that are significantly closer to photorealism than the prior state of the art.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We present a learning-based method for synthesizing novel views of complex scenes using only unstructured collections of in-the-wild photographs. We build on Neural Radiance Fields (NeRF), which uses the weights of a multi-layer perceptron to model the density and color of a scene as a function of 3D coordinates. While NeRF works well on images of static subjects captured under controlled settings, it is incapable of modeling many ubiquitous, real-world phenomena in uncontrolled images, such as variable illumination or transient occluders. We introduce a series of extensions to NeRF to address these issues, thereby enabling accurate reconstructions from unstructured image collections taken from the internet. 
We apply our system, dubbed NeRF-W, to internet photo collections of famous landmarks, and demonstrate temporally consistent novel view renderings that are significantly closer to photorealism than the prior state of the art.", "fno": "450900h206", "keywords": [ "Image Matching", "Image Reconstruction", "Image Representation", "Image Segmentation", "Internet", "Learning Artificial Intelligence", "Multilayer Perceptrons", "Rendering Computer Graphics", "View Renderings", "Internet Photo Collections", "Dubbed Ne RF W", "Unstructured Image Collections", "Variable Illumination", "Uncontrolled Images", "Multilayer Perceptron", "In The Wild Photographs", "Complex Scenes", "Learning Based Method", "Unconstrained Photo Collections", "Neural Radiance Fields", "Learning Systems", "Solid Modeling", "Photorealism", "Three Dimensional Displays", "Image Color Analysis", "Lighting", "Rendering Computer Graphics" ], "authors": [ { "affiliation": "Google Research", "fullName": "Ricardo Martin-Brualla", "givenName": "Ricardo", "surname": "Martin-Brualla", "__typename": "ArticleAuthorType" }, { "affiliation": "Google Research", "fullName": "Noha Radwan", "givenName": "Noha", "surname": "Radwan", "__typename": "ArticleAuthorType" }, { "affiliation": "Google Research", "fullName": "Mehdi S. M. Sajjadi", "givenName": "Mehdi S. M.", "surname": "Sajjadi", "__typename": "ArticleAuthorType" }, { "affiliation": "Google Research", "fullName": "Jonathan T. 
Barron", "givenName": "Jonathan T.", "surname": "Barron", "__typename": "ArticleAuthorType" }, { "affiliation": "Google Research", "fullName": "Alexey Dosovitskiy", "givenName": "Alexey", "surname": "Dosovitskiy", "__typename": "ArticleAuthorType" }, { "affiliation": "Google Research", "fullName": "Daniel Duckworth", "givenName": "Daniel", "surname": "Duckworth", "__typename": "ArticleAuthorType" } ], "idPrefix": "cvpr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2021-06-01T00:00:00", "pubType": "proceedings", "pages": "7206-7215", "year": "2021", "issn": null, "isbn": "978-1-6654-4509-2", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [ { "id": "1yeLpFg9upy", "name": "pcvpr202145090-09578784s1-mm_450900h206.zip", "size": "3.91 MB", "location": "https://www.computer.org/csdl/api/v1/extra/pcvpr202145090-09578784s1-mm_450900h206.zip", "__typename": "WebExtraType" } ], "adjacentArticles": { "previous": { "fno": "450900h196", "articleId": "1yeJ5cUluik", "__typename": "AdjacentArticleType" }, "next": { "fno": "450900h216", "articleId": "1yeKGGIxfnW", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "trans/tp/2017/11/07776921", "title": "Adaptive 3D Face Reconstruction from Unconstrained Photo Collections", "doi": null, "abstractUrl": "/journal/tp/2017/11/07776921/13rRUxAAT8W", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600s8332", "title": "NeRF-Editing: Geometry Editing of Neural Radiance Fields", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600s8332/1H0Nn4Xgsne", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", 
"__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600f460", "title": "Mip-NeRF 360: Unbounded Anti-Aliased Neural Radiance Fields", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600f460/1H0OphoghaM", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600s8377", "title": "HDR-NeRF: High Dynamic Range Neural Radiance Fields", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600s8377/1H1kSeZPinK", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600f428", "title": "Point-NeRF: Point-based Neural Radiance Fields", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600f428/1H1mrGLgvra", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600d825", "title": "CLIP-NeRF: Text-and-Image Driven Manipulation of Neural Radiance Fields", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600d825/1H1muC7wD0Q", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2023/9346/0/934600a837", "title": "Ev-NeRF: Event Based Neural Radiance Field", "doi": null, "abstractUrl": 
"/proceedings-article/wacv/2023/934600a837/1L8qjBjOpoc", "parentPublication": { "id": "proceedings/wacv/2023/9346/0", "title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2020/9360/0/09151077", "title": "Semi-supervised 3D Face Representation Learning from Unconstrained Photo Collections", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2020/09151077/1lPHfRRaKM8", "parentPublication": { "id": "proceedings/cvprw/2020/9360/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2021/4509/0/450900k0313", "title": "D-NeRF: Neural Radiance Fields for Dynamic Scenes", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2021/450900k0313/1yeLrBwGgik", "parentPublication": { "id": "proceedings/cvpr/2021/4509/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2021/2688/0/268800a962", "title": "FiG-NeRF: Figure-Ground Neural Radiance Fields for 3D Object Category Modelling", "doi": null, "abstractUrl": "/proceedings-article/3dv/2021/268800a962/1zWEppEX9NS", "parentPublication": { "id": "proceedings/3dv/2021/2688/0", "title": "2021 International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1yeHGyRsuys", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "acronym": "cvpr", "groupId": "1000147", "volume": "0", "displayVolume": "0", "year": "2021", "__typename": "ProceedingType" }, "article": { "id": "1yeLrBwGgik", "doi": "10.1109/CVPR46437.2021.01018", "title": "D-NeRF: Neural Radiance Fields for Dynamic Scenes", "normalizedTitle": "D-NeRF: Neural Radiance Fields for Dynamic Scenes", "abstract": "Neural rendering techniques combining machine learning with geometric reasoning have arisen as one of the most promising approaches for synthesizing novel views of a scene from a sparse set of images. Among these, stands out the Neural radiance fields (NeRF) [31], which trains a deep network to map 5D input coordinates (representing spatial location and viewing direction) into a volume density and view-dependent emitted radiance. However, despite achieving an unprecedented level of photorealism on the generated images, NeRF is only applicable to static scenes, where the same spatial location can be queried from different images. In this paper we introduce D-NeRF, a method that extends neural radiance fields to a dynamic domain, allowing to reconstruct and render novel images of objects under rigid and non-rigid motions from a single camera moving around the scene. For this purpose we consider time as an additional input to the system, and split the learning process in two main stages: one that encodes the scene into a canonical space and another that maps this canonical representation into the deformed scene at a particular time. Both mappings are simultaneously learned using fully-connected networks. Once the networks are trained, D-NeRF can render novel images, controlling both the camera view and the time variable, and thus, the object movement. We demonstrate the effectiveness of our approach on scenes with objects under rigid, articulated and non-rigid motions. 
Code, model weights and the dynamic scenes dataset will be available at [1].", "abstracts": [ { "abstractType": "Regular", "content": "Neural rendering techniques combining machine learning with geometric reasoning have arisen as one of the most promising approaches for synthesizing novel views of a scene from a sparse set of images. Among these, stands out the Neural radiance fields (NeRF) [31], which trains a deep network to map 5D input coordinates (representing spatial location and viewing direction) into a volume density and view-dependent emitted radiance. However, despite achieving an unprecedented level of photorealism on the generated images, NeRF is only applicable to static scenes, where the same spatial location can be queried from different images. In this paper we introduce D-NeRF, a method that extends neural radiance fields to a dynamic domain, allowing to reconstruct and render novel images of objects under rigid and non-rigid motions from a single camera moving around the scene. For this purpose we consider time as an additional input to the system, and split the learning process in two main stages: one that encodes the scene into a canonical space and another that maps this canonical representation into the deformed scene at a particular time. Both mappings are simultaneously learned using fully-connected networks. Once the networks are trained, D-NeRF can render novel images, controlling both the camera view and the time variable, and thus, the object movement. We demonstrate the effectiveness of our approach on scenes with objects under rigid, articulated and non-rigid motions. Code, model weights and the dynamic scenes dataset will be available at [1].", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Neural rendering techniques combining machine learning with geometric reasoning have arisen as one of the most promising approaches for synthesizing novel views of a scene from a sparse set of images. 
Among these, stands out the Neural radiance fields (NeRF) [31], which trains a deep network to map 5D input coordinates (representing spatial location and viewing direction) into a volume density and view-dependent emitted radiance. However, despite achieving an unprecedented level of photorealism on the generated images, NeRF is only applicable to static scenes, where the same spatial location can be queried from different images. In this paper we introduce D-NeRF, a method that extends neural radiance fields to a dynamic domain, allowing to reconstruct and render novel images of objects under rigid and non-rigid motions from a single camera moving around the scene. For this purpose we consider time as an additional input to the system, and split the learning process in two main stages: one that encodes the scene into a canonical space and another that maps this canonical representation into the deformed scene at a particular time. Both mappings are simultaneously learned using fully-connected networks. Once the networks are trained, D-NeRF can render novel images, controlling both the camera view and the time variable, and thus, the object movement. We demonstrate the effectiveness of our approach on scenes with objects under rigid, articulated and non-rigid motions. 
Code, model weights and the dynamic scenes dataset will be available at [1].", "fno": "450900k0313", "keywords": [ "Cameras", "Image Motion Analysis", "Image Reconstruction", "Learning Artificial Intelligence", "Rendering Computer Graphics", "Video Signal Processing", "Nonrigid Motions", "Deformed Scene", "D Ne RF", "Camera View", "Dynamic Scenes Dataset", "Neural Radiance Fields", "Neural Rendering Techniques", "Map 5 D Input Coordinates", "Spatial Location", "Viewing Direction", "View Dependent Emitted Radiance", "Static Scenes", "Photorealism", "Computer Vision", "Three Dimensional Displays", "Dynamics", "Machine Learning", "Cameras", "Rendering Computer Graphics" ], "authors": [ { "affiliation": "Institut de Robòtica i Informàtica Industrial, CSIC-UPC", "fullName": "Albert Pumarola", "givenName": "Albert", "surname": "Pumarola", "__typename": "ArticleAuthorType" }, { "affiliation": "Institut de Robòtica i Informàtica Industrial, CSIC-UPC", "fullName": "Enric Corona", "givenName": "Enric", "surname": "Corona", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Tubingen", "fullName": "Gerard Pons-Moll", "givenName": "Gerard", "surname": "Pons-Moll", "__typename": "ArticleAuthorType" }, { "affiliation": "Institut de Robòtica i Informàtica Industrial, CSIC-UPC", "fullName": "Francesc Moreno-Noguer", "givenName": "Francesc", "surname": "Moreno-Noguer", "__typename": "ArticleAuthorType" } ], "idPrefix": "cvpr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2021-06-01T00:00:00", "pubType": "proceedings", "pages": "10313-10322", "year": "2021", "issn": null, "isbn": "978-1-6654-4509-2", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "450900k0303", "articleId": "1yeLVVIuhdC", "__typename": "AdjacentArticleType" }, "next": { "fno": "450900k0323", "articleId": "1yeHMBHkX0A", "__typename": "AdjacentArticleType" }, 
"__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/iccv/2021/2812/0/281200f835", "title": "Mip-NeRF: A Multiscale Representation for Anti-Aliasing Neural Radiance Fields", "doi": null, "abstractUrl": "/proceedings-article/iccv/2021/281200f835/1BmEBA2TORW", "parentPublication": { "id": "proceedings/iccv/2021/2812/0", "title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2021/2812/0/281200m2939", "title": "Non-Rigid Neural Radiance Fields: Reconstruction and Novel View Synthesis of a Dynamic Scene From Monocular Video", "doi": null, "abstractUrl": "/proceedings-article/iccv/2021/281200m2939/1BmF2WhMKB2", "parentPublication": { "id": "proceedings/iccv/2021/2812/0", "title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/11/09872532", "title": "FoV-NeRF: Foveated Neural Radiance Fields for Virtual Reality", "doi": null, "abstractUrl": "/journal/tg/2022/11/09872532/1GhRVOIAS4g", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600s8332", "title": "NeRF-Editing: Geometry Editing of Neural Radiance Fields", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600s8332/1H0Nn4Xgsne", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600f460", "title": "Mip-NeRF 360: Unbounded Anti-Aliased Neural Radiance Fields", "doi": null, "abstractUrl": 
"/proceedings-article/cvpr/2022/694600f460/1H0OphoghaM", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600p5170", "title": "Aug-NeRF: Training Stronger Neural Radiance Fields with Triple-Level Physically-Grounded Augmentations", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600p5170/1H1jhjLRpRu", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600f481", "title": "Ref-NeRF: Structured View-Dependent Appearance for Neural Radiance Fields", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600f481/1H1jnh582jK", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600s8377", "title": "HDR-NeRF: High Dynamic Range Neural Radiance Fields", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600s8377/1H1kSeZPinK", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600f428", "title": "Point-NeRF: Point-based Neural Radiance Fields", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600f428/1H1mrGLgvra", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2021/4509/0/450900h206", "title": "NeRF in the Wild: Neural Radiance Fields for Unconstrained Photo Collections", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2021/450900h206/1yeLpJjmuwE", "parentPublication": { "id": "proceedings/cvpr/2021/4509/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNx8Ounz", "title": "2010 IEEE Haptics Symposium (Formerly known as Symposium on Haptic Interfaces for Virtual Environment and Teleoperator Systems)", "acronym": "haptics", "groupId": "1000312", "volume": "0", "displayVolume": "0", "year": "2010", "__typename": "ProceedingType" }, "article": { "id": "12OmNwoPtoK", "doi": "10.1109/HAPTIC.2010.5444642", "title": "Design and evaluation of a vibrotactile seat to improve spatial awareness while driving", "normalizedTitle": "Design and evaluation of a vibrotactile seat to improve spatial awareness while driving", "abstract": "In this paper, we describe the design and evaluation of a vibrotactile driver's seat that is used to display spatial information during two driving tasks. Many studies have recently shown the effectiveness of haptic and vibrotactile feedback to augment collision warning systems in automobiles. Simultaneously, driver distraction and situational awareness have been identified as significant safety issues in all areas of transportation. We hypothesize that vibrotactile feedback may be used to enhance and improve spatial awareness while driving if it is used continuously and naturally so that it is part of the normal operation of the automobile. We designed a tactile feedback seat from low cost pager motors and characterized the spatial resolution of the seat. We then developed a driving simulation in which the location of vehicles behind and next to the driver's vehicle is communicated through vibrotactile feedback from the seat back. The effectiveness of the seat was evaluated in two driving tasks designated commuting and racing. In the commuting exercise, the test subjects (N=12) maintained a target speed while simultaneously avoiding other vehicles and performing a secondary task. A \"near-miss\" blind spot recording method was used to evaluate the effect of the feedback in reducing hazard exposure. 
In the racing exercise, the test subjects (N=10) raced other virtual competitors while using the feedback to maintain awareness of other vehicles in close proximity. Effectiveness was measured by comparing the accumulated time that cars were blocked behind the driver's car. Three feedback conditions were tested: only vibrotactile feedback, rear view mirror and vibrotactile feedback, rear view mirror only. Our preliminary results showed that vibrotactile feedback used in conjunction with the rear view mirror improved performance over using just the rear view mirror. We discuss some of the challenges of creating driving simulations and evaluation metrics that are both realistic and repeatable.", "abstracts": [ { "abstractType": "Regular", "content": "In this paper, we describe the design and evaluation of a vibrotactile driver's seat that is used to display spatial information during two driving tasks. Many studies have recently shown the effectiveness of haptic and vibrotactile feedback to augment collision warning systems in automobiles. Simultaneously, driver distraction and situational awareness have been identified as significant safety issues in all areas of transportation. We hypothesize that vibrotactile feedback may be used to enhance and improve spatial awareness while driving if it is used continuously and naturally so that it is part of the normal operation of the automobile. We designed a tactile feedback seat from low cost pager motors and characterized the spatial resolution of the seat. We then developed a driving simulation in which the location of vehicles behind and next to the driver's vehicle is communicated through vibrotactile feedback from the seat back. The effectiveness of the seat was evaluated in two driving tasks designated commuting and racing. In the commuting exercise, the test subjects (N=12) maintained a target speed while simultaneously avoiding other vehicles and performing a secondary task. 
A \"near-miss\" blind spot recording method was used to evaluate the effect of the feedback in reducing hazard exposure. In the racing exercise, the test subjects (N=10) raced other virtual competitors while using the feedback to maintain awareness of other vehicles in close proximity. Effectiveness was measured by comparing the accumulated time that cars were blocked behind the driver's car. Three feedback conditions were tested: only vibrotactile feedback, rear view mirror and vibrotactile feedback, rear view mirror only. Our preliminary results showed that vibrotactile feedback used in conjunction with the rear view mirror improved performance over using just the rear view mirror. We discuss some of the challenges of creating driving simulations and evaluation metrics that are both realistic and repeatable.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In this paper, we describe the design and evaluation of a vibrotactile driver's seat that is used to display spatial information during two driving tasks. Many studies have recently shown the effectiveness of haptic and vibrotactile feedback to augment collision warning systems in automobiles. Simultaneously, driver distraction and situational awareness have been identified as significant safety issues in all areas of transportation. We hypothesize that vibrotactile feedback may be used to enhance and improve spatial awareness while driving if it is used continuously and naturally so that it is part of the normal operation of the automobile. We designed a tactile feedback seat from low cost pager motors and characterized the spatial resolution of the seat. We then developed a driving simulation in which the location of vehicles behind and next to the driver's vehicle is communicated through vibrotactile feedback from the seat back. The effectiveness of the seat was evaluated in two driving tasks designated commuting and racing. 
In the commuting exercise, the test subjects (N=12) maintained a target speed while simultaneously avoiding other vehicles and performing a secondary task. A \"near-miss\" blind spot recording method was used to evaluate the effect of the feedback in reducing hazard exposure. In the racing exercise, the test subjects (N=10) raced other virtual competitors while using the feedback to maintain awareness of other vehicles in close proximity. Effectiveness was measured by comparing the accumulated time that cars were blocked behind the driver's car. Three feedback conditions were tested: only vibrotactile feedback, rear view mirror and vibrotactile feedback, rear view mirror only. Our preliminary results showed that vibrotactile feedback used in conjunction with the rear view mirror improved performance over using just the rear view mirror. We discuss some of the challenges of creating driving simulations and evaluation metrics that are both realistic and repeatable.", "fno": "05444642", "keywords": [ "Driver Information Systems", "Haptic Interfaces", "Road Safety", "Seats", "Spatial Awareness", "Vibrotactile Driver Seat", "Spatial Information", "Haptic Feedback", "Vibrotactile Feedback", "Collision Warning Systems", "Automobiles", "Driver Distraction", "Situational Awareness", "Transportation Safety", "Tactile Feedback Seat", "Spatial Resolution", "Driving Simulation", "Near Miss Blind Spot Recording Method", "Hazard Exposure", "Rear View Mirror", "Evaluation Metrics", "Feedback", "Mirrors", "Testing", "Automobiles", "Vehicle Driving", "Vehicles", "Displays", "Haptic Interfaces", "Road Accidents", "Alarm Systems", "Haptics", "Vibrotactile Feedback", "Driving Simulation", "Spatial Awareness", "Driver Safety" ], "authors": [ { "affiliation": "Yale University, Department of Mechanical Engineering, USA", "fullName": "John Morrell", "givenName": "John", "surname": "Morrell", "__typename": "ArticleAuthorType" }, { "affiliation": "Yale University, Department of Mechanical 
Engineering, USA", "fullName": "Kamil Wasilewski", "givenName": "Kamil", "surname": "Wasilewski", "__typename": "ArticleAuthorType" } ], "idPrefix": "haptics", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2010-03-01T00:00:00", "pubType": "proceedings", "pages": "", "year": "2010", "issn": "2324-7347", "isbn": "978-1-4244-6821-8", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "05444645", "articleId": "12OmNwGZNQB", "__typename": "AdjacentArticleType" }, "next": { "fno": "05444643", "articleId": "12OmNwcUk1x", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icnc/2009/3736/1/3736a455", "title": "An Evolution Method of Driving Seat Comfort Based on Least Squares Support Vector Regression", "doi": null, "abstractUrl": "/proceedings-article/icnc/2009/3736a455/12OmNAS9zOD", "parentPublication": { "id": "proceedings/icnc/2009/3736/4", "title": "2009 Fifth International Conference on Natural Computation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aiccsa/2014/7100/0/07073274", "title": "A study on the design and effectiveness of tactile feedback in driving simulator", "doi": null, "abstractUrl": "/proceedings-article/aiccsa/2014/07073274/12OmNCyBXk4", "parentPublication": { "id": "proceedings/aiccsa/2014/7100/0", "title": "2014 IEEE/ACS 11th International Conference on Computer Systems and Applications (AICCSA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/fcst/2006/2721/0/27210109", "title": "\"Back-Seat Driver\": Spatial Sound for Vehicular Way-Finding and Situation Awareness", "doi": null, "abstractUrl": "/proceedings-article/fcst/2006/27210109/12OmNvo67G3", "parentPublication": { "id": "proceedings/fcst/2006/2721/0", "title": "2006 Japan-China Joint 
Workshop on Frontier of Computer Science and Technology", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/isspit/2008/3554/0/04775732", "title": "Road Type Recognition Using Neural Networks for Vehicle Seat Vibration Damping", "doi": null, "abstractUrl": "/proceedings-article/isspit/2008/04775732/12OmNweTvKT", "parentPublication": { "id": "proceedings/isspit/2008/3554/0", "title": "2008 8th IEEE International Symposium on Signal Processing and Information Technology. ISSPIT 2008", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2007/0905/0/04161019", "title": "Collision Awareness Using Vibrotactile Arrays", "doi": null, "abstractUrl": "/proceedings-article/vr/2007/04161019/12OmNwfsI5m", "parentPublication": { "id": "proceedings/vr/2007/0905/0", "title": "2007 IEEE Virtual Reality Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/acv/1992/2840/0/00240316", "title": "CARTRACK: computer vision-based car following", "doi": null, "abstractUrl": "/proceedings-article/acv/1992/00240316/12OmNwx3Q69", "parentPublication": { "id": "proceedings/acv/1992/2840/0", "title": "Proceedings IEEE Workshop on Applications of Computer Vision", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ivs/2005/8961/0/01505142", "title": "Cooperative driving and lane changing at blind crossings", "doi": null, "abstractUrl": "/proceedings-article/ivs/2005/01505142/12OmNx4yvC0", "parentPublication": { "id": "proceedings/ivs/2005/8961/0", "title": "2005 IEEE Intelligent Vehicles Symposium Proceedings", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdma/2012/4772/0/4772a707", "title": "Research on the Effect of Rear Seat Structure on Child Occupant Safety", "doi": null, "abstractUrl": 
"/proceedings-article/icdma/2012/4772a707/12OmNy7yEiK", "parentPublication": { "id": "proceedings/icdma/2012/4772/0", "title": "2012 Third International Conference on Digital Manufacturing & Automation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icicta/2008/3357/2/3357c513", "title": "Test of Urban Road Traffic Facilities on the Driving Safety", "doi": null, "abstractUrl": "/proceedings-article/icicta/2008/3357c513/12OmNyKJiAR", "parentPublication": { "id": "icicta/2008/3357/2", "title": "Intelligent Computation Technology and Automation, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/percom-workshops/2022/1647/0/09767514", "title": "Demo: Distracted Driving Detection", "doi": null, "abstractUrl": "/proceedings-article/percom-workshops/2022/09767514/1Df87KUPDqM", "parentPublication": { "id": "proceedings/percom-workshops/2022/1647/0", "title": "2022 IEEE International Conference on Pervasive Computing and Communications Workshops and other Affiliated Events (PerCom Workshops)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNqBtiPE", "title": "Ninth IEEE International Symposium on Object and Component-Oriented Real-Time Distributed Computing (ISORC'06)", "acronym": "isorc", "groupId": "1000514", "volume": "0", "displayVolume": "0", "year": "2006", "__typename": "ProceedingType" }, "article": { "id": "12OmNyGbIjU", "doi": "10.1109/ISORC.2006.39", "title": "Incorporating Situation Awareness in Service Specifications", "normalizedTitle": "Incorporating Situation Awareness in Service Specifications", "abstract": "of enabling rapid composition of distributed applications from various services, and has become increasingly popular for many large-scale servicebased systems in various application areas, including scientific collaboration, e-business, health care, military, and homeland security. Situation awareness (SAW) is the capability of the entities in a servicebased system to be aware of the situation changes and automatically adapt themselves to such changes to satisfy user requirements, including security and privacy. The continuing evolutions of the entities and environment makes SAW one of the most desired features to support dynamic adaptive computing in service-based systems. In this paper, the relationship between contexts/situations and services in situationaware service-based systems is identified and an extension of OWL-S with situation ontology, called SAW-OWL-S, incorporates SAW in service specifications is presented. 
An approach to generating service specifications for situation-aware servicebased systems using SAW-OWL-S and the system diagram of situation-aware service-based systems using SAW-OWL-S are presented.", "abstracts": [ { "abstractType": "Regular", "content": "of enabling rapid composition of distributed applications from various services, and has become increasingly popular for many large-scale servicebased systems in various application areas, including scientific collaboration, e-business, health care, military, and homeland security. Situation awareness (SAW) is the capability of the entities in a servicebased system to be aware of the situation changes and automatically adapt themselves to such changes to satisfy user requirements, including security and privacy. The continuing evolutions of the entities and environment makes SAW one of the most desired features to support dynamic adaptive computing in service-based systems. In this paper, the relationship between contexts/situations and services in situationaware service-based systems is identified and an extension of OWL-S with situation ontology, called SAW-OWL-S, incorporates SAW in service specifications is presented. An approach to generating service specifications for situation-aware servicebased systems using SAW-OWL-S and the system diagram of situation-aware service-based systems using SAW-OWL-S are presented.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "of enabling rapid composition of distributed applications from various services, and has become increasingly popular for many large-scale servicebased systems in various application areas, including scientific collaboration, e-business, health care, military, and homeland security. Situation awareness (SAW) is the capability of the entities in a servicebased system to be aware of the situation changes and automatically adapt themselves to such changes to satisfy user requirements, including security and privacy. 
The continuing evolutions of the entities and environment makes SAW one of the most desired features to support dynamic adaptive computing in service-based systems. In this paper, the relationship between contexts/situations and services in situationaware service-based systems is identified and an extension of OWL-S with situation ontology, called SAW-OWL-S, incorporates SAW in service specifications is presented. An approach to generating service specifications for situation-aware servicebased systems using SAW-OWL-S and the system diagram of situation-aware service-based systems using SAW-OWL-S are presented.", "fno": "25610287", "keywords": [ "Service Oriented Architecture", "Situation Awareness", "Service Specification", "Web Ontology Language For Web Services", "Service Based Systems" ], "authors": [ { "affiliation": "Arizona State University, USA", "fullName": "Stephen S. Yau", "givenName": "Stephen S.", "surname": "Yau", "__typename": "ArticleAuthorType" }, { "affiliation": "Arizona State University, USA", "fullName": "Junwei Liu", "givenName": "Junwei", "surname": "Liu", "__typename": "ArticleAuthorType" } ], "idPrefix": "isorc", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2006-04-01T00:00:00", "pubType": "proceedings", "pages": "287-294", "year": "2006", "issn": null, "isbn": "0-7695-2561-X", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "25610279", "articleId": "12OmNzmLxA3", "__typename": "AdjacentArticleType" }, "next": { "fno": "25610295", "articleId": "12OmNBOlljE", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/compsac/2006/2655/1/26550503", "title": "Automated Agent Synthesis for Situation Awareness in Service-Based Systems", "doi": null, "abstractUrl": "/proceedings-article/compsac/2006/26550503/12OmNAZOK2F", "parentPublication": { 
"id": "compsac/2006/2655/1", "title": "30th Annual International Computer Software and Applications Conference (COMPSAC'06)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ncca/2011/4550/0/4550a079", "title": "Adaptive Situation Awareness Using Visual Analytics", "doi": null, "abstractUrl": "/proceedings-article/ncca/2011/4550a079/12OmNrIrPjS", "parentPublication": { "id": "proceedings/ncca/2011/4550/0", "title": "Network Cloud Computing and Applications, International Symposium on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ifcsta/2009/3930/2/3930b392", "title": "Service Implementation Based on Semantic Web", "doi": null, "abstractUrl": "/proceedings-article/ifcsta/2009/3930b392/12OmNvzJG0W", "parentPublication": { "id": "proceedings/ifcsta/2009/3930/2", "title": "Computer Science-Technology and Applications, International Forum on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cinc/2009/3645/2/3645b221", "title": "Construction and Test of Web Service Solution for E-government", "doi": null, "abstractUrl": "/proceedings-article/cinc/2009/3645b221/12OmNwCJOOk", "parentPublication": { "id": "cinc/2009/3645/2", "title": "Computational Intelligence and Natural Computing, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/compsac/2005/2413/1/241310107", "title": "Situation-Awareness for Adaptive Coordination in Service-Based Systems", "doi": null, "abstractUrl": "/proceedings-article/compsac/2005/241310107/12OmNy50g7k", "parentPublication": { "id": "proceedings/compsac/2005/2413/1", "title": "29th Annual International Computer Software and Applications Conference (COMPSAC'05)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/seus-wccia/2006/2560/0/25600005", "title": 
"Hierarchical Situation Modeling and Reasoning for Pervasive Computing", "doi": null, "abstractUrl": "/proceedings-article/seus-wccia/2006/25600005/12OmNylboKO", "parentPublication": { "id": "proceedings/seus-wccia/2006/2560/0", "title": "Software Technologies for Future Embedded and Ubiquitous Systems, and International Workshop on Collaborative Computing, Integration, and Assurance, The IEEE Workshop on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wism/2009/3817/0/3817a342", "title": "An Approach for Visualization and Formalization of Web Service Composition", "doi": null, "abstractUrl": "/proceedings-article/wism/2009/3817a342/12OmNz2TCJV", "parentPublication": { "id": "proceedings/wism/2009/3817/0", "title": "Web Information Systems and Mining, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmecg/2012/4853/0/4853a091", "title": "A Social Label Automatic Tagging Based Method on Situation Semantics in Web Service Discovery", "doi": null, "abstractUrl": "/proceedings-article/icmecg/2012/4853a091/12OmNz61dCc", "parentPublication": { "id": "proceedings/icmecg/2012/4853/0", "title": "Management of e-Commerce and e-Government, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/achi/2008/3086/0/3086a230", "title": "UPOS: User Profile Ontology with Situation-Dependent Preferences Support", "doi": null, "abstractUrl": "/proceedings-article/achi/2008/3086a230/12OmNzuZUBl", "parentPublication": { "id": "proceedings/achi/2008/3086/0", "title": "International Conference on Advances in Computer-Human Interaction", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/ex/2012/03/mex2012030091", "title": "Situation Awareness and Cognitive Modeling", "doi": null, "abstractUrl": 
"/magazine/ex/2012/03/mex2012030091/13rRUB7a0WW", "parentPublication": { "id": "mags/ex", "title": "IEEE Intelligent Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNxE2mWh", "title": "2013 IEEE Virtual Reality (VR)", "acronym": "vr", "groupId": "1000791", "volume": "0", "displayVolume": "0", "year": "2013", "__typename": "ProceedingType" }, "article": { "id": "12OmNyS6RCp", "doi": "10.1109/VR.2013.6549364", "title": "Awareness of other: Evaluating the impact of proximity cues in collaborative tasks", "normalizedTitle": "Awareness of other: Evaluating the impact of proximity cues in collaborative tasks", "abstract": "Collaboration in 3D environments has the main goal of attaining a high degree of teamwork by exposing the team to a suitable level of immersion. However, users within virtual environments have limitations to perceive the same stimuli of the real world. Particularly, the feeling of presence and nearness of the other is difficult to be generated in VR systems. This poster presents a work in progress that shows our current results on evaluating the impact of three different proximity cues for generating awareness of the presence of other. The design of each cue aims at independently stimulating the senses of sight, hearing and touch. The experiment design includes the comparison with a baseline condition in virtual environments where no specific stimulation is applied.", "abstracts": [ { "abstractType": "Regular", "content": "Collaboration in 3D environments has the main goal of attaining a high degree of teamwork by exposing the team to a suitable level of immersion. However, users within virtual environments have limitations to perceive the same stimuli of the real world. Particularly, the feeling of presence and nearness of the other is difficult to be generated in VR systems. This poster presents a work in progress that shows our current results on evaluating the impact of three different proximity cues for generating awareness of the presence of other. The design of each cue aims at independently stimulating the senses of sight, hearing and touch. 
The experiment design includes the comparison with a baseline condition in virtual environments where no specific stimulation is applied.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Collaboration in 3D environments has the main goal of attaining a high degree of teamwork by exposing the team to a suitable level of immersion. However, users within virtual environments have limitations to perceive the same stimuli of the real world. Particularly, the feeling of presence and nearness of the other is difficult to be generated in VR systems. This poster presents a work in progress that shows our current results on evaluating the impact of three different proximity cues for generating awareness of the presence of other. The design of each cue aims at independently stimulating the senses of sight, hearing and touch. The experiment design includes the comparison with a baseline condition in virtual environments where no specific stimulation is applied.", "fno": "06549364", "keywords": [ "Teamwork", "Belts", "Synchronous Motors", "Electronic Mail", "Vibrations", "Virtual Environments", "H 1 2 User Machine Systems Human Factors" ], "authors": [ { "affiliation": null, "fullName": "Wilson J. Sarmiento", "givenName": "Wilson J.", "surname": "Sarmiento", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Vitor Jorge", "givenName": "Vitor", "surname": "Jorge", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Anderson Maciel", "givenName": "Anderson", "surname": "Maciel", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Luciana Nedel", "givenName": "Luciana", "surname": "Nedel", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Cesar A. 
Collazos", "givenName": "Cesar A.", "surname": "Collazos", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Jackson Oliveira", "givenName": "Jackson", "surname": "Oliveira", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Frederico Faria", "givenName": "Frederico", "surname": "Faria", "__typename": "ArticleAuthorType" } ], "idPrefix": "vr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2013-03-01T00:00:00", "pubType": "proceedings", "pages": "63-64", "year": "2013", "issn": "1087-8270", "isbn": "978-1-4673-4795-2", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "06549363", "articleId": "12OmNAq3hG7", "__typename": "AdjacentArticleType" }, "next": { "fno": "06549365", "articleId": "12OmNzvQHOK", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/cvpr/2007/1179/0/04270170", "title": "3D Occlusion Inference from Silhouette Cues", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2007/04270170/12OmNBCqbE1", "parentPublication": { "id": "proceedings/cvpr/2007/1179/0", "title": "2007 IEEE Conference on Computer Vision and Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/fgcn/2008/3431/2/3431b141", "title": "Revised Gaze Proximity as Context Information", "doi": null, "abstractUrl": "/proceedings-article/fgcn/2008/3431b141/12OmNBhZ4g2", "parentPublication": { "id": "proceedings/fgcn/2008/3431/1", "title": "Future Generation Communication and Networking", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icisce/2016/2535/0/2535a943", "title": "Design of Non-smooth Synchronous Control Method for Stage Lifting Machinery", "doi": null, "abstractUrl": 
"/proceedings-article/icisce/2016/2535a943/12OmNBpmDEe", "parentPublication": { "id": "proceedings/icisce/2016/2535/0", "title": "2016 3rd International Conference on Information Science and Control Engineering (ICISCE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icalt/2004/2181/0/01357762", "title": "Modelling for learning tasks - approaches, tools and implications", "doi": null, "abstractUrl": "/proceedings-article/icalt/2004/01357762/12OmNC1Y5rV", "parentPublication": { "id": "proceedings/icalt/2004/2181/0", "title": "Proceedings. IEEE International Conference on Advanced Learning Technologies", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2016/0836/0/07504682", "title": "The role of interaction in virtual embodiment: Effects of the virtual hand representation", "doi": null, "abstractUrl": "/proceedings-article/vr/2016/07504682/12OmNwE9Our", "parentPublication": { "id": "proceedings/vr/2016/0836/0", "title": "2016 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/haptic/2006/0226/0/01627121", "title": "A Study of Mounting Methods for Tactors Using an Elastic Polymer", "doi": null, "abstractUrl": "/proceedings-article/haptic/2006/01627121/12OmNwcCII4", "parentPublication": { "id": "proceedings/haptic/2006/0226/0", "title": "Haptic Interfaces for Virtual Environment and Teleoperator Systems, International Symposium on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ecmsm/2013/6298/0/06648964", "title": "A real-time observer for UAV's brushless motors", "doi": null, "abstractUrl": "/proceedings-article/ecmsm/2013/06648964/12OmNx2QUFO", "parentPublication": { "id": "proceedings/ecmsm/2013/6298/0", "title": "2013 IEEE 11th International Workshop of Electronics, Control, Measurement, Signals and their application to 
Mechatronics (ECMSM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icic/2009/3634/3/3634c325", "title": "Analysis and Research of Vibration Feeder Based on Finite Element Method", "doi": null, "abstractUrl": "/proceedings-article/icic/2009/3634c325/12OmNx3ZjdR", "parentPublication": { "id": "proceedings/icic/2009/3634/2", "title": "2009 Second International Conference on Information and Computing Science", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cit/2006/2687/0/26870129", "title": "Narrowcasting Attributes for Presence Awareness in Collaborative Virtual Environments", "doi": null, "abstractUrl": "/proceedings-article/cit/2006/26870129/12OmNxVlTKl", "parentPublication": { "id": "proceedings/cit/2006/2687/0", "title": "The Sixth IEEE International Conference on Computer and Information Technology", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/11/08799015", "title": "Conveying spatial awareness cues in xR collaborations", "doi": null, "abstractUrl": "/journal/tg/2019/11/08799015/1cumXlkNGuY", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "13bd1eJgoia", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "acronym": "vr", "groupId": "1000791", "volume": "0", "displayVolume": "0", "year": "2018", "__typename": "ProceedingType" }, "article": { "id": "13bd1eSlysx", "doi": "10.1109/VR.2018.8446420", "title": "Gaze Direction in a Virtual Environment Via a Dynamic Full-Image Color Effect", "normalizedTitle": "Gaze Direction in a Virtual Environment Via a Dynamic Full-Image Color Effect", "abstract": "For developers of immersive 360-degree virtual environments, directing the viewer's gaze towards Points of Interest (POIs) is a challenge. Limited research exists testing the effectiveness of various gaze direction techniques. However, there is a lack of empirical research evaluating real-time color effects designed to direct the viewer's gaze. We developed a novel VR gaze-directing stimulus using a dynamic real-time color effect and tested its effectiveness in a user study. The stimulus was influenced by color psychology research and chosen by an informal pilot study. Results suggest that the stimulus encouraged participants to direct their gaze back towards POIs. In the majority of subjects who encountered the stimulus, their gaze was successfully directed back to POIs within a few seconds. While the task of holding viewer gaze in VR remains a challenge, this experiment has uncovered new information about the potential of color effect-based VR gaze direction.", "abstracts": [ { "abstractType": "Regular", "content": "For developers of immersive 360-degree virtual environments, directing the viewer's gaze towards Points of Interest (POIs) is a challenge. Limited research exists testing the effectiveness of various gaze direction techniques. However, there is a lack of empirical research evaluating real-time color effects designed to direct the viewer's gaze. 
We developed a novel VR gaze-directing stimulus using a dynamic real-time color effect and tested its effectiveness in a user study. The stimulus was influenced by color psychology research and chosen by an informal pilot study. Results suggest that the stimulus encouraged participants to direct their gaze back towards POIs. In the majority of subjects who encountered the stimulus, their gaze was successfully directed back to POIs within a few seconds. While the task of holding viewer gaze in VR remains a challenge, this experiment has uncovered new information about the potential of color effect-based VR gaze direction.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "For developers of immersive 360-degree virtual environments, directing the viewer's gaze towards Points of Interest (POIs) is a challenge. Limited research exists testing the effectiveness of various gaze direction techniques. However, there is a lack of empirical research evaluating real-time color effects designed to direct the viewer's gaze. We developed a novel VR gaze-directing stimulus using a dynamic real-time color effect and tested its effectiveness in a user study. The stimulus was influenced by color psychology research and chosen by an informal pilot study. Results suggest that the stimulus encouraged participants to direct their gaze back towards POIs. In the majority of subjects who encountered the stimulus, their gaze was successfully directed back to POIs within a few seconds. 
While the task of holding viewer gaze in VR remains a challenge, this experiment has uncovered new information about the potential of color effect-based VR gaze direction.", "fno": "08446420", "keywords": [ "Gaze Tracking", "Image Colour Analysis", "Virtual Reality", "Dynamic Full Image Color Effect", "360 Degree Virtual Environments", "PO Is", "Gaze Direction Techniques", "Color Psychology Research", "Virtual Environment", "VR Gaze Directing Stimulus", "Points Of Interest", "Color Effect Based VR Gaze Direction", "Virtual Reality", "Image Color Analysis", "Psychology", "Automobiles", "Color", "Virtual Environments", "Real Time Systems", "HMD", "Gaze Direction", "Color Psychology", "H 5 1 HCI Multimedia Information Systems Artificial", "Augmented", "Virtual Realities", "I 3 7 Computer Graphics Three Dimensional Graphics And Realism Virtual Reality" ], "authors": [ { "affiliation": "Texas A&M University", "fullName": "Mason Smith", "givenName": "Mason", "surname": "Smith", "__typename": "ArticleAuthorType" }, { "affiliation": "Texas A&M University", "fullName": "Ann McNamara", "givenName": "Ann", "surname": "McNamara", "__typename": "ArticleAuthorType" } ], "idPrefix": "vr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2018-03-01T00:00:00", "pubType": "proceedings", "pages": "1-2", "year": "2018", "issn": null, "isbn": "978-1-5386-3365-6", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "08446486", "articleId": "13bd1gCd7Ts", "__typename": "AdjacentArticleType" }, "next": { "fno": "08446471", "articleId": "13bd1AITn9Y", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/etvis/2016/4731/0/07851165", "title": "Visualizing eye tracking data with gaze-guided slit-scans", "doi": null, "abstractUrl": "/proceedings-article/etvis/2016/07851165/12OmNB8kHPk", 
"parentPublication": { "id": "proceedings/etvis/2016/4731/0", "title": "2016 IEEE Second Workshop on Eye Tracking and Visualization (ETVIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/acii/2009/4800/0/05349517", "title": "Automated classification of gaze direction using spectral regression and support vector machine", "doi": null, "abstractUrl": "/proceedings-article/acii/2009/05349517/12OmNBezSEG", "parentPublication": { "id": "proceedings/acii/2009/4800/0", "title": "2009 3rd International Conference on Affective Computing and Intelligent Interaction and Workshops (ACII 2009)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2004/8603/1/01394213", "title": "On the importance of skin color for \"other-race\" effect", "doi": null, "abstractUrl": "/proceedings-article/icme/2004/01394213/12OmNBpVPZy", "parentPublication": { "id": "proceedings/icme/2004/8603/1", "title": "2004 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/delta/2008/3110/0/3110a470", "title": "Model-Based Gaze Direction Estimation in Office Environment", "doi": null, "abstractUrl": "/proceedings-article/delta/2008/3110a470/12OmNqGRGgh", "parentPublication": { "id": "proceedings/delta/2008/3110/0", "title": "Electronic Design, Test and Applications, IEEE International Workshop on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sitis/2013/3211/0/3211a006", "title": "An Estimator for Rating Video Contents on the Basis of a Viewer's Behavior in Typical Home Environments", "doi": null, "abstractUrl": "/proceedings-article/sitis/2013/3211a006/12OmNqJq4q8", "parentPublication": { "id": "proceedings/sitis/2013/3211/0", "title": "2013 International Conference on Signal-Image Technology & Internet-Based Systems (SITIS)", 
"__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2018/3365/0/08446180", "title": "Effect of Virtual Human Gaze Behaviour During an Orthogonal Collision Avoidance Walking Task", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08446180/13bd1sv5NyE", "parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cw/2018/7315/0/731500a025", "title": "Color Preference Differences between Head Mounted Displays and PC Screens", "doi": null, "abstractUrl": "/proceedings-article/cw/2018/731500a025/17D45XoXP5M", "parentPublication": { "id": "proceedings/cw/2018/7315/0", "title": "2018 International Conference on Cyberworlds (CW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2023/4815/0/481500a409", "title": "A Large-Scale Study of Proxemics and Gaze in Groups", "doi": null, "abstractUrl": "/proceedings-article/vr/2023/481500a409/1MNgA7qw20U", "parentPublication": { "id": "proceedings/vr/2023/4815/0", "title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2019/4765/0/476500a372", "title": "Visualization-Guided Attention Direction in Dynamic Control Tasks", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2019/476500a372/1gysnIklSSY", "parentPublication": { "id": "proceedings/ismar-adjunct/2019/4765/0", "title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2021/0477/0/047700a011", "title": "Subject Guided Eye Image Synthesis with Application to Gaze Redirection", "doi": null, 
"abstractUrl": "/proceedings-article/wacv/2021/047700a011/1uqGyw32uVq", "parentPublication": { "id": "proceedings/wacv/2021/0477/0", "title": "2021 IEEE Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1Eo5K94bDUc", "title": "2022 IEEE/ACM 15th International Workshop on Cooperative and Human Aspects of Software Engineering (CHASE)", "acronym": "chase", "groupId": "1002764", "volume": "0", "displayVolume": "0", "year": "2022", "__typename": "ProceedingType" }, "article": { "id": "1Eo5N6j6TWo", "doi": "10.1145/3528579.3529173", "title": "Problem reports and team maturity in agile automotive software development", "normalizedTitle": "Problem reports and team maturity in agile automotive software development", "abstract": "Background: Volvo Cars is pioneering an agile transformation on a large scale in the automotive industry. Social psychological aspects of automotive software development are an under-researched area in general. Few studies on team maturity or group dynamics can be found specifically in the automotive software engineering domain. Objective: This study is intended as an initial step to fill that gap by investigating the connection between issues and problem reports and team maturity. Method: We conducted a quantitative study with 84 participants from 14 teams and qualitatively validated the result with the Release Train Engineer having an overview of all the participating teams. Results: We find that the more mature a team is, the faster they seem to resolve issues as provided through external feedback, at least in the two initial team maturity stages. Conclusion: This study suggests that working on team dynamics might increase productivity in modern automotive software development departments, but this needs further investigation.", "abstracts": [ { "abstractType": "Regular", "content": "Background: Volvo Cars is pioneering an agile transformation on a large scale in the automotive industry. Social psychological aspects of automotive software development are an under-researched area in general. Few studies on team maturity or group dynamics can be found specifically in the automotive software engineering domain. 
Objective: This study is intended as an initial step to fill that gap by investigating the connection between issues and problem reports and team maturity. Method: We conducted a quantitative study with 84 participants from 14 teams and qualitatively validated the result with the Release Train Engineer having an overview of all the participating teams. Results: We find that the more mature a team is, the faster they seem to resolve issues as provided through external feedback, at least in the two initial team maturity stages. Conclusion: This study suggests that working on team dynamics might increase productivity in modern automotive software development departments, but this needs further investigation.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Background: Volvo Cars is pioneering an agile transformation on a large scale in the automotive industry. Social psychological aspects of automotive software development are an under-researched area in general. Few studies on team maturity or group dynamics can be found specifically in the automotive software engineering domain. Objective: This study is intended as an initial step to fill that gap by investigating the connection between issues and problem reports and team maturity. Method: We conducted a quantitative study with 84 participants from 14 teams and qualitatively validated the result with the Release Train Engineer having an overview of all the participating teams. Results: We find that the more mature a team is, the faster they seem to resolve issues as provided through external feedback, at least in the two initial team maturity stages. 
Conclusion: This study suggests that working on team dynamics might increase productivity in modern automotive software development departments, but this needs further investigation.", "fno": "934200a041", "keywords": [ "Automobile Industry", "Automotive Engineering", "Industrial Psychology", "Productivity", "Software Development Management", "Software Prototyping", "Team Working", "Problem Reports", "Agile Automotive Software Development", "Agile Transformation", "Automotive Industry", "Social Psychological Aspect", "Automotive Software Engineering", "Team Maturity", "Team Dynamics", "Volvo Cars", "Group Dynamics", "Release Train Engineer", "Productivity", "Industries", "Conferences", "Psychology", "Software", "Automobiles", "Vehicle Dynamics", "Automotive Software Development", "Teams", "Team Maturity", "Problem Reports" ], "authors": [ { "affiliation": "Volvo Cars and Chalmers | University of Gothenburg,Gothenburg,Sweden", "fullName": "Lucas Gren", "givenName": "Lucas", "surname": "Gren", "__typename": "ArticleAuthorType" }, { "affiliation": "Chalmers | University of Gothenburg,Gothenburg,Sweden", "fullName": "Martin Shepperd", "givenName": "Martin", "surname": "Shepperd", "__typename": "ArticleAuthorType" } ], "idPrefix": "chase", "isOpenAccess": true, "showRecommendedArticles": true, "showBuyMe": false, "hasPdf": true, "pubDate": "2022-05-01T00:00:00", "pubType": "proceedings", "pages": "41-45", "year": "2022", "issn": null, "isbn": "978-1-4503-9342-3", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "934200a036", "articleId": "1Eo5MQgusak", "__typename": "AdjacentArticleType" }, "next": { "fno": "934200a046", "articleId": "1Eo5Ks3BLrO", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/agile/2011/426/0/06005486", "title": "Agile Team Perceptions of Productivity Factors", "doi": null, "abstractUrl": 
"/proceedings-article/agile/2011/06005486/12OmNAlvHyZ", "parentPublication": { "id": "proceedings/agile/2011/426/0", "title": "2011 Agile Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/chase/2017/4039/0/4039a070", "title": "Team Maturity in Software Engineering Teams: A Work in Progress", "doi": null, "abstractUrl": "/proceedings-article/chase/2017/4039a070/12OmNAsTgO9", "parentPublication": { "id": "proceedings/chase/2017/4039/0", "title": "2017 IEEE/ACM 10th International Workshop on Cooperative and Human Aspects of Software Engineering (CHASE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/esem/2017/4039/0/4039a235", "title": "Team Maturity in Software Engineering Teams", "doi": null, "abstractUrl": "/proceedings-article/esem/2017/4039a235/12OmNBSBk9F", "parentPublication": { "id": "proceedings/esem/2017/4039/0", "title": "2017 ACM/IEEE International Symposium on Empirical Software Engineering and Measurement (ESEM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/semotion/2016/4169/0/4169a006", "title": "Outcomes of Emotional Content from Agile Team Forum Posts", "doi": null, "abstractUrl": "/proceedings-article/semotion/2016/4169a006/12OmNCfAPG7", "parentPublication": { "id": "proceedings/semotion/2016/4169/0", "title": "2016 IEEE/ACM 1st International Workshop on Emotional Awareness in Software Engineering (SEmotion)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ipdpsw/2018/5555/0/555501a206", "title": "Dynamic Reconfiguration for Real-Time Automotive Embedded Systems in Fail-Operational Context", "doi": null, "abstractUrl": "/proceedings-article/ipdpsw/2018/555501a206/12OmNwMobb5", "parentPublication": { "id": "proceedings/ipdpsw/2018/5555/0", "title": "2018 IEEE International Parallel and Distributed Processing Symposium Workshops 
(IPDPSW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/fie/1999/5643/1/00839292", "title": "Assessing the process maturity utilized in software engineering team project courses", "doi": null, "abstractUrl": "/proceedings-article/fie/1999/00839292/12OmNx76TJ1", "parentPublication": { "id": "proceedings/fie/1999/5643/1", "title": "FIE'99 Frontiers in Education. 29th Annual Frontiers in Education Conference. Designing the Future of Science and Engineering Education. Conference Proceedings (IEEE Cat. No.99CH37011", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wicsa/2015/1922/0/1922a115", "title": "Architecting in the Automotive Domain: Descriptive vs Prescriptive Architecture", "doi": null, "abstractUrl": "/proceedings-article/wicsa/2015/1922a115/12OmNxxNbUW", "parentPublication": { "id": "proceedings/wicsa/2015/1922/0", "title": "2015 12th Working IEEE/IFIP Conference on Software Architecture (WICSA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icgse/2006/2663/0/26630062", "title": "Delegation in Virtual Team: the Moderating Effects of Team Maturity and Team Distance", "doi": null, "abstractUrl": "/proceedings-article/icgse/2006/26630062/12OmNzAohVh", "parentPublication": { "id": "proceedings/icgse/2006/2663/0", "title": "2006 IEEE International Conference on Global Software Engineering (ICGSE'06)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icsme/2018/7870/0/787000a732", "title": "Team Maturity in Agile Software Development: The Impact on Productivity", "doi": null, "abstractUrl": "/proceedings-article/icsme/2018/787000a732/17D45Wuc33H", "parentPublication": { "id": "proceedings/icsme/2018/7870/0", "title": "2018 IEEE International Conference on Software Maintenance and Evolution (ICSME)", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "mags/so/2020/03/08911247", "title": "Group-Development Psychology Training: The Perceived Effects on Agile Software-Development Teams", "doi": null, "abstractUrl": "/magazine/so/2020/03/08911247/1j30Y6oKvSw", "parentPublication": { "id": "mags/so", "title": "IEEE Software", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1aDSuDp9DuU", "title": "2019 IEEE International Conference on Pervasive Computing and Communications Workshops (PerCom Workshops)", "acronym": "percom-workshops", "groupId": "1000552", "volume": "0", "displayVolume": "0", "year": "2019", "__typename": "ProceedingType" }, "article": { "id": "1aDSOBPC9kA", "doi": "10.1109/PERCOMW.2019.8730766", "title": "Towards Adaptive Car-to-Cloud Communication", "normalizedTitle": "Towards Adaptive Car-to-Cloud Communication", "abstract": "Recent developments in communication technology have led to cloud resources becoming ubiquitous. These resources enable many new applications by offering computational power for remote embedded devices. In combination with advances in the area of smart driving, this seems to be especially beneficial for applications such as remote maintenance of vehicles or integration with smart city services. As autonomous driving continues to gain traction, Car-to-Cloud communication can support transferring collected data to the cloud, e.g., for dynamic learning of new map information. Additionally, passengers can benefit from novel entertainment services. All these developments require a stable connection between a mobile vehicle and the cloud resources. In this vision paper, we survey Car-to-Cloud communication applications. Based on the analysis of the varying requirements for these applications, we formulate research questions and challenges. Further, we discuss how these challenges can be addressed by means of an adaptive Car-to-Cloud communication middleware. We conclude with an overview on our activities in this area and an outlook on our planned future work on adaptive communication.", "abstracts": [ { "abstractType": "Regular", "content": "Recent developments in communication technology have led to cloud resources becoming ubiquitous. These resources enable many new applications by offering computational power for remote embedded devices. 
In combination with advances in the area of smart driving, this seems to be especially beneficial for applications such as remote maintenance of vehicles or integration with smart city services. As autonomous driving continues to gain traction, Car-to-Cloud communication can support transferring collected data to the cloud, e.g., for dynamic learning of new map information. Additionally, passengers can benefit from novel entertainment services. All these developments require a stable connection between a mobile vehicle and the cloud resources. In this vision paper, we survey Car-to-Cloud communication applications. Based on the analysis of the varying requirements for these applications, we formulate research questions and challenges. Further, we discuss how these challenges can be addressed by means of an adaptive Car-to-Cloud communication middleware. We conclude with an overview on our activities in this area and an outlook on our planned future work on adaptive communication.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Recent developments in communication technology have led to cloud resources becoming ubiquitous. These resources enable many new applications by offering computational power for remote embedded devices. In combination with advances in the area of smart driving, this seems to be especially beneficial for applications such as remote maintenance of vehicles or integration with smart city services. As autonomous driving continues to gain traction, Car-to-Cloud communication can support transferring collected data to the cloud, e.g., for dynamic learning of new map information. Additionally, passengers can benefit from novel entertainment services. All these developments require a stable connection between a mobile vehicle and the cloud resources. In this vision paper, we survey Car-to-Cloud communication applications. 
Based on the analysis of the varying requirements for these applications, we formulate research questions and challenges. Further, we discuss how these challenges can be addressed by means of an adaptive Car-to-Cloud communication middleware. We conclude with an overview on our activities in this area and an outlook on our planned future work on adaptive communication.", "fno": "08730766", "keywords": [ "Cloud Computing", "Driver Information Systems", "Embedded Systems", "Middleware", "Mobile Computing", "Power Aware Computing", "Vehicular Ad Hoc Networks", "Cloud Resources", "Communication Technology", "Computational Power", "Remote Embedded Devices", "Smart Driving", "Autonomous Driving", "Entertainment Services", "Mobile Vehicle", "Adaptive Car To Cloud Communication Middleware", "Real Time Systems", "Automobiles", "Cloud Computing", "Smart Cities", "Vehicle Dynamics", "Routing", "Vehicular Communication", "Cloud Computing", "Smart City", "Intelligent Transportation Systems", "Adaptive Communication Systems" ], "authors": [ { "affiliation": "Universität Würzburg, Chair of Software Engineering, Würzburg, Germany", "fullName": "Stefan Herrnleben", "givenName": "Stefan", "surname": "Herrnleben", "__typename": "ArticleAuthorType" }, { "affiliation": "Universität Mannheim, Chair of Information Systems II, Mannheim, Germany", "fullName": "Martin Pfannemüller", "givenName": "Martin", "surname": "Pfannemüller", "__typename": "ArticleAuthorType" }, { "affiliation": "Universität Würzburg, Chair of Software Engineering, Würzburg, Germany", "fullName": "Christian Krupitzer", "givenName": "Christian", "surname": "Krupitzer", "__typename": "ArticleAuthorType" }, { "affiliation": "Universität Würzburg, Chair of Software Engineering, Würzburg, Germany", "fullName": "Samuel Kounev", "givenName": "Samuel", "surname": "Kounev", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Trento, Advanced Networking Systems, Trento, Italy", "fullName": "Michele Segata", 
"givenName": "Michele", "surname": "Segata", "__typename": "ArticleAuthorType" }, { "affiliation": "Intedis GmbH & Co. KG, Connected Car Innovations, Würzburg, Germany", "fullName": "Felix Fastnacht", "givenName": "Felix", "surname": "Fastnacht", "__typename": "ArticleAuthorType" }, { "affiliation": "Intedis GmbH & Co. KG, Connected Car Innovations, Würzburg, Germany", "fullName": "Magnus Nigmann", "givenName": "Magnus", "surname": "Nigmann", "__typename": "ArticleAuthorType" } ], "idPrefix": "percom-workshops", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2019-03-01T00:00:00", "pubType": "proceedings", "pages": "119-124", "year": "2019", "issn": null, "isbn": "978-1-5386-9151-9", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "08730883", "articleId": "1aDSzdPNap2", "__typename": "AdjacentArticleType" }, "next": { "fno": "08730764", "articleId": "1aDSK7HQgxi", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/paciia/2008/3490/1/3490a885", "title": "The Design of Autonomous Smart Car Used in Simulation of Vehicle Platoon", "doi": null, "abstractUrl": "/proceedings-article/paciia/2008/3490a885/12OmNB1NVPx", "parentPublication": { "id": "proceedings/paciia/2008/3490/1", "title": "Pacific-Asia Workshop on Computational Intelligence and Industrial Application, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icitbs/2018/4201/0/420101a005", "title": "Application of Improved Genetic Algorithm in Vehicle Networked Cloud Data Platform", "doi": null, "abstractUrl": "/proceedings-article/icitbs/2018/420101a005/12OmNylKB5a", "parentPublication": { "id": "proceedings/icitbs/2018/4201/0", "title": "2018 International Conference on Intelligent Transportation, Big Data & Smart City (ICITBS)", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/waina/2016/2461/0/2461a975", "title": "Development of Car Cloud Sensor Network Application Platform", "doi": null, "abstractUrl": "/proceedings-article/waina/2016/2461a975/12OmNzlUKCx", "parentPublication": { "id": "proceedings/waina/2016/2461/0", "title": "2016 30th International Conference on Advanced Information Networking and Applications Workshops (WAINA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wowmom/2018/4725/0/08449768", "title": "IoT and Microservices Based Testbed for Connected Car Services", "doi": null, "abstractUrl": "/proceedings-article/wowmom/2018/08449768/13bd1tl2om2", "parentPublication": { "id": "proceedings/wowmom/2018/4725/0", "title": "2018 IEEE 19th International Symposium on \"A World of Wireless, Mobile and Multimedia Networks\" (WoWMoM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/nca/2021/9550/0/09685942", "title": "Accountable and privacy-aware flexible car sharing and rental services", "doi": null, "abstractUrl": "/proceedings-article/nca/2021/09685942/1AC8QYRODcY", "parentPublication": { "id": "proceedings/nca/2021/9550/0", "title": "2021 IEEE 20th International Symposium on Network Computing and Applications (NCA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/accc/2021/0743/0/074300a115", "title": "Second-Hand Car Trading Framework Based on Blockchain in Cloud Service Environment", "doi": null, "abstractUrl": "/proceedings-article/accc/2021/074300a115/1AqxV2nhxBu", "parentPublication": { "id": "proceedings/accc/2021/0743/0", "title": "2021 2nd Asia Conference on Computers and Communications (ACCC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ica/2021/0716/0/071600a019", "title": "Usage Coordination Utilizing Flexible 
Contracts in Free-floating Car Sharing", "doi": null, "abstractUrl": "/proceedings-article/ica/2021/071600a019/1BtfPt4pRS0", "parentPublication": { "id": "proceedings/ica/2021/0716/0", "title": "2021 IEEE International Conference on Agents (ICA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icse-companion/2019/1764/0/176400a031", "title": "AC3R: Automatically Reconstructing Car Crashes from Police Reports", "doi": null, "abstractUrl": "/proceedings-article/icse-companion/2019/176400a031/1cJ7n8xSxMY", "parentPublication": { "id": "proceedings/icse-companion/2019/1764/0", "title": "2019 IEEE/ACM 41st International Conference on Software Engineering: Companion Proceedings (ICSE-Companion)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icse-companion/2019/1764/0/176400a290", "title": "Automatically Reconstructing Car Crashes from Police Reports for Testing Self-Driving Cars", "doi": null, "abstractUrl": "/proceedings-article/icse-companion/2019/176400a290/1cJ7nMlYJLW", "parentPublication": { "id": "proceedings/icse-companion/2019/1764/0", "title": "2019 IEEE/ACM 41st International Conference on Software Engineering: Companion Proceedings (ICSE-Companion)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ica/2019/4026/0/08929128", "title": "Dynamic pricing method to maximize utilization of one-way car sharing service", "doi": null, "abstractUrl": "/proceedings-article/ica/2019/08929128/1fJRQxAZMas", "parentPublication": { "id": "proceedings/ica/2019/4026/0", "title": "2019 IEEE International Conference on Agents (ICA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1cI6akLvAuQ", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "acronym": "vr", "groupId": "1000791", "volume": "0", "displayVolume": "0", "year": "2019", "__typename": "ProceedingType" }, "article": { "id": "1cJ0In7PNdu", "doi": "10.1109/VR.2019.8798084", "title": "Passenger Anxiety when Seated in a Virtual Reality Self-Driving Car", "normalizedTitle": "Passenger Anxiety when Seated in a Virtual Reality Self-Driving Car", "abstract": "A virtual reality study was conducted to understand participants' anxiety when immersed in a virtual reality trip with a self-driving car. Participants were placed as passengers in a virtual car, and they were seated in the co-driver seat. Five different conditions were developed and examined. For this experiment, the Anxiety Modality Questionnaire that captures the cognitive anxiety of participants was used. The obtained results indicated that the participants' level of anxiety for the partial awareness of the driver condition is influenced less than expected. Specifically, lower levels of anxiety were found when the driver is either fully or partially aware of the traffic and the behavior of the car, and higher anxiety levels were found when the driver is completely unaware.", "abstracts": [ { "abstractType": "Regular", "content": "A virtual reality study was conducted to understand participants' anxiety when immersed in a virtual reality trip with a self-driving car. Participants were placed as passengers in a virtual car, and they were seated in the co-driver seat. Five different conditions were developed and examined. For this experiment, the Anxiety Modality Questionnaire that captures the cognitive anxiety of participants was used. The obtained results indicated that the participants' level of anxiety for the partial awareness of the driver condition is influenced less than expected. 
Specifically, lower levels of anxiety were found when the driver is either fully or partially aware of the traffic and the behavior of the car, and higher anxiety levels were found when the driver is completely unaware.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "A virtual reality study was conducted to understand participants' anxiety when immersed in a virtual reality trip with a self-driving car. Participants were placed as passengers in a virtual car, and they were seated in the co-driver seat. Five different conditions were developed and examined. For this experiment, the Anxiety Modality Questionnaire that captures the cognitive anxiety of participants was used. The obtained results indicated that the participants' level of anxiety for the partial awareness of the driver condition is influenced less than expected. Specifically, lower levels of anxiety were found when the driver is either fully or partially aware of the traffic and the behavior of the car, and higher anxiety levels were found when the driver is completely unaware.", "fno": "08798084", "keywords": [ "Automobiles", "Behavioural Sciences Computing", "Cognition", "Human Factors", "Virtual Reality", "Passenger Anxiety", "Virtual Reality Self Driving Car", "Virtual Reality Trip", "Co Driver Seat", "Anxiety Modality Questionnaire", "Cognitive Anxiety", "Driver Condition", "Anxiety Levels", "Partial Awareness", "Autonomous Automobiles", "Automobiles", "Roads", "Urban Areas", "Virtual Environments", "Human Centered Computing", "Human Computer Interaction HCI", "Interaction Paradigms", "Virtual Reality" ], "authors": [ { "affiliation": "University of the Aegean", "fullName": "Alexandros Koilias", "givenName": "Alexandros", "surname": "Koilias", "__typename": "ArticleAuthorType" }, { "affiliation": "Purdue University", "fullName": "Christos Mousas", "givenName": "Christos", "surname": "Mousas", "__typename": "ArticleAuthorType" }, { "affiliation": "Southern Illinois University", 
"fullName": "Banafsheh Rekabdar", "givenName": "Banafsheh", "surname": "Rekabdar", "__typename": "ArticleAuthorType" }, { "affiliation": "University of the Aegean", "fullName": "Christos-Nikolaos Anagnostopoulos", "givenName": "Christos-Nikolaos", "surname": "Anagnostopoulos", "__typename": "ArticleAuthorType" } ], "idPrefix": "vr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2019-03-01T00:00:00", "pubType": "proceedings", "pages": "1024-1025", "year": "2019", "issn": null, "isbn": "978-1-7281-1377-7", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "08797897", "articleId": "1cJ1cwrcT5u", "__typename": "AdjacentArticleType" }, "next": { "fno": "08798033", "articleId": "1cJ0LPy4Yb6", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/waina/2017/6231/0/6231a082", "title": "Study of the Patterns of Automatic Car Washing in the Era of Internet of Things", "doi": null, "abstractUrl": "/proceedings-article/waina/2017/6231a082/12OmNs0TL0U", "parentPublication": { "id": "proceedings/waina/2017/6231/0", "title": "2017 31st International Conference on Advanced Information Networking and Applications: Workshops (WAINA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2017/6647/0/07892298", "title": "A diminished reality simulation for driver-car interaction with transparent cockpits", "doi": null, "abstractUrl": "/proceedings-article/vr/2017/07892298/12OmNwIHozu", "parentPublication": { "id": "proceedings/vr/2017/6647/0", "title": "2017 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/waina/2018/5395/0/539501a597", "title": "An Intelligent Car Park Management System : Hierarchical Placement Algorithm Based on Nearest Location", "doi": 
null, "abstractUrl": "/proceedings-article/waina/2018/539501a597/12OmNxw5Bd8", "parentPublication": { "id": "proceedings/waina/2018/5395/0", "title": "2018 32nd International Conference on Advanced Information Networking and Applications Workshops (WAINA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icime/2018/7616/0/761600a215", "title": "GIS Application for Stimulating Car-Sharing Activity", "doi": null, "abstractUrl": "/proceedings-article/icime/2018/761600a215/17D45WrVg9w", "parentPublication": { "id": "proceedings/icime/2018/7616/0", "title": "2018 International Joint Conference on Information, Media and Engineering (ICIME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdmw/2018/9288/0/928800a604", "title": "Anomaly Detection in Car-Booking Graphs", "doi": null, "abstractUrl": "/proceedings-article/icdmw/2018/928800a604/18jXCZiRL1K", "parentPublication": { "id": "proceedings/icdmw/2018/9288/0", "title": "2018 IEEE International Conference on Data Mining Workshops (ICDMW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/citc/2021/2192/0/219200a006", "title": "Extent of Internet Use and Anxiety in Adolescence", "doi": null, "abstractUrl": "/proceedings-article/citc/2021/219200a006/1AH81MVz6lq", "parentPublication": { "id": "proceedings/citc/2021/2192/0", "title": "2021 Second International Conference on Innovative Technology Convergence (CITC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ishc/2021/6743/0/674300a036", "title": "A Novel Car-following Model by Considering Driver&#x0027;s Behaviors with Fuzzy Inference Method", "doi": null, "abstractUrl": "/proceedings-article/ishc/2021/674300a036/1EBWcmULHt6", "parentPublication": { "id": "proceedings/ishc/2021/6743/0", "title": "2021 3rd International Symposium on Smart and Healthy Cities 
(ISHC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icphds/2020/8571/0/857100a149", "title": "Investigation on Hypertension and its co-morbidity in China and Research on the Correlation between Psychological Control Source and Anxiety/Depression Status", "doi": null, "abstractUrl": "/proceedings-article/icphds/2020/857100a149/1rxhqtQwQeI", "parentPublication": { "id": "proceedings/icphds/2020/8571/0", "title": "2020 International Conference on Public Health and Data Science (ICPHDS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2021/1298/0/129800a497", "title": "Co-Drive: the experience of a shared car trip between a driver and a remote passenger", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2021/129800a497/1yfxI7RFmNy", "parentPublication": { "id": "proceedings/ismar-adjunct/2021/1298/0", "title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icphds/2021/2594/0/259400a235", "title": "Analysis of the Relationship among College Students&#x2019; Coping Strategies, Psychological Resilience, and Anxiety During COVID-19", "doi": null, "abstractUrl": "/proceedings-article/icphds/2021/259400a235/1ymIMHwCkWQ", "parentPublication": { "id": "proceedings/icphds/2021/2594/0", "title": "2021 International Conference on Public Health and Data Science (ICPHDS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNynJMVy", "title": "2016 IEEE International Parallel and Distributed Processing Symposium Workshops (IPDPSW)", "acronym": "ipdpsw", "groupId": "1800044", "volume": "0", "displayVolume": "0", "year": "2016", "__typename": "ProceedingType" }, "article": { "id": "12OmNqBbHTT", "doi": "10.1109/IPDPSW.2016.13", "title": "A Fully Parameterized Virtual Coarse Grained Reconfigurable Array for High Performance Computing Applications", "normalizedTitle": "A Fully Parameterized Virtual Coarse Grained Reconfigurable Array for High Performance Computing Applications", "abstract": "Field Programmable Gate Arrays (FPGAs) have proven their potential in accelerating High Performance Computing (HPC) Applications. Conventionally such accelerators predominantly use, FPGAs that contain fine-grained elements such as LookUp Tables (LUTs), Switch Blocks (SB) and Connection Blocks (CB) as basic programmable logic blocks. However, the conventional implementation suffers from high reconfiguration and development costs. In order to solve this problem, programmable logic components are defined at a virtual higher abstraction level. These components are called Processing Elements (PEs) and the group of PEs along with the inter-connection network form an architecture called a Virtual Coarse-Grained Reconfigurable Array (VCGRA). The abstraction helps to reconfigure the PEs faster at the intermediate level than at the lower-level of an FPGA. Conventional VCGRA implementations (built on top of the lower levels of the FPGA) use functional resources such as LUTs to establish required connections (intra-connect) within a PE. In this paper, we propose to use the parameterized reconfiguration technique to implement the intra-connections of each PE with the aim to reduce the FPGA resource utilization (LUTs). 
The technique is used to parameterize the intra-connections with parameters that only change their value infrequently (whenever a new VCGRA function has to be reconfigured) and that are implemented as constants. Since the design is optimized for these constants at every moment in time, this reduces the resource utilization. Further, inter-connections (network between the multiple PEs) of the VCGRA grid can also be parameterized so that both the inter-and intra-connect network of the VCGRA grid can be mapped onto the physical switch blocks of the FPGA. For every change in parameter values a specialized bitstream is generated on the fly and the FPGA is reconfigured using the parameterized run-time reconfiguration technique. Our results show a drastic reduction in FPGA LUT resource utilization in the PE by at least 30% and in the intra-network of the PE by 31% when implementing an HPC application.", "abstracts": [ { "abstractType": "Regular", "content": "Field Programmable Gate Arrays (FPGAs) have proven their potential in accelerating High Performance Computing (HPC) Applications. Conventionally such accelerators predominantly use, FPGAs that contain fine-grained elements such as LookUp Tables (LUTs), Switch Blocks (SB) and Connection Blocks (CB) as basic programmable logic blocks. However, the conventional implementation suffers from high reconfiguration and development costs. In order to solve this problem, programmable logic components are defined at a virtual higher abstraction level. These components are called Processing Elements (PEs) and the group of PEs along with the inter-connection network form an architecture called a Virtual Coarse-Grained Reconfigurable Array (VCGRA). The abstraction helps to reconfigure the PEs faster at the intermediate level than at the lower-level of an FPGA. 
Conventional VCGRA implementations (built on top of the lower levels of the FPGA) use functional resources such as LUTs to establish required connections (intra-connect) within a PE. In this paper, we propose to use the parameterized reconfiguration technique to implement the intra-connections of each PE with the aim to reduce the FPGA resource utilization (LUTs). The technique is used to parameterize the intra-connections with parameters that only change their value infrequently (whenever a new VCGRA function has to be reconfigured) and that are implemented as constants. Since the design is optimized for these constants at every moment in time, this reduces the resource utilization. Further, inter-connections (network between the multiple PEs) of the VCGRA grid can also be parameterized so that both the inter-and intra-connect network of the VCGRA grid can be mapped onto the physical switch blocks of the FPGA. For every change in parameter values a specialized bitstream is generated on the fly and the FPGA is reconfigured using the parameterized run-time reconfiguration technique. Our results show a drastic reduction in FPGA LUT resource utilization in the PE by at least 30% and in the intra-network of the PE by 31% when implementing an HPC application.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Field Programmable Gate Arrays (FPGAs) have proven their potential in accelerating High Performance Computing (HPC) Applications. Conventionally such accelerators predominantly use, FPGAs that contain fine-grained elements such as LookUp Tables (LUTs), Switch Blocks (SB) and Connection Blocks (CB) as basic programmable logic blocks. However, the conventional implementation suffers from high reconfiguration and development costs. In order to solve this problem, programmable logic components are defined at a virtual higher abstraction level. 
These components are called Processing Elements (PEs) and the group of PEs along with the inter-connection network form an architecture called a Virtual Coarse-Grained Reconfigurable Array (VCGRA). The abstraction helps to reconfigure the PEs faster at the intermediate level than at the lower-level of an FPGA. Conventional VCGRA implementations (built on top of the lower levels of the FPGA) use functional resources such as LUTs to establish required connections (intra-connect) within a PE. In this paper, we propose to use the parameterized reconfiguration technique to implement the intra-connections of each PE with the aim to reduce the FPGA resource utilization (LUTs). The technique is used to parameterize the intra-connections with parameters that only change their value infrequently (whenever a new VCGRA function has to be reconfigured) and that are implemented as constants. Since the design is optimized for these constants at every moment in time, this reduces the resource utilization. Further, inter-connections (network between the multiple PEs) of the VCGRA grid can also be parameterized so that both the inter-and intra-connect network of the VCGRA grid can be mapped onto the physical switch blocks of the FPGA. For every change in parameter values a specialized bitstream is generated on the fly and the FPGA is reconfigured using the parameterized run-time reconfiguration technique. 
Our results show a drastic reduction in FPGA LUT resource utilization in the PE by at least 30% and in the intra-network of the PE by 31% when implementing an HPC application.", "fno": "3682a265", "keywords": [ "Field Programmable Gate Arrays", "Table Lookup", "Switches", "Registers", "Routing", "Boolean Functions", "Feature Extraction", "PE", "FPGA", "Reconfiguration", "DCS", "TLUT", "TCON" ], "authors": [ { "affiliation": null, "fullName": "Amit Kulkarni", "givenName": "Amit", "surname": "Kulkarni", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Elias Vasteenkiste", "givenName": "Elias", "surname": "Vasteenkiste", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Dirk Stroobandt", "givenName": "Dirk", "surname": "Stroobandt", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Andreas Brokalakis", "givenName": "Andreas", "surname": "Brokalakis", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Antonios Nikitakis", "givenName": "Antonios", "surname": "Nikitakis", "__typename": "ArticleAuthorType" } ], "idPrefix": "ipdpsw", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2016-05-01T00:00:00", "pubType": "proceedings", "pages": "265-270", "year": "2016", "issn": null, "isbn": "978-1-5090-3682-0", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "3682a259", "articleId": "12OmNvEyR9u", "__typename": "AdjacentArticleType" }, "next": { "fno": "3682a271", "articleId": "12OmNxwWoGX", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icassp/1988/9999/0/00197033", "title": "A reconfigurable systolic array for real-time image processing", "doi": null, "abstractUrl": "/proceedings-article/icassp/1988/00197033/12OmNAo45Kt", "parentPublication": { "id": 
"proceedings/icassp/1988/9999/0", "title": "ICASSP-88., International Conference on Acoustics, Speech, and Signal Processing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/mcsoc/2014/4305/0/4305a237", "title": "Design of a Coarse-Grained Processing Element for Matrix Multiplication on FPGA", "doi": null, "abstractUrl": "/proceedings-article/mcsoc/2014/4305a237/12OmNApcun6", "parentPublication": { "id": "proceedings/mcsoc/2014/4305/0", "title": "2014 IEEE 8th International Symposium on Embedded Multicore/Manycore SoCs (MCSoC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/date/2012/2145/0/06176724", "title": "Mapping into LUT structures", "doi": null, "abstractUrl": "/proceedings-article/date/2012/06176724/12OmNvRU0gx", "parentPublication": { "id": "proceedings/date/2012/2145/0", "title": "Design, Automation &amp; Test in Europe Conference &amp; Exhibition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/isvlsi/2017/6762/0/6762a116", "title": "An Efficient Design of an FPGA-Based Multiplier Using LUT Merging Theorem", "doi": null, "abstractUrl": "/proceedings-article/isvlsi/2017/6762a116/12OmNvTjZVl", "parentPublication": { "id": "proceedings/isvlsi/2017/6762/0", "title": "2017 IEEE Computer Society Annual Symposium on VLSI (ISVLSI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aqtr/2018/2205/0/08402793", "title": "Trojan circuits masking and debugging of combinational circuits with LUT insertion", "doi": null, "abstractUrl": "/proceedings-article/aqtr/2018/08402793/12OmNzIl3Eu", "parentPublication": { "id": "proceedings/aqtr/2018/2205/0", "title": "2018 IEEE International Conference on Automation, Quality and Testing, Robotics (AQTR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/ewdts/2017/3299/0/08110096", "title": "Logic circuit design with gates, LUTs and MUXs oriented to mask faults", "doi": null, "abstractUrl": "/proceedings-article/ewdts/2017/08110096/12OmNzWx04n", "parentPublication": { "id": "proceedings/ewdts/2017/3299/0", "title": "2017 IEEE East-West Design & Test Symposium (EWDTS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/dsd/2011/1048/0/06037389", "title": "On the Cascade Implementation of Multiple-Output Sparse Logic Functions", "doi": null, "abstractUrl": "/proceedings-article/dsd/2011/06037389/12OmNzwpUbG", "parentPublication": { "id": "proceedings/dsd/2011/1048/0", "title": "2011 14th Euromicro Conference on Digital System Design. Architectures, Methods and Tools. (DSD 2011)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/si/2016/05/07273971", "title": "Memory-Aware Loop Mapping on Coarse-Grained Reconfigurable Architectures", "doi": null, "abstractUrl": "/journal/si/2016/05/07273971/13rRUyfbwom", "parentPublication": { "id": "trans/si", "title": "IEEE Transactions on Very Large Scale Integration (VLSI) Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/mcsoc/2021/3860/0/386000a265", "title": "A Highly Efficient Layout-Aware FPGA Overlay Accelerator Mapping Method", "doi": null, "abstractUrl": "/proceedings-article/mcsoc/2021/386000a265/1AIN0m4wiUE", "parentPublication": { "id": "proceedings/mcsoc/2021/3860/0", "title": "2021 IEEE 14th International Symposium on Embedded Multicore/Many-core Systems-on-Chip (MCSoC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismvl/2022/2395/0/239500a151", "title": "LUT Cascade Realization of Threshold Functions and Its Application to Implementation of Ternary Weight Neural Networks", "doi": null, "abstractUrl": 
"/proceedings-article/ismvl/2022/239500a151/1Et602PKY6s", "parentPublication": { "id": "proceedings/ismvl/2022/2395/0", "title": "2022 IEEE 52nd International Symposium on Multiple-Valued Logic (ISMVL)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNBrDqEk", "title": "Parallel Algorithms / Architecture Synthesis, AIZU International Symposium on", "acronym": "pas", "groupId": "1000527", "volume": "0", "displayVolume": "0", "year": "1995", "__typename": "ProceedingType" }, "article": { "id": "12OmNqBtiF1", "doi": "10.1109/AISPAS.1995.401355", "title": "Aizu supercomputer: a massively parallel system for virtual reality problems", "normalizedTitle": "Aizu supercomputer: a massively parallel system for virtual reality problems", "abstract": "The Aizu supercomputer is a massively parallel system suited to the solution of virtual reality problems and the support of multimedia applications. It employs a highly parallel MIMD architecture using a conflict-free internetwork system. The scalable communication system consists of two networks: a pyramid network and a reconfigurable network using optical links. The Aizu Supercomputer has a cluster configuration and a shared memory. Each PE includes 113 SPECmark and one cluster is organized with 8 PEs. In the trial production, the supercomputer will include 1365 PEs with more than 100 GFlops at peak performance.", "abstracts": [ { "abstractType": "Regular", "content": "The Aizu supercomputer is a massively parallel system suited to the solution of virtual reality problems and the support of multimedia applications. It employs a highly parallel MIMD architecture using a conflict-free internetwork system. The scalable communication system consists of two networks: a pyramid network and a reconfigurable network using optical links. The Aizu Supercomputer has a cluster configuration and a shared memory. Each PE includes 113 SPECmark and one cluster is organized with 8 PEs. 
In the trial production, the supercomputer will include 1365 PEs with more than 100 GFlops at peak performance.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "The Aizu supercomputer is a massively parallel system suited to the solution of virtual reality problems and the support of multimedia applications. It employs a highly parallel MIMD architecture using a conflict-free internetwork system. The scalable communication system consists of two networks: a pyramid network and a reconfigurable network using optical links. The Aizu Supercomputer has a cluster configuration and a shared memory. Each PE includes 113 SPECmark and one cluster is organized with 8 PEs. In the trial production, the supercomputer will include 1365 PEs with more than 100 GFlops at peak performance.", "fno": "70380054", "keywords": [ "Virtual Reality Parallel Machines Multimedia Computing Shared Memory Systems Parallel Architectures Multiprocessor Interconnection Networks Performance Evaluation Computer Graphic Equipment Aizu Supercomputer Massively Parallel System Virtual Reality Problems Multimedia Applications Highly Parallel MIMD Architecture Conflict Free Internetwork System Scalable Communication System Pyramid Network Reconfigurable Network Optical Links Cluster Configuration Shared Memory 113 SPE Cmark Peak Performance 100 GFLOPS" ], "authors": [ { "affiliation": "Dept. of Comput. Hardware, Aizu Univ., Japan", "fullName": "T. 
Ikedo", "givenName": "T.", "surname": "Ikedo", "__typename": "ArticleAuthorType" } ], "idPrefix": "pas", "isOpenAccess": false, "showRecommendedArticles": false, "showBuyMe": true, "hasPdf": true, "pubDate": "1995-03-01T00:00:00", "pubType": "proceedings", "pages": "54", "year": "1995", "issn": null, "isbn": "0-8186-7038-X", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "70380045", "articleId": "12OmNxWuigJ", "__typename": "AdjacentArticleType" }, "next": { "fno": "70380063", "articleId": "12OmNqI04Ir", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [], "articleVideos": [] }
{ "proceeding": { "id": "12OmNwdbUZ9", "title": "Cluster Computing and the Grid, IEEE International Symposium on", "acronym": "ccgrid", "groupId": "1000093", "volume": "0", "displayVolume": "0", "year": "2012", "__typename": "ProceedingType" }, "article": { "id": "12OmNrJ11Ha", "doi": "10.1109/CCGrid.2012.35", "title": "A Map-Reduce Based Framework for Heterogeneous Processing Element Cluster Environments", "normalizedTitle": "A Map-Reduce Based Framework for Heterogeneous Processing Element Cluster Environments", "abstract": "In this paper, we present our design of a Processing Element (PE) Aware MapReduce base framework, Pamar. Pamar is designed for supporting distributed computing on clusters where node PE configurations are asymmetric on different nodes. Pamar's main goal is to allow users to seamlessly utilize different kinds of processing elements (e.g., CPUs or GPUs) collaboratively for large scale data processing. To show proof of concept, we have incorporated our designs into the Hadoop framework and tested it on cluster environments having asymmetric node PE configurations. We demonstrate Pamar's ability to identify PEs available on each node and match-make user jobs with nodes, base on job PE requirements. Pamar allows users to easily parallelize applications across large datasets and at the same time utilizes different PEs for processing different classes of functions efficiently. The experiments show improvement in job queue completion time with Pamar over clusters with asymmetric nodes as compared to clusters with symmetric nodes.", "abstracts": [ { "abstractType": "Regular", "content": "In this paper, we present our design of a Processing Element (PE) Aware MapReduce base framework, Pamar. Pamar is designed for supporting distributed computing on clusters where node PE configurations are asymmetric on different nodes. 
Pamar's main goal is to allow users to seamlessly utilize different kinds of processing elements (e.g., CPUs or GPUs) collaboratively for large scale data processing. To show proof of concept, we have incorporated our designs into the Hadoop framework and tested it on cluster environments having asymmetric node PE configurations. We demonstrate Pamar's ability to identify PEs available on each node and match-make user jobs with nodes, base on job PE requirements. Pamar allows users to easily parallelize applications across large datasets and at the same time utilizes different PEs for processing different classes of functions efficiently. The experiments show improvement in job queue completion time with Pamar over clusters with asymmetric nodes as compared to clusters with symmetric nodes.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In this paper, we present our design of a Processing Element (PE) Aware MapReduce base framework, Pamar. Pamar is designed for supporting distributed computing on clusters where node PE configurations are asymmetric on different nodes. Pamar's main goal is to allow users to seamlessly utilize different kinds of processing elements (e.g., CPUs or GPUs) collaboratively for large scale data processing. To show proof of concept, we have incorporated our designs into the Hadoop framework and tested it on cluster environments having asymmetric node PE configurations. We demonstrate Pamar's ability to identify PEs available on each node and match-make user jobs with nodes, base on job PE requirements. Pamar allows users to easily parallelize applications across large datasets and at the same time utilizes different PEs for processing different classes of functions efficiently. 
The experiments show improvement in job queue completion time with Pamar over clusters with asymmetric nodes as compared to clusters with symmetric nodes.", "fno": "4691a057", "keywords": [ "Map Reduce", "GPGPU", "Heterogeneous Resource Framework" ], "authors": [ { "affiliation": null, "fullName": "Yu Shyang Tan", "givenName": "Yu Shyang", "surname": "Tan", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Bu-Sung Lee", "givenName": "Bu-Sung", "surname": "Lee", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Bingsheng He", "givenName": "Bingsheng", "surname": "He", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Roy H. Campbell", "givenName": "Roy H.", "surname": "Campbell", "__typename": "ArticleAuthorType" } ], "idPrefix": "ccgrid", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2012-05-01T00:00:00", "pubType": "proceedings", "pages": "57-64", "year": "2012", "issn": null, "isbn": "978-0-7695-4691-9", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "4691a049", "articleId": "12OmNzuZUum", "__typename": "AdjacentArticleType" }, "next": { "fno": "4691a065", "articleId": "12OmNxEBz2H", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/ccgrid/2013/4996/0/4996a277", "title": "A Scalable Implementation of a MapReduce-based Graph Processing Algorithm for Large-Scale Heterogeneous Supercomputers", "doi": null, "abstractUrl": "/proceedings-article/ccgrid/2013/4996a277/12OmNAGNCe4", "parentPublication": { "id": "proceedings/ccgrid/2013/4996/0", "title": "Cluster Computing and the Grid, IEEE International Symposium on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/fpl/2005/9362/0/01515747", "title": "Cluster architecture for reconfigurable 
signal processing engine for wireless communication", "doi": null, "abstractUrl": "/proceedings-article/fpl/2005/01515747/12OmNqOffBc", "parentPublication": { "id": "proceedings/fpl/2005/9362/0", "title": "Proceedings. 2005 International Conference on Field Programmable Logic and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cicsyn/2011/4482/0/4482a287", "title": "An Adaptive Task Allocation Approach for MapReduce in a Heterogeneous Cloud", "doi": null, "abstractUrl": "/proceedings-article/cicsyn/2011/4482a287/12OmNrJiCMw", "parentPublication": { "id": "proceedings/cicsyn/2011/4482/0", "title": "Computational Intelligence, Communication Systems and Networks, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/gcc/2009/3766/0/3766a218", "title": "A Dynamic MapReduce Scheduler for Heterogeneous Workloads", "doi": null, "abstractUrl": "/proceedings-article/gcc/2009/3766a218/12OmNxETa6q", "parentPublication": { "id": "proceedings/gcc/2009/3766/0", "title": "2009 Eighth International Conference on Grid and Cooperative Computing (GCC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/skg/2010/4189/0/4189a135", "title": "Multiple-Job Optimization in MapReduce for Heterogeneous Workloads", "doi": null, "abstractUrl": "/proceedings-article/skg/2010/4189a135/12OmNxwWoH1", "parentPublication": { "id": "proceedings/skg/2010/4189/0", "title": "Semantics, Knowledge and Grid, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ipdps/2004/2132/2/213220105b", "title": "An Execution-Time Estimation Model for Heterogeneous Clusters", "doi": null, "abstractUrl": "/proceedings-article/ipdps/2004/213220105b/12OmNy87Qw4", "parentPublication": { "id": "proceedings/ipdps/2004/2132/2", "title": "Parallel and 
Distributed Processing Symposium, International", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cloudcom/2010/4302/0/4302a733", "title": "Hybrid Map Task Scheduling for GPU-Based Heterogeneous Clusters", "doi": null, "abstractUrl": "/proceedings-article/cloudcom/2010/4302a733/12OmNz2kqrb", "parentPublication": { "id": "proceedings/cloudcom/2010/4302/0", "title": "2010 IEEE Second International Conference on Cloud Computing Technology and Science", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cit/2010/4108/0/4108c736", "title": "SAMR: A Self-adaptive MapReduce Scheduling Algorithm in Heterogeneous Environment", "doi": null, "abstractUrl": "/proceedings-article/cit/2010/4108c736/12OmNzE54GP", "parentPublication": { "id": "proceedings/cit/2010/4108/0", "title": "Computer and Information Technology, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ccgrid/2012/4691/0/4691a049", "title": "MARLA: MapReduce for Heterogeneous Clusters", "doi": null, "abstractUrl": "/proceedings-article/ccgrid/2012/4691a049/12OmNzuZUum", "parentPublication": { "id": "proceedings/ccgrid/2012/4691/0", "title": "Cluster Computing and the Grid, IEEE International Symposium on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cs/2005/02/c2040", "title": "Resource-Aware Scientific Computation on a Heterogeneous Cluster", "doi": null, "abstractUrl": "/magazine/cs/2005/02/c2040/13rRUwInvEq", "parentPublication": { "id": "mags/cs", "title": "Computing in Science & Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNwvVrMb", "title": "FIE '98. 28th Annual Frontiers in Education Conference. Moving from 'Teacher-Centered' to 'Learner-Centered' Education. Conference Proceedings (Cat. No.98CH36214)", "acronym": "fie", "groupId": "1000297", "volume": "2", "displayVolume": "2", "year": "1998", "__typename": "ProceedingType" }, "article": { "id": "12OmNxRnvQX", "doi": "10.1109/FIE.1998.738775", "title": "CoVE, Jersey, MUM-can virtual environments be the next step?", "normalizedTitle": "CoVE, Jersey, MUM-can virtual environments be the next step?", "abstract": "Computer networks are at the center of new educational uses of computer technology. However, the prevailing model remains restricted to a flat world of documents and limited communication spaces. We think that the true potential of computer networks is in using virtual environments (VEs) that offer unlimited and natural support for a variety of individual and social processes, particularly collaborative ones. We have implemented or adapted and extended several VEs and used them in a limited way in educational and other settings. We describe the experience that led to our latest model and its prototype under development.", "abstracts": [ { "abstractType": "Regular", "content": "Computer networks are at the center of new educational uses of computer technology. However, the prevailing model remains restricted to a flat world of documents and limited communication spaces. We think that the true potential of computer networks is in using virtual environments (VEs) that offer unlimited and natural support for a variety of individual and social processes, particularly collaborative ones. We have implemented or adapted and extended several VEs and used them in a limited way in educational and other settings. 
We describe the experience that led to our latest model and its prototype under development.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Computer networks are at the center of new educational uses of computer technology. However, the prevailing model remains restricted to a flat world of documents and limited communication spaces. We think that the true potential of computer networks is in using virtual environments (VEs) that offer unlimited and natural support for a variety of individual and social processes, particularly collaborative ones. We have implemented or adapted and extended several VEs and used them in a limited way in educational and other settings. We describe the experience that led to our latest model and its prototype under development.", "fno": "00738775", "keywords": [], "authors": [ { "affiliation": "Jodrey Sch. of Comput. Sci., Acadia Univ., Wolfville, NS, Canada", "fullName": "I. Tomek", "givenName": "I.", "surname": "Tomek", "__typename": "ArticleAuthorType" }, { "affiliation": "Dept. of Mech. Eng., Stanford Univ., CA, USA", "fullName": "R. Giles", "givenName": "R.", "surname": "Giles", "__typename": "ArticleAuthorType" } ], "idPrefix": "fie", "isOpenAccess": false, "showRecommendedArticles": false, "showBuyMe": true, "hasPdf": true, "pubDate": "1998-11-01T00:00:00", "pubType": "proceedings", "pages": "701-704", "year": "1998", "issn": null, "isbn": "0-7803-4762-5", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "00738773", "articleId": "12OmNvkGW1q", "__typename": "AdjacentArticleType" }, "next": { "fno": "00738776", "articleId": "12OmNwMXnq7", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [], "articleVideos": [] }
{ "proceeding": { "id": "1CJcAaH6aYg", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "acronym": "vrw", "groupId": "1836626", "volume": "0", "displayVolume": "0", "year": "2022", "__typename": "ProceedingType" }, "article": { "id": "1CJdrSGz23S", "doi": "10.1109/VRW55335.2022.00150", "title": "&#x201C;What a Mess!&#x201D;: Traces of Use to Increase Asynchronous Social Presence in Shared Virtual Environments", "normalizedTitle": "“What a Mess!”: Traces of Use to Increase Asynchronous Social Presence in Shared Virtual Environments", "abstract": "Shared virtual environments (VEs) are challenged conveying and triggering users&#x0027; feelings of social presence. Traces of use are implicit evidence of prior interactions that support social awareness in the real environment (RE). However, they have not been explored in VEs so far. We investigate the traces&#x0027; effect on users&#x0027; perception of asynchronous social presences in a within-subject study (<tex>Z_$\\mathrm{N}=26$_Z</tex>) by comparing the users&#x0027; experience with and without traces. The traces significantly increased the feeling of social presence. We contribute an initial exploration of the traces of use concept in VE to design shared social spaces for long-term use.", "abstracts": [ { "abstractType": "Regular", "content": "Shared virtual environments (VEs) are challenged conveying and triggering users&#x0027; feelings of social presence. Traces of use are implicit evidence of prior interactions that support social awareness in the real environment (RE). However, they have not been explored in VEs so far. We investigate the traces&#x0027; effect on users&#x0027; perception of asynchronous social presences in a within-subject study (<tex>$\\mathrm{N}=26$</tex>) by comparing the users&#x0027; experience with and without traces. The traces significantly increased the feeling of social presence. 
We contribute an initial exploration of the traces of use concept in VE to design shared social spaces for long-term use.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Shared virtual environments (VEs) are challenged conveying and triggering users' feelings of social presence. Traces of use are implicit evidence of prior interactions that support social awareness in the real environment (RE). However, they have not been explored in VEs so far. We investigate the traces' effect on users' perception of asynchronous social presences in a within-subject study (-) by comparing the users' experience with and without traces. The traces significantly increased the feeling of social presence. We contribute an initial exploration of the traces of use concept in VE to design shared social spaces for long-term use.", "fno": "840200a598", "keywords": [ "Groupware", "Virtual Reality", "Asynchronous Social Presence", "Shared Virtual Environments", "User Experience", "Social Awareness", "Social Spaces", "Real Environment", "Social Computing", "Three Dimensional Displays", "Conferences", "Design Methodology", "Virtual Environments", "User Interfaces", "User Experience", "Human Centered Computing", "Collaborative And Social Computing Design And Evaluation Methods", "Design And Evaluation Methods" ], "authors": [ { "affiliation": "LMU Munich", "fullName": "Linda Hirsch", "givenName": "Linda", "surname": "Hirsch", "__typename": "ArticleAuthorType" }, { "affiliation": "LMU Munich", "fullName": "Anna Haller", "givenName": "Anna", "surname": "Haller", "__typename": "ArticleAuthorType" }, { "affiliation": "LMU Munich", "fullName": "Andreas Butz", "givenName": "Andreas", "surname": "Butz", "__typename": "ArticleAuthorType" }, { "affiliation": "Augsburg University", "fullName": "Ceenu George", "givenName": "Ceenu", "surname": "George", "__typename": "ArticleAuthorType" } ], "idPrefix": "vrw", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, 
"hasPdf": true, "pubDate": "2022-03-01T00:00:00", "pubType": "proceedings", "pages": "598-599", "year": "2022", "issn": null, "isbn": "978-1-6654-8402-2", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [ { "id": "1CJdrP6Sjtu", "name": "pvrw202284020-09757716s1-mm_840200a598.zip", "size": "4.09 MB", "location": "https://www.computer.org/csdl/api/v1/extra/pvrw202284020-09757716s1-mm_840200a598.zip", "__typename": "WebExtraType" } ], "adjacentArticles": { "previous": { "fno": "840200a596", "articleId": "1CJf4aHcqoU", "__typename": "AdjacentArticleType" }, "next": { "fno": "840200a600", "articleId": "1CJdFxYlvfG", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/vr/1999/0093/0/00930096", "title": "User-Centered Design and Evaluation of a Real-Time Battlefield Visualization Virtual Environment", "doi": null, "abstractUrl": "/proceedings-article/vr/1999/00930096/12OmNA2cYEt", "parentPublication": { "id": "proceedings/vr/1999/0093/0", "title": "Proceedings of Virtual Reality", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wcre/2011/1948/0/06079825", "title": "Focusing on Execution Traces Using Diver", "doi": null, "abstractUrl": "/proceedings-article/wcre/2011/06079825/12OmNqzcvMQ", "parentPublication": { "id": "proceedings/wcre/2011/1948/0", "title": "2011 18th Working Conference on Reverse Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/pacificvis/2008/1966/0/04475464", "title": "Multiple Views on System Traces", "doi": null, "abstractUrl": "/proceedings-article/pacificvis/2008/04475464/12OmNwDACkl", "parentPublication": { "id": "proceedings/pacificvis/2008/1966/0", "title": "IEEE Pacific Visualization Symposium 2008", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/mu/1996/01/u1030", "title": 
"Interactive Multiuser VEs in the DIVE System", "doi": null, "abstractUrl": "/magazine/mu/1996/01/u1030/13rRUwvT9dv", "parentPublication": { "id": "mags/mu", "title": "IEEE MultiMedia", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2022/8402/0/840200a826", "title": "The Sloped Shoes: Influence Human Perception of the Virtual Slope", "doi": null, "abstractUrl": "/proceedings-article/vrw/2022/840200a826/1CJd8KwQQgM", "parentPublication": { "id": "proceedings/vrw/2022/8402/0", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2022/8402/0/840200a588", "title": "Studying the User Adaptability to Hyperbolic Spaces and Delay Time Scenarios", "doi": null, "abstractUrl": "/proceedings-article/vrw/2022/840200a588/1CJf2hnKVpK", "parentPublication": { "id": "proceedings/vrw/2022/8402/0", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2022/8402/0/840200a204", "title": "Empathy building &#x2018;in the wild&#x2019; - a reflection on an avoidance of the emotional engagement", "doi": null, "abstractUrl": "/proceedings-article/vrw/2022/840200a204/1CJfhor2tWw", "parentPublication": { "id": "proceedings/vrw/2022/8402/0", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/smartcomp/2022/8152/0/815200a133", "title": "Human Experiences in Teaching Robots: Understanding Agent Expressivity and Learning Effects through a Virtual Robot Arm", "doi": null, "abstractUrl": "/proceedings-article/smartcomp/2022/815200a133/1F0gy5khAEE", "parentPublication": { "id": 
"proceedings/smartcomp/2022/8152/0", "title": "2022 IEEE International Conference on Smart Computing (SMARTCOMP)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/11/09873959", "title": "Traces in Virtual Environments: A Framework and Exploration to Conceptualize the Design of Social Virtual Environments", "doi": null, "abstractUrl": "/journal/tg/2022/11/09873959/1GjwNeqq7aE", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ts/2021/10/08854183", "title": "How to &#x201C;DODGE&#x201D; Complex Software Analytics", "doi": null, "abstractUrl": "/journal/ts/2021/10/08854183/1dM2hVjAPgA", "parentPublication": { "id": "trans/ts", "title": "IEEE Transactions on Software Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1zuttwExkXK", "title": "2021 ACM/IEEE International Conference on Model Driven Engineering Languages and Systems Companion (MODELS-C)", "acronym": "models-c", "groupId": "1833924", "volume": "0", "displayVolume": "0", "year": "2021", "__typename": "ProceedingType" }, "article": { "id": "1zutwgWIZ9u", "doi": "10.1109/MODELS-C53483.2021.00039", "title": "Models Meet Data: Challenges to Create Virtual Entities for Digital Twins", "normalizedTitle": "Models Meet Data: Challenges to Create Virtual Entities for Digital Twins", "abstract": "In recent years, digital twin (DT) technology has moved to the center of attention of many researchers and engineers. Commonly, a digital twin is defined based on a virtual entity (VE) that exhibits similar behavior to its physical counterpart, and that is coupled to this physical entity (PE). The VE thus forms a core part of any digital twin. While VEs may differ vastly&#x2014;from ones based on simple simulation to high-fidelity virtual mirroring of the corresponding PE&#x2014;they are typically composed of multiple models that may originate from multiple domains, address different aspects, and are expressed and processed using different tools and languages. Furthermore, the use of time series data&#x2014;whether historical or real-time or both&#x2014;from the PE distinguishes VEs from mere simulations. As a consequence of the modeling landscape complexity and the data aspect of VEs, the design of a digital twin and specifically of the VE as part of it represents several challenges. In this paper, we present our vision for the development, evolution, maintenance, and verification of such virtual entities for digital twins.", "abstracts": [ { "abstractType": "Regular", "content": "In recent years, digital twin (DT) technology has moved to the center of attention of many researchers and engineers. 
Commonly, a digital twin is defined based on a virtual entity (VE) that exhibits similar behavior to its physical counterpart, and that is coupled to this physical entity (PE). The VE thus forms a core part of any digital twin. While VEs may differ vastly&#x2014;from ones based on simple simulation to high-fidelity virtual mirroring of the corresponding PE&#x2014;they are typically composed of multiple models that may originate from multiple domains, address different aspects, and are expressed and processed using different tools and languages. Furthermore, the use of time series data&#x2014;whether historical or real-time or both&#x2014;from the PE distinguishes VEs from mere simulations. As a consequence of the modeling landscape complexity and the data aspect of VEs, the design of a digital twin and specifically of the VE as part of it represents several challenges. In this paper, we present our vision for the development, evolution, maintenance, and verification of such virtual entities for digital twins.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In recent years, digital twin (DT) technology has moved to the center of attention of many researchers and engineers. Commonly, a digital twin is defined based on a virtual entity (VE) that exhibits similar behavior to its physical counterpart, and that is coupled to this physical entity (PE). The VE thus forms a core part of any digital twin. While VEs may differ vastly—from ones based on simple simulation to high-fidelity virtual mirroring of the corresponding PE—they are typically composed of multiple models that may originate from multiple domains, address different aspects, and are expressed and processed using different tools and languages. Furthermore, the use of time series data—whether historical or real-time or both—from the PE distinguishes VEs from mere simulations. 
As a consequence of the modeling landscape complexity and the data aspect of VEs, the design of a digital twin and specifically of the VE as part of it represents several challenges. In this paper, we present our vision for the development, evolution, maintenance, and verification of such virtual entities for digital twins.", "fno": "248400a225", "keywords": [ "Augmented Reality", "Program Verification", "Software Maintenance", "Software Tools", "Time Series", "Virtual Reality Languages", "Digital Twin", "Virtual Entity", "DT", "VE", "Physical Entity", "PE", "Time Series Data", "Software Evolution", "Software Maintenance", "Software Development", "Software Verification", "Digital Twin", "Time Series Analysis", "Maintenance Engineering", "Real Time Systems", "Model Driven Engineering", "Data Models", "Complexity Theory", "Digital Twin", "Model Consistency", "Model Orchestration", "Dynamic Consistency", "Model Management", "Digital Twin Development Roadmap" ], "authors": [ { "affiliation": "Eindhoven University of Technology,Software Engineering & Technology cluster,Department of Mathematics and Computer Science,Eindhoven,The Netherlands", "fullName": "Mark Van Den Brand", "givenName": "Mark", "surname": "Van Den Brand", "__typename": "ArticleAuthorType" }, { "affiliation": "Eindhoven University of Technology,Software Engineering & Technology cluster,Department of Mathematics and Computer Science,Eindhoven,The Netherlands", "fullName": "Loek Cleophas", "givenName": "Loek", "surname": "Cleophas", "__typename": "ArticleAuthorType" }, { "affiliation": "Tilburg University,Tilburg School of Humanities and Digital Sciences,The Netherlands", "fullName": "Raghavendran Gunasekaran", "givenName": "Raghavendran", "surname": "Gunasekaran", "__typename": "ArticleAuthorType" }, { "affiliation": "Tilburg University,Tilburg School of Humanities and Digital Sciences,The Netherlands", "fullName": "Boudewijn Haverkort", "givenName": "Boudewijn", "surname": "Haverkort", "__typename": 
"ArticleAuthorType" }, { "affiliation": "Eindhoven University of Technology,Software Engineering & Technology cluster,Department of Mathematics and Computer Science,Eindhoven,The Netherlands", "fullName": "David A. Manrique Negrin", "givenName": "David A. Manrique", "surname": "Negrin", "__typename": "ArticleAuthorType" }, { "affiliation": "Eindhoven University of Technology,Software Engineering & Technology cluster,Department of Mathematics and Computer Science,Eindhoven,The Netherlands", "fullName": "Hossain Muhammad Muctadir", "givenName": "Hossain Muhammad", "surname": "Muctadir", "__typename": "ArticleAuthorType" } ], "idPrefix": "models-c", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2021-10-01T00:00:00", "pubType": "proceedings", "pages": "225-228", "year": "2021", "issn": null, "isbn": "978-1-6654-2484-4", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "248400a221", "articleId": "1zutIV8JoOc", "__typename": "AdjacentArticleType" }, "next": { "fno": "248400a229", "articleId": "1zutu2viGSA", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/netcit/2021/0070/0/007000a377", "title": "Research on the Monitoring of the Industrial Robot Motion Based on Digital Twins", "doi": null, "abstractUrl": "/proceedings-article/netcit/2021/007000a377/1BES4x9VUTC", "parentPublication": { "id": "proceedings/netcit/2021/0070/0", "title": "2021 International Conference on Networking, Communications and Information Technology (NetCIT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/csde/2021/9552/0/09718481", "title": "Cyber-Physical Systems and Digital Twins in Practice &#x2013; A Real-Life Application Example", "doi": null, "abstractUrl": "/proceedings-article/csde/2021/09718481/1BogZmWUiCA", "parentPublication": { 
"id": "proceedings/csde/2021/9552/0", "title": "2021 IEEE Asia-Pacific Conference on Computer Science and Data Engineering (CSDE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/tps-isa/2021/1623/0/162300a070", "title": "Edge Centric Secure Data Sharing with Digital Twins in Smart Ecosystems", "doi": null, "abstractUrl": "/proceedings-article/tps-isa/2021/162300a070/1CzeqNtBtPG", "parentPublication": { "id": "proceedings/tps-isa/2021/1623/0", "title": "2021 Third IEEE International Conference on Trust, Privacy and Security in Intelligent Systems and Applications (TPS-ISA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aiea/2020/8288/0/828800a283", "title": "Digital Twins Driving Model Based on Petri Net in Industrial Pipeline", "doi": null, "abstractUrl": "/proceedings-article/aiea/2020/828800a283/1nTuib3XxNC", "parentPublication": { "id": "proceedings/aiea/2020/8288/0", "title": "2020 International Conference on Artificial Intelligence and Electromechanical Automation (AIEA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ises/2020/0478/0/047800a155", "title": "An Implementation of AR Enabled Digital Twins for 3-D Printing", "doi": null, "abstractUrl": "/proceedings-article/ises/2020/047800a155/1txTJ028HvO", "parentPublication": { "id": "proceedings/ises/2020/0478/0", "title": "2020 IEEE International Symposium on Smart Electronic Systems (iSES) (Formerly iNiS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/so/2022/02/09637491", "title": "Axispot: A Distributed Spatiotemporal Data Management System for Digital Twins of Moving Objects", "doi": null, "abstractUrl": "/magazine/so/2022/02/09637491/1z77NpXxlyE", "parentPublication": { "id": "mags/so", "title": "IEEE Software", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/models-c/2021/2484/0/248400a741", "title": "Combining Low-Code Programming and SDL-Based Modeling with Snap&#x0021; in the Industry 4.0 Context", "doi": null, "abstractUrl": "/proceedings-article/models-c/2021/248400a741/1zutG89m10s", "parentPublication": { "id": "proceedings/models-c/2021/2484/0", "title": "2021 ACM/IEEE International Conference on Model Driven Engineering Languages and Systems Companion (MODELS-C)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/models-c/2021/2484/0/248400a237", "title": "Digital Twin Operational Platform for Connectivity and Accessibility using Flask Python", "doi": null, "abstractUrl": "/proceedings-article/models-c/2021/248400a237/1zutGBdOQY8", "parentPublication": { "id": "proceedings/models-c/2021/2484/0", "title": "2021 ACM/IEEE International Conference on Model Driven Engineering Languages and Systems Companion (MODELS-C)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/models-c/2021/2484/0/248400a221", "title": "Inference of Simulation Models in Digital Twins by Reinforcement Learning", "doi": null, "abstractUrl": "/proceedings-article/models-c/2021/248400a221/1zutIV8JoOc", "parentPublication": { "id": "proceedings/models-c/2021/2484/0", "title": "2021 ACM/IEEE International Conference on Model Driven Engineering Languages and Systems Companion (MODELS-C)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/models-c/2021/2484/0/248400a233", "title": "Using Ptolemy II as a Framework for Virtual Entity Integration and Orchestration in Digital Twins", "doi": null, "abstractUrl": "/proceedings-article/models-c/2021/248400a233/1zutvbOAITu", "parentPublication": { "id": "proceedings/models-c/2021/2484/0", "title": "2021 ACM/IEEE International Conference on Model Driven Engineering Languages and Systems Companion (MODELS-C)", "__typename": "ParentPublication" }, 
"__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNzcxZq6", "title": "2007 14th International Conference on Image Analysis and Processing - ICIAP 2007", "acronym": "iciap", "groupId": "1000346", "volume": "0", "displayVolume": "0", "year": "2007", "__typename": "ProceedingType" }, "article": { "id": "12OmNCmGNNy", "doi": "10.1109/ICIAP.2007.4362788", "title": "Real-time Gesture Recognition in Advanced Videocommunication Services", "normalizedTitle": "Real-time Gesture Recognition in Advanced Videocommunication Services", "abstract": "Gesture recognition becomes feasible even for realtime applications and offers therefore a wide range of new capabilities for novel approaches of human computer interaction, but also for many multi-media services. In this paper, we present a real-time solution for gesture recognition applied in a novel avatar animation videocommunication service. We focus on user friendliness, robustness and easy usage. Hence, the algorithm does not require any training or adaptation to a specific user and can be applied in arbitrary unconstrained environment.", "abstracts": [ { "abstractType": "Regular", "content": "Gesture recognition becomes feasible even for realtime applications and offers therefore a wide range of new capabilities for novel approaches of human computer interaction, but also for many multi-media services. In this paper, we present a real-time solution for gesture recognition applied in a novel avatar animation videocommunication service. We focus on user friendliness, robustness and easy usage. Hence, the algorithm does not require any training or adaptation to a specific user and can be applied in arbitrary unconstrained environment.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Gesture recognition becomes feasible even for realtime applications and offers therefore a wide range of new capabilities for novel approaches of human computer interaction, but also for many multi-media services. 
In this paper, we present a real-time solution for gesture recognition applied in a novel avatar animation videocommunication service. We focus on user friendliness, robustness and easy usage. Hence, the algorithm does not require any training or adaptation to a specific user and can be applied in arbitrary unconstrained environment.", "fno": "04362788", "keywords": [ "Avatars", "Gesture Recognition", "Real Time Systems", "Video Communication", "Gesture Recognition", "Videocommunication Service", "Realtime Application", "Avatar Animation", "User Friendliness", "Avatars", "Animation", "Application Software", "Human Computer Interaction", "Robustness", "Cameras", "Shape", "Head", "Prototypes", "Speech" ], "authors": [ { "affiliation": "Fraunhofer Institute for Telecommunications, Germany", "fullName": "Oliver Schreer", "givenName": "Oliver", "surname": "Schreer", "__typename": "ArticleAuthorType" }, { "affiliation": "Fraunhofer Institute for Telecommunications, Germany", "fullName": "Seinghor Ngongang", "givenName": "Seinghor", "surname": "Ngongang", "__typename": "ArticleAuthorType" } ], "idPrefix": "iciap", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2007-09-01T00:00:00", "pubType": "proceedings", "pages": "", "year": "2007", "issn": null, "isbn": "0-7695-2877-5", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "28770165", "articleId": "12OmNAWH9E3", "__typename": "AdjacentArticleType" }, "next": { "fno": "28770171", "articleId": "12OmNy3Agwb", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/3dui/2013/6097/0/06550223", "title": "Poster: Gesture-based control of avatars for social TV", "doi": null, "abstractUrl": "/proceedings-article/3dui/2013/06550223/12OmNAGepYr", "parentPublication": { "id": "proceedings/3dui/2013/6097/0", "title": "2013 IEEE Symposium 
on 3D User Interfaces (3DUI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/fg/2000/0580/0/05800422", "title": "Exploiting Speech/Gesture Co-occurrence for Improving Continuous Gesture Recognition in Weather Narration", "doi": null, "abstractUrl": "/proceedings-article/fg/2000/05800422/12OmNCwUmBP", "parentPublication": { "id": "proceedings/fg/2000/0580/0", "title": "Proceedings Fourth IEEE International Conference on Automatic Face and Gesture Recognition (Cat. No. PR00580)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2015/1727/0/07223369", "title": "Human-avatar interaction and recognition memory according to interaction types and methods", "doi": null, "abstractUrl": "/proceedings-article/vr/2015/07223369/12OmNvEQsfz", "parentPublication": { "id": "proceedings/vr/2015/1727/0", "title": "2015 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cmvit/2017/4993/0/07878718", "title": "Head Gesture Recognition Based on SAE", "doi": null, "abstractUrl": "/proceedings-article/cmvit/2017/07878718/12OmNvxKu48", "parentPublication": { "id": "proceedings/cmvit/2017/4993/0", "title": "2017 International Conference on Machine Vision and Information Technology (CMVIT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2017/6067/0/08019348", "title": "Real time hand gesture recognition via finger-emphasized multi-scale description", "doi": null, "abstractUrl": "/proceedings-article/icme/2017/08019348/12OmNwvVrHD", "parentPublication": { "id": "proceedings/icme/2017/6067/0", "title": "2017 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iciap/2007/2877/0/28770253", "title": "Vision-based hand shape estimation is a 
challenging", "doi": null, "abstractUrl": "/proceedings-article/iciap/2007/28770253/12OmNyFU73k", "parentPublication": { "id": "proceedings/iciap/2007/2877/0", "title": "2007 14th International Conference on Image Analysis and Processing - ICIAP 2007", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/paap/2018/9403/0/940300a157", "title": "Gesture Recognition Applications Developed in Embedded Systems", "doi": null, "abstractUrl": "/proceedings-article/paap/2018/940300a157/19JE8a1qkI8", "parentPublication": { "id": "proceedings/paap/2018/9403/0", "title": "2018 9th International Symposium on Parallel Architectures, Algorithms and Programming (PAAP)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/eiect/2022/9956/0/995600a257", "title": "Gesture Recognition Based on Improved AlexNet", "doi": null, "abstractUrl": "/proceedings-article/eiect/2022/995600a257/1LHctSCaYuY", "parentPublication": { "id": "proceedings/eiect/2022/9956/0", "title": "2022 2nd International Conference on Electronic Information Engineering and Computer Technology (EIECT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ises/2018/9172/0/917200a265", "title": "Real-Time Efficient Detection in Vision Based Static Hand Gesture Recognition", "doi": null, "abstractUrl": "/proceedings-article/ises/2018/917200a265/1ap5dOxB1U4", "parentPublication": { "id": "proceedings/ises/2018/9172/0", "title": "2018 IEEE International Symposium on Smart Electronic Systems (iSES) (Formerly iNiS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ithings-greencom-cpscom-smartdata/2019/2980/0/298000a994", "title": "Sensor Based Dynamic Hand Gesture Recognition by PairNet", "doi": null, "abstractUrl": "/proceedings-article/ithings-greencom-cpscom-smartdata/2019/298000a994/1ehBChsoQyk", "parentPublication": { "id": 
"proceedings/ithings-greencom-cpscom-smartdata/2019/2980/0", "title": "2019 International Conference on Internet of Things (iThings) and IEEE Green Computing and Communications (GreenCom) and IEEE Cyber, Physical and Social Computing (CPSCom) and IEEE Smart Data (SmartData)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNBrDqEq", "title": "Proceedings IEEE Conference on Computer Vision and Pattern Recognition. CVPR 2000 (Cat. No.PR00662)", "acronym": "cvpr", "groupId": "1000147", "volume": "2", "displayVolume": "3", "year": "2000", "__typename": "ProceedingType" }, "article": { "id": "12OmNrGsDkR", "doi": "10.1109/CVPR.2000.854800", "title": "Gesture, Speech, and Gaze Cues for Discourse Segmentation", "normalizedTitle": "Gesture, Speech, and Gaze Cues for Discourse Segmentation", "abstract": "Psycholinguistic evidence has established the complementary nature of the verbal and non-verbal aspects of human expression. We present our findings in the detection of these cues in interaction. We use the psycholinguistic device known as the “catchment” as the locus of integration of gesture, speech and gaze components. We videotape conversation elicitation experiments in which subjects convey complex spatial plans to an interlocutor using a calibrated three-camera setup. We extract the gestural motion of both hands, gaze direction, and voiced units in the discourse and compare these with transcripts generated by expert microanalysis of the video. Our results show the complementary nature of these communicative modalities. Where there is ambiguity in the structure of one modality (such as in haplologies or owing to noise in the audio signal), other modalities provide evidence for correct segmentation.", "abstracts": [ { "abstractType": "Regular", "content": "Psycholinguistic evidence has established the complementary nature of the verbal and non-verbal aspects of human expression. We present our findings in the detection of these cues in interaction. We use the psycholinguistic device known as the “catchment” as the locus of integration of gesture, speech and gaze components. We videotape conversation elicitation experiments in which subjects convey complex spatial plans to an interlocutor using a calibrated three-camera setup. 
We extract the gestural motion of both hands, gaze direction, and voiced units in the discourse and compare these with transcripts generated by expert microanalysis of the video. Our results show the complementary nature of these communicative modalities. Where there is ambiguity in the structure of one modality (such as in haplologies or owing to noise in the audio signal), other modalities provide evidence for correct segmentation.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Psycholinguistic evidence has established the complementary nature of the verbal and non-verbal aspects of human expression. We present our findings in the detection of these cues in interaction. We use the psycholinguistic device known as the “catchment” as the locus of integration of gesture, speech and gaze components. We videotape conversation elicitation experiments in which subjects convey complex spatial plans to an interlocutor using a calibrated three-camera setup. We extract the gestural motion of both hands, gaze direction, and voiced units in the discourse and compare these with transcripts generated by expert microanalysis of the video. Our results show the complementary nature of these communicative modalities. 
Where there is ambiguity in the structure of one modality (such as in haplologies or owing to noise in the audio signal), other modalities provide evidence for correct segmentation.", "fno": "06622247", "keywords": [], "authors": [ { "affiliation": null, "fullName": "Francis Quek", "givenName": "Francis", "surname": "Quek", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "David McNeill", "givenName": "David", "surname": "McNeill", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Robert Bryll", "givenName": "Robert", "surname": "Bryll", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Cemil Kirbas", "givenName": "Cemil", "surname": "Kirbas", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Hasan Arslan", "givenName": "Hasan", "surname": "Arslan", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Karl E. McCullough", "givenName": "Karl E.", "surname": "McCullough", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Nobuhiro Furuyama", "givenName": "Nobuhiro", "surname": "Furuyama", "__typename": "ArticleAuthorType" } ], "idPrefix": "cvpr", "isOpenAccess": false, "showRecommendedArticles": false, "showBuyMe": true, "hasPdf": true, "pubDate": "2000-06-01T00:00:00", "pubType": "proceedings", "pages": "2247", "year": "2000", "issn": "1063-6919", "isbn": "0-7695-0662-3", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "06622239", "articleId": "12OmNvTBB3r", "__typename": "AdjacentArticleType" }, "next": { "fno": "06622255", "articleId": "12OmNvIxf00", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [], "articleVideos": [] }
{ "proceeding": { "id": "12OmNyjLoRw", "title": "2014 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "acronym": "ismar", "groupId": "1000465", "volume": "0", "displayVolume": "0", "year": "2014", "__typename": "ProceedingType" }, "article": { "id": "12OmNxzuMCv", "doi": "10.1109/ISMAR.2014.6948491", "title": "[DEMO] G-SIAR: Gesture-speech interface for augmented reality", "normalizedTitle": "[DEMO] G-SIAR: Gesture-speech interface for augmented reality", "abstract": "We demonstrate an Augmented Reality (AR) system that utilizes a combination of direct free hand interaction and indirect multimodal gesture and speech interface. A three-dimensional (3D) design sandbox application, featuring online object creation, has been developed to illustrate the use case of our system that supports dual interaction techniques.", "abstracts": [ { "abstractType": "Regular", "content": "We demonstrate an Augmented Reality (AR) system that utilizes a combination of direct free hand interaction and indirect multimodal gesture and speech interface. A three-dimensional (3D) design sandbox application, featuring online object creation, has been developed to illustrate the use case of our system that supports dual interaction techniques.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We demonstrate an Augmented Reality (AR) system that utilizes a combination of direct free hand interaction and indirect multimodal gesture and speech interface. 
A three-dimensional (3D) design sandbox application, featuring online object creation, has been developed to illustrate the use case of our system that supports dual interaction techniques.", "fno": "06948491", "keywords": [ "Multimodal Interface", "Augmented Reality", "Natural Interaction" ], "authors": [ { "affiliation": "HITLab NZ, University of Canterbury, New Zealand", "fullName": "Thammathip Piumsomboon", "givenName": "Thammathip", "surname": "Piumsomboon", "__typename": "ArticleAuthorType" }, { "affiliation": "HITLab NZ, University of Canterbury, New Zealand", "fullName": "Adrian Clark", "givenName": "Adrian", "surname": "Clark", "__typename": "ArticleAuthorType" }, { "affiliation": "HITLab NZ, University of Canterbury, New Zealand", "fullName": "Mark Billinghurst", "givenName": "Mark", "surname": "Billinghurst", "__typename": "ArticleAuthorType" } ], "idPrefix": "ismar", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2014-09-01T00:00:00", "pubType": "proceedings", "pages": "365-366", "year": "2014", "issn": null, "isbn": "978-1-4799-6184-9", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "06948490", "articleId": "12OmNxETaiN", "__typename": "AdjacentArticleType" }, "next": { "fno": "06948492", "articleId": "12OmNxvNZZT", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/ismar/2014/6184/0/06948411", "title": "Grasp-Shell vs gesture-speech: A comparison of direct and indirect natural interaction techniques in augmented reality", "doi": null, "abstractUrl": "/proceedings-article/ismar/2014/06948411/12OmNA1Vnwp", "parentPublication": { "id": "proceedings/ismar/2014/6184/0", "title": "2014 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/cmc/2009/3501/3/3501c170", "title": "GIUC: A Gesture Interface for Ubiquitous Computing", "doi": null, "abstractUrl": "/proceedings-article/cmc/2009/3501c170/12OmNBPc8rC", "parentPublication": { "id": "proceedings/cmc/2009/3501/3", "title": "2009 WRI International Conference on Communications and Mobile Computing. CMC 2009", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/fg/2006/2503/0/25030275", "title": "Framework for a Portable Gesture Interface", "doi": null, "abstractUrl": "/proceedings-article/fg/2006/25030275/12OmNqBtiV1", "parentPublication": { "id": "proceedings/fg/2006/2503/0", "title": "7th International Conference on Automatic Face and Gesture Recognition (FGR06)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/fg/2002/1602/0/16020429", "title": "Real-Time Tracking of Multiple Fingertips and Gesture Recognition for Augmented Desk Interface Systems", "doi": null, "abstractUrl": "/proceedings-article/fg/2002/16020429/12OmNvnfkcc", "parentPublication": { "id": "proceedings/fg/2002/1602/0", "title": "Proceedings of Fifth IEEE International Conference on Automatic Face Gesture Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iswc/2007/1452/0/04373768", "title": "Hambone: A Bio-Acoustic Gesture Interface", "doi": null, "abstractUrl": "/proceedings-article/iswc/2007/04373768/12OmNwe2IuQ", "parentPublication": { "id": "proceedings/iswc/2007/1452/0", "title": "2007 11th IEEE International Symposium on Wearable Computers", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/isms/2010/3973/0/3973a065", "title": "Preliminary Study on Gesture Recognition for Walking-Stick Interface", "doi": null, "abstractUrl": "/proceedings-article/isms/2010/3973a065/12OmNxWcHat", "parentPublication": { "id": "proceedings/isms/2010/3973/0", "title": 
"Intelligent Systems, Modelling and Simulation, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icvs/2006/2506/0/25060007", "title": "Lexical Gesture Interface", "doi": null, "abstractUrl": "/proceedings-article/icvs/2006/25060007/12OmNyPQ4x1", "parentPublication": { "id": "proceedings/icvs/2006/2506/0", "title": "Fourth IEEE International Conference on Computer Vision Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icfhr/2014/4335/0/06981030", "title": "A Graph Modeling Strategy for Multi-touch Gesture Recognition", "doi": null, "abstractUrl": "/proceedings-article/icfhr/2014/06981030/12OmNyQpha3", "parentPublication": { "id": "proceedings/icfhr/2014/4335/0", "title": "2014 14th International Conference on Frontiers in Handwriting Recognition (ICFHR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iciis/1999/0446/0/04460328", "title": "Toward Multimodal Interpretation in a Natural Speech/Gesture Interface", "doi": null, "abstractUrl": "/proceedings-article/iciis/1999/04460328/12OmNzuZUnH", "parentPublication": { "id": "proceedings/iciis/1999/0446/0", "title": "Information, Intelligence, and Systems, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2019/4765/0/476500a228", "title": "Real-Time 3D Hand Gesture Based Mobile Interaction Interface", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2019/476500a228/1gysl2Y9OUM", "parentPublication": { "id": "proceedings/ismar-adjunct/2019/4765/0", "title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNqH9hnj", "title": "2015 30th IEEE/ACM International Conference on Automated Software Engineering Workshop (ASEW)", "acronym": "asew", "groupId": "1002519", "volume": "0", "displayVolume": "0", "year": "2015", "__typename": "ProceedingType" }, "article": { "id": "12OmNzuZUwD", "doi": "10.1109/ASEW.2015.13", "title": "Towards Automatic Constraint Elicitation in Test Design: Preliminary Evaluation Based on Collective Intelligence", "normalizedTitle": "Towards Automatic Constraint Elicitation in Test Design: Preliminary Evaluation Based on Collective Intelligence", "abstract": "Constraint elicitation is an important process in test design since constraints determine the test space, however, the constraint elicitation process, which is known as a daunting task, has not been well studied. It usually requires manual capturing and precise definition of constraints. In this paper, we try to automate a part of the constraint elicitation process in combinatorial test design. Pair-wise testing, a common combinatorial testing approach, is an effective test planning technique for finding interaction faults using a small set of test cases. We propose a collective intelligence approach to determining which value combinations are potential constraints. We conduct preliminary experiments on two examples, a traditional cross-browser testing example and an ATM system example, and evaluate the feasibility of our approach.", "abstracts": [ { "abstractType": "Regular", "content": "Constraint elicitation is an important process in test design since constraints determine the test space, however, the constraint elicitation process, which is known as a daunting task, has not been well studied. It usually requires manual capturing and precise definition of constraints. In this paper, we try to automate a part of the constraint elicitation process in combinatorial test design. 
Pair-wise testing, a common combinatorial testing approach, is an effective test planning technique for finding interaction faults using a small set of test cases. We propose a collective intelligence approach to determining which value combinations are potential constraints. We conduct preliminary experiments on two examples, a traditional cross-browser testing example and an ATM system example, and evaluate the feasibility of our approach.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Constraint elicitation is an important process in test design since constraints determine the test space, however, the constraint elicitation process, which is known as a daunting task, has not been well studied. It usually requires manual capturing and precise definition of constraints. In this paper, we try to automate a part of the constraint elicitation process in combinatorial test design. Pair-wise testing, a common combinatorial testing approach, is an effective test planning technique for finding interaction faults using a small set of test cases. We propose a collective intelligence approach to determining which value combinations are potential constraints. 
We conduct preliminary experiments on two examples, a traditional cross-browser testing example and an ATM system example, and evaluate the feasibility of our approach.", "fno": "9775a058", "keywords": [ "Browsers", "Testing", "Linux", "Media", "Web Search", "Engines", "Collective Intelligence", "Pair Wise Testing", "Constraints" ], "authors": [ { "affiliation": null, "fullName": "Hiroyuki Nakagawa", "givenName": "Hiroyuki", "surname": "Nakagawa", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Tatsuhiro Tsuchiya", "givenName": "Tatsuhiro", "surname": "Tsuchiya", "__typename": "ArticleAuthorType" } ], "idPrefix": "asew", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2015-11-01T00:00:00", "pubType": "proceedings", "pages": "58-61", "year": "2015", "issn": null, "isbn": "978-1-4673-9775-9", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "9775a050", "articleId": "12OmNBOllaC", "__typename": "AdjacentArticleType" }, "next": { "fno": "9775a062", "articleId": "12OmNvAiSIs", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icstw/2018/6352/0/635201a285", "title": "A Satisfiability-Based Approach to Generation of Constrained Locating Arrays", "doi": null, "abstractUrl": "/proceedings-article/icstw/2018/635201a285/12OmNBTJIJD", "parentPublication": { "id": "proceedings/icstw/2018/6352/0", "title": "2018 IEEE International Conference on Software Testing, Verification and Validation Workshops (ICSTW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icse/2010/719/2/06062228", "title": "Constraint solving techniques for software testing and analysis", "doi": null, "abstractUrl": "/proceedings-article/icse/2010/06062228/12OmNviZlMb", "parentPublication": { "id": "proceedings/icse/2010/719/2", 
"title": "2010 32nd International Conference on Software Engineering (ICSE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/taicpart-mutation/2007/2984/0/29840111", "title": "Modelling Dynamic Memory Management in Constraint-Based Testing", "doi": null, "abstractUrl": "/proceedings-article/taicpart-mutation/2007/29840111/12OmNwdbV8S", "parentPublication": { "id": "proceedings/taicpart-mutation/2007/2984/0", "title": "Testing: Academic and Industrial Conference Practice and Research Techniques - MUTATION", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icst/2015/7125/0/07102599", "title": "Optimization of Combinatorial Testing by Incremental SAT Solving", "doi": null, "abstractUrl": "/proceedings-article/icst/2015/07102599/12OmNwseESn", "parentPublication": { "id": "proceedings/icst/2015/7125/0", "title": "2015 IEEE 8th International Conference on Software Testing, Verification and Validation (ICST)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ase/2016/3845/0/07582796", "title": "Greedy combinatorial test case generation using unsatisfiable cores", "doi": null, "abstractUrl": "/proceedings-article/ase/2016/07582796/12OmNx5GUbA", "parentPublication": { "id": "proceedings/ase/2016/3845/0", "title": "2016 31st IEEE/ACM International Conference on Automated Software Engineering (ASE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icstw/2015/1885/0/07107432", "title": "Embedded functions in combinatorial test designs", "doi": null, "abstractUrl": "/proceedings-article/icstw/2015/07107432/12OmNxzuMBQ", "parentPublication": { "id": "proceedings/icstw/2015/1885/0", "title": "2015 IEEE Eighth International Conference on Software Testing, Verification and Validation Workshops (ICSTW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" 
}, { "id": "proceedings/icst/2013/4968/0/4968a242", "title": "An Efficient Algorithm for Constraint Handling in Combinatorial Test Generation", "doi": null, "abstractUrl": "/proceedings-article/icst/2013/4968a242/12OmNyKrH7J", "parentPublication": { "id": "proceedings/icst/2013/4968/0", "title": "2013 IEEE Sixth International Conference on Software Testing, Verification and Validation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/hicss/1998/8245/5/82450134", "title": "Approximate Constraint Satisfaction Over a Constraint Hierarchy: A Preliminary Study", "doi": null, "abstractUrl": "/proceedings-article/hicss/1998/82450134/12OmNzSh11Q", "parentPublication": { "id": "proceedings/hicss/1998/8245/5", "title": "Proceedings of the Thirty-First Hawaii International Conference on System Sciences", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ret/2015/7073/0/7073a034", "title": "Towards Automatic Constraints Elicitation in Pair-Wise Testing Based on a Linguistic Approach: Elicitation Support Using Coupling Strength", "doi": null, "abstractUrl": "/proceedings-article/ret/2015/7073a034/12OmNzayNjH", "parentPublication": { "id": "proceedings/ret/2015/7073/0", "title": "2015 IEEE/ACM 2nd International Workshop on Requirements Engineering and Testing (RET)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/rew/2022/6000/0/600000a006", "title": "Environment Assertion Driven Requirements Elicitation: A Preliminary Study", "doi": null, "abstractUrl": "/proceedings-article/rew/2022/600000a006/1HCV9ulnu80", "parentPublication": { "id": "proceedings/rew/2022/6000/0", "title": "2022 IEEE 30th International Requirements Engineering Conference Workshops (REW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1cI6akLvAuQ", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "acronym": "vr", "groupId": "1000791", "volume": "0", "displayVolume": "0", "year": "2019", "__typename": "ProceedingType" }, "article": { "id": "1cJ0Qs2rZCg", "doi": "10.1109/VR.2019.8798105", "title": "Selection and Manipulation Whole-Body Gesture Elicitation Study In Virtual Reality", "normalizedTitle": "Selection and Manipulation Whole-Body Gesture Elicitation Study In Virtual Reality", "abstract": "We present a whole-body gesture elicitation study using Head Mounted Displays, including a legacy bias reduction. The motivation for this study was to understand the type of gesture agreement rates for selection and manipulation interactions and to improve the user experience for whole-body interactions. We looked at 23 participants and 20 distinct referents (with multiple gestures per referent). We found that regardless of the production technique used to remove legacy bias, legacy bias was still found in some of the produced gestures. In some instances, gestures were derived from previous interactions but were still appropriate for the environment presented. This study provides a rich set of information and useful recommendations for future designers and/or developers.", "abstracts": [ { "abstractType": "Regular", "content": "We present a whole-body gesture elicitation study using Head Mounted Displays, including a legacy bias reduction. The motivation for this study was to understand the type of gesture agreement rates for selection and manipulation interactions and to improve the user experience for whole-body interactions. We looked at 23 participants and 20 distinct referents (with multiple gestures per referent). We found that regardless of the production technique used to remove legacy bias, legacy bias was still found in some of the produced gestures. 
In some instances, gestures were derived from previous interactions but were still appropriate for the environment presented. This study provides a rich set of information and useful recommendations for future designers and/or developers.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We present a whole-body gesture elicitation study using Head Mounted Displays, including a legacy bias reduction. The motivation for this study was to understand the type of gesture agreement rates for selection and manipulation interactions and to improve the user experience for whole-body interactions. We looked at 23 participants and 20 distinct referents (with multiple gestures per referent). We found that regardless of the production technique used to remove legacy bias, legacy bias was still found in some of the produced gestures. In some instances, gestures were derived from previous interactions but were still appropriate for the environment presented. This study provides a rich set of information and useful recommendations for future designers and/or developers.", "fno": "08798105", "keywords": [ "Gesture Recognition", "Helmet Mounted Displays", "Human Computer Interaction", "Virtual Reality", "Whole Body Interactions", "Manipulation Interactions", "Gesture Agreement Rates", "Legacy Bias Reduction", "Whole Body Gesture Elicitation Study", "Virtual Reality", "Legacy Bias", "Head Mounted Displays", "Production", "Virtual Reality", "User Interfaces", "Three Dimensional Displays", "Resists", "Headphones", "User Experience", "Gesture Elicitation X 2014 Gestures X 2014 Virtual Reality X 2014 Whole Body" ], "authors": [ { "affiliation": "Colorado State University", "fullName": "Francisco R. 
Ortega", "givenName": "Francisco R.", "surname": "Ortega", "__typename": "ArticleAuthorType" }, { "affiliation": "Florida International University", "fullName": "Katherine Tarre", "givenName": "Katherine", "surname": "Tarre", "__typename": "ArticleAuthorType" }, { "affiliation": "Florida International University", "fullName": "Mathew Kress", "givenName": "Mathew", "surname": "Kress", "__typename": "ArticleAuthorType" }, { "affiliation": "Colorado State University", "fullName": "Adam S. Williams", "givenName": "Adam S.", "surname": "Williams", "__typename": "ArticleAuthorType" }, { "affiliation": "Florida International University", "fullName": "Armando B. Barreto", "givenName": "Armando B.", "surname": "Barreto", "__typename": "ArticleAuthorType" }, { "affiliation": "Florida International University", "fullName": "Naphtali D. Rishe", "givenName": "Naphtali D.", "surname": "Rishe", "__typename": "ArticleAuthorType" } ], "idPrefix": "vr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2019-03-01T00:00:00", "pubType": "proceedings", "pages": "1723-1728", "year": "2019", "issn": null, "isbn": "978-1-7281-1377-7", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "08797982", "articleId": "1cJ0VffBzGw", "__typename": "AdjacentArticleType" }, "next": { "fno": "08797740", "articleId": "1cJ196OGdJm", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icpr/2006/2521/4/252140774", "title": "Human-Robot Interaction by Whole Body Gesture Spotting and Recognition", "doi": null, "abstractUrl": "/proceedings-article/icpr/2006/252140774/12OmNAqU4UU", "parentPublication": { "id": "proceedings/icpr/2006/2521/4", "title": "Pattern Recognition, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/3dui/2013/6097/0/06550189", "title": "User-defined gestural interaction: A study on gesture memorization", "doi": null, "abstractUrl": "/proceedings-article/3dui/2013/06550189/12OmNBoNroS", "parentPublication": { "id": "proceedings/3dui/2013/6097/0", "title": "2013 IEEE Symposium on 3D User Interfaces (3DUI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dui/2017/6716/0/07893331", "title": "Gesture elicitation for 3D travel via multi-touch and mid-Air systems for procedurally generated pseudo-universe", "doi": null, "abstractUrl": "/proceedings-article/3dui/2017/07893331/12OmNBpVQ5U", "parentPublication": { "id": "proceedings/3dui/2017/6716/0", "title": "2017 IEEE Symposium on 3D User Interfaces (3DUI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/fg/2006/2503/0/25030243", "title": "A Full-Body Gesture Database for Automatic Gesture Recognition", "doi": null, "abstractUrl": "/proceedings-article/fg/2006/25030243/12OmNvzJG1E", "parentPublication": { "id": "proceedings/fg/2006/2503/0", "title": "7th International Conference on Automatic Face and Gesture Recognition (FGR06)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2016/0836/0/07504707", "title": "Depth-based 3D gesture multi-level radial menu for virtual object manipulation", "doi": null, "abstractUrl": "/proceedings-article/vr/2016/07504707/12OmNx3HI96", "parentPublication": { "id": "proceedings/vr/2016/0836/0", "title": "2016 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08798182", "title": "Selection and Manipulation Whole-Body Gesture Elicitation Study in Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08798182/1cJ0GVPhN96", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE 
Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/12/09199555", "title": "Understanding Multimodal User Gesture and Speech Behavior for Object Manipulation in Augmented Reality Using Elicitation", "doi": null, "abstractUrl": "/journal/tg/2020/12/09199555/1ncgzvoHSBG", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2020/8508/0/850800a374", "title": "Face Commands - User-Defined Facial Gestures for Smart Glasses", "doi": null, "abstractUrl": "/proceedings-article/ismar/2020/850800a374/1pysuXX1aBq", "parentPublication": { "id": "proceedings/ismar/2020/8508/0", "title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/05/09382928", "title": "GestOnHMD: Enabling Gesture-based Interaction on Low-cost VR Head-Mounted Display", "doi": null, "abstractUrl": "/journal/tg/2021/05/09382928/1saZuaAmvlu", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2021/1838/0/255600a778", "title": "Evaluating Object Manipulation Interaction Techniques in Mixed Reality: Tangible User Interfaces and Gesture", "doi": null, "abstractUrl": "/proceedings-article/vr/2021/255600a778/1tuBngWRAC4", "parentPublication": { "id": "proceedings/vr/2021/1838/0", "title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1kecGUxjQC4", "title": "2020 15th IEEE International Conference on Automatic Face and Gesture Recognition (FG 2020) (FG)", "acronym": "fg", "groupId": "1002160", "volume": "0", "displayVolume": "1", "year": "2020", "__typename": "ProceedingType" }, "article": { "id": "1kecI7e5QCA", "doi": "10.1109/FG47880.2020.00043", "title": "Gesture Agreement Assessment Using Description Vectors", "normalizedTitle": "Gesture Agreement Assessment Using Description Vectors", "abstract": "Participatory design is a popular design technique that involves the end-users in the early stages of the design process to obtain user-friendly gestural interfaces. Guessability studies followed by agreement analyses are often used to elicit and comprehend the preferences (gestures/proposals) of the participants. Previous approaches to assess agreement, grouped the gestures into equivalence classes and ignored the integral properties that are shared between them. In this work, we represent the gestures using binary description vectors to allow them to be partially similar. In this context, we introduce a new metric referred to as a soft agreement rate (SAR) to quantify the level of consensus between the participants. In addition, we performed computational experiments to study the behavior of our partial agreement formula and mathematically show that existing agreement metrics are a special case of our approach. Our methodology was evaluated through a gesture elicitation study conducted with a group of neurosurgeons. Nevertheless, our formulation can be applied to any other user-elicitation study. Results show that the level of agreement obtained by SAR metric is 2.64 times higher than the existing metrics. 
In addition to the most agreed gesture, SAR formulation also provides the mostly agreed descriptors which can potentially help the designers to come up with a final gesture set.", "abstracts": [ { "abstractType": "Regular", "content": "Participatory design is a popular design technique that involves the end-users in the early stages of the design process to obtain user-friendly gestural interfaces. Guessability studies followed by agreement analyses are often used to elicit and comprehend the preferences (gestures/proposals) of the participants. Previous approaches to assess agreement, grouped the gestures into equivalence classes and ignored the integral properties that are shared between them. In this work, we represent the gestures using binary description vectors to allow them to be partially similar. In this context, we introduce a new metric referred to as a soft agreement rate (SAR) to quantify the level of consensus between the participants. In addition, we performed computational experiments to study the behavior of our partial agreement formula and mathematically show that existing agreement metrics are a special case of our approach. Our methodology was evaluated through a gesture elicitation study conducted with a group of neurosurgeons. Nevertheless, our formulation can be applied to any other user-elicitation study. Results show that the level of agreement obtained by SAR metric is 2.64 times higher than the existing metrics. In addition to the most agreed gesture, SAR formulation also provides the mostly agreed descriptors which can potentially help the designers to come up with a final gesture set.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Participatory design is a popular design technique that involves the end-users in the early stages of the design process to obtain user-friendly gestural interfaces. 
Guessability studies followed by agreement analyses are often used to elicit and comprehend the preferences (gestures/proposals) of the participants. Previous approaches to assess agreement, grouped the gestures into equivalence classes and ignored the integral properties that are shared between them. In this work, we represent the gestures using binary description vectors to allow them to be partially similar. In this context, we introduce a new metric referred to as a soft agreement rate (SAR) to quantify the level of consensus between the participants. In addition, we performed computational experiments to study the behavior of our partial agreement formula and mathematically show that existing agreement metrics are a special case of our approach. Our methodology was evaluated through a gesture elicitation study conducted with a group of neurosurgeons. Nevertheless, our formulation can be applied to any other user-elicitation study. Results show that the level of agreement obtained by SAR metric is 2.64 times higher than the existing metrics. 
In addition to the most agreed gesture, SAR formulation also provides the mostly agreed descriptors which can potentially help the designers to come up with a final gesture set.", "fno": "307900a291", "keywords": [ "Gesture Recognition", "Human Computer Interaction", "Vectors", "Soft Agreement Rate", "Partial Agreement Formula", "Agreement Metrics", "Gesture Elicitation Study", "User Elicitation Study", "SAR Metric", "Agreed Gesture", "SAR Formulation", "Final Gesture", "Gesture Agreement Assessment", "Participatory Design", "Popular Design Technique", "Design Process", "User Friendly Gestural Interfaces", "Guessability Studies", "Agreement Analyses", "Equivalence Classes", "Integral Properties", "Binary Description Vectors", "Proposals", "Measurement", "Shape", "National Institutes Of Health", "Indexes", "Radiology", "Particle Measurements", "Gestures Agreement Analysis Semantic Descriptors And Gesture Elicitation" ], "authors": [ { "affiliation": "Purdue University,School of Industrial Engineering,West Lafayette,USA", "fullName": "Naveen Madapana", "givenName": "Naveen", "surname": "Madapana", "__typename": "ArticleAuthorType" }, { "affiliation": "Purdue University,School of Industrial Engineering,West Lafayette,USA", "fullName": "Glebys Gonzalez", "givenName": "Glebys", "surname": "Gonzalez", "__typename": "ArticleAuthorType" }, { "affiliation": "Purdue University,School of Industrial Engineering,West Lafayette,USA", "fullName": "Juan Wachs", "givenName": "Juan", "surname": "Wachs", "__typename": "ArticleAuthorType" } ], "idPrefix": "fg", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2020-11-01T00:00:00", "pubType": "proceedings", "pages": "40-44", "year": "2020", "issn": null, "isbn": "978-1-7281-3079-8", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "307900a085", "articleId": "1kecHT7g7JK", "__typename": "AdjacentArticleType" 
}, "next": { "fno": "307900a158", "articleId": "1kecHY8VGyA", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/fg/2017/4023/0/4023a784", "title": "One-Shot Gesture Recognition: One Step Towards Adaptive Learning", "doi": null, "abstractUrl": "/proceedings-article/fg/2017/4023a784/12OmNAqCtLO", "parentPublication": { "id": "proceedings/fg/2017/4023/0", "title": "2017 12th IEEE International Conference on Automatic Face & Gesture Recognition (FG 2017)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/uic-atc-scalcom/2014/7646/0/7646a362", "title": "Defining and Analyzing a Gesture Set for Interactive TV Remote on Touchscreen Phones", "doi": null, "abstractUrl": "/proceedings-article/uic-atc-scalcom/2014/7646a362/12OmNBOllrx", "parentPublication": { "id": "proceedings/uic-atc-scalcom/2014/7646/0", "title": "2014 IEEE 11th Intl Conf on Ubiquitous Intelligence & Computing and 2014 IEEE 11th Intl Conf on Autonomic & Trusted Computing and 2014 IEEE 14th Intl Conf on Scalable Computing and Communications and Its Associated Workshops (UIC-ATC-ScalCom)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ratfg-rts/2001/1074/0/10740157", "title": "Auto Clustering for Unsupervised Learning of Atomic Gesture Components Using Minimum Description Length", "doi": null, "abstractUrl": "/proceedings-article/ratfg-rts/2001/10740157/12OmNxIRxSW", "parentPublication": { "id": "proceedings/ratfg-rts/2001/1074/0", "title": "Recognition, Analysis, &amp; Tracking of Faces &amp; Gestures in Real -Time Systems, IEEE ICCV Workshop on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/fg/2008/2153/0/04813448", "title": "Real-time 3D pointing gesture recognition in mobile space", "doi": null, "abstractUrl": "/proceedings-article/fg/2008/04813448/12OmNzahc3t", 
"parentPublication": { "id": "proceedings/fg/2008/2153/0", "title": "2008 8th IEEE International Conference on Automatic Face & Gesture Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/robio/2006/0570/0/04141834", "title": "A Gesture Model and Its Applications in Risk Assessment for Misrecognition", "doi": null, "abstractUrl": "/proceedings-article/robio/2006/04141834/12OmNzt0IxV", "parentPublication": { "id": "proceedings/robio/2006/0570/0", "title": "IEEE International Conference on Robotics and Biomimetics - ROBIO2006", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/pc/2018/01/mpc2018010027", "title": "The Impact of Low Vision on Touch-Gesture Articulation on Mobile Devices", "doi": null, "abstractUrl": "/magazine/pc/2018/01/mpc2018010027/13rRUwj7cst", "parentPublication": { "id": "mags/pc", "title": "IEEE Pervasive Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ta/2021/02/08493586", "title": "Survey on Emotional Body Gesture Recognition", "doi": null, "abstractUrl": "/journal/ta/2021/02/08493586/14qdcQU04il", "parentPublication": { "id": "trans/ta", "title": "IEEE Transactions on Affective Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/fg/2019/0089/0/08756534", "title": "MAGIC: A Fundamental Framework for Gesture Representation, Comparison and Assessment", "doi": null, "abstractUrl": "/proceedings-article/fg/2019/08756534/1bzYtPdVFqE", "parentPublication": { "id": "proceedings/fg/2019/0089/0", "title": "2019 14th IEEE International Conference on Automatic Face & Gesture Recognition (FG 2019)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08798182", "title": "Selection and Manipulation Whole-Body Gesture Elicitation Study in Virtual Reality", "doi": null, 
"abstractUrl": "/proceedings-article/vr/2019/08798182/1cJ0GVPhN96", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08798105", "title": "Selection and Manipulation Whole-Body Gesture Elicitation Study In Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08798105/1cJ0Qs2rZCg", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNsbGvCZ", "title": "2011 IEEE International Symposium on VR Innovation (ISVRI)", "acronym": "isvri", "groupId": "1800344", "volume": "0", "displayVolume": "0", "year": "2011", "__typename": "ProceedingType" }, "article": { "id": "12OmNAXxX00", "doi": "10.1109/ISVRI.2011.5759609", "title": "Effects of hand feedback fidelity on near space pointing performance and user acceptance", "normalizedTitle": "Effects of hand feedback fidelity on near space pointing performance and user acceptance", "abstract": "In this paper, we report on an experiment conducted to test the effects of different hand representations on near space pointing performance and user preference. Subjects were presented with varying levels of hand realism, including real hand video, a high and a low level 3D hand model and an ordinary 3D pointer arrow. Behavioural data revealed that an abstract hand substitute like a 3D pointer arrow leads to significantly larger position estimation errors in terms of lateral target overshooting when touching virtual surfaces with only visual hand movement constraints. Further, questionnaire results show that a higher fidelity hand is preferred over lower fidelity representations for different aspects of the task. But we cannot conclude that realtime video feedback of the own hand is better rated than a high level static 3D hand model. Overall, these results, which largely confirm previous research, suggest that, although a higher fidelity feedback of the hand is desirable from an user acceptance point of view, motor performance seems not to be affected by varying degrees of limb realism - as long as a hand-like shape is provided.", "abstracts": [ { "abstractType": "Regular", "content": "In this paper, we report on an experiment conducted to test the effects of different hand representations on near space pointing performance and user preference. 
Subjects were presented with varying levels of hand realism, including real hand video, a high and a low level 3D hand model and an ordinary 3D pointer arrow. Behavioural data revealed that an abstract hand substitute like a 3D pointer arrow leads to significantly larger position estimation errors in terms of lateral target overshooting when touching virtual surfaces with only visual hand movement constraints. Further, questionnaire results show that a higher fidelity hand is preferred over lower fidelity representations for different aspects of the task. But we cannot conclude that realtime video feedback of the own hand is better rated than a high level static 3D hand model. Overall, these results, which largely confirm previous research, suggest that, although a higher fidelity feedback of the hand is desirable from an user acceptance point of view, motor performance seems not to be affected by varying degrees of limb realism - as long as a hand-like shape is provided.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In this paper, we report on an experiment conducted to test the effects of different hand representations on near space pointing performance and user preference. Subjects were presented with varying levels of hand realism, including real hand video, a high and a low level 3D hand model and an ordinary 3D pointer arrow. Behavioural data revealed that an abstract hand substitute like a 3D pointer arrow leads to significantly larger position estimation errors in terms of lateral target overshooting when touching virtual surfaces with only visual hand movement constraints. Further, questionnaire results show that a higher fidelity hand is preferred over lower fidelity representations for different aspects of the task. But we cannot conclude that realtime video feedback of the own hand is better rated than a high level static 3D hand model. 
Overall, these results, which largely confirm previous research, suggest that, although a higher fidelity feedback of the hand is desirable from an user acceptance point of view, motor performance seems not to be affected by varying degrees of limb realism - as long as a hand-like shape is provided.", "fno": "05759609", "keywords": [ "Ergonomics", "Gesture Recognition", "Motion Estimation", "Solid Modelling", "Virtual Reality", "Hand Feedback Fidelity", "User Acceptance", "Hand Representation", "Real Hand Video", "3 D Pointer Arrow", "3 D Hand Model", "Position Estimation Error", "Target Overshooting", "Virtual Surface", "Near Space Pointing Performance", "User Preference", "Three Dimensional Displays", "Visualization", "Solid Modeling", "Accuracy", "Biological System Modeling", "Correlation", "Stability Analysis", "Near Space Interaction", "Co Location", "Limb Attribution", "Perception", "Video See Through Head Mounted Display", "Hand Displacement", "Visuo Proprioceptive Sensory Conflict" ], "authors": [ { "affiliation": "INRIA Grenoble Rhône-Alpes - LIG, 655, av. de l'Europe, 38334 St. Ismier Cedex, France", "fullName": "Andreas Pusch", "givenName": "Andreas", "surname": "Pusch", "__typename": "ArticleAuthorType" }, { "affiliation": "INRIA Grenoble Rhône-Alpes - LIG, 655, av. de l'Europe, 38334 St. Ismier Cedex, France", "fullName": "Olivier Martin", "givenName": "Olivier", "surname": "Martin", "__typename": "ArticleAuthorType" }, { "affiliation": "INRIA Grenoble Rhône-Alpes - LIG, 655, av. de l'Europe, 38334 St. 
Ismier Cedex, France", "fullName": "Sabine Coquillart", "givenName": "Sabine", "surname": "Coquillart", "__typename": "ArticleAuthorType" } ], "idPrefix": "isvri", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2011-03-01T00:00:00", "pubType": "proceedings", "pages": "97-102", "year": "2011", "issn": null, "isbn": "978-1-4577-0054-5", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "05759608", "articleId": "12OmNyYDDJc", "__typename": "AdjacentArticleType" }, "next": { "fno": "05759610", "articleId": "12OmNx3q71Z", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/vr/2016/0836/0/07504731", "title": "The effect of realism on the virtual hand illusion", "doi": null, "abstractUrl": "/proceedings-article/vr/2016/07504731/12OmNxu6p9n", "parentPublication": { "id": "proceedings/vr/2016/0836/0", "title": "2016 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2017/6647/0/07892328", "title": "Effect on high versus low fidelity haptic feedback in a virtual reality baseball simulation", "doi": null, "abstractUrl": "/proceedings-article/vr/2017/07892328/12OmNym2bPM", "parentPublication": { "id": "proceedings/vr/2017/6647/0", "title": "2017 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2014/2871/0/06802073", "title": "Virtual speech anxiety training — Effects of simulation fidelity on user experience", "doi": null, "abstractUrl": "/proceedings-article/vr/2014/06802073/12OmNz6iOxk", "parentPublication": { "id": "proceedings/vr/2014/2871/0", "title": "2014 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2018/04/08260967", "title": 
"Evaluating Multiple Levels of an Interaction Fidelity Continuum on Performance and Learning in Near-Field Training Simulations", "doi": null, "abstractUrl": "/journal/tg/2018/04/08260967/13rRUyv53FB", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2021/2812/0/281200m2909", "title": "I2UV-HandNet: Image-to-UV Prediction Network for Accurate and High-fidelity 3D Hand Mesh Modeling", "doi": null, "abstractUrl": "/proceedings-article/iccv/2021/281200m2909/1BmKRytjd2o", "parentPublication": { "id": "proceedings/iccv/2021/2812/0", "title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600b516", "title": "NeuralHDHair: Automatic High-fidelity Hair Modeling from a Single Image Using Implicit Neural Representations", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600b516/1H1lkq5sTPq", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08797996", "title": "Visualizing Natural Environments from Data in Virtual Reality: Combining Realism and Uncertainty", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08797996/1cJ0PzrdQmQ", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2019/3293/0/329300b067", "title": "Pushing the Envelope for RGB-Based Dense 3D Hand Pose Estimation via Neural Rendering", "doi": null, "abstractUrl": 
"/proceedings-article/cvpr/2019/329300b067/1gyreIktw6Q", "parentPublication": { "id": "proceedings/cvpr/2019/3293/0", "title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2020/8508/0/850800a064", "title": "The Effects of Object Shape, Fidelity, Color, and Luminance on Depth Perception in Handheld Mobile Augmented Reality", "doi": null, "abstractUrl": "/proceedings-article/ismar/2020/850800a064/1pysxPMqyTm", "parentPublication": { "id": "proceedings/ismar/2020/8508/0", "title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccvw/2021/0191/0/019100a842", "title": "Single Patch Based 3D High-Fidelity Mask Face Anti-Spoofing", "doi": null, "abstractUrl": "/proceedings-article/iccvw/2021/019100a842/1yNi7Cgpgv6", "parentPublication": { "id": "proceedings/iccvw/2021/0191/0", "title": "2021 IEEE/CVF International Conference on Computer Vision Workshops (ICCVW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNxaw597", "title": "2017 XXVI International Conference on Information, Communication and Automation Technologies (ICAT)", "acronym": "icat", "groupId": "1002979", "volume": "0", "displayVolume": "0", "year": "2017", "__typename": "ProceedingType" }, "article": { "id": "12OmNBbsihA", "doi": "10.1109/ICAT.2017.8171621", "title": "Switching design for the observation of the biomass in alcoholic fermentation processes", "normalizedTitle": "Switching design for the observation of the biomass in alcoholic fermentation processes", "abstract": "In the present paper a bank of local linear observers is designed for an alcoholic fermentation process. The design is based on the respective linear approximants of the mathematical description of the nonlinear process at particular operating points. The target and tolerance operating areas of the process are determined. A residual of the performance of the observers is introduced. A switching mechanism is developed on the basis of the minimal residual. The performance of the proposed observer design scheme is illustrated using simulation results.", "abstracts": [ { "abstractType": "Regular", "content": "In the present paper a bank of local linear observers is designed for an alcoholic fermentation process. The design is based on the respective linear approximants of the mathematical description of the nonlinear process at particular operating points. The target and tolerance operating areas of the process are determined. A residual of the performance of the observers is introduced. A switching mechanism is developed on the basis of the minimal residual. The performance of the proposed observer design scheme is illustrated using simulation results.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In the present paper a bank of local linear observers is designed for an alcoholic fermentation process. 
The design is based on the respective linear approximants of the mathematical description of the nonlinear process at particular operating points. The target and tolerance operating areas of the process are determined. A residual of the performance of the observers is introduced. A switching mechanism is developed on the basis of the minimal residual. The performance of the proposed observer design scheme is illustrated using simulation results.", "fno": "08171621", "keywords": [ "Observers", "Switches", "Stability Criteria", "Biomass", "Linear Systems", "Biological System Modeling", "Observers", "Process Control", "Supervisory Design", "Switching Systems" ], "authors": [ { "affiliation": "Department of Automation Engineering, Technological Education Institute of Sterea Ellada, 34400, Psahna Evias, Greece", "fullName": "Fotis N. Koumboulis", "givenName": "Fotis N.", "surname": "Koumboulis", "__typename": "ArticleAuthorType" }, { "affiliation": "Department of Automation Engineering, Technological Education Institute of Sterea Ellada, 34400, Psahna Evias, Greece", "fullName": "Dimitris G. 
Fragkoulis", "givenName": "Dimitris G.", "surname": "Fragkoulis", "__typename": "ArticleAuthorType" } ], "idPrefix": "icat", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2017-10-01T00:00:00", "pubType": "proceedings", "pages": "1-6", "year": "2017", "issn": null, "isbn": "978-1-5386-3337-3", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "08171620", "articleId": "12OmNBp52Gc", "__typename": "AdjacentArticleType" }, "next": { "fno": "08171622", "articleId": "12OmNxcMSl2", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/med/2006/1/0/04124911", "title": "Method for Optimal Control Calculation of a Fed-batch Fermentation Process", "doi": null, "abstractUrl": "/proceedings-article/med/2006/04124911/12OmNqBbHBK", "parentPublication": { "id": "proceedings/med/2006/1/0", "title": "Proceedings of the 14th Mediterranean Conference on Control and Automation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/eurosim/2013/5073/0/5073a506", "title": "Modelling and Simulation of Biomass Conversion Processes", "doi": null, "abstractUrl": "/proceedings-article/eurosim/2013/5073a506/12OmNqI04OA", "parentPublication": { "id": "proceedings/eurosim/2013/5073/0", "title": "2013 8th EUROSIM Congress on Modelling and Simulation (EUROSIM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cerma/2008/3320/0/3320a319", "title": "Fault Detection and Isolation Problem: Sliding Mode Fuzzy Observers and Linear Integer Programming", "doi": null, "abstractUrl": "/proceedings-article/cerma/2008/3320a319/12OmNrAdsBb", "parentPublication": { "id": "proceedings/cerma/2008/3320/0", "title": "Electronics, Robotics and Automotive Mechanics Conference", "__typename": "ParentPublication" }, 
"__typename": "RecommendedArticleType" }, { "id": "proceedings/aqtr/2018/2205/0/08402740", "title": "The alcoholic fermentation process temperature automatic control", "doi": null, "abstractUrl": "/proceedings-article/aqtr/2018/08402740/12OmNxTEiPv", "parentPublication": { "id": "proceedings/aqtr/2018/2205/0", "title": "2018 IEEE International Conference on Automation, Quality and Testing, Robotics (AQTR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ettandgrs/2008/3563/2/3563b207", "title": "Mapping Paddy Rice Biomass Using ALOS/PALSAR Imagery", "doi": null, "abstractUrl": "/proceedings-article/ettandgrs/2008/3563b207/12OmNxw5BaP", "parentPublication": { "id": "ettandgrs/2008/3563/2", "title": "Education Technology and Training &amp; Geoscience and Remote Sensing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aqtr/2010/6724/1/05520887", "title": "Application of Bond Graph modeling on a fed-batch alcoholic fermentation bioprocess", "doi": null, "abstractUrl": "/proceedings-article/aqtr/2010/05520887/12OmNyL0Txo", "parentPublication": { "id": "proceedings/aqtr/2010/6724/1", "title": "International Conference on Automation, Quality and Testing, Robotics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icece/2010/4031/0/4031a982", "title": "Fault Detection and Isolation Based on Optimal Fault-Tolerant Observers for Linear System", "doi": null, "abstractUrl": "/proceedings-article/icece/2010/4031a982/12OmNyuya6P", "parentPublication": { "id": "proceedings/icece/2010/4031/0", "title": "Electrical and Control Engineering, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aqtr/2012/0701/0/06237668", "title": "Switching fuzzy observers for periodic TS systems", "doi": null, "abstractUrl": 
"/proceedings-article/aqtr/2012/06237668/12OmNzuIjiV", "parentPublication": { "id": "proceedings/aqtr/2012/0701/0", "title": "International Conference on Automation, Quality and Testing, Robotics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icisce/2018/5500/0/550000a859", "title": "Observer-Based Control of Bilateral Teleoperation with Time Delay", "doi": null, "abstractUrl": "/proceedings-article/icisce/2018/550000a859/17D45WKWnIP", "parentPublication": { "id": "proceedings/icisce/2018/5500/0", "title": "2018 5th International Conference on Information Science and Control Engineering (ICISCE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/compsac/2020/7303/0/730300a036", "title": "Improving the Behavior of Evasive Targets in Cooperative Target Observation", "doi": null, "abstractUrl": "/proceedings-article/compsac/2020/730300a036/1nkDgXnsFQk", "parentPublication": { "id": "proceedings/compsac/2020/7303/0", "title": "2020 IEEE 44th Annual Computers, Software, and Applications Conference (COMPSAC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNwCJOXn", "title": "3D User Interfaces (3DUI'06)", "acronym": "3dui", "groupId": "1001623", "volume": "0", "displayVolume": "0", "year": "2006", "__typename": "ProceedingType" }, "article": { "id": "12OmNwHQBbr", "doi": "10.1109/VR.2006.127", "title": "The Bent Pick Ray: An Extended Pointing Technique for Multi-User Interaction", "normalizedTitle": "The Bent Pick Ray: An Extended Pointing Technique for Multi-User Interaction", "abstract": "This paper presents a collaborative pointing technique for colocated multi-user interaction in projection-based virtual environments. Our approach uses bent pick rays to allow users to collaboratively work together without locking objects. Moreover, a user can manipulate distant objects in immediate reach, using a Scaled- Grab technique. The main purpose of the bent pick ray is to provide continuous visual user feedback, keeping a user informed about the collaborative manipulation.", "abstracts": [ { "abstractType": "Regular", "content": "This paper presents a collaborative pointing technique for colocated multi-user interaction in projection-based virtual environments. Our approach uses bent pick rays to allow users to collaboratively work together without locking objects. Moreover, a user can manipulate distant objects in immediate reach, using a Scaled- Grab technique. The main purpose of the bent pick ray is to provide continuous visual user feedback, keeping a user informed about the collaborative manipulation.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "This paper presents a collaborative pointing technique for colocated multi-user interaction in projection-based virtual environments. Our approach uses bent pick rays to allow users to collaboratively work together without locking objects. Moreover, a user can manipulate distant objects in immediate reach, using a Scaled- Grab technique. 
The main purpose of the bent pick ray is to provide continuous visual user feedback, keeping a user informed about the collaborative manipulation.", "fno": "02250062", "keywords": [ "Pointing Technique", "3 D Interaction", "Multi User Input", "Manipulation", "Collaboration", "3 D User Interfaces", "Virtual Environments" ], "authors": [ { "affiliation": "Fraunhofer Institute for Media Communication", "fullName": "Kai Riege", "givenName": "Kai", "surname": "Riege", "__typename": "ArticleAuthorType" }, { "affiliation": "Fraunhofer Institute for Media Communication", "fullName": "Thorsten Holtkamper", "givenName": "Thorsten", "surname": "Holtkamper", "__typename": "ArticleAuthorType" }, { "affiliation": "Fraunhofer Institute for Media Communication", "fullName": "Gerold Wesche", "givenName": "Gerold", "surname": "Wesche", "__typename": "ArticleAuthorType" }, { "affiliation": "Bauhaus University Weimar", "fullName": "Bernd Frohlich", "givenName": "Bernd", "surname": "Frohlich", "__typename": "ArticleAuthorType" } ], "idPrefix": "3dui", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2006-03-01T00:00:00", "pubType": "proceedings", "pages": "62-65", "year": "2006", "issn": null, "isbn": "1-4244-0225-5", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "02250053", "articleId": "12OmNBZpH8h", "__typename": "AdjacentArticleType" }, "next": { "fno": "02250066", "articleId": "12OmNxWLTv7", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/3dui/2006/0225/0/02250069", "title": "SkeweR: a 3D Interaction Technique for 2-User Collaborative Manipulation of Objects in Virtual Environments", "doi": null, "abstractUrl": "/proceedings-article/3dui/2006/02250069/12OmNBV9Ibb", "parentPublication": { "id": "proceedings/3dui/2006/0225/0", "title": "3D User Interfaces (3DUI'06)", 
"__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/dexa/2009/3763/0/3763a289", "title": "Scene Extraction for Video Clips Based on the Relation of Text, Pointing Region and Temporal Duration of User Comments", "doi": null, "abstractUrl": "/proceedings-article/dexa/2009/3763a289/12OmNBc1uwI", "parentPublication": { "id": "proceedings/dexa/2009/3763/0", "title": "2009 20th International Workshop on Database and Expert Systems Application. DEXA 2009", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/isuvr/2010/4124/0/4124a040", "title": "ARtalet: Tangible User Interface Based Immersive Augmented Reality Authoring Tool for Digilog Book", "doi": null, "abstractUrl": "/proceedings-article/isuvr/2010/4124a040/12OmNrEL2B8", "parentPublication": { "id": "proceedings/isuvr/2010/4124/0", "title": "International Symposium on Ubiquitous Virtual Reality", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iscst/2005/2387/0/01553308", "title": "AI workflow management in a collaborative environment", "doi": null, "abstractUrl": "/proceedings-article/iscst/2005/01553308/12OmNs5rla4", "parentPublication": { "id": "proceedings/iscst/2005/2387/0", "title": "2005 International Symposium on Collaborative Technologies and Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ozchi/1998/9206/0/92060014", "title": "Supporting 3D Warping Visual Feedback for Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/ozchi/1998/92060014/12OmNvkYx9b", "parentPublication": { "id": "proceedings/ozchi/1998/9206/0", "title": "Computer-Human Interaction, Australasian Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dui/2007/0907/0/04142838", "title": "The Visual Appearance of User?s Avatar Can Influence the Manipulation 
of Both Real Devices and Virtual Objects", "doi": null, "abstractUrl": "/proceedings-article/3dui/2007/04142838/12OmNwErpsL", "parentPublication": { "id": "proceedings/3dui/2007/0907/0", "title": "2007 IEEE Symposium on 3D User Interfaces", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dui/2010/6846/0/05444729", "title": "Keynote address: Interactive \"smart\" computers", "doi": null, "abstractUrl": "/proceedings-article/3dui/2010/05444729/12OmNyqRn58", "parentPublication": { "id": "proceedings/3dui/2010/6846/0", "title": "2010 IEEE Symposium on 3D User Interfaces (3DUI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dui/2007/0907/0/04142848", "title": "Cascading Hand and Eye Movement for Augmented Reality Videoconferencing", "doi": null, "abstractUrl": "/proceedings-article/3dui/2007/04142848/12OmNyrIatw", "parentPublication": { "id": "proceedings/3dui/2007/0907/0", "title": "2007 IEEE Symposium on 3D User Interfaces", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/dexa/2008/3299/0/3299a150", "title": "Organizing and Navigating User-Generated Content for the Web: A Collaborative Approach Based on a 3D Multimodal Interactive Environment", "doi": null, "abstractUrl": "/proceedings-article/dexa/2008/3299a150/12OmNzE54vT", "parentPublication": { "id": "proceedings/dexa/2008/3299/0", "title": "2008 19th International Workshop on Database and Expert Systems Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2012/12/ttg2012122236", "title": "WYSIWYP: What You See Is What You Pick", "doi": null, "abstractUrl": "/journal/tg/2012/12/ttg2012122236/13rRUEgarnI", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], 
"articleVideos": [] }
{ "proceeding": { "id": "1hQqDCE9Xsk", "title": "2019 12th International Conference on Intelligent Computation Technology and Automation (ICICTA)", "acronym": "icicta", "groupId": "1002487", "volume": "0", "displayVolume": "0", "year": "2019", "__typename": "ProceedingType" }, "article": { "id": "1hQqGl3oS3u", "doi": "10.1109/ICICTA49267.2019.00159", "title": "X-ray Measurement System Based on Attenuation Transmission Method for Tokamak", "normalizedTitle": "X-ray Measurement System Based on Attenuation Transmission Method for Tokamak", "abstract": "A novel X-ray energy spectrum measurement system for tokamak is developed. The system is based on attenuation transmission method by the means of aluminum bars attenuating incident X-rays. The detection module consists of LYSO, attenuated aluminum bars of eight different lengths and collimating apertures and they are all integrated in a shield block composed of lead. The particle model has been carefully established by Geant4 and the whole transmission process is simulated. The preamplifier and data acquisition (DAQ) module for detector output signals processing provide great convenience for system measurement. Real-time, parallel and pipelined algorithms are implemented in the high-performance field-programmable gate array (FPGA). The system has been test in laboratory and excellent results are obtained. Compared with the simulated result, reconstructed energy spectrum is in good agreement with it, proving the reliability and practicability of the system.", "abstracts": [ { "abstractType": "Regular", "content": "A novel X-ray energy spectrum measurement system for tokamak is developed. The system is based on attenuation transmission method by the means of aluminum bars attenuating incident X-rays. The detection module consists of LYSO, attenuated aluminum bars of eight different lengths and collimating apertures and they are all integrated in a shield block composed of lead. 
The particle model has been carefully established by Geant4 and the whole transmission process is simulated. The preamplifier and data acquisition (DAQ) module for detector output signals processing provide great convenience for system measurement. Real-time, parallel and pipelined algorithms are implemented in the high-performance field-programmable gate array (FPGA). The system has been test in laboratory and excellent results are obtained. Compared with the simulated result, reconstructed energy spectrum is in good agreement with it, proving the reliability and practicability of the system.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "A novel X-ray energy spectrum measurement system for tokamak is developed. The system is based on attenuation transmission method by the means of aluminum bars attenuating incident X-rays. The detection module consists of LYSO, attenuated aluminum bars of eight different lengths and collimating apertures and they are all integrated in a shield block composed of lead. The particle model has been carefully established by Geant4 and the whole transmission process is simulated. The preamplifier and data acquisition (DAQ) module for detector output signals processing provide great convenience for system measurement. Real-time, parallel and pipelined algorithms are implemented in the high-performance field-programmable gate array (FPGA). The system has been test in laboratory and excellent results are obtained. 
Compared with the simulated result, reconstructed energy spectrum is in good agreement with it, proving the reliability and practicability of the system.", "fno": "428400a730", "keywords": [ "Fusion Reactor Instrumentation", "Tokamak Devices", "Pipelined Algorithm", "Parallel Algorithm", "Detector Output Signal Processing", "Data Acquisition Module", "Geant 4", "Particle Model", "LYSO", "Tokamak", "Reconstructed Energy Spectrum", "High Performance Field Programmable Gate Array", "System Measurement", "Preamplifier", "Transmission Process", "Collimating Apertures", "Attenuated Aluminum Bars", "Detection Module", "Incident X Rays", "X Ray Energy Spectrum Measurement System", "Attenuation Transmission Method", "Detectors", "Aluminum", "Bars", "Attenuation", "Energy Measurement", "Field Programmable Gate Arrays", "Data Acquisition", "X Ray", "Attenuation Transmission Method", "Energy Spectrum", "High Flux", "FPGA" ], "authors": [ { "affiliation": "University of Science and Technology of China, Hefei, China", "fullName": "Weiwei Fan", "givenName": "Weiwei", "surname": "Fan", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Science and Technology of China, Hefei, China", "fullName": "Zejie Yin", "givenName": "Zejie", "surname": "Yin", "__typename": "ArticleAuthorType" } ], "idPrefix": "icicta", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2019-10-01T00:00:00", "pubType": "proceedings", "pages": "730-733", "year": "2019", "issn": null, "isbn": "978-1-7281-4284-5", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "428400a724", "articleId": "1hQqIvRrF3a", "__typename": "AdjacentArticleType" }, "next": { "fno": "428400a734", "articleId": "1hQqJKH3VVS", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/dsd/2013/2978/0/06628324", "title": "FPGA Based 
Real-Time Data Processing DAQ System for the Mercury Imaging X-Ray Spectrometer", "doi": null, "abstractUrl": "/proceedings-article/dsd/2013/06628324/12OmNB8kHU8", "parentPublication": { "id": "proceedings/dsd/2013/2978/0", "title": "2013 Euromicro Conference on Digital System Design (DSD)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/eqec/2005/8973/0/01567349", "title": "Optimisation of X-ray harmonic generation in capillaries by pulse shaping using an acousto-optic programmable filter", "doi": null, "abstractUrl": "/proceedings-article/eqec/2005/01567349/12OmNvq5jw5", "parentPublication": { "id": "proceedings/eqec/2005/8973/0", "title": "2005 European Quantum Electronics Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/trustcom/2011/2135/0/06121045", "title": "Automated Inspection Using X-Ray Imaging", "doi": null, "abstractUrl": "/proceedings-article/trustcom/2011/06121045/12OmNwDSdFg", "parentPublication": { "id": "proceedings/trustcom/2011/2135/0", "title": "2011IEEE 10th International Conference on Trust, Security and Privacy in Computing and Communications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/isdea/2012/4608/0/4608a583", "title": "The Multi-resolution Technology for the X-ray Coherent Scatter Image", "doi": null, "abstractUrl": "/proceedings-article/isdea/2012/4608a583/12OmNwFidfn", "parentPublication": { "id": "proceedings/isdea/2012/4608/0", "title": "2012 Second International Conference on Intelligent System Design and Engineering Application", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/e-science/2012/4467/0/06404466", "title": "GridFTP based real-time data movement architecture for x-ray photon correlation spectroscopy at the Advanced Photon Source", "doi": null, "abstractUrl": 
"/proceedings-article/e-science/2012/06404466/12OmNxVDuTK", "parentPublication": { "id": "proceedings/e-science/2012/4467/0", "title": "2012 IEEE 8th International Conference on E-Science (e-Science)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icip/1995/7310/3/73103037", "title": "Camera calibration for 2.5-D X-ray metrology", "doi": null, "abstractUrl": "/proceedings-article/icip/1995/73103037/12OmNxZTtHj", "parentPublication": { "id": "proceedings/icip/1995/7310/3", "title": "Image Processing, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/avss/2014/4871/0/06918699", "title": "Automated detection of cars in transmission X-ray images of freight containers", "doi": null, "abstractUrl": "/proceedings-article/avss/2014/06918699/12OmNy1SFIG", "parentPublication": { "id": "proceedings/avss/2014/4871/0", "title": "2014 International Conference on Advanced Video and Signal Based Surveillance (AVSS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/qest/2005/2427/0/24270168", "title": "X-Ray: A Tool for Automatic Measurement of Hardware Parameters", "doi": null, "abstractUrl": "/proceedings-article/qest/2005/24270168/12OmNy5R3Hj", "parentPublication": { "id": "proceedings/qest/2005/2427/0", "title": "Proceedings. 
Second International Conference on the Quantitative Evaluation of Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2013/12/ttg2013122673", "title": "Fast Generation of Virtual X-ray Images for Reconstruction of 3D Anatomy", "doi": null, "abstractUrl": "/journal/tg/2013/12/ttg2013122673/13rRUEgarBu", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/1988/01/i0079", "title": "Automated X-Ray Inspection of Aluminum Castings", "doi": null, "abstractUrl": "/journal/tp/1988/01/i0079/13rRUxjQyi9", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1jIxhEnA8IE", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "acronym": "vrw", "groupId": "1836626", "volume": "0", "displayVolume": "0", "year": "2020", "__typename": "ProceedingType" }, "article": { "id": "1jIxkjif74A", "doi": "10.1109/VRW50115.2020.00129", "title": "Spatial Referencing for Anywhere, Anytime Augmented Reality", "normalizedTitle": "Spatial Referencing for Anywhere, Anytime Augmented Reality", "abstract": "Augmented Reality (AR) systems can effectively enhance collaboration in the real world by providing useful spatial cues through virtual overlays. In particular, AR can be used to facilitate referencing a location in space as required in many collaborative activities. However, the goal of supporting spatial referencing anywhere at any time is hindered by issues with current AR technology, such as the lack of a precise environment model, unreliable tracking, and difficulty in synchronizing among different devices. This paper proposes a research agenda that innovates in the domain of interaction techniques for spatial referencing for everywhere AR.", "abstracts": [ { "abstractType": "Regular", "content": "Augmented Reality (AR) systems can effectively enhance collaboration in the real world by providing useful spatial cues through virtual overlays. In particular, AR can be used to facilitate referencing a location in space as required in many collaborative activities. However, the goal of supporting spatial referencing anywhere at any time is hindered by issues with current AR technology, such as the lack of a precise environment model, unreliable tracking, and difficulty in synchronizing among different devices. 
This paper proposes a research agenda that innovates in the domain of interaction techniques for spatial referencing for everywhere AR.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Augmented Reality (AR) systems can effectively enhance collaboration in the real world by providing useful spatial cues through virtual overlays. In particular, AR can be used to facilitate referencing a location in space as required in many collaborative activities. However, the goal of supporting spatial referencing anywhere at any time is hindered by issues with current AR technology, such as the lack of a precise environment model, unreliable tracking, and difficulty in synchronizing among different devices. This paper proposes a research agenda that innovates in the domain of interaction techniques for spatial referencing for everywhere AR.", "fno": "09090418", "keywords": [ "Biological System Modeling", "Three Dimensional Displays", "Collaboration", "Augmented Reality", "Visualization", "Reliability", "Computational Modeling", "Human Centered Computing", "Mixed Augmented Reality", "Human Centered Computing", "Collaborative Interaction", "Human Centered Computing", "Interaction Techniques" ], "authors": [ { "affiliation": "Virginia Tech,Center for Human-Computer Interaction and Dept. 
of Computer Science", "fullName": "Yuan Li", "givenName": "Yuan", "surname": "Li", "__typename": "ArticleAuthorType" } ], "idPrefix": "vrw", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2020-03-01T00:00:00", "pubType": "proceedings", "pages": "555-556", "year": "2020", "issn": null, "isbn": "978-1-7281-6532-5", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "09090511", "articleId": "1jIxviTG03C", "__typename": "AdjacentArticleType" }, "next": { "fno": "09090567", "articleId": "1jIxoQYQV44", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/3dui/2008/2047/0/04476601", "title": "Studies on the Effectiveness of Virtual Pointers in Collaborative Augmented Reality", "doi": null, "abstractUrl": "/proceedings-article/3dui/2008/04476601/12OmNARAnbI", "parentPublication": { "id": "proceedings/3dui/2008/2047/0", "title": "2008 IEEE Symposium on 3D User Interfaces", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dui/2013/6097/0/06550237", "title": "Poster: 3D referencing for remote task assistance in augmented reality", "doi": null, "abstractUrl": "/proceedings-article/3dui/2013/06550237/12OmNqC2uWf", "parentPublication": { "id": "proceedings/3dui/2013/6097/0", "title": "2013 IEEE Symposium on 3D User Interfaces (3DUI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2012/4660/0/06402558", "title": "3D referencing techniques for physical objects in shared augmented reality", "doi": null, "abstractUrl": "/proceedings-article/ismar/2012/06402558/12OmNxj239f", "parentPublication": { "id": "proceedings/ismar/2012/4660/0", "title": "2012 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "proceedings/ismar/2016/3641/0/3641a077", "title": "PPV: Pixel-Point-Volume Segmentation for Object Referencing in Collaborative Augmented Reality", "doi": null, "abstractUrl": "/proceedings-article/ismar/2016/3641a077/12OmNxy4N6P", "parentPublication": { "id": "proceedings/ismar/2016/3641/0", "title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/latice/2015/9967/0/9967a221", "title": "Learning Geometry with Augmented Reality to Enhance Spatial Ability", "doi": null, "abstractUrl": "/proceedings-article/latice/2015/9967a221/12OmNy5zsoL", "parentPublication": { "id": "proceedings/latice/2015/9967/0", "title": "2015 International Conference on Learning and Teaching in Computing and Engineering (LaTiCE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2005/2459/0/01544697", "title": "Spatial measurements for medical augmented reality", "doi": null, "abstractUrl": "/proceedings-article/ismar/2005/01544697/12OmNyFCvV8", "parentPublication": { "id": "proceedings/ismar/2005/2459/0", "title": "Fourth IEEE and ACM International Symposium on Mixed and Augmented Reality (ISMAR'05)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismarw/2015/8471/0/8471a001", "title": "Collaboration in Mediated and Augmented Reality (CiMAR) Summary", "doi": null, "abstractUrl": "/proceedings-article/ismarw/2015/8471a001/12OmNybfqVO", "parentPublication": { "id": "proceedings/ismarw/2015/8471/0", "title": "2015 IEEE International Symposium on Mixed and Augmented Reality Workshops (ISMARW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/co/2012/07/mco2012070026", "title": "Anywhere Interfaces Using Handheld Augmented Reality", "doi": null, "abstractUrl": 
"/magazine/co/2012/07/mco2012070026/13rRUxYrbPM", "parentPublication": { "id": "mags/co", "title": "Computer", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/11/09873964", "title": "Evaluation of Pointing Ray Techniques for Distant Object Referencing in Model-Free Outdoor Collaborative Augmented Reality", "doi": null, "abstractUrl": "/journal/tg/2022/11/09873964/1GjwJpG6PYs", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2021/1298/0/129800a367", "title": "A Classification of Augmented Reality Approaches for Spatial Data Visualization", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2021/129800a367/1yeQHlFhK0w", "parentPublication": { "id": "proceedings/ismar-adjunct/2021/1298/0", "title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1qyxi3OgORy", "title": "2020 International Conference on 3D Vision (3DV)", "acronym": "3dv", "groupId": "1800494", "volume": "0", "displayVolume": "0", "year": "2020", "__typename": "ProceedingType" }, "article": { "id": "1qyxmOuaCS4", "doi": "10.1109/3DV50981.2020.00010", "title": "Neural Ray Surfaces for Self-Supervised Learning of Depth and Ego-motion", "normalizedTitle": "Neural Ray Surfaces for Self-Supervised Learning of Depth and Ego-motion", "abstract": "Self-supervised learning has emerged as a powerful tool for depth and ego-motion estimation, leading to state-of-the-art results on benchmark datasets. However, one significant limitation shared by current methods is the assumption of a known parametric camera model - usually the standard pinhole geometry - leading to failure when applied to imaging systems that deviate significantly from this assumption (e.g., catadioptric cameras or underwater imaging). In this work, we show that self-supervision can be used to learn accurate depth and ego-motion estimation without prior knowledge of the camera model. Inspired by the geometric model of Grossberg and Nayar, we introduce Neural Ray Surfaces (NRS), convolutional networks that represent pixel-wise projection rays, approximating a wide range of cameras. NRS are fully differentiable and can be learned end-to-end from unlabeled raw videos. We demonstrate the use of NRS for self-supervised learning of visual odometry and depth estimation from raw videos obtained using a wide variety of camera systems, including pinhole, fisheye, and catadioptric.", "abstracts": [ { "abstractType": "Regular", "content": "Self-supervised learning has emerged as a powerful tool for depth and ego-motion estimation, leading to state-of-the-art results on benchmark datasets. 
However, one significant limitation shared by current methods is the assumption of a known parametric camera model - usually the standard pinhole geometry - leading to failure when applied to imaging systems that deviate significantly from this assumption (e.g., catadioptric cameras or underwater imaging). In this work, we show that self-supervision can be used to learn accurate depth and ego-motion estimation without prior knowledge of the camera model. Inspired by the geometric model of Grossberg and Nayar, we introduce Neural Ray Surfaces (NRS), convolutional networks that represent pixel-wise projection rays, approximating a wide range of cameras. NRS are fully differentiable and can be learned end-to-end from unlabeled raw videos. We demonstrate the use of NRS for self-supervised learning of visual odometry and depth estimation from raw videos obtained using a wide variety of camera systems, including pinhole, fisheye, and catadioptric.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Self-supervised learning has emerged as a powerful tool for depth and ego-motion estimation, leading to state-of-the-art results on benchmark datasets. However, one significant limitation shared by current methods is the assumption of a known parametric camera model - usually the standard pinhole geometry - leading to failure when applied to imaging systems that deviate significantly from this assumption (e.g., catadioptric cameras or underwater imaging). In this work, we show that self-supervision can be used to learn accurate depth and ego-motion estimation without prior knowledge of the camera model. Inspired by the geometric model of Grossberg and Nayar, we introduce Neural Ray Surfaces (NRS), convolutional networks that represent pixel-wise projection rays, approximating a wide range of cameras. NRS are fully differentiable and can be learned end-to-end from unlabeled raw videos. 
We demonstrate the use of NRS for self-supervised learning of visual odometry and depth estimation from raw videos obtained using a wide variety of camera systems, including pinhole, fisheye, and catadioptric.", "fno": "812800a001", "keywords": [ "Cameras", "Convolutional Neural Nets", "Image Representation", "Learning Artificial Intelligence", "Motion Estimation", "Ray Tracing", "Robot Vision", "Unlabeled Raw Videos", "Convolutional Networks", "Nayar", "Grossberg", "Camera Systems", "Depth Estimation", "Visual Odometry", "Pixel Wise Projection Rays", "NRS", "Geometric Model", "Underwater Imaging", "Catadioptric Cameras", "Imaging Systems", "Standard Pinhole Geometry", "Parametric Camera Model", "Ego Motion Estimation", "Self Supervised Learning", "Neural Ray Surfaces", "Cameras", "Three Dimensional Displays", "Calibration", "Training", "Standards", "Adaptation Models", "Solid Modeling" ], "authors": [ { "affiliation": "Toyota Research Institute", "fullName": "Igor Vasiljevic", "givenName": "Igor", "surname": "Vasiljevic", "__typename": "ArticleAuthorType" }, { "affiliation": "Toyota Research Institute", "fullName": "Vitor Guizilini", "givenName": "Vitor", "surname": "Guizilini", "__typename": "ArticleAuthorType" }, { "affiliation": "Toyota Research Institute", "fullName": "Rares Ambrus", "givenName": "Rares", "surname": "Ambrus", "__typename": "ArticleAuthorType" }, { "affiliation": "Toyota Research Institute", "fullName": "Sudeep Pillai", "givenName": "Sudeep", "surname": "Pillai", "__typename": "ArticleAuthorType" }, { "affiliation": "Toyota Research Institute", "fullName": "Wolfram Burgard", "givenName": "Wolfram", "surname": "Burgard", "__typename": "ArticleAuthorType" }, { "affiliation": "Toyota Technological Institute at Chicago", "fullName": "Greg Shakhnarovich", "givenName": "Greg", "surname": "Shakhnarovich", "__typename": "ArticleAuthorType" }, { "affiliation": "Toyota Research Institute", "fullName": "Adrien Gaidon", "givenName": "Adrien", "surname": 
"Gaidon", "__typename": "ArticleAuthorType" } ], "idPrefix": "3dv", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2020-11-01T00:00:00", "pubType": "proceedings", "pages": "1-11", "year": "2020", "issn": null, "isbn": "978-1-7281-8128-8", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "812800z029", "articleId": "1qyxnqDNCTK", "__typename": "AdjacentArticleType" }, "next": { "fno": "812800a012", "articleId": "1qyxmx8Ev1C", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/ismar-adjunct/2017/6327/0/6327a245", "title": "[POSTER] An Accurate Calibration Method for Optical See-Through Head-Mounted Displays Based on Actual Eye-Observation Model", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2017/6327a245/12OmNwErpLb", "parentPublication": { "id": "proceedings/ismar-adjunct/2017/6327/0", "title": "2017 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2014/7000/1/7000a057", "title": "Real-Time Direct Dense Matching on Fisheye Images Using Plane-Sweeping Stereo", "doi": null, "abstractUrl": "/proceedings-article/3dv/2014/7000a057/12OmNy2Jt8W", "parentPublication": { "id": "proceedings/3dv/2014/7000/2", "title": "2014 2nd International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2015/8391/0/8391c192", "title": "Non-parametric Structure-Based Calibration of Radially Symmetric Cameras", "doi": null, "abstractUrl": "/proceedings-article/iccv/2015/8391c192/12OmNyxXlvx", "parentPublication": { "id": "proceedings/iccv/2015/8391/0", "title": "2015 IEEE International Conference on Computer Vision (ICCV)", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2018/6420/0/642000f667", "title": "Unsupervised Learning of Depth and Ego-Motion from Monocular Video Using 3D Geometric Constraints", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2018/642000f667/17D45XacGkd", "parentPublication": { "id": "proceedings/cvpr/2018/6420/0", "title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2022/9062/0/09956304", "title": "Forecasting of depth and ego-motion with transformers and self-supervision", "doi": null, "abstractUrl": "/proceedings-article/icpr/2022/09956304/1IHqCiHf26c", "parentPublication": { "id": "proceedings/icpr/2022/9062/0", "title": "2022 26th International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2019/9552/0/955200b798", "title": "Unsupervised Learning of Depth and Ego-Motion with Spatial-Temporal Geometric Constraints", "doi": null, "abstractUrl": "/proceedings-article/icme/2019/955200b798/1cdOVAVDMpq", "parentPublication": { "id": "proceedings/icme/2019/9552/0", "title": "2019 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2019/3131/0/313100a690", "title": "Spherical View Synthesis for Self-Supervised 360&#x00B0; Depth Estimation", "doi": null, "abstractUrl": "/proceedings-article/3dv/2019/313100a690/1ezRCwCzke4", "parentPublication": { "id": "proceedings/3dv/2019/3131/0", "title": "2019 International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aivr/2019/5604/0/560400a058", "title": "Unsupervised Learning of Depth and Ego-Motion From Cylindrical Panoramic Video", "doi": 
null, "abstractUrl": "/proceedings-article/aivr/2019/560400a058/1grOkqpFFm0", "parentPublication": { "id": "proceedings/aivr/2019/5604/0", "title": "2019 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2019/4803/0/09010303", "title": "SynDeMo: Synergistic Deep Feature Alignment for Joint Learning of Depth and Ego-Motion", "doi": null, "abstractUrl": "/proceedings-article/iccv/2019/09010303/1hVlDCJzKRW", "parentPublication": { "id": "proceedings/iccv/2019/4803/0", "title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2019/2506/0/250600a381", "title": "Unsupervised Monocular Depth and Ego-Motion Learning With Structure and Semantics", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2019/250600a381/1iTvpR3Woxy", "parentPublication": { "id": "proceedings/cvprw/2019/2506/0", "title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNxV4itF", "title": "2017 IEEE Virtual Reality (VR)", "acronym": "vr", "groupId": "1000791", "volume": "0", "displayVolume": "0", "year": "2017", "__typename": "ProceedingType" }, "article": { "id": "12OmNApcufx", "doi": "10.1109/VR.2017.7892317", "title": "A preliminary study of users' experiences of meditation in virtual reality", "normalizedTitle": "A preliminary study of users' experiences of meditation in virtual reality", "abstract": "This poster describes a between-groups study (n=24) exploring the use of virtual reality (VR) for facilitating focused meditation. Half of the participants were exposed to a meditation session combing the sound of a guiding voice and a visual environment including virtual objects for the participants to focus on. The other half of the participants were only exposed to the auditory guide. The participants' experience of the sessions was assessed using self-reported measures of perceived concentration, temporal duration, stress reduction, and comfort. Interestingly, no statistically significant differences were found between the two conditions. This finding may be revealing in regards to the usefulness of VR-based meditation.", "abstracts": [ { "abstractType": "Regular", "content": "This poster describes a between-groups study (n=24) exploring the use of virtual reality (VR) for facilitating focused meditation. Half of the participants were exposed to a meditation session combing the sound of a guiding voice and a visual environment including virtual objects for the participants to focus on. The other half of the participants were only exposed to the auditory guide. The participants' experience of the sessions was assessed using self-reported measures of perceived concentration, temporal duration, stress reduction, and comfort. Interestingly, no statistically significant differences were found between the two conditions. 
This finding may be revealing in regards to the usefulness of VR-based meditation.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "This poster describes a between-groups study (n=24) exploring the use of virtual reality (VR) for facilitating focused meditation. Half of the participants were exposed to a meditation session combing the sound of a guiding voice and a visual environment including virtual objects for the participants to focus on. The other half of the participants were only exposed to the auditory guide. The participants' experience of the sessions was assessed using self-reported measures of perceived concentration, temporal duration, stress reduction, and comfort. Interestingly, no statistically significant differences were found between the two conditions. This finding may be revealing in regards to the usefulness of VR-based meditation.", "fno": "07892317", "keywords": [ "Stress", "Psychology", "Virtual Reality", "Atmospheric Measurements", "Particle Measurements", "Boats", "Visualization", "H 1 2 Information Systems User Machine Systems Human Factors", "I 3 7 Computer Graphics Three Dimensional Graphics And Realism Virtual Reality" ], "authors": [ { "affiliation": "Aalborg University Copenhagen, Denmark", "fullName": "Thea Andersen", "givenName": "Thea", "surname": "Andersen", "__typename": "ArticleAuthorType" }, { "affiliation": "Aalborg University Copenhagen, Denmark", "fullName": "Gintare Anisimovaite", "givenName": "Gintare", "surname": "Anisimovaite", "__typename": "ArticleAuthorType" }, { "affiliation": "Aalborg University Copenhagen, Denmark", "fullName": "Anders Christiansen", "givenName": "Anders", "surname": "Christiansen", "__typename": "ArticleAuthorType" }, { "affiliation": "Aalborg University Copenhagen, Denmark", "fullName": "Mohamed Hussein", "givenName": "Mohamed", "surname": "Hussein", "__typename": "ArticleAuthorType" }, { "affiliation": "Aalborg University Copenhagen, Denmark", "fullName": "Carol Lund", 
"givenName": "Carol", "surname": "Lund", "__typename": "ArticleAuthorType" }, { "affiliation": "Aalborg University Copenhagen, Denmark", "fullName": "Thomas Nielsen", "givenName": "Thomas", "surname": "Nielsen", "__typename": "ArticleAuthorType" }, { "affiliation": "Aalborg University Copenhagen, Denmark", "fullName": "Eoin Rafferty", "givenName": "Eoin", "surname": "Rafferty", "__typename": "ArticleAuthorType" }, { "affiliation": "Aalborg University Copenhagen, Denmark", "fullName": "Niels C. Nilsson", "givenName": "Niels C.", "surname": "Nilsson", "__typename": "ArticleAuthorType" }, { "affiliation": "Aalborg University Copenhagen, Denmark", "fullName": "Rolf Nordahl", "givenName": "Rolf", "surname": "Nordahl", "__typename": "ArticleAuthorType" }, { "affiliation": "Aalborg University Copenhagen, Denmark", "fullName": "Stefania Serafin", "givenName": "Stefania", "surname": "Serafin", "__typename": "ArticleAuthorType" } ], "idPrefix": "vr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2017-01-01T00:00:00", "pubType": "proceedings", "pages": "343-344", "year": "2017", "issn": "2375-5334", "isbn": "978-1-5090-6647-6", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "07892316", "articleId": "12OmNCzb9vr", "__typename": "AdjacentArticleType" }, "next": { "fno": "07892318", "articleId": "12OmNBQ2W0V", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/var4good/2018/5977/0/08576881", "title": "Transformative Experiences Become More Accessible Through Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/var4good/2018/08576881/17D45VTRoxP", "parentPublication": { "id": "proceedings/var4good/2018/5977/0", "title": "2018 IEEE Workshop on Augmented and Virtual Realities for Good (VAR4Good)", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "proceedings/vr/2022/9617/0/961700a057", "title": "Visual Fidelity Effects on Expressive Self-avatar in Virtual Reality: First Impressions Matter", "doi": null, "abstractUrl": "/proceedings-article/vr/2022/961700a057/1CJc41zMnFC", "parentPublication": { "id": "proceedings/vr/2022/9617/0", "title": "2022 IEEE on Conference Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aivr/2022/5725/0/572500a193", "title": "Comparing Meditation and Immersive Virtual Environment for Relaxation", "doi": null, "abstractUrl": "/proceedings-article/aivr/2022/572500a193/1KmFfgROQxO", "parentPublication": { "id": "proceedings/aivr/2022/5725/0", "title": "2022 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wevr/2019/4050/0/08809592", "title": "Frames to Zones: Applying Mise-en-Sc&#x00E8;ne Techniques in Cinematic Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/wevr/2019/08809592/1cI62kbHwha", "parentPublication": { "id": "proceedings/wevr/2019/4050/0", "title": "2019 IEEE 5th Workshop on Everyday Virtual Reality (WEVR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08797870", "title": "The Influence of Body Position on Presence When Playing a Virtual Reality Game", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08797870/1cJ0RyhQnC0", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08797750", "title": "A Study in Virtual Reality on (Non-)Gamers&#x0027; Attitudes and Behaviors", "doi": null, "abstractUrl": 
"/proceedings-article/vr/2019/08797750/1cJ17xo0CEo", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2020/8508/0/850800a474", "title": "A Neurophysiological Approach for Measuring Presence in Immersive Virtual Environments", "doi": null, "abstractUrl": "/proceedings-article/ismar/2020/850800a474/1pysuR65ESQ", "parentPublication": { "id": "proceedings/ismar/2020/8508/0", "title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2021/4057/0/405700a739", "title": "[DC] Psychophysical Effects of Augmented Reality Experiences", "doi": null, "abstractUrl": "/proceedings-article/vrw/2021/405700a739/1tnWQJT7eWA", "parentPublication": { "id": "proceedings/vrw/2021/4057/0", "title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2021/1298/0/129800a437", "title": "Focus Group on Social Virtual Reality in Social Virtual Reality: Effects on Emotion and Self-Awareness", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2021/129800a437/1yeQD8KNChO", "parentPublication": { "id": "proceedings/ismar-adjunct/2021/1298/0", "title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aivr/2021/3225/0/322500a160", "title": "Affective State Classification in Virtual Reality Environments Using Electrocardiogram and Respiration Signals", "doi": null, "abstractUrl": "/proceedings-article/aivr/2021/322500a160/1zxLvYxcT2U", "parentPublication": { "id": 
"proceedings/aivr/2021/3225/0", "title": "2021 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNwE9OtI", "title": "2010 5th IEEE International Conference on Global Software Engineering", "acronym": "icgse", "groupId": "1001266", "volume": "0", "displayVolume": "0", "year": "2010", "__typename": "ProceedingType" }, "article": { "id": "12OmNzd7bCA", "doi": "10.1109/ICGSE.2010.27", "title": "The Impact of Number of Sites in a Follow the Sun Setting on the Actual and Perceived Working Speed and Accuracy: A Controlled Experiment", "normalizedTitle": "The Impact of Number of Sites in a Follow the Sun Setting on the Actual and Perceived Working Speed and Accuracy: A Controlled Experiment", "abstract": "Follow the sun (FTS) software development seems promising in theory, however well founded knowledge on its successes is rare. One of the questions unanswered is: what is the impact of increasing the number of sites in a FTS cycle on working speed and accuracy? In this paper a controlled experiment is presented in which the impact of the number of sites in a daily cycle in terms of overall working speed, individual working speed and working accuracy is measured. Furthermore, the participants perception of working speed and their perception of working accuracy is investigated. The results indicate a clear discrepancy between actual and perceived performance, when increasing the number of sites.", "abstracts": [ { "abstractType": "Regular", "content": "Follow the sun (FTS) software development seems promising in theory, however well founded knowledge on its successes is rare. One of the questions unanswered is: what is the impact of increasing the number of sites in a FTS cycle on working speed and accuracy? In this paper a controlled experiment is presented in which the impact of the number of sites in a daily cycle in terms of overall working speed, individual working speed and working accuracy is measured. Furthermore, the participants perception of working speed and their perception of working accuracy is investigated. 
The results indicate a clear discrepancy between actual and perceived performance, when increasing the number of sites.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Follow the sun (FTS) software development seems promising in theory, however well founded knowledge on its successes is rare. One of the questions unanswered is: what is the impact of increasing the number of sites in a FTS cycle on working speed and accuracy? In this paper a controlled experiment is presented in which the impact of the number of sites in a daily cycle in terms of overall working speed, individual working speed and working accuracy is measured. Furthermore, the participants perception of working speed and their perception of working accuracy is investigated. The results indicate a clear discrepancy between actual and perceived performance, when increasing the number of sites.", "fno": "05581506", "keywords": [ "Software Management", "Follow The Sun Software Development", "Working Speed Perception", "Working Accuracy Perception", "Accuracy", "Atmospheric Measurements", "Particle Measurements", "Schedules", "Sun", "Time To Market", "Bridges", "Follow The Sun", "FTS", "Experiment", "Working Speed", "Accuracy", "Perceived Working Speed", "Perceived Accuracy" ], "authors": [ { "affiliation": null, "fullName": "Rini van Solingen", "givenName": "Rini", "surname": "van Solingen", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Menno Valkema", "givenName": "Menno", "surname": "Valkema", "__typename": "ArticleAuthorType" } ], "idPrefix": "icgse", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2010-08-01T00:00:00", "pubType": "proceedings", "pages": "165-174", "year": "2010", "issn": "2329-6305", "isbn": "978-1-4244-7619-0", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "05581505", "articleId": "12OmNx6g6m0", 
"__typename": "AdjacentArticleType" }, "next": { "fno": "05581507", "articleId": "12OmNxdDFKa", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icgsew/2014/5206/0/5206a021", "title": "FTS-SPM: A Software Process Model for Follow the Sun Development: Preliminary Results", "doi": null, "abstractUrl": "/proceedings-article/icgsew/2014/5206a021/12OmNAOKnUi", "parentPublication": { "id": "proceedings/icgsew/2014/5206/0", "title": "2014 IEEE International Conference on Global Software Engineeering Workshops (ICGSEW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icgse/2012/4787/0/4787a179", "title": "Setting Up a Stochastic Model for Teams Working in a Follow-the-Sun Environment", "doi": null, "abstractUrl": "/proceedings-article/icgse/2012/4787a179/12OmNrJRPpz", "parentPublication": { "id": "proceedings/icgse/2012/4787/0", "title": "2012 IEEE Seventh International Conference on Global Software Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icgse/2012/4787/0/4787a164", "title": "Mapping Global Software Development Practices for Follow-the-Sun Process", "doi": null, "abstractUrl": "/proceedings-article/icgse/2012/4787a164/12OmNvStcRu", "parentPublication": { "id": "proceedings/icgse/2012/4787/0", "title": "2012 IEEE Seventh International Conference on Global Software Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icgse/2011/4503/0/4503a060", "title": "Researching into Follow-the-Sun Software Development: Challenges and Opportunities", "doi": null, "abstractUrl": "/proceedings-article/icgse/2011/4503a060/12OmNwFRp9m", "parentPublication": { "id": "proceedings/icgse/2011/4503/0", "title": "2011 IEEE Sixth International Conference on Global Software Engineering", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "proceedings/hicss/2014/2504/0/2504a331", "title": "Handoffs Management in Follow-the-Sun Software Projects: A Case Study", "doi": null, "abstractUrl": "/proceedings-article/hicss/2014/2504a331/12OmNxTEiR0", "parentPublication": { "id": "proceedings/hicss/2014/2504/0", "title": "2014 47th Hawaii International Conference on System Sciences (HICSS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icgse-w/2011/4558/0/4558a054", "title": "Follow-the-Sun Methodology in a Stochastic Modeling Perspective", "doi": null, "abstractUrl": "/proceedings-article/icgse-w/2011/4558a054/12OmNyL0TkM", "parentPublication": { "id": "proceedings/icgse-w/2011/4558/0", "title": "Global Software Engineering Workshop, IEEE International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/hicss/2009/3450/0/01-03-01", "title": "Follow The Sun Software Development: New Perspectives, Conceptual Foundation, and Exploratory Field Study", "doi": null, "abstractUrl": "/proceedings-article/hicss/2009/01-03-01/12OmNyq0zKh", "parentPublication": { "id": "proceedings/hicss/2009/3450/0", "title": "2009 42nd Hawaii International Conference on System Sciences", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2014/04/ttg201404588", "title": "Recalibration of Perceived Distance in Virtual Environments Occurs Rapidly and Transfers Asymmetrically Across Scale", "doi": null, "abstractUrl": "/journal/tg/2014/04/ttg201404588/13rRUyuegh9", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/11/09874769", "title": "Objects May Be Farther Than They Appear: Depth Compression Diminishes Over Time with Repeated Calibration in Virtual Reality", "doi": null, 
"abstractUrl": "/journal/tg/2022/11/09874769/1GjwLV9sW7m", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/percom-workshops/2020/4716/0/09156234", "title": "Sensor Self-Report Alignment (SSRA): Reducing Sun Exposure Assessment Error", "doi": null, "abstractUrl": "/proceedings-article/percom-workshops/2020/09156234/1m1jCd6G3Hq", "parentPublication": { "id": "proceedings/percom-workshops/2020/4716/0", "title": "2020 IEEE International Conference on Pervasive Computing and Communications Workshops (PerCom Workshops)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1KmF7rVz6Y8", "title": "2022 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)", "acronym": "aivr", "groupId": "1830004", "volume": "0", "displayVolume": "0", "year": "2022", "__typename": "ProceedingType" }, "article": { "id": "1KmFfXI75Ly", "doi": "10.1109/AIVR56993.2022.00033", "title": "Attention Score: Objective Measure of Attentiveness in Immersive Omnidirectional Videos", "normalizedTitle": "Attention Score: Objective Measure of Attentiveness in Immersive Omnidirectional Videos", "abstract": "The application of Virtual Reality (VR) in education and entertainment has shown to increase the overall learning experience of the users and inspire them in unique ways. Identifying and analyzing the viewing and attention patterns of users is extremely important for improving their engagement with the content in VR. In this paper, we introduce two new objective metrics: Attention Score (ATNS) and Maximum Attention Span Score (MASS) to quantify the attention of a user to a Point of Interest (PoI) in a VR scenario. The metrics are computationally inexpensive and require no additional hardware. As part of our work, we present ReptilesVR, a VR-based edutainment and cinematic zoo experience developed using omnidirectional stereo videos to provide immersive close encounters with wildlife. We conducted a study with 25 participants to quantitatively measure the overall experience of users in ReptilesVR using the two newly defined metrics: ATNS and MASS. Also, the emotional engagement of the users was measured qualitatively using a questionnaire on a Likert scale of 1&#x2013;10. We observed that scenarios with activity in close proximity to reptiles have positive correlation between the proposed metrics with the subjective scores of presence (&#x03BC; = 8.58, &#x03C3; = 1.28) and immersion (&#x03BC; = 9.0, &#x03C3; = 1.14) as reported by participants in the questionnaire. 
This work shows that the metrics can be useful to find the features that draw a user&#x2019;s attention to certain points of interest (PoIs) in a scene. This will aid in the enhancement of the overall user experience and the development of more compelling immersive VR applications.", "abstracts": [ { "abstractType": "Regular", "content": "The application of Virtual Reality (VR) in education and entertainment has shown to increase the overall learning experience of the users and inspire them in unique ways. Identifying and analyzing the viewing and attention patterns of users is extremely important for improving their engagement with the content in VR. In this paper, we introduce two new objective metrics: Attention Score (ATNS) and Maximum Attention Span Score (MASS) to quantify the attention of a user to a Point of Interest (PoI) in a VR scenario. The metrics are computationally inexpensive and require no additional hardware. As part of our work, we present ReptilesVR, a VR-based edutainment and cinematic zoo experience developed using omnidirectional stereo videos to provide immersive close encounters with wildlife. We conducted a study with 25 participants to quantitatively measure the overall experience of users in ReptilesVR using the two newly defined metrics: ATNS and MASS. Also, the emotional engagement of the users was measured qualitatively using a questionnaire on a Likert scale of 1&#x2013;10. We observed that scenarios with activity in close proximity to reptiles have positive correlation between the proposed metrics with the subjective scores of presence (&#x03BC; = 8.58, &#x03C3; = 1.28) and immersion (&#x03BC; = 9.0, &#x03C3; = 1.14) as reported by participants in the questionnaire. This work shows that the metrics can be useful to find the features that draw a user&#x2019;s attention to certain points of interest (PoIs) in a scene. 
This will aid in the enhancement of the overall user experience and the development of more compelling immersive VR applications.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "The application of Virtual Reality (VR) in education and entertainment has shown to increase the overall learning experience of the users and inspire them in unique ways. Identifying and analyzing the viewing and attention patterns of users is extremely important for improving their engagement with the content in VR. In this paper, we introduce two new objective metrics: Attention Score (ATNS) and Maximum Attention Span Score (MASS) to quantify the attention of a user to a Point of Interest (PoI) in a VR scenario. The metrics are computationally inexpensive and require no additional hardware. As part of our work, we present ReptilesVR, a VR-based edutainment and cinematic zoo experience developed using omnidirectional stereo videos to provide immersive close encounters with wildlife. We conducted a study with 25 participants to quantitatively measure the overall experience of users in ReptilesVR using the two newly defined metrics: ATNS and MASS. Also, the emotional engagement of the users was measured qualitatively using a questionnaire on a Likert scale of 1–10. We observed that scenarios with activity in close proximity to reptiles have positive correlation between the proposed metrics with the subjective scores of presence (μ = 8.58, σ = 1.28) and immersion (μ = 9.0, σ = 1.14) as reported by participants in the questionnaire. This work shows that the metrics can be useful to find the features that draw a user’s attention to certain points of interest (PoIs) in a scene. 
This will aid in the enhancement of the overall user experience and the development of more compelling immersive VR applications.", "fno": "572500a163", "keywords": [ "Biology Computing", "Image Motion Analysis", "Stereo Image Processing", "User Experience", "Video Signal Processing", "Virtual Reality", "Zoology", "ATNS", "Attention Score", "Cinematic Zoo Experience", "Education", "Emotional Engagement", "Entertainment", "Immersive Omnidirectional Videos", "Immersive Virtual Reality Applications", "Immersive VR Applications", "Learning Experience", "Maximum Attention Span Score", "Objective Attentiveness Measure", "Omnidirectional Stereo Videos", "Po I", "Point Of Interest", "Reptiles VR", "User Experience", "VR Based Edutainment", "Visualization", "Correlation", "Atmospheric Measurements", "Wildlife", "Virtual Reality", "Particle Measurements", "Hardware", "Visual Attention", "Immersion", "Presence", "Omnidirectional Videos" ], "authors": [ { "affiliation": "Indian Institute of Technology Madras,Touchlab,Dept. of Applied Mechanics,Chennai,India", "fullName": "Jay Bhanushali", "givenName": "Jay", "surname": "Bhanushali", "__typename": "ArticleAuthorType" }, { "affiliation": "Madras Crocodile Bank Trust,Chennai,India", "fullName": "Achsah Steffi John", "givenName": "Achsah Steffi", "surname": "John", "__typename": "ArticleAuthorType" }, { "affiliation": "Indian Institute of Technology Madras,Touchlab,Dept. 
of Applied Mechanics,Chennai,India", "fullName": "Manivannan Muniyadi", "givenName": "Manivannan", "surname": "Muniyadi", "__typename": "ArticleAuthorType" } ], "idPrefix": "aivr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2022-12-01T00:00:00", "pubType": "proceedings", "pages": "163-170", "year": "2022", "issn": null, "isbn": "978-1-6654-5725-5", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "572500a157", "articleId": "1KmFaCkhwPK", "__typename": "AdjacentArticleType" }, "next": { "fno": "572500a171", "articleId": "1KmFghvd14s", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/vr/2017/6647/0/07892317", "title": "A preliminary study of users' experiences of meditation in virtual reality", "doi": null, "abstractUrl": "/proceedings-article/vr/2017/07892317/12OmNApcufx", "parentPublication": { "id": "proceedings/vr/2017/6647/0", "title": "2017 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ngmast/2016/0949/0/07801465", "title": "Immersive Virtual Reality as a Supplement in the Rehabilitation Program of Post-Stroke Patients", "doi": null, "abstractUrl": "/proceedings-article/ngmast/2016/07801465/12OmNrMZpyR", "parentPublication": { "id": "proceedings/ngmast/2016/0949/0", "title": "2016 10th International Conference on Next-Generation Mobile Applications, Security and Technologies (NGMAST)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ichi/2016/6117/0/6117a216", "title": "Going Outside While Staying Inside — Exercise Motivation with Immersive vs. 
Non–immersive Recreational Virtual Environment Augmentation for Older Adult Nursing Home Residents", "doi": null, "abstractUrl": "/proceedings-article/ichi/2016/6117a216/12OmNxwncza", "parentPublication": { "id": "proceedings/ichi/2016/6117/0", "title": "2016 IEEE International Conference on Healthcare Informatics (ICHI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2018/3365/0/08446490", "title": "Water Flow Measurement Technology Assessing Spatial User Interaction in an Underwater Immersive Virtual Reality Environment", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08446490/13bd1fKQxqS", "parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2018/3365/0/08446488", "title": "Investigating the Reason for Increased Postural Instability in Virtual Reality for Persons with Balance Impairments", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08446488/13bd1gJ1v0N", "parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/var4good/2018/5977/0/08576881", "title": "Transformative Experiences Become More Accessible Through Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/var4good/2018/08576881/17D45VTRoxP", "parentPublication": { "id": "proceedings/var4good/2018/5977/0", "title": "2018 IEEE Workshop on Augmented and Virtual Realities for Good (VAR4Good)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/percom-workshops/2022/1647/0/09767281", "title": "An Exploratory Analysis of Interactive VR-Based Framework for Multi-Componential Analysis of Emotion", "doi": 
null, "abstractUrl": "/proceedings-article/percom-workshops/2022/09767281/1Df82pGW23e", "parentPublication": { "id": "proceedings/percom-workshops/2022/1647/0", "title": "2022 IEEE International Conference on Pervasive Computing and Communications Workshops and other Affiliated Events (PerCom Workshops)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08797858", "title": "Immersive EEG: Evaluating Electroencephalography in Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08797858/1cJ0JWkSE3m", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08797805", "title": "Encouraging Rehabilitation Trials: The Potential of 360&#x00B0; Immersive Instruction Videos", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08797805/1cJ13iaKgve", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/12/09206143", "title": "Spatial Presence, Performance, and Behavior between Real, Remote, and Virtual Immersive Environments", "doi": null, "abstractUrl": "/journal/tg/2020/12/09206143/1npxM6fDN7i", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1cI6akLvAuQ", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "acronym": "vr", "groupId": "1000791", "volume": "0", "displayVolume": "0", "year": "2019", "__typename": "ProceedingType" }, "article": { "id": "1cJ17xo0CEo", "doi": "10.1109/VR.2019.8797750", "title": "A Study in Virtual Reality on (Non-)Gamers&#x0027; Attitudes and Behaviors", "normalizedTitle": "A Study in Virtual Reality on (Non-)Gamers' Attitudes and Behaviors", "abstract": "Virtual Reality (VR) constitutes an advantageous alternative for research considering scenarios that are not feasible in real-life conditions. Thus, this technology was used in the presented study for the behavioral observation of participants when being exposed to autonomous vehicles (AVs). Further data was collected via questionnaires before, directly after the experience and one month later to measure the impact that the experience had on participants' general attitude towards AVs. Despite a nonsignificance of the results, first insights suggest that participants with low prior gaming experience were more impacted than gamers. Future work will involve bigger sample size and refined questionnaires.", "abstracts": [ { "abstractType": "Regular", "content": "Virtual Reality (VR) constitutes an advantageous alternative for research considering scenarios that are not feasible in real-life conditions. Thus, this technology was used in the presented study for the behavioral observation of participants when being exposed to autonomous vehicles (AVs). Further data was collected via questionnaires before, directly after the experience and one month later to measure the impact that the experience had on participants' general attitude towards AVs. Despite a nonsignificance of the results, first insights suggest that participants with low prior gaming experience were more impacted than gamers. 
Future work will involve bigger sample size and refined questionnaires.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Virtual Reality (VR) constitutes an advantageous alternative for research considering scenarios that are not feasible in real-life conditions. Thus, this technology was used in the presented study for the behavioral observation of participants when being exposed to autonomous vehicles (AVs). Further data was collected via questionnaires before, directly after the experience and one month later to measure the impact that the experience had on participants' general attitude towards AVs. Despite a nonsignificance of the results, first insights suggest that participants with low prior gaming experience were more impacted than gamers. Future work will involve bigger sample size and refined questionnaires.", "fno": "08797750", "keywords": [ "Behavioural Sciences Computing", "Computer Games", "Virtual Reality", "Real Life Conditions", "Behavioral Observation", "Autonomous Vehicles", "Low Prior Gaming Experience", "Virtual Reality", "VR", "Nongamer Attitudes", "Nongamer Behaviors", "Autonomous Vehicles", "Virtual Environments", "Atmospheric Measurements", "Particle Measurements", "Data Collection", "User Interfaces", "Virtual Reality", "Autonomous Vehicles", "Human Centered Design", "H 5 1 Information Interfaces And Presentation Multimedia Information Systems X 2013 Artificial Augmented And Virtual Realities", "H 1 2 User Machine Systems Human Factors", "H 5 2 User Interfaces User Centered Design" ], "authors": [ { "affiliation": "TUMCREATE, Singapore", "fullName": "Sebastian Stadler", "givenName": "Sebastian", "surname": "Stadler", "__typename": "ArticleAuthorType" }, { "affiliation": "TUMCREATE, Singapore", "fullName": "Henriette Cornet", "givenName": "Henriette", "surname": "Cornet", "__typename": "ArticleAuthorType" }, { "affiliation": "Chair of Industrial Design, Technical University of Munich, Germany", "fullName": "Fritz 
Frenkler", "givenName": "Fritz", "surname": "Frenkler", "__typename": "ArticleAuthorType" } ], "idPrefix": "vr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2019-03-01T00:00:00", "pubType": "proceedings", "pages": "1169-1170", "year": "2019", "issn": null, "isbn": "978-1-7281-1377-7", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "08798114", "articleId": "1cJ12s1nvQk", "__typename": "AdjacentArticleType" }, "next": { "fno": "08797956", "articleId": "1cJ17BLEK88", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/vr/2017/6647/0/07892317", "title": "A preliminary study of users' experiences of meditation in virtual reality", "doi": null, "abstractUrl": "/proceedings-article/vr/2017/07892317/12OmNApcufx", "parentPublication": { "id": "proceedings/vr/2017/6647/0", "title": "2017 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/itag/2014/6795/0/6795a013", "title": "How Body Movement Influences Virtual Reality Analgesia?", "doi": null, "abstractUrl": "/proceedings-article/itag/2014/6795a013/12OmNC4wtBe", "parentPublication": { "id": "proceedings/itag/2014/6795/0", "title": "2014 International Conference on Interactive Technologies and Games (iTAG)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2017/6647/0/07892344", "title": "Immersion and coherence in a visual cliff environment", "doi": null, "abstractUrl": "/proceedings-article/vr/2017/07892344/12OmNy50gc9", "parentPublication": { "id": "proceedings/vr/2017/6647/0", "title": "2017 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/lt/2017/02/07480447", "title": "Investigating the Impact of Gaming Habits, 
Gender, and Age on the Effectiveness of an Educational Video Game: An Exploratory Study", "doi": null, "abstractUrl": "/journal/lt/2017/02/07480447/13rRUwwaKoA", "parentPublication": { "id": "trans/lt", "title": "IEEE Transactions on Learning Technologies", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/11/09874769", "title": "Objects May Be Farther Than They Appear: Depth Compression Diminishes Over Time with Repeated Calibration in Virtual Reality", "doi": null, "abstractUrl": "/journal/tg/2022/11/09874769/1GjwLV9sW7m", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cw/2022/6814/0/681400a218", "title": "Virtual Reality as a tool for furniture design", "doi": null, "abstractUrl": "/proceedings-article/cw/2022/681400a218/1I6ROhgWA1y", "parentPublication": { "id": "proceedings/cw/2022/6814/0", "title": "2022 International Conference on Cyberworlds (CW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2022/9007/0/900700a215", "title": "In-Place Collaboration in Extended Reality Data Visualization", "doi": null, "abstractUrl": "/proceedings-article/iv/2022/900700a215/1KaFNF7EG6k", "parentPublication": { "id": "proceedings/iv/2022/9007/0", "title": "2022 26th International Conference Information Visualisation (IV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/08/09293401", "title": "Self-Illusion: A Study on Cognition of Role-Playing in Immersive Virtual Environments", "doi": null, "abstractUrl": "/journal/tg/2022/08/09293401/1pyonpfZjoY", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/ismar/2020/8508/0/850800a474", "title": "A Neurophysiological Approach for Measuring Presence in Immersive Virtual Environments", "doi": null, "abstractUrl": "/proceedings-article/ismar/2020/850800a474/1pysuR65ESQ", "parentPublication": { "id": "proceedings/ismar/2020/8508/0", "title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2021/1298/0/129800a437", "title": "Focus Group on Social Virtual Reality in Social Virtual Reality: Effects on Emotion and Self-Awareness", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2021/129800a437/1yeQD8KNChO", "parentPublication": { "id": "proceedings/ismar-adjunct/2021/1298/0", "title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1pBMeBWXAZ2", "title": "2020 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "acronym": "ismar-adjunct", "groupId": "1810084", "volume": "0", "displayVolume": "0", "year": "2020", "__typename": "ProceedingType" }, "article": { "id": "1pBMiVCpEGY", "doi": "10.1109/ISMAR-Adjunct51615.2020.00049", "title": "An Exploratory Study for Designing Social Experience of Watching VR Movies Based on Audience&#x2019;s Voice Comments", "normalizedTitle": "An Exploratory Study for Designing Social Experience of Watching VR Movies Based on Audience’s Voice Comments", "abstract": "Social experience is important when audience are watching movies. Virtual reality (VR) movies engage audience through immersive environment and interactive narrative. However, VR headsets restrict audience to an individual experience, which disrupt the potential for shared social realities. In our study, we propose an approach to design an asynchronous social experience that allows the participant to receive other audiences' voice comments (such as their opinions, impressions or emotional reactions) in VR movies. We measured the participants' feedback on their engagement levels, recall abilities and social presence. The results showed that in VR-Voice Comment (VR-VC) movie, the audience's voice comments could affect participant's engagement and the recall of information in the scenes. The participants obtained social awareness and enjoyment at the same time. A few of them were worried mainly because of the potential auditory clutter that resulted from unpredictable voice comments. We discuss the design implications for this and directions for future research. Overall, we observe a positive tendency in watching VR-VC movie, which could be adapted for future VR movie experience.", "abstracts": [ { "abstractType": "Regular", "content": "Social experience is important when audience are watching movies. 
Virtual reality (VR) movies engage audience through immersive environment and interactive narrative. However, VR headsets restrict audience to an individual experience, which disrupt the potential for shared social realities. In our study, we propose an approach to design an asynchronous social experience that allows the participant to receive other audiences' voice comments (such as their opinions, impressions or emotional reactions) in VR movies. We measured the participants' feedback on their engagement levels, recall abilities and social presence. The results showed that in VR-Voice Comment (VR-VC) movie, the audience's voice comments could affect participant's engagement and the recall of information in the scenes. The participants obtained social awareness and enjoyment at the same time. A few of them were worried mainly because of the potential auditory clutter that resulted from unpredictable voice comments. We discuss the design implications for this and directions for future research. Overall, we observe a positive tendency in watching VR-VC movie, which could be adapted for future VR movie experience.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Social experience is important when audience are watching movies. Virtual reality (VR) movies engage audience through immersive environment and interactive narrative. However, VR headsets restrict audience to an individual experience, which disrupt the potential for shared social realities. In our study, we propose an approach to design an asynchronous social experience that allows the participant to receive other audiences' voice comments (such as their opinions, impressions or emotional reactions) in VR movies. We measured the participants' feedback on their engagement levels, recall abilities and social presence. The results showed that in VR-Voice Comment (VR-VC) movie, the audience's voice comments could affect participant's engagement and the recall of information in the scenes. 
The participants obtained social awareness and enjoyment at the same time. A few of them were worried mainly because of the potential auditory clutter that resulted from unpredictable voice comments. We discuss the design implications for this and directions for future research. Overall, we observe a positive tendency in watching VR-VC movie, which could be adapted for future VR movie experience.", "fno": "767500a147", "keywords": [ "Entertainment", "Virtual Reality", "Engagement Levels", "Social Presence", "Social Awareness", "Enjoyment", "VR VC Movie", "Virtual Reality Movies", "VR Headsets", "Social Realities", "Asynchronous Social Experience", "Impressions", "Emotional Reactions", "VR Movie Experience", "Audience Voice Comments", "Immersive Environment", "Interactive Narrative", "Recall Abilities", "Headphones", "Atmospheric Measurements", "Prototypes", "Motion Pictures", "Particle Measurements", "User Experience", "Information Systems", "VR Movie", "Social Presence", "Audience Engagement", "Asynchronous Interaction", "Voice Comments" ], "authors": [ { "affiliation": "School of New Media Art and Design, State Key Laboratory of Virtual Reality Technology and Systems, Beihang University", "fullName": "Shuo Yan", "givenName": "Shuo", "surname": "Yan", "__typename": "ArticleAuthorType" }, { "affiliation": "School of New Media Art and Design, Beihang University", "fullName": "Wenli Jiang", "givenName": "Wenli", "surname": "Jiang", "__typename": "ArticleAuthorType" }, { "affiliation": "School of New Media Art and Design, Beihang University", "fullName": "Menghan Xiong", "givenName": "Menghan", "surname": "Xiong", "__typename": "ArticleAuthorType" }, { "affiliation": "School of New Media Art and Design, State Key Laboratory of Virtual Reality Technology and Systems, Beihang University", "fullName": "Xukun Shen", "givenName": "Xukun", "surname": "Shen", "__typename": "ArticleAuthorType" } ], "idPrefix": "ismar-adjunct", "isOpenAccess": false, "showRecommendedArticles": 
true, "showBuyMe": true, "hasPdf": true, "pubDate": "2020-11-01T00:00:00", "pubType": "proceedings", "pages": "147-152", "year": "2020", "issn": null, "isbn": "978-1-7281-7675-8", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "767500a141", "articleId": "1pBMicBhWGA", "__typename": "AdjacentArticleType" }, "next": { "fno": "767500a153", "articleId": "1pBMgRIZa6I", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/avss/2017/2939/0/08078459", "title": "Movies tags extraction using deep learning", "doi": null, "abstractUrl": "/proceedings-article/avss/2017/08078459/12OmNAlNiOX", "parentPublication": { "id": "proceedings/avss/2017/2939/0", "title": "2017 14th IEEE International Conference on Advanced Video and Signal Based Surveillance (AVSS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2014/4985/0/06835987", "title": "Predicting movie ratings from audience behaviors", "doi": null, "abstractUrl": "/proceedings-article/wacv/2014/06835987/12OmNBkfRjp", "parentPublication": { "id": "proceedings/wacv/2014/4985/0", "title": "2014 IEEE Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2015/8391/0/8391a019", "title": "Aligning Books and Movies: Towards Story-Like Visual Explanations by Watching Movies and Reading Books", "doi": null, "abstractUrl": "/proceedings-article/iccv/2015/8391a019/12OmNro0HYa", "parentPublication": { "id": "proceedings/iccv/2015/8391/0", "title": "2015 IEEE International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmew/2014/4717/0/06890664", "title": "Influence of social media on performance of movies", "doi": null, "abstractUrl": 
"/proceedings-article/icmew/2014/06890664/12OmNro0Ife", "parentPublication": { "id": "proceedings/icmew/2014/4717/0", "title": "2014 IEEE International Conference on Multimedia and Expo Workshops (ICMEW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/asonam/2013/2240/0/06785857", "title": "Prediction of movies box office performance using social media", "doi": null, "abstractUrl": "/proceedings-article/asonam/2013/06785857/12OmNs59JG4", "parentPublication": { "id": "proceedings/asonam/2013/2240/0", "title": "2013 International Conference on Advances in Social Networks Analysis and Mining (ASONAM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/smartcity/2015/1893/0/1893a273", "title": "Using Crowd-Source Based Features from Social Media and Conventional Features to Predict the Movies Popularity", "doi": null, "abstractUrl": "/proceedings-article/smartcity/2015/1893a273/12OmNyKrH6S", "parentPublication": { "id": "proceedings/smartcity/2015/1893/0", "title": "2015 IEEE International Conference on Smart City/SocialCom/SustainCom (SmartCity)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/acii/2013/5048/0/5048a073", "title": "Affective Benchmarking of Movies Based on the Physiological Responses of a Real Audience", "doi": null, "abstractUrl": "/proceedings-article/acii/2013/5048a073/12OmNzFdt7L", "parentPublication": { "id": "proceedings/acii/2013/5048/0", "title": "2013 Humaine Association Conference on Affective Computing and Intelligent Interaction (ACII)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2018/3365/0/08446046", "title": "The Effect of Immersion on Emotional Responses to Film Viewing in a Virtual Environment", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08446046/13bd1gCd7Th", "parentPublication": { "id": 
"proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08797726", "title": "Evaluation on a Wheelchair Simulator Using Limited-Motion Patterns and Vection-Inducing Movies", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08797726/1cJ0VwwRIA0", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2019/4803/0/480300e591", "title": "A Graph-Based Framework to Bridge Movies and Synopses", "doi": null, "abstractUrl": "/proceedings-article/iccv/2019/480300e591/1hVlJrW8LXW", "parentPublication": { "id": "proceedings/iccv/2019/4803/0", "title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1pystLSz19C", "title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "acronym": "ismar", "groupId": "1000465", "volume": "0", "displayVolume": "0", "year": "2020", "__typename": "ProceedingType" }, "article": { "id": "1pysuR65ESQ", "doi": "10.1109/ISMAR50242.2020.00072", "title": "A Neurophysiological Approach for Measuring Presence in Immersive Virtual Environments", "normalizedTitle": "A Neurophysiological Approach for Measuring Presence in Immersive Virtual Environments", "abstract": "Presence, the feeling of being there, is an important factor that affects the overall experience of Virtual Reality (VR). Higher presence commonly provides a better experience in VR than lower presence. However, presence is commonly measured subjectively through postexperience questionnaires, which can suffer from participant biases, dishonest answers, and fatigue. It can also be difficult for subjects to accurately remember their feelings of presence after they have left the VR experience. In this paper, we measured the effects of different levels of presence (high and low) in VR using physiological and neurological signals. The experiment involved 24 participants in a between-subjects design. Results indicated a significant effect of presence on both physiological and neurological signals. We noticed that higher presence results in higher heart rate, less visual stress, higher theta and beta activities in the frontal region, and higher alpha activities in the parietal region. These findings and insights could lead to an alternative objective measure of presence.", "abstracts": [ { "abstractType": "Regular", "content": "Presence, the feeling of being there, is an important factor that affects the overall experience of Virtual Reality (VR). Higher presence commonly provides a better experience in VR than lower presence. 
However, presence is commonly measured subjectively through postexperience questionnaires, which can suffer from participant biases, dishonest answers, and fatigue. It can also be difficult for subjects to accurately remember their feelings of presence after they have left the VR experience. In this paper, we measured the effects of different levels of presence (high and low) in VR using physiological and neurological signals. The experiment involved 24 participants in a between-subjects design. Results indicated a significant effect of presence on both physiological and neurological signals. We noticed that higher presence results in higher heart rate, less visual stress, higher theta and beta activities in the frontal region, and higher alpha activities in the parietal region. These findings and insights could lead to an alternative objective measure of presence.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Presence, the feeling of being there, is an important factor that affects the overall experience of Virtual Reality (VR). Higher presence commonly provides a better experience in VR than lower presence. However, presence is commonly measured subjectively through postexperience questionnaires, which can suffer from participant biases, dishonest answers, and fatigue. It can also be difficult for subjects to accurately remember their feelings of presence after they have left the VR experience. In this paper, we measured the effects of different levels of presence (high and low) in VR using physiological and neurological signals. The experiment involved 24 participants in a between-subjects design. Results indicated a significant effect of presence on both physiological and neurological signals. We noticed that higher presence results in higher heart rate, less visual stress, higher theta and beta activities in the frontal region, and higher alpha activities in the parietal region. 
These findings and insights could lead to an alternative objective measure of presence.", "fno": "850800a474", "keywords": [ "Human Factors", "Neurophysiology", "Signal Processing", "User Interfaces", "Virtual Reality", "Immersive Virtual Environments", "Virtual Reality", "VR Experience", "Physiological Signals", "Neurological Signals", "Heart Rate", "Beta Activities", "Alpha Activities", "Neurophysiological Approach", "Presence Measurement", "Theta Activities", "Heart Rate", "Visualization", "Atmospheric Measurements", "Virtual Environments", "Particle Measurements", "Physiology", "Stress", "Human-Centered Computing—Human Computer Interaction (HCI)—Empirical Studies In HCI", "Human-Centered Computing—Visualization—Visualization Design And Evaluation Methods" ], "authors": [ { "affiliation": "University of Queensland,Australia", "fullName": "Arindam Dey", "givenName": "Arindam", "surname": "Dey", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Queensland,Australia", "fullName": "Jane Phoon", "givenName": "Jane", "surname": "Phoon", "__typename": "ArticleAuthorType" }, { "affiliation": "CSIO India", "fullName": "Shuvodeep Saha", "givenName": "Shuvodeep", "surname": "Saha", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Queensland,Australia", "fullName": "Chelsea Dobbins", "givenName": "Chelsea", "surname": "Dobbins", "__typename": "ArticleAuthorType" }, { "affiliation": "University of South Australia,Australia", "fullName": "Mark Billinghurst", "givenName": "Mark", "surname": "Billinghurst", "__typename": "ArticleAuthorType" } ], "idPrefix": "ismar", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2020-11-01T00:00:00", "pubType": "proceedings", "pages": "474-485", "year": "2020", "issn": "1554-7868", "isbn": "978-1-7281-8508-8", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { 
"previous": { "fno": "850800a462", "articleId": "1pysu9tPcGc", "__typename": "AdjacentArticleType" }, "next": { "fno": "850800a486", "articleId": "1pysyhDXiw0", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/vr/2017/6647/0/07892315", "title": "The effect of geometric realism on presence in a virtual reality game", "doi": null, "abstractUrl": "/proceedings-article/vr/2017/07892315/12OmNBTawwY", "parentPublication": { "id": "proceedings/vr/2017/6647/0", "title": "2017 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/acii/2013/5048/0/5048a429", "title": "Neuroticism, Extraversion and Stress: Physiological Correlates", "doi": null, "abstractUrl": "/proceedings-article/acii/2013/5048a429/12OmNC4wtJx", "parentPublication": { "id": "proceedings/acii/2013/5048/0", "title": "2013 Humaine Association Conference on Affective Computing and Intelligent Interaction (ACII)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2010/9343/0/05643560", "title": "Experiences with an AR evaluation test bed: Presence, performance, and physiological measurement", "doi": null, "abstractUrl": "/proceedings-article/ismar/2010/05643560/12OmNCmGNZi", "parentPublication": { "id": "proceedings/ismar/2010/9343/0", "title": "2010 IEEE International Symposium on Mixed and Augmented Reality", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2018/7459/0/745900a165", "title": "Effects of Sharing Real-Time Multi-Sensory Heart Rate Feedback in Different Immersive Collaborative Virtual Environments", "doi": null, "abstractUrl": "/proceedings-article/ismar/2018/745900a165/17D45VTRov4", "parentPublication": { "id": "proceedings/ismar/2018/7459/0", "title": "2018 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", 
"__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/09729540", "title": "Using Heart Rate Variability for Comparing the Effectiveness of Virtual vs Real Training Environments for Firefighters", "doi": null, "abstractUrl": "/journal/tg/5555/01/09729540/1Bya8YD1tUk", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aivr/2022/5725/0/572500a193", "title": "Comparing Meditation and Immersive Virtual Environment for Relaxation", "doi": null, "abstractUrl": "/proceedings-article/aivr/2022/572500a193/1KmFfgROQxO", "parentPublication": { "id": "proceedings/aivr/2022/5725/0", "title": "2022 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08797858", "title": "Immersive EEG: Evaluating Electroencephalography in Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08797858/1cJ0JWkSE3m", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2020/6532/0/09090463", "title": "Neurophysiological Effects of Presence in Calm Virtual Environments", "doi": null, "abstractUrl": "/proceedings-article/vrw/2020/09090463/1jIxs1q7J6g", "parentPublication": { "id": "proceedings/vrw/2020/6532/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/12/09206143", "title": "Spatial Presence, Performance, and Behavior between Real, Remote, and Virtual Immersive Environments", 
"doi": null, "abstractUrl": "/journal/tg/2020/12/09206143/1npxM6fDN7i", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ta/2023/01/09229513", "title": "Estimating Affective Taste Experience Using Combined Implicit Behavioral and Neurophysiological Measures", "doi": null, "abstractUrl": "/journal/ta/2023/01/09229513/1o3nfbzpzhe", "parentPublication": { "id": "trans/ta", "title": "IEEE Transactions on Affective Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1yfxDjRGMmc", "title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "acronym": "ismar-adjunct", "groupId": "1810084", "volume": "0", "displayVolume": "0", "year": "2021", "__typename": "ProceedingType" }, "article": { "id": "1yeQD8KNChO", "doi": "10.1109/ISMAR-Adjunct54149.2021.00099", "title": "Focus Group on Social Virtual Reality in Social Virtual Reality: Effects on Emotion and Self-Awareness", "normalizedTitle": "Focus Group on Social Virtual Reality in Social Virtual Reality: Effects on Emotion and Self-Awareness", "abstract": "Social Virtual Reality (VR) platforms enable multiple users to be present together in the same virtual environment (VE) and interact with each other in this space. These platforms are used in different application areas including teaching and learning, conferences, and meetings. To improve the engagement, safety, and overall positive experience in such platforms it is important to understand the effect they have on users&#x2019; emotional states and self-awareness while being in the VE. In this work, we present a focus group study where we discussed users&#x2019; opinions about social VR and we ran the focus group in a social VR platform created in Hubs by Mozilla. Our primary goal was to investigate users&#x2019; emotional states and self-awareness while using this platform. We measured these effects using positive and negative affect schedule (PANAS) and Self-Assessment Questionnaire (SAQ). The experiment involved 12 adult participants who were volunteers from around the world with previous experience of VR.", "abstracts": [ { "abstractType": "Regular", "content": "Social Virtual Reality (VR) platforms enable multiple users to be present together in the same virtual environment (VE) and interact with each other in this space. These platforms are used in different application areas including teaching and learning, conferences, and meetings. 
To improve the engagement, safety, and overall positive experience in such platforms it is important to understand the effect they have on users&#x2019; emotional states and self-awareness while being in the VE. In this work, we present a focus group study where we discussed users&#x2019; opinions about social VR and we ran the focus group in a social VR platform created in Hubs by Mozilla. Our primary goal was to investigate users&#x2019; emotional states and self-awareness while using this platform. We measured these effects using positive and negative affect schedule (PANAS) and Self-Assessment Questionnaire (SAQ). The experiment involved 12 adult participants who were volunteers from around the world with previous experience of VR.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Social Virtual Reality (VR) platforms enable multiple users to be present together in the same virtual environment (VE) and interact with each other in this space. These platforms are used in different application areas including teaching and learning, conferences, and meetings. To improve the engagement, safety, and overall positive experience in such platforms it is important to understand the effect they have on users’ emotional states and self-awareness while being in the VE. In this work, we present a focus group study where we discussed users’ opinions about social VR and we ran the focus group in a social VR platform created in Hubs by Mozilla. Our primary goal was to investigate users’ emotional states and self-awareness while using this platform. We measured these effects using positive and negative affect schedule (PANAS) and Self-Assessment Questionnaire (SAQ). 
The experiment involved 12 adult participants who were volunteers from around the world with previous experience of VR.", "fno": "129800a437", "keywords": [ "Computer Aided Instruction", "Teaching", "Virtual Reality", "Self Awareness", "Social Virtual Reality Platforms", "Multiple Users", "Virtual Environment", "Different Application Areas", "Focus Group Study", "Social VR Platform", "Headphones", "Schedules", "Atmospheric Measurements", "Education", "Virtual Environments", "Particle Measurements", "Safety", "Virtual Reality", "Focus Group" ], "authors": [ { "affiliation": "University of Queensland", "fullName": "Patricia Manyuru", "givenName": "Patricia", "surname": "Manyuru", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Queensland", "fullName": "Chelsea Dobbins", "givenName": "Chelsea", "surname": "Dobbins", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Queensland", "fullName": "Benjamin Matthews", "givenName": "Benjamin", "surname": "Matthews", "__typename": "ArticleAuthorType" }, { "affiliation": "Bond University", "fullName": "Oliver Baumann", "givenName": "Oliver", "surname": "Baumann", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Queensland", "fullName": "Arindam Dey", "givenName": "Arindam", "surname": "Dey", "__typename": "ArticleAuthorType" } ], "idPrefix": "ismar-adjunct", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2021-10-01T00:00:00", "pubType": "proceedings", "pages": "437-438", "year": "2021", "issn": null, "isbn": "978-1-6654-1298-8", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "129800a435", "articleId": "1yeQLyb4LpC", "__typename": "AdjacentArticleType" }, "next": { "fno": "129800a439", "articleId": "1yeQPu8aFlm", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": 
"proceedings/vr/2016/0836/0/07504764", "title": "Visual feedback to improve the accessibility of head-mounted displays for persons with balance impairments", "doi": null, "abstractUrl": "/proceedings-article/vr/2016/07504764/12OmNy6qfPt", "parentPublication": { "id": "proceedings/vr/2016/0836/0", "title": "2016 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2017/6647/0/07892288", "title": "The impact of transitions on user experience in virtual reality", "doi": null, "abstractUrl": "/proceedings-article/vr/2017/07892288/12OmNzUPptg", "parentPublication": { "id": "proceedings/vr/2017/6647/0", "title": "2017 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2016/0836/0/07504761", "title": "Avatar realism and social interaction quality in virtual reality", "doi": null, "abstractUrl": "/proceedings-article/vr/2016/07504761/12OmNzdoMvk", "parentPublication": { "id": "proceedings/vr/2016/0836/0", "title": "2016 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wevr/2017/3881/0/07957712", "title": "Effects of tracking scale on user performance in virtual reality games", "doi": null, "abstractUrl": "/proceedings-article/wevr/2017/07957712/12OmNzh5z54", "parentPublication": { "id": "proceedings/wevr/2017/3881/0", "title": "2017 IEEE 3rd Workshop on Everyday Virtual Reality (WEVR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/11/09874769", "title": "Objects May Be Farther Than They Appear: Depth Compression Diminishes Over Time with Repeated Calibration in Virtual Reality", "doi": null, "abstractUrl": "/journal/tg/2022/11/09874769/1GjwLV9sW7m", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2022/06/09984060", "title": "Teaching Social Virtual Reality With Ubiq", "doi": null, "abstractUrl": "/magazine/cg/2022/06/09984060/1J4y69NDkek", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2022/5325/0/532500a631", "title": "Gait Differences in the Real World and Virtual Reality: The Effect of Prior Virtual Reality Experience", "doi": null, "abstractUrl": "/proceedings-article/ismar/2022/532500a631/1JrRaogbK6I", "parentPublication": { "id": "proceedings/ismar/2022/5325/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08797858", "title": "Immersive EEG: Evaluating Electroencephalography in Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08797858/1cJ0JWkSE3m", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vs-games/2019/4540/0/08864578", "title": "Mono-Stereoscopic Camera in a Virtual Reality Environment: Case Study in Cybersickness", "doi": null, "abstractUrl": "/proceedings-article/vs-games/2019/08864578/1e5Zs94AhSE", "parentPublication": { "id": "proceedings/vs-games/2019/4540/0", "title": "2019 11th International Conference on Virtual Worlds and Games for Serious Applications (VS-Games)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2021/1838/0/255600a021", "title": "Promoting Reality Awareness in Virtual Reality through Proxemics", "doi": null, "abstractUrl": 
"/proceedings-article/vr/2021/255600a021/1tuAsajTP8c", "parentPublication": { "id": "proceedings/vr/2021/1838/0", "title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNrMHOd6", "title": "2016 49th Hawaii International Conference on System Sciences (HICSS)", "acronym": "hicss", "groupId": "1000730", "volume": "0", "displayVolume": "0", "year": "2016", "__typename": "ProceedingType" }, "article": { "id": "12OmNAkEU6t", "doi": "10.1109/HICSS.2016.55", "title": "Selecting Physiological Features for Predicting Bidding Behavior in Electronic Auctions", "normalizedTitle": "Selecting Physiological Features for Predicting Bidding Behavior in Electronic Auctions", "abstract": "Affective processes play an important role in determining human behavior in auctions. While previous research has shown that physiological measurements provide insights into these processes, it remains unclear which of the many features that can be computed from physiological data are particularly useful in predicting human behavior. Identifying these features is important for gaining a better understanding of affective processes in electronic auctions and for building biofeedback systems. In this study, we propose a new approach to identify physiological features for predicting auction behavior. We apply an Evolutionary Algorithm in combination with either the Multiple Linear Regression or Artificial Neural Network models to select physiological features and assess their predictive power. To test the approach, we use a unique dataset of participants' auction decisions and their synchronously recorded electrocardiography data. Our results show that the approach is able to identify subsets of physiological features that consistently outperform other physiological features.", "abstracts": [ { "abstractType": "Regular", "content": "Affective processes play an important role in determining human behavior in auctions. 
While previous research has shown that physiological measurements provide insights into these processes, it remains unclear which of the many features that can be computed from physiological data are particularly useful in predicting human behavior. Identifying these features is important for gaining a better understanding of affective processes in electronic auctions and for building biofeedback systems. In this study, we propose a new approach to identify physiological features for predicting auction behavior. We apply an Evolutionary Algorithm in combination with either the Multiple Linear Regression or Artificial Neural Network models to select physiological features and assess their predictive power. To test the approach, we use a unique dataset of participants' auction decisions and their synchronously recorded electrocardiography data. Our results show that the approach is able to identify subsets of physiological features that consistently outperform other physiological features.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Affective processes play an important role in determining human behavior in auctions. While previous research has shown that physiological measurements provide insights into these processes, it remains unclear which of the many features that can be computed from physiological data are particularly useful in predicting human behavior. Identifying these features is important for gaining a better understanding of affective processes in electronic auctions and for building biofeedback systems. In this study, we propose a new approach to identify physiological features for predicting auction behavior. We apply an Evolutionary Algorithm in combination with either the Multiple Linear Regression or Artificial Neural Network models to select physiological features and assess their predictive power. 
To test the approach, we use a unique dataset of participants' auction decisions and their synchronously recorded electrocardiography data. Our results show that the approach is able to identify subsets of physiological features that consistently outperform other physiological features.", "fno": "5670a396", "keywords": [ "Physiology", "Predictive Models", "Atmospheric Measurements", "Particle Measurements", "Electrocardiography", "Heart Rate", "Decision Support", "Electronic Auction", "Neuro IS", "Predicting", "Decision Making", "Bidding Behavior", "Physiology" ], "authors": [ { "affiliation": null, "fullName": "Marius B. Müller", "givenName": "Marius B.", "surname": "Müller", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Marc T. P. Adam", "givenName": "Marc T. P.", "surname": "Adam", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "David J. Cornforth", "givenName": "David J.", "surname": "Cornforth", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Raymond Chiong", "givenName": "Raymond", "surname": "Chiong", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Jan Krämer", "givenName": "Jan", "surname": "Krämer", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Christof Weinhardt", "givenName": "Christof", "surname": "Weinhardt", "__typename": "ArticleAuthorType" } ], "idPrefix": "hicss", "isOpenAccess": true, "showRecommendedArticles": true, "showBuyMe": false, "hasPdf": true, "pubDate": "2016-01-01T00:00:00", "pubType": "proceedings", "pages": "396-405", "year": "2016", "issn": "1530-1605", "isbn": "978-0-7695-5670-3", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "5670a386", "articleId": "12OmNx7ouSb", "__typename": "AdjacentArticleType" }, "next": { "fno": "5670a406", "articleId": "12OmNB9bvjd", "__typename": "AdjacentArticleType" }, "__typename": 
"AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/compsacw/2014/3578/0/3578a258", "title": "Physiological Mouse: Towards an Emotion-Aware Mouse", "doi": null, "abstractUrl": "/proceedings-article/compsacw/2014/3578a258/12OmNAoDicJ", "parentPublication": { "id": "proceedings/compsacw/2014/3578/0", "title": "2014 IEEE 38th International Computer Software and Applications Conference Workshops (COMPSACW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmas/2000/0625/0/06250399", "title": "BiddingBot: A Multiagent Support System for Cooperative Bidding in Multiple Auctions", "doi": null, "abstractUrl": "/proceedings-article/icmas/2000/06250399/12OmNBcAGM5", "parentPublication": { "id": "proceedings/icmas/2000/0625/0", "title": "Multi-Agent Systems, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/acii/2013/5048/0/5048a429", "title": "Neuroticism, Extraversion and Stress: Physiological Correlates", "doi": null, "abstractUrl": "/proceedings-article/acii/2013/5048a429/12OmNC4wtJx", "parentPublication": { "id": "proceedings/acii/2013/5048/0", "title": "2013 Humaine Association Conference on Affective Computing and Intelligent Interaction (ACII)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/itng/2009/3596/0/3596a928", "title": "Analysing Bidding Trends in Online Auctions", "doi": null, "abstractUrl": "/proceedings-article/itng/2009/3596a928/12OmNCwUmwb", "parentPublication": { "id": "proceedings/itng/2009/3596/0", "title": "Information Technology: New Generations, Third International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ta/2014/02/06825835", "title": "Emotion Recognition Based on Multi-Variant Correlation of Physiological Signals", "doi": null, "abstractUrl": 
"/journal/ta/2014/02/06825835/13rRUIJuxnO", "parentPublication": { "id": "trans/ta", "title": "IEEE Transactions on Affective Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ta/2020/04/08326711", "title": "Physiological Detection of Affective States in Children with Autism Spectrum Disorder", "doi": null, "abstractUrl": "/journal/ta/2020/04/08326711/13rRUyYBlfe", "parentPublication": { "id": "trans/ta", "title": "IEEE Transactions on Affective Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/percomw/2018/3227/0/08480086", "title": "The Impact of Physical Activities on the Physiological Response to Emotions", "doi": null, "abstractUrl": "/proceedings-article/percomw/2018/08480086/17D45WLdYQe", "parentPublication": { "id": "proceedings/percomw/2018/3227/0", "title": "2018 IEEE International Conference on Pervasive Computing and Communications Workshops (PerCom Workshops)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/11/09873958", "title": "Characterizing Physiological Responses to Fear, Frustration, and Insight in Virtual Reality", "doi": null, "abstractUrl": "/journal/tg/2022/11/09873958/1GjwGGW9cSA", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aciiw/2019/3891/0/08925063", "title": "Physiological Signal- Driven Camera Using EOG, EEG, and ECG", "doi": null, "abstractUrl": "/proceedings-article/aciiw/2019/08925063/1fHFees3Aas", "parentPublication": { "id": "proceedings/aciiw/2019/3891/0", "title": "2019 8th International Conference on Affective Computing and Intelligent Interaction Workshops and Demos (ACIIW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ta/2023/01/09229513", 
"title": "Estimating Affective Taste Experience Using Combined Implicit Behavioral and Neurophysiological Measures", "doi": null, "abstractUrl": "/journal/ta/2023/01/09229513/1o3nfbzpzhe", "parentPublication": { "id": "trans/ta", "title": "IEEE Transactions on Affective Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNCcKQAf", "title": "2017 9th International Conference on Virtual Worlds and Games for Serious Applications (VS-Games)", "acronym": "vs-games", "groupId": "1002788", "volume": "0", "displayVolume": "0", "year": "2017", "__typename": "ProceedingType" }, "article": { "id": "12OmNqJHFDH", "doi": "10.1109/VS-GAMES.2017.8056587", "title": "The effect of cognitive load on physiological arousal in a decision-making serious game", "normalizedTitle": "The effect of cognitive load on physiological arousal in a decision-making serious game", "abstract": "The aim of this paper is to investigate how a substantial cognitive load overshadows the physiological arousal effect, in an attempt to study cognitive abilities of participants engaged on decision-making tasks in serious games. Participants were engaged in a dynamic serious game environment displaying online biofeedback based on the physiological measurements of arousal. The pupil diameter was analyzed in relation to the heart rate during a challenging decision-making task. It was found that the moment when a substantial cognitive load overshadows the physiological arousal effect is observable on the pupil diameter in relation to the heart rate.", "abstracts": [ { "abstractType": "Regular", "content": "The aim of this paper is to investigate how a substantial cognitive load overshadows the physiological arousal effect, in an attempt to study cognitive abilities of participants engaged on decision-making tasks in serious games. Participants were engaged in a dynamic serious game environment displaying online biofeedback based on the physiological measurements of arousal. The pupil diameter was analyzed in relation to the heart rate during a challenging decision-making task. 
It was found that the moment when a substantial cognitive load overshadows the physiological arousal effect is observable on the pupil diameter in relation to the heart rate.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "The aim of this paper is to investigate how a substantial cognitive load overshadows the physiological arousal effect, in an attempt to study cognitive abilities of participants engaged on decision-making tasks in serious games. Participants were engaged in a dynamic serious game environment displaying online biofeedback based on the physiological measurements of arousal. The pupil diameter was analyzed in relation to the heart rate during a challenging decision-making task. It was found that the moment when a substantial cognitive load overshadows the physiological arousal effect is observable on the pupil diameter in relation to the heart rate.", "fno": "08056587", "keywords": [ "Physiology", "Games", "Biological Control Systems", "Atmospheric Measurements", "Particle Measurements", "Serious Games", "Physiology", "Electrocardiogram", "Pupil Diameter", "Arousal", "Cognitive Load" ], "authors": [ { "affiliation": "Department of Creative Technologies, Blekinge Institute of Technology, Karlskrona, Sweden", "fullName": "Petar Jerčič", "givenName": "Petar", "surname": "Jerčič", "__typename": "ArticleAuthorType" }, { "affiliation": "CSIRO Mineral Resources, Technology Court, Pullenvale, Australia", "fullName": "Charlotte Sennersten", "givenName": "Charlotte", "surname": "Sennersten", "__typename": "ArticleAuthorType" }, { "affiliation": "Intelligent Sensing and Systems Laboratory, CSIRO ICT Centre, Hobart, Australia", "fullName": "Craig Lindley", "givenName": "Craig", "surname": "Lindley", "__typename": "ArticleAuthorType" } ], "idPrefix": "vs-games", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2017-09-01T00:00:00", "pubType": "proceedings", "pages": "153-156", "year": 
"2017", "issn": "2474-0489", "isbn": "978-1-5090-5812-9", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "08056586", "articleId": "12OmNxzuMIl", "__typename": "AdjacentArticleType" }, "next": { "fno": "08056588", "articleId": "12OmNz61d7s", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/hicss/2016/5670/0/5670a396", "title": "Selecting Physiological Features for Predicting Bidding Behavior in Electronic Auctions", "doi": null, "abstractUrl": "/proceedings-article/hicss/2016/5670a396/12OmNAkEU6t", "parentPublication": { "id": "proceedings/hicss/2016/5670/0", "title": "2016 49th Hawaii International Conference on System Sciences (HICSS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/acii/2013/5048/0/5048a289", "title": "Heart Rate Variability and Skin Conductance Biofeedback: A Triple-Blind Randomized Controlled Study", "doi": null, "abstractUrl": "/proceedings-article/acii/2013/5048a289/12OmNAtK4n3", "parentPublication": { "id": "proceedings/acii/2013/5048/0", "title": "2013 Humaine Association Conference on Affective Computing and Intelligent Interaction (ACII)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/acii/2017/0563/0/08273576", "title": "Effects of valence and arousal on working memory performance in virtual reality gaming", "doi": null, "abstractUrl": "/proceedings-article/acii/2017/08273576/12OmNvonIJv", "parentPublication": { "id": "proceedings/acii/2017/0563/0", "title": "2017 Seventh International Conference on Affective Computing and Intelligent Interaction (ACII)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdmw/2017/3800/0/3800a486", "title": "Deep Physiological Arousal Detection in a Driving Simulator Using Wearable Sensors", "doi": 
null, "abstractUrl": "/proceedings-article/icdmw/2017/3800a486/12OmNz61diU", "parentPublication": { "id": "proceedings/icdmw/2017/3800/0", "title": "2017 IEEE International Conference on Data Mining Workshops (ICDMW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/percom-workshops/2022/1647/0/09767281", "title": "An Exploratory Analysis of Interactive VR-Based Framework for Multi-Componential Analysis of Emotion", "doi": null, "abstractUrl": "/proceedings-article/percom-workshops/2022/09767281/1Df82pGW23e", "parentPublication": { "id": "proceedings/percom-workshops/2022/1647/0", "title": "2022 IEEE International Conference on Pervasive Computing and Communications Workshops and other Affiliated Events (PerCom Workshops)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/11/09873958", "title": "Characterizing Physiological Responses to Fear, Frustration, and Insight in Virtual Reality", "doi": null, "abstractUrl": "/journal/tg/2022/11/09873958/1GjwGGW9cSA", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ta/5555/01/10093978", "title": "Emotion Arousal Assessment Based on Multimodal Physiological Signals for Game Users", "doi": null, "abstractUrl": "/journal/ta/5555/01/10093978/1M80DcMUM7e", "parentPublication": { "id": "trans/ta", "title": "IEEE Transactions on Affective Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/acii/2019/3888/0/08925531", "title": "Engineering Music to Slow Breathing and Invite Relaxed Physiology", "doi": null, "abstractUrl": "/proceedings-article/acii/2019/08925531/1fHGEL5fRde", "parentPublication": { "id": "proceedings/acii/2019/3888/0", "title": "2019 8th International Conference on Affective Computing and Intelligent 
Interaction (ACII)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ta/2023/01/09229513", "title": "Estimating Affective Taste Experience Using Combined Implicit Behavioral and Neurophysiological Measures", "doi": null, "abstractUrl": "/journal/ta/2023/01/09229513/1o3nfbzpzhe", "parentPublication": { "id": "trans/ta", "title": "IEEE Transactions on Affective Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/02/09290431", "title": "Scalability of Network Visualisation from a Cognitive Load Perspective", "doi": null, "abstractUrl": "/journal/tg/2021/02/09290431/1prKPEmGFPO", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNzBOhX1", "title": "2013 Humaine Association Conference on Affective Computing and Intelligent Interaction (ACII)", "acronym": "acii", "groupId": "1002992", "volume": "0", "displayVolume": "0", "year": "2013", "__typename": "ProceedingType" }, "article": { "id": "12OmNya72tj", "doi": "10.1109/ACII.2013.75", "title": "Multimodal Expressions of Stress during a Public Speaking Task: Collection, Annotation and Global Analyses", "normalizedTitle": "Multimodal Expressions of Stress during a Public Speaking Task: Collection, Annotation and Global Analyses", "abstract": "Databases of spontaneous multimodal expressions of affective states occurring during a task are few. This paper presents a protocol for eliciting stress in a public speaking task. Behaviors of 19 participants were recorded via a multimodal setup including speech, video of the facial expressions and body movements, balance via a force plate, and physiological measures. Questionnaires were used to assert emotional states, personality profiles and relevant coping behaviors to study how participants cope with stressful situations. Several subjective and objective performances were also evaluated. Results show a significant impact of the overall task and conditions on the participants' emotional activation. The possible future use of this new multimodal emotional corpus is described.", "abstracts": [ { "abstractType": "Regular", "content": "Databases of spontaneous multimodal expressions of affective states occurring during a task are few. This paper presents a protocol for eliciting stress in a public speaking task. Behaviors of 19 participants were recorded via a multimodal setup including speech, video of the facial expressions and body movements, balance via a force plate, and physiological measures. Questionnaires were used to assert emotional states, personality profiles and relevant coping behaviors to study how participants cope with stressful situations. 
Several subjective and objective performances were also evaluated. Results show a significant impact of the overall task and conditions on the participants' emotional activation. The possible future use of this new multimodal emotional corpus is described.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Databases of spontaneous multimodal expressions of affective states occurring during a task are few. This paper presents a protocol for eliciting stress in a public speaking task. Behaviors of 19 participants were recorded via a multimodal setup including speech, video of the facial expressions and body movements, balance via a force plate, and physiological measures. Questionnaires were used to assert emotional states, personality profiles and relevant coping behaviors to study how participants cope with stressful situations. Several subjective and objective performances were also evaluated. Results show a significant impact of the overall task and conditions on the participants' emotional activation. The possible future use of this new multimodal emotional corpus is described.", "fno": "5048a417", "keywords": [ "Stress", "Atmospheric Measurements", "Particle Measurements", "Databases", "Physiology", "Psychology", "Public Speaking", "Individual Differences", "Databases", "Stress", "Emotion", "Multimodality" ], "authors": [ { "affiliation": "LIMSI, Orsay, France", "fullName": "Tom Giraud", "givenName": "Tom", "surname": "Giraud", "__typename": "ArticleAuthorType" }, { "affiliation": "LIMSI, Orsay, France", "fullName": "Mariette Soury", "givenName": "Mariette", "surname": "Soury", "__typename": "ArticleAuthorType" }, { "affiliation": "CIAMS, Univ. 
Paris-Sud, Orsay, France", "fullName": "Jiewen Hua", "givenName": null, "surname": "Jiewen Hua", "__typename": "ArticleAuthorType" }, { "affiliation": "LIMSI, Orsay, France", "fullName": "Agnes Delaborde", "givenName": "Agnes", "surname": "Delaborde", "__typename": "ArticleAuthorType" }, { "affiliation": "LIMSI, Orsay, France", "fullName": "Marie Tahon", "givenName": "Marie", "surname": "Tahon", "__typename": "ArticleAuthorType" }, { "affiliation": "LIMSI, Orsay, France", "fullName": "David Antonio Gomez Jauregui", "givenName": "David Antonio", "surname": "Gomez Jauregui", "__typename": "ArticleAuthorType" }, { "affiliation": "LIMSI, Orsay, France", "fullName": "Victoria Eyharabide", "givenName": "Victoria", "surname": "Eyharabide", "__typename": "ArticleAuthorType" }, { "affiliation": "CIAMS, Univ. Paris-Sud, Orsay, France", "fullName": "Edith Filaire", "givenName": "Edith", "surname": "Filaire", "__typename": "ArticleAuthorType" }, { "affiliation": "CIAMS, Univ. Paris-Sud, Orsay, France", "fullName": "Christine Le Scanff", "givenName": "Christine", "surname": "Le Scanff", "__typename": "ArticleAuthorType" }, { "affiliation": "LIMSI, Univ. Paris-Sorbonne, Orsay, France", "fullName": "Laurence Devillers", "givenName": "Laurence", "surname": "Devillers", "__typename": "ArticleAuthorType" }, { "affiliation": "CIAMS, Univ. 
Paris-Sud, Orsay, France", "fullName": "Brice Isableu", "givenName": "Brice", "surname": "Isableu", "__typename": "ArticleAuthorType" }, { "affiliation": "LIMSI, Orsay, France", "fullName": "Jean Claude Martin", "givenName": "Jean Claude", "surname": "Martin", "__typename": "ArticleAuthorType" } ], "idPrefix": "acii", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2013-09-01T00:00:00", "pubType": "proceedings", "pages": "417-422", "year": "2013", "issn": "2156-8103", "isbn": "978-0-7695-5048-0", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "5048a411", "articleId": "12OmNyFU79Q", "__typename": "AdjacentArticleType" }, "next": { "fno": "5048a423", "articleId": "12OmNzCF4Xq", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/acii/2013/5048/0/5048a405", "title": "Multimodal Emotion Expressions of Virtual Agents, Mimic and Vocal Emotion Expressions and Their Effects on Emotion Recognition", "doi": null, "abstractUrl": "/proceedings-article/acii/2013/5048a405/12OmNBSSVck", "parentPublication": { "id": "proceedings/acii/2013/5048/0", "title": "2013 Humaine Association Conference on Affective Computing and Intelligent Interaction (ACII)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/acii/2015/9953/0/07344601", "title": "Utilizing multimodal cues to automatically evaluate public speaking performance", "doi": null, "abstractUrl": "/proceedings-article/acii/2015/07344601/12OmNwCsdFr", "parentPublication": { "id": "proceedings/acii/2015/9953/0", "title": "2015 International Conference on Affective Computing and Intelligent Interaction (ACII)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/acii/2013/5048/0/5048a529", "title": "Stress Detection from Audio on 
Multiple Window Analysis Size in a Public Speaking Task", "doi": null, "abstractUrl": "/proceedings-article/acii/2013/5048a529/12OmNy2Jt0n", "parentPublication": { "id": "proceedings/acii/2013/5048/0", "title": "2013 Humaine Association Conference on Affective Computing and Intelligent Interaction (ACII)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ta/2020/02/08122047", "title": "Computational Study of Primitive Emotional Contagion in Dyadic Interactions", "doi": null, "abstractUrl": "/journal/ta/2020/02/08122047/13rRUwgQpBw", "parentPublication": { "id": "trans/ta", "title": "IEEE Transactions on Affective Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/dasc-picom-datacom-cyberscitech/2018/7518/0/751800a046", "title": "Correlation Analyses Between Personality Traits and Personal Behaviors Under Specific Emotion States Using Physiological Data from Wearable Devices", "doi": null, "abstractUrl": "/proceedings-article/dasc-picom-datacom-cyberscitech/2018/751800a046/17D45Xq6dzr", "parentPublication": { "id": "proceedings/dasc-picom-datacom-cyberscitech/2018/7518/0", "title": "2018 IEEE 16th Intl Conf on Dependable, Autonomic and Secure Computing, 16th Intl Conf on Pervasive Intelligence and Computing, 4th Intl Conf on Big Data Intelligence and Computing and Cyber Science and Technology Congress(DASC/PiCom/DataCom/CyberSciTech)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2022/8739/0/873900c459", "title": "Video-based multimodal spontaneous emotion recognition using facial expressions and physiological signals", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2022/873900c459/1G56VA4lzxu", "parentPublication": { "id": "proceedings/cvprw/2022/8739/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, 
"__typename": "RecommendedArticleType" }, { "id": "proceedings/acii/2019/3888/0/08925485", "title": "Context matters: protocol ordering effects on physiological arousal and experienced stress during a simulated driving task", "doi": null, "abstractUrl": "/proceedings-article/acii/2019/08925485/1fHGCTzVUBy", "parentPublication": { "id": "proceedings/acii/2019/3888/0", "title": "2019 8th International Conference on Affective Computing and Intelligent Interaction (ACII)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2020/5608/0/09089655", "title": "The Effects of Virtual Audience Size on Social Anxiety during Public Speaking", "doi": null, "abstractUrl": "/proceedings-article/vr/2020/09089655/1jIxeyU0vGo", "parentPublication": { "id": "proceedings/vr/2020/5608/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ta/2022/03/09311251", "title": "Exploring Individual Differences of Public Speaking Anxiety in Real-Life and Virtual Presentations", "doi": null, "abstractUrl": "/journal/ta/2022/03/09311251/1pYWAX0Po6A", "parentPublication": { "id": "trans/ta", "title": "IEEE Transactions on Affective Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ta/2023/01/09346017", "title": "UBFC-Phys: A Multimodal Database For Psychophysiological Studies of Social Stress", "doi": null, "abstractUrl": "/journal/ta/2023/01/09346017/1qV2oCT26dy", "parentPublication": { "id": "trans/ta", "title": "IEEE Transactions on Affective Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "17D45VtKiq0", "title": "2018 IEEE International Conference on Pervasive Computing and Communications Workshops (PerCom Workshops)", "acronym": "percomw", "groupId": "1000552", "volume": "0", "displayVolume": "0", "year": "2018", "__typename": "ProceedingType" }, "article": { "id": "17D45WLdYQe", "doi": "10.1109/PERCOMW.2018.8480086", "title": "The Impact of Physical Activities on the Physiological Response to Emotions", "normalizedTitle": "The Impact of Physical Activities on the Physiological Response to Emotions", "abstract": "Despite the advantages of using physiological sensors to collect emotion data, emotion recognition systems using physiological signals such as Electrodermal Activity (EDA), Electrocardiogram (ECG) or Electromyography (EMG) are mainly tested in controlled environments or under laboratory conditions. The use of physiological data in real-world scenarios has not been widely investigated. One of the main issues of using physiological data from real-world scenarios is that the data may also be influenced by movement and in some cases, the physiological response to emotions can be even confused with the one due to physical activities, such as walking or running. In this paper, we investigate the impact of physical activities in the recognition of emotions and provide new insights on how emotion data from physiological sensors are affected by these activities. We use two scenarios (one with and one without the influence of physical movement) to investigate the effect of physical activities in the Blood Volume Pulse (BVP) and the Skin Temperature (TMP) signals. To overcome these issues we used a random forest algorithm to model both scenarios. 
Our results show that by combining emotion data from both scenarios, we can achieve a recognition accuracy of up to 96%.", "abstracts": [ { "abstractType": "Regular", "content": "Despite the advantages of using physiological sensors to collect emotion data, emotion recognition systems using physiological signals such as Electrodermal Activity (EDA), Electrocardiogram (ECG) or Electromyography (EMG) are mainly tested in controlled environments or under laboratory conditions. The use of physiological data in real-world scenarios has not been widely investigated. One of the main issues of using physiological data from real-world scenarios is that the data may also be influenced by movement and in some cases, the physiological response to emotions can be even confused with the one due to physical activities, such as walking or running. In this paper, we investigate the impact of physical activities in the recognition of emotions and provide new insights on how emotion data from physiological sensors are affected by these activities. We use two scenarios (one with and one without the influence of physical movement) to investigate the effect of physical activities in the Blood Volume Pulse (BVP) and the Skin Temperature (TMP) signals. To overcome these issues we used a random forest algorithm to model both scenarios. Our results show that by combining emotion data from both scenarios, we can achieve a recognition accuracy of up to 96%.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Despite the advantages of using physiological sensors to collect emotion data, emotion recognition systems using physiological signals such as Electrodermal Activity (EDA), Electrocardiogram (ECG) or Electromyography (EMG) are mainly tested in controlled environments or under laboratory conditions. The use of physiological data in real-world scenarios has not been widely investigated. 
One of the main issues of using physiological data from real-world scenarios is that the data may also be influenced by movement and in some cases, the physiological response to emotions can be even confused with the one due to physical activities, such as walking or running. In this paper, we investigate the impact of physical activities in the recognition of emotions and provide new insights on how emotion data from physiological sensors are affected by these activities. We use two scenarios (one with and one without the influence of physical movement) to investigate the effect of physical activities in the Blood Volume Pulse (BVP) and the Skin Temperature (TMP) signals. To overcome these issues we used a random forest algorithm to model both scenarios. Our results show that by combining emotion data from both scenarios, we can achieve a recognition accuracy of up to 96%.", "fno": "08480086", "keywords": [ "Physiology", "Emotion Recognition", "Sensors", "Electromyography", "Electrocardiography", "Cutoff Frequency", "Atmospheric Measurements" ], "authors": [ { "affiliation": "Chair for Communication Technology (ComTec) University of Kassel, Kassel, Germany", "fullName": "Judith S. 
Heinisch", "givenName": "Judith S.", "surname": "Heinisch", "__typename": "ArticleAuthorType" }, { "affiliation": "Chair for Communication Technology (ComTec) University of Kassel, Kassel, Germany", "fullName": "Isabel Hübener", "givenName": "Isabel", "surname": "Hübener", "__typename": "ArticleAuthorType" }, { "affiliation": "Chair for Communication Technology (ComTec) University of Kassel, Kassel, Germany", "fullName": "Klaus David", "givenName": "Klaus", "surname": "David", "__typename": "ArticleAuthorType" } ], "idPrefix": "percomw", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2018-03-01T00:00:00", "pubType": "proceedings", "pages": "824-829", "year": "2018", "issn": null, "isbn": "978-1-5386-3227-7", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "08480412", "articleId": "17D45WGGoMY", "__typename": "AdjacentArticleType" }, "next": { "fno": "08480090", "articleId": "17D45Vw15w0", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/isspit/2009/5949/0/05407547", "title": "Hilbert-Huang transform based physiological signals analysis for emotion recognition", "doi": null, "abstractUrl": "/proceedings-article/isspit/2009/05407547/12OmNxA3YZi", "parentPublication": { "id": "proceedings/isspit/2009/5949/0", "title": "2009 IEEE International Symposium on Signal Processing and Information Technology (ISSPIT 2009)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/smartcomp/2014/5711/0/07043860", "title": "Physiological-based emotion recognition with IRS model", "doi": null, "abstractUrl": "/proceedings-article/smartcomp/2014/07043860/12OmNxFsmnm", "parentPublication": { "id": "proceedings/smartcomp/2014/5711/0", "title": "2014 International Conference on Smart Computing (SMARTCOMP)", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/nbis/2012/4779/0/4779a592", "title": "Physiological Angry Emotion Detection Using Support Vector Regression", "doi": null, "abstractUrl": "/proceedings-article/nbis/2012/4779a592/12OmNzC5T7s", "parentPublication": { "id": "proceedings/nbis/2012/4779/0", "title": "2012 15th International Conference on Network-Based Information Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ta/2015/03/07010926", "title": "DECAF: MEG-Based Multimodal Database for Decoding Affective Physiological Responses", "doi": null, "abstractUrl": "/journal/ta/2015/03/07010926/13rRUwdrdR8", "parentPublication": { "id": "trans/ta", "title": "IEEE Transactions on Affective Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ta/2020/02/08249871", "title": "Deep Physiological Affect Network for the Recognition of Human Emotions", "doi": null, "abstractUrl": "/journal/ta/2020/02/08249871/13rRUwjoNvg", "parentPublication": { "id": "trans/ta", "title": "IEEE Transactions on Affective Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ta/2020/04/08326711", "title": "Physiological Detection of Affective States in Children with Autism Spectrum Disorder", "doi": null, "abstractUrl": "/journal/ta/2020/04/08326711/13rRUyYBlfe", "parentPublication": { "id": "trans/ta", "title": "IEEE Transactions on Affective Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/percom-workshops/2017/4338/0/07917586", "title": "From the lab to the real-world: An investigation on the influence of human movement on Emotion Recognition using physiological signals", "doi": null, "abstractUrl": "/proceedings-article/percom-workshops/2017/07917586/19wALomZQgo", "parentPublication": { "id": 
"proceedings/percom-workshops/2017/4338/0", "title": "2017 IEEE International Conference on Pervasive Computing and Communications: Workshops (PerCom Workshops)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ta/5555/01/10093978", "title": "Emotion Arousal Assessment Based on Multimodal Physiological Signals for Game Users", "doi": null, "abstractUrl": "/journal/ta/5555/01/10093978/1M80DcMUM7e", "parentPublication": { "id": "trans/ta", "title": "IEEE Transactions on Affective Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/percom-workshops/2019/9151/0/08730725", "title": "Angry or Climbing Stairs? Towards Physiological Emotion Recognition in the Wild", "doi": null, "abstractUrl": "/proceedings-article/percom-workshops/2019/08730725/1aDSE3CbIru", "parentPublication": { "id": "proceedings/percom-workshops/2019/9151/0", "title": "2019 IEEE International Conference on Pervasive Computing and Communications Workshops (PerCom Workshops)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icde/2020/2903/0/09153878", "title": "Implementation of physiological signal based emotion recognition algorithm", "doi": null, "abstractUrl": "/proceedings-article/icde/2020/09153878/1lZEbFXU1H2", "parentPublication": { "id": "proceedings/icde/2020/2903/0", "title": "2020 IEEE 36th International Conference on Data Engineering (ICDE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "17D45VtKiqL", "title": "2018 IEEE 16th Intl Conf on Dependable, Autonomic and Secure Computing, 16th Intl Conf on Pervasive Intelligence and Computing, 4th Intl Conf on Big Data Intelligence and Computing and Cyber Science and Technology Congress(DASC/PiCom/DataCom/CyberSciTech)", "acronym": "dasc-picom-datacom-cyberscitech", "groupId": "1001364", "volume": "0", "displayVolume": "0", "year": "2018", "__typename": "ProceedingType" }, "article": { "id": "17D45Xq6dzr", "doi": "10.1109/DASC/PiCom/DataCom/CyberSciTec.2018.00023", "title": "Correlation Analyses Between Personality Traits and Personal Behaviors Under Specific Emotion States Using Physiological Data from Wearable Devices", "normalizedTitle": "Correlation Analyses Between Personality Traits and Personal Behaviors Under Specific Emotion States Using Physiological Data from Wearable Devices", "abstract": "In addition to the computational ability, human-like characteristics such as behavior, emotion, and personality can also be augmented to many personalized computers, applications robots and other systems. In recent years, there are many studies about human characteristics by using rich personal data collected from information systems and ubiquitous devices such as wearables. Besides separated studies on each aspect of human behavior, emotion and personality by using the personal data, it is also necessary to further study various relationships among these human characteristics. Therefore, this research is to examine how personality traits are associated with personal behaviors under specific emotional states based on physiological data collected from three wearable devices, Emotive Insight, Spire Stone and Huawei Fit Watch. Experimental data was gathered from 50 participants subjected to; a Big Five Inventory (BFI) questionnaire to get their personality traits, presenting before a crowd and/or watching a movie where physiological data measured by wearables. 
Attributes of personal behavior, e.g. blink, wink, surprise, furrow, smile and clench, are analyzed correlatively with the participants' personality traits under respective emotion states of excitement, relaxation, stress, engagement, interest and focus. Finally, we identify significant attribute correlations and find that correlations between the personality traits and the personal behaviors are greatly depended on the emotional states.", "abstracts": [ { "abstractType": "Regular", "content": "In addition to the computational ability, human-like characteristics such as behavior, emotion, and personality can also be augmented to many personalized computers, applications robots and other systems. In recent years, there are many studies about human characteristics by using rich personal data collected from information systems and ubiquitous devices such as wearables. Besides separated studies on each aspect of human behavior, emotion and personality by using the personal data, it is also necessary to further study various relationships among these human characteristics. Therefore, this research is to examine how personality traits are associated with personal behaviors under specific emotional states based on physiological data collected from three wearable devices, Emotive Insight, Spire Stone and Huawei Fit Watch. Experimental data was gathered from 50 participants subjected to; a Big Five Inventory (BFI) questionnaire to get their personality traits, presenting before a crowd and/or watching a movie where physiological data measured by wearables. Attributes of personal behavior, e.g. blink, wink, surprise, furrow, smile and clench, are analyzed correlatively with the participants' personality traits under respective emotion states of excitement, relaxation, stress, engagement, interest and focus. 
Finally, we identify significant attribute correlations and find that correlations between the personality traits and the personal behaviors are greatly depended on the emotional states.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In addition to the computational ability, human-like characteristics such as behavior, emotion, and personality can also be augmented to many personalized computers, applications robots and other systems. In recent years, there are many studies about human characteristics by using rich personal data collected from information systems and ubiquitous devices such as wearables. Besides separated studies on each aspect of human behavior, emotion and personality by using the personal data, it is also necessary to further study various relationships among these human characteristics. Therefore, this research is to examine how personality traits are associated with personal behaviors under specific emotional states based on physiological data collected from three wearable devices, Emotive Insight, Spire Stone and Huawei Fit Watch. Experimental data was gathered from 50 participants subjected to; a Big Five Inventory (BFI) questionnaire to get their personality traits, presenting before a crowd and/or watching a movie where physiological data measured by wearables. Attributes of personal behavior, e.g. blink, wink, surprise, furrow, smile and clench, are analyzed correlatively with the participants' personality traits under respective emotion states of excitement, relaxation, stress, engagement, interest and focus. 
Finally, we identify significant attribute correlations and find that correlations between the personality traits and the personal behaviors are greatly depended on the emotional states.", "fno": "751800a046", "keywords": [ "Behavioural Sciences Computing", "Data Privacy", "Emotion Recognition", "Human Computer Interaction", "Physiology", "Wearable Computers", "Emotive Insight", "Spire Stone", "Huawei Fit Watch", "Big Five Inventory", "BFI", "Rich Personal Data", "Personalized Computers", "Specific Emotion States", "Respective Emotion States", "Participants", "Wearable Devices", "Physiological Data", "Specific Emotional States", "Personal Behaviors", "Personality Traits", "Human Characteristics", "Human Behavior", "Correlation", "Stress", "Motion Pictures", "Physiology", "Brain Modeling", "Heart Rate", "Atmospheric Measurements", "Wearable Devices", "Personality Traits", "Emotion States", "Personal Behavior", "Correlation" ], "authors": [ { "affiliation": null, "fullName": "Ruiying Cai", "givenName": "Ruiying", "surname": "Cai", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Ao Guo", "givenName": "Ao", "surname": "Guo", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Jianhua Ma", "givenName": "Jianhua", "surname": "Ma", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Runhe Huang", "givenName": "Runhe", "surname": "Huang", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Ruiyun Yu", "givenName": "Ruiyun", "surname": "Yu", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Chen Yang", "givenName": "Chen", "surname": "Yang", "__typename": "ArticleAuthorType" } ], "idPrefix": "dasc-picom-datacom-cyberscitech", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2018-08-01T00:00:00", "pubType": "proceedings", "pages": "46-53", "year": "2018", "issn": null, "isbn": "978-1-5386-7518-2", "notes": null, 
"notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "751800a041", "articleId": "17D45WODaoJ", "__typename": "AdjacentArticleType" }, "next": { "fno": "751800a054", "articleId": "17D45VN31hk", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/fg/2015/6026/1/07163100", "title": "Inference of personality traits and affect schedule by analysis of spontaneous reactions to affective videos", "doi": null, "abstractUrl": "/proceedings-article/fg/2015/07163100/12OmNAJDByz", "parentPublication": { "id": "proceedings/fg/2015/6026/5", "title": "2015 11th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (FG)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/agile/2006/2562/0/25620089", "title": "Critical Personality Traits in Successful Pair Programming", "doi": null, "abstractUrl": "/proceedings-article/agile/2006/25620089/12OmNBiygtm", "parentPublication": { "id": "proceedings/agile/2006/2562/0", "title": "AGILE 2006", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wi/2016/4470/0/4470a288", "title": "Knowledge-Driven Approach to Predict Personality Traits by Leveraging Social Media Data", "doi": null, "abstractUrl": "/proceedings-article/wi/2016/4470a288/12OmNvTjZRz", "parentPublication": { "id": "proceedings/wi/2016/4470/0", "title": "2016 IEEE/WIC/ACM International Conference on Web Intelligence (WI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ta/2018/02/07736040", "title": "ASCERTAIN: Emotion and Personality Recognition Using Commercial Sensors", "doi": null, "abstractUrl": "/journal/ta/2018/02/07736040/13rRUyY2938", "parentPublication": { "id": "trans/ta", "title": "IEEE Transactions on Affective Computing", "__typename": "ParentPublication" }, 
"__typename": "RecommendedArticleType" }, { "id": "trans/ta/2018/03/08241761", "title": "Emotion Analysis for Personality Inference from EEG Signals", "doi": null, "abstractUrl": "/journal/ta/2018/03/08241761/13rRUytF47R", "parentPublication": { "id": "trans/ta", "title": "IEEE Transactions on Affective Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/co/2019/07/08747565", "title": "Predicting Personality Traits From Physical Activity Intensity", "doi": null, "abstractUrl": "/magazine/co/2019/07/08747565/1bcFjqwvpLO", "parentPublication": { "id": "mags/co", "title": "Computer", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/avss/2019/0990/0/08909839", "title": "Personality Traits Classification on Twitter", "doi": null, "abstractUrl": "/proceedings-article/avss/2019/08909839/1febNVOlteg", "parentPublication": { "id": "proceedings/avss/2019/0990/0", "title": "2019 16th IEEE International Conference on Advanced Video and Signal Based Surveillance (AVSS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bibm/2019/1867/0/08983342", "title": "Using an Affective Computing Taxonomy Management System to Support Data Management in Personality Traits", "doi": null, "abstractUrl": "/proceedings-article/bibm/2019/08983342/1hgu14J2Yve", "parentPublication": { "id": "proceedings/bibm/2019/1867/0", "title": "2019 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ispa-bdcloud-socialcom-sustaincom/2019/4328/0/09047374", "title": "Facial-Based Personality Prediction Models for Estimating Individuals Private Traits", "doi": null, "abstractUrl": "/proceedings-article/ispa-bdcloud-socialcom-sustaincom/2019/09047374/1iC6BNzuL3G", "parentPublication": { "id": 
"proceedings/ispa-bdcloud-socialcom-sustaincom/2019/4328/0", "title": "2019 IEEE Intl Conf on Parallel & Distributed Processing with Applications, Big Data & Cloud Computing, Sustainable Computing & Communications, Social Computing & Networking (ISPA/BDCloud/SocialCom/SustainCom)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/percom-workshops/2021/0424/0/09430936", "title": "Investigation of Relation between Physiological Responses and Personality during Stress Recovery", "doi": null, "abstractUrl": "/proceedings-article/percom-workshops/2021/09430936/1tRORzZ0Mzm", "parentPublication": { "id": "proceedings/percom-workshops/2021/0424/0", "title": "2021 IEEE International Conference on Pervasive Computing and Communications Workshops and other Affiliated Events (PerCom Workshops)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "17D45VtKiru", "title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "acronym": "cvprw", "groupId": "1001809", "volume": "0", "displayVolume": "0", "year": "2018", "__typename": "ProceedingType" }, "article": { "id": "17D45Xtvp9d", "doi": "10.1109/CVPRW.2018.00173", "title": "Advertisement Effectiveness Estimation Based on Crowdsourced Multimodal Affective Responses", "normalizedTitle": "Advertisement Effectiveness Estimation Based on Crowdsourced Multimodal Affective Responses", "abstract": "In this paper, we estimate the effectiveness of an advertisement using online data collection and the remote measurement of facial expressions and physiological responses. Recently, the online advertisement market has expanded, and the measurement of advertisement effectiveness has become very important. We collected a significant number of videos of Japanese faces watching video advertisements in the same scenario in which media is normally used via the Internet. Facial expression and physiological responses such as heart rate and gaze were remotely measured by analyzing facial videos. By combining the measured responses into multimodal features and using machine learning, we show that ad liking can be predicted (ROC AUC = 0.93) better than when only single-mode features are used. Furthermore, intent to purchase can be estimated well (ROC AUC = 0.91) using multimodal features.", "abstracts": [ { "abstractType": "Regular", "content": "In this paper, we estimate the effectiveness of an advertisement using online data collection and the remote measurement of facial expressions and physiological responses. Recently, the online advertisement market has expanded, and the measurement of advertisement effectiveness has become very important. We collected a significant number of videos of Japanese faces watching video advertisements in the same scenario in which media is normally used via the Internet. 
Facial expression and physiological responses such as heart rate and gaze were remotely measured by analyzing facial videos. By combining the measured responses into multimodal features and using machine learning, we show that ad liking can be predicted (ROC AUC = 0.93) better than when only single-mode features are used. Furthermore, intent to purchase can be estimated well (ROC AUC = 0.91) using multimodal features.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In this paper, we estimate the effectiveness of an advertisement using online data collection and the remote measurement of facial expressions and physiological responses. Recently, the online advertisement market has expanded, and the measurement of advertisement effectiveness has become very important. We collected a significant number of videos of Japanese faces watching video advertisements in the same scenario in which media is normally used via the Internet. Facial expression and physiological responses such as heart rate and gaze were remotely measured by analyzing facial videos. By combining the measured responses into multimodal features and using machine learning, we show that ad liking can be predicted (ROC AUC = 0.93) better than when only single-mode features are used. 
Furthermore, intent to purchase can be estimated well (ROC AUC = 0.91) using multimodal features.", "fno": "610000b344", "keywords": [ "Advertising", "Consumer Behaviour", "Emotion Recognition", "Face Recognition", "Internet", "Learning Artificial Intelligence", "Purchasing", "Video Signal Processing", "Physiological Responses", "Online Advertisement Market", "Japanese Faces", "Video Advertisements", "Facial Expression", "Heart Rate", "Facial Videos", "Measured Responses", "Multimodal Features", "Advertisement Effectiveness Estimation", "Crowdsourced Multimodal Affective Responses", "Online Data Collection", "Remote Measurement", "Machine Learning", "Videos", "Physiology", "Internet", "Heart Rate", "Laboratories", "Atmospheric Measurements", "Particle Measurements" ], "authors": [ { "affiliation": null, "fullName": "Genki Okada", "givenName": "Genki", "surname": "Okada", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Kenta Masui", "givenName": "Kenta", "surname": "Masui", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Norimichi Tsumura", "givenName": "Norimichi", "surname": "Tsumura", "__typename": "ArticleAuthorType" } ], "idPrefix": "cvprw", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2018-06-01T00:00:00", "pubType": "proceedings", "pages": "1344-13448", "year": "2018", "issn": null, "isbn": "978-1-5386-6100-0", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "610000b335", "articleId": "17D45VTRoE3", "__typename": "AdjacentArticleType" }, "next": { "fno": "610000b353", "articleId": "17D45WWzW7f", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/fg/2015/6026/1/07163100", "title": "Inference of personality traits and affect schedule by analysis of spontaneous reactions to affective videos", "doi": null, 
"abstractUrl": "/proceedings-article/fg/2015/07163100/12OmNAJDByz", "parentPublication": { "id": "proceedings/fg/2015/6026/5", "title": "2015 11th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (FG)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/acii/2013/5048/0/5048a582", "title": "User-centric Affective Video Tagging from MEG and Peripheral Physiological Responses", "doi": null, "abstractUrl": "/proceedings-article/acii/2013/5048a582/12OmNrkjVpA", "parentPublication": { "id": "proceedings/acii/2013/5048/0", "title": "2013 Humaine Association Conference on Affective Computing and Intelligent Interaction (ACII)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/acii/2013/5048/0/5048a369", "title": "Measuring Voter's Candidate Preference Based on Affective Responses to Election Debates", "doi": null, "abstractUrl": "/proceedings-article/acii/2013/5048a369/12OmNzuIjn7", "parentPublication": { "id": "proceedings/acii/2013/5048/0", "title": "2013 Humaine Association Conference on Affective Computing and Intelligent Interaction (ACII)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ta/2015/03/07010926", "title": "DECAF: MEG-Based Multimodal Database for Decoding Affective Physiological Responses", "doi": null, "abstractUrl": "/journal/ta/2015/03/07010926/13rRUwdrdR8", "parentPublication": { "id": "trans/ta", "title": "IEEE Transactions on Affective Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ta/2015/03/06991558", "title": "Predicting Ad Liking and Purchase Intent: Large-Scale Analysis of Facial Responses to Ads", "doi": null, "abstractUrl": "/journal/ta/2015/03/06991558/13rRUzphDwh", "parentPublication": { "id": "trans/ta", "title": "IEEE Transactions on Affective Computing", "__typename": "ParentPublication" }, 
"__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/11/09873958", "title": "Characterizing Physiological Responses to Fear, Frustration, and Insight in Virtual Reality", "doi": null, "abstractUrl": "/journal/tg/2022/11/09873958/1GjwGGW9cSA", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ta/5555/01/10015779", "title": "Automated Classification of Dyadic Conversation Scenarios using Autonomic Nervous System Responses", "doi": null, "abstractUrl": "/journal/ta/5555/01/10015779/1JSl18dZbxK", "parentPublication": { "id": "trans/ta", "title": "IEEE Transactions on Affective Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2020/9360/0/09151089", "title": "Continuous estimation of emotional change usingmultimodal affective responses", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2020/09151089/1lPHcgXNFba", "parentPublication": { "id": "proceedings/cvprw/2020/9360/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ta/2022/03/09187561", "title": "Persuasion-Induced Physiology as Predictor of Persuasion Effectiveness", "doi": null, "abstractUrl": "/journal/ta/2022/03/09187561/1mVFghLI8wM", "parentPublication": { "id": "trans/ta", "title": "IEEE Transactions on Affective Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ta/2023/01/09229513", "title": "Estimating Affective Taste Experience Using Combined Implicit Behavioral and Neurophysiological Measures", "doi": null, "abstractUrl": "/journal/ta/2023/01/09229513/1o3nfbzpzhe", "parentPublication": { "id": "trans/ta", "title": "IEEE Transactions on Affective Computing", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1nkDclx75Kg", "title": "2020 IEEE 44th Annual Computers, Software, and Applications Conference (COMPSAC)", "acronym": "compsac", "groupId": "1000143", "volume": "0", "displayVolume": "0", "year": "2020", "__typename": "ProceedingType" }, "article": { "id": "1nkDoR6CPtu", "doi": "10.1109/COMPSAC48688.2020.00-86", "title": "Reconstructing Compound Affective States using Physiological Sensor Data", "normalizedTitle": "Reconstructing Compound Affective States using Physiological Sensor Data", "abstract": "The human affective state is a product of complex biological processes and environmental stimuli. Situation aware systems aim at identifying the affective state of an individual using data from a gamut of connected devices. The bottle necks for such systems include continuous data streams, mobility of the data collection apparatus, device ubiquity and the device cost. While there is research done using physiological sensors that can overcome these challenges, their accuracy is often dismal and the results are not granular, i.e. the affective state is singular. In this paper we present results from an experiment that enabled us to generate models to identify an individuals affective state as a mixture of emotional states and their respective activation's. Secondly, we show that the affective state of an individual is actually a mixture of emotional states ( amusement, anger, neutral, sad, fear and disgust). During an experimental study, 85 participants were induced with specific emotions using audio-visual stimulus. Physiological data including heart rate, blood volume pressure (BVP), inter beat interval(IBI) and electrodermal activity(EDA) along with a self-report indicating the levels of 6 emotional states that include Amusement, Anger, Sad, Disgust, Fear and Neutral was recorded. Additionally, we recorded a self-reported score for Anxiety. The videos used to induce emotions were validated in a recently published study in Psychology. 
The data collected was used to create models that identify the dominant emotional state and the emotional spectrum (activation levels of all emotional states) for an individual. We create a map between the physiological data and the dominant emotional state and also between physiological data and the self-report scores. In addition, we identify often overlooked characteristics of human emotion such as variability in perception and overlap of emotional states and finally create a topological map of emotional states based on physiological data.", "abstracts": [ { "abstractType": "Regular", "content": "The human affective state is a product of complex biological processes and environmental stimuli. Situation aware systems aim at identifying the affective state of an individual using data from a gamut of connected devices. The bottle necks for such systems include continuous data streams, mobility of the data collection apparatus, device ubiquity and the device cost. While there is research done using physiological sensors that can overcome these challenges, their accuracy is often dismal and the results are not granular, i.e. the affective state is singular. In this paper we present results from an experiment that enabled us to generate models to identify an individuals affective state as a mixture of emotional states and their respective activation's. Secondly, we show that the affective state of an individual is actually a mixture of emotional states ( amusement, anger, neutral, sad, fear and disgust). During an experimental study, 85 participants were induced with specific emotions using audio-visual stimulus. Physiological data including heart rate, blood volume pressure (BVP), inter beat interval(IBI) and electrodermal activity(EDA) along with a self-report indicating the levels of 6 emotional states that include Amusement, Anger, Sad, Disgust, Fear and Neutral was recorded. Additionally, we recorded a self-reported score for Anxiety. 
The videos used to induce emotions were validated in a recently published study in Psychology. The data collected was used to create models that identify the dominant emotional state and the emotional spectrum (activation levels of all emotional states) for an individual. We create a map between the physiological data and the dominant emotional state and also between physiological data and the self-report scores. In addition, we identify often overlooked characteristics of human emotion such as variability in perception and overlap of emotional states and finally create a topological map of emotional states based on physiological data.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "The human affective state is a product of complex biological processes and environmental stimuli. Situation aware systems aim at identifying the affective state of an individual using data from a gamut of connected devices. The bottle necks for such systems include continuous data streams, mobility of the data collection apparatus, device ubiquity and the device cost. While there is research done using physiological sensors that can overcome these challenges, their accuracy is often dismal and the results are not granular, i.e. the affective state is singular. In this paper we present results from an experiment that enabled us to generate models to identify an individuals affective state as a mixture of emotional states and their respective activation's. Secondly, we show that the affective state of an individual is actually a mixture of emotional states ( amusement, anger, neutral, sad, fear and disgust). During an experimental study, 85 participants were induced with specific emotions using audio-visual stimulus. 
Physiological data including heart rate, blood volume pressure (BVP), inter beat interval(IBI) and electrodermal activity(EDA) along with a self-report indicating the levels of 6 emotional states that include Amusement, Anger, Sad, Disgust, Fear and Neutral was recorded. Additionally, we recorded a self-reported score for Anxiety. The videos used to induce emotions were validated in a recently published study in Psychology. The data collected was used to create models that identify the dominant emotional state and the emotional spectrum (activation levels of all emotional states) for an individual. We create a map between the physiological data and the dominant emotional state and also between physiological data and the self-report scores. In addition, we identify often overlooked characteristics of human emotion such as variability in perception and overlap of emotional states and finally create a topological map of emotional states based on physiological data.", "fno": "730300b241", "keywords": [ "Cardiology", "Emotion Recognition", "Human Computer Interaction", "Physiology", "Psychology", "Compound Affective States", "Physiological Sensor Data", "Human Affective State", "Continuous Data Streams", "Data Collection Apparatus", "Individuals Affective State", "Physiological Data", "Dominant Emotional State", "Heart Rate", "Videos", "Physiology", "Feature Extraction", "Emotion Recognition", "Skin", "Blood", "Psychology", "Affective Computing Emotion Modelling Wearable Sensors Feature Extraction Affective States Human Centered Computing Situation Awareness" ], "authors": [ { "affiliation": "Direct Supply", "fullName": "Piyush Saxena", "givenName": "Piyush", "surname": "Saxena", "__typename": "ArticleAuthorType" }, { "affiliation": "Marquette Univeristy", "fullName": "Sarthak Dabas", "givenName": "Sarthak", "surname": "Dabas", "__typename": "ArticleAuthorType" }, { "affiliation": "Marquette University", "fullName": "Devansh Saxena", "givenName": "Devansh", "surname": 
"Saxena", "__typename": "ArticleAuthorType" }, { "affiliation": "Direct Supply", "fullName": "Nithin Ramachandran", "givenName": "Nithin", "surname": "Ramachandran", "__typename": "ArticleAuthorType" }, { "affiliation": "Marquette University", "fullName": "Sheikh Iqbal Ahamed", "givenName": "Sheikh Iqbal", "surname": "Ahamed", "__typename": "ArticleAuthorType" } ], "idPrefix": "compsac", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2020-07-01T00:00:00", "pubType": "proceedings", "pages": "1241-1249", "year": "2020", "issn": "0730-3157", "isbn": "978-1-7281-7303-0", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "730300b235", "articleId": "1nkDpUuTwiY", "__typename": "AdjacentArticleType" }, "next": { "fno": "730300b250", "articleId": "1nkDdILBijm", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/acii/2017/0563/0/08273584", "title": "Multiple users' emotion recognition: Improving performance by joint modeling of affective reactions", "doi": null, "abstractUrl": "/proceedings-article/acii/2017/08273584/12OmNs0TKVi", "parentPublication": { "id": "proceedings/acii/2017/0563/0", "title": "2017 Seventh International Conference on Affective Computing and Intelligent Interaction (ACII)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/isspit/2015/0481/0/07394401", "title": "Using physiological signal analysis to design affective VR games", "doi": null, "abstractUrl": "/proceedings-article/isspit/2015/07394401/12OmNySosHh", "parentPublication": { "id": "proceedings/isspit/2015/0481/0", "title": "2015 IEEE International Symposium on Signal Processing and Information Technology (ISSPIT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/acii/2009/4800/0/05349384", 
"title": "Game adaptivity impact on affective physical interaction", "doi": null, "abstractUrl": "/proceedings-article/acii/2009/05349384/12OmNyuya9n", "parentPublication": { "id": "proceedings/acii/2009/4800/0", "title": "2009 3rd International Conference on Affective Computing and Intelligent Interaction and Workshops (ACII 2009)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/nesea/2012/4721/0/06474007", "title": "An open affective platform", "doi": null, "abstractUrl": "/proceedings-article/nesea/2012/06474007/12OmNzmLxB9", "parentPublication": { "id": "proceedings/nesea/2012/4721/0", "title": "Networked Embedded Systems for Enterprise Applications, IEEE International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ta/2015/03/07010926", "title": "DECAF: MEG-Based Multimodal Database for Decoding Affective Physiological Responses", "doi": null, "abstractUrl": "/journal/ta/2015/03/07010926/13rRUwdrdR8", "parentPublication": { "id": "trans/ta", "title": "IEEE Transactions on Affective Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ta/2018/02/07736040", "title": "ASCERTAIN: Emotion and Personality Recognition Using Commercial Sensors", "doi": null, "abstractUrl": "/journal/ta/2018/02/07736040/13rRUyY2938", "parentPublication": { "id": "trans/ta", "title": "IEEE Transactions on Affective Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ta/2020/04/08326711", "title": "Physiological Detection of Affective States in Children with Autism Spectrum Disorder", "doi": null, "abstractUrl": "/journal/ta/2020/04/08326711/13rRUyYBlfe", "parentPublication": { "id": "trans/ta", "title": "IEEE Transactions on Affective Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/11/09873958", "title": 
"Characterizing Physiological Responses to Fear, Frustration, and Insight in Virtual Reality", "doi": null, "abstractUrl": "/journal/tg/2022/11/09873958/1GjwGGW9cSA", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aciiw/2019/3891/0/08925190", "title": "Detection of Real-World Driving-Induced Affective State Using Physiological Signals and Multi-View Multi-Task Machine Learning", "doi": null, "abstractUrl": "/proceedings-article/aciiw/2019/08925190/1fHFbu2cLsY", "parentPublication": { "id": "proceedings/aciiw/2019/3891/0", "title": "2019 8th International Conference on Affective Computing and Intelligent Interaction Workshops and Demos (ACIIW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ta/5555/01/09470976", "title": "Multimodal Affective States Recognition Based on Multiscale CNNs and Biologically Inspired Decision Fusion Model", "doi": null, "abstractUrl": "/journal/ta/5555/01/09470976/1uSOwMJI2TS", "parentPublication": { "id": "trans/ta", "title": "IEEE Transactions on Affective Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1tROFXZKX3q", "title": "2021 IEEE International Conference on Pervasive Computing and Communications Workshops and other Affiliated Events (PerCom Workshops)", "acronym": "percom-workshops", "groupId": "1000552", "volume": "0", "displayVolume": "0", "year": "2021", "__typename": "ProceedingType" }, "article": { "id": "1tRORzZ0Mzm", "doi": "10.1109/PerComWorkshops51409.2021.9430936", "title": "Investigation of Relation between Physiological Responses and Personality during Stress Recovery", "normalizedTitle": "Investigation of Relation between Physiological Responses and Personality during Stress Recovery", "abstract": "Stress is seen as an individual&#x0027;s reaction to external circumstances that are perceived as a threat. Reactions to stress are highly subjective in nature, depending upon numerous individualistic factors. The study of stress recovery and associated coping efforts can help mitigate adverse health effects. Therefore, understanding the interplay of psychological and physiological manifestations of stress in modeling the stress recovery patterns is of high importance. Previous studies have indicated an association between personality traits and physiological responses. However, definitive evidence for this association is lacking. This work attempts to investigate the correlation between personality traits, such as neuroticism and extraversion, and physiological responses such as electrocardiogram and salivary cortisol responses, to the Trier Social Stress Test. Gaussian mixture modeling technique is employed to automatically cluster individuals based on their personality traits and electrocardiogram responses. Simultaneously, individuals are classified based on changes in salivary cortisol levels. Resulting clusters are labelled based on the literature on stress recovery. The relationships between personality and physiology groups are investigated. 
Reduced stress recovery observed via salivary cortisol responses is associated with higher neuroticism and lower extraversion, as well as attenuated electrocardiogram recovery responses. Higher cortisol reactivity during stress is found to be positively associated with higher cortisol recovery. Therefore, the study implies that consideration of personality traits is likely to aid stress detection and recovery models.", "abstracts": [ { "abstractType": "Regular", "content": "Stress is seen as an individual&#x0027;s reaction to external circumstances that are perceived as a threat. Reactions to stress are highly subjective in nature, depending upon numerous individualistic factors. The study of stress recovery and associated coping efforts can help mitigate adverse health effects. Therefore, understanding the interplay of psychological and physiological manifestations of stress in modeling the stress recovery patterns is of high importance. Previous studies have indicated an association between personality traits and physiological responses. However, definitive evidence for this association is lacking. This work attempts to investigate the correlation between personality traits, such as neuroticism and extraversion, and physiological responses such as electrocardiogram and salivary cortisol responses, to the Trier Social Stress Test. Gaussian mixture modeling technique is employed to automatically cluster individuals based on their personality traits and electrocardiogram responses. Simultaneously, individuals are classified based on changes in salivary cortisol levels. Resulting clusters are labelled based on the literature on stress recovery. The relationships between personality and physiology groups are investigated. Reduced stress recovery observed via salivary cortisol responses is associated with higher neuroticism and lower extraversion, as well as attenuated electrocardiogram recovery responses. 
Higher cortisol reactivity during stress is found to be positively associated with higher cortisol recovery. Therefore, the study implies that consideration of personality traits is likely to aid stress detection and recovery models.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Stress is seen as an individual's reaction to external circumstances that are perceived as a threat. Reactions to stress are highly subjective in nature, depending upon numerous individualistic factors. The study of stress recovery and associated coping efforts can help mitigate adverse health effects. Therefore, understanding the interplay of psychological and physiological manifestations of stress in modeling the stress recovery patterns is of high importance. Previous studies have indicated an association between personality traits and physiological responses. However, definitive evidence for this association is lacking. This work attempts to investigate the correlation between personality traits, such as neuroticism and extraversion, and physiological responses such as electrocardiogram and salivary cortisol responses, to the Trier Social Stress Test. Gaussian mixture modeling technique is employed to automatically cluster individuals based on their personality traits and electrocardiogram responses. Simultaneously, individuals are classified based on changes in salivary cortisol levels. Resulting clusters are labelled based on the literature on stress recovery. The relationships between personality and physiology groups are investigated. Reduced stress recovery observed via salivary cortisol responses is associated with higher neuroticism and lower extraversion, as well as attenuated electrocardiogram recovery responses. Higher cortisol reactivity during stress is found to be positively associated with higher cortisol recovery. 
Therefore, the study implies that consideration of personality traits is likely to aid stress detection and recovery models.", "fno": "09430936", "keywords": [ "Biochemistry", "Biomedical Measurement", "Cognition", "Electrocardiography", "Gaussian Processes", "Human Factors", "Medical Signal Processing", "Neurophysiology", "Physiological Responses", "Psychological Manifestations", "Physiological Manifestations", "Salivary Cortisol Responses", "Trier Social Stress Test", "Automatically Cluster Individuals", "Electrocardiogram Responses", "Reduced Stress Recovery", "Stress Detection", "Cortisol Recovery", "Attenuated Electrocardiogram Recovery", "Pervasive Computing", "Correlation", "Conferences", "Computational Modeling", "Psychology", "Physiology", "Stress", "Physiological Stress Recovery", "Heart Rate Variability", "Salivary Cortisol", "Personality", "Neuroticism", "Extraversion", "Clustering", "Gaussian Mixture Model", "Trier Social Stress Test" ], "authors": [ { "affiliation": "Fraunhofer Institute for Integrated Circuits IIS,Smart Sensing and Electronics,Erlangen,Germany", "fullName": "Bitan Saha", "givenName": "Bitan", "surname": "Saha", "__typename": "ArticleAuthorType" }, { "affiliation": "Friedrich-Alexander-Universität Erlangen-Nürnberg,Department of Psychology,Germany", "fullName": "Linda Becker", "givenName": "Linda", "surname": "Becker", "__typename": "ArticleAuthorType" }, { "affiliation": "Fraunhofer Institute for Integrated Circuits IIS,Smart Sensing and Electronics,Erlangen,Germany", "fullName": "Jens-Uwe Garbas", "givenName": "Jens-Uwe", "surname": "Garbas", "__typename": "ArticleAuthorType" }, { "affiliation": "Fraunhofer Institute for Integrated Circuits IIS,Smart Sensing and Electronics,Erlangen,Germany", "fullName": "Maximilian Oppelt", "givenName": "Maximilian", "surname": "Oppelt", "__typename": "ArticleAuthorType" }, { "affiliation": "Fraunhofer Institute for Integrated Circuits IIS,Smart Sensing and Electronics,Erlangen,Germany", 
"fullName": "Andreas Foltyn", "givenName": "Andreas", "surname": "Foltyn", "__typename": "ArticleAuthorType" }, { "affiliation": "Fraunhofer Institute for Integrated Circuits IIS,Smart Sensing and Electronics,Erlangen,Germany", "fullName": "Sebastian Hettenkofer", "givenName": "Sebastian", "surname": "Hettenkofer", "__typename": "ArticleAuthorType" }, { "affiliation": "Fraunhofer Institute for Integrated Circuits IIS,Smart Sensing and Electronics,Erlangen,Germany", "fullName": "Nadine Lang", "givenName": "Nadine", "surname": "Lang", "__typename": "ArticleAuthorType" }, { "affiliation": "Fraunhofer Institute for Integrated Circuits IIS,Smart Sensing and Electronics,Erlangen,Germany", "fullName": "Matthias Struck", "givenName": "Matthias", "surname": "Struck", "__typename": "ArticleAuthorType" }, { "affiliation": "Friedrich-Alexander-Universität Erlangen-Nürnberg,Department of Psychology,Germany", "fullName": "Nicolas Rohleder", "givenName": "Nicolas", "surname": "Rohleder", "__typename": "ArticleAuthorType" }, { "affiliation": "Fraunhofer Institute for Integrated Circuits IIS,Smart Sensing and Electronics,Erlangen,Germany", "fullName": "Bhargavi Mahesh", "givenName": "Bhargavi", "surname": "Mahesh", "__typename": "ArticleAuthorType" } ], "idPrefix": "percom-workshops", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2021-03-01T00:00:00", "pubType": "proceedings", "pages": "57-62", "year": "2021", "issn": null, "isbn": "978-1-6654-0424-2", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "09431082", "articleId": "1tROZOU87ja", "__typename": "AdjacentArticleType" }, "next": { "fno": "09430985", "articleId": "1tROKMRFkd2", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/acii/2013/5048/0/5048a429", "title": "Neuroticism, Extraversion and Stress: Physiological 
Correlates", "doi": null, "abstractUrl": "/proceedings-article/acii/2013/5048a429/12OmNC4wtJx", "parentPublication": { "id": "proceedings/acii/2013/5048/0", "title": "2013 Humaine Association Conference on Affective Computing and Intelligent Interaction (ACII)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icbeb/2012/4706/0/4706a929", "title": "The Relationship between Mental Stress Induced Changes in Cortisol Levels and Vascular Responses Quantified by Waveform Analysis: Investigating Stress-Dependent Indices of Vascular Changes", "doi": null, "abstractUrl": "/proceedings-article/icbeb/2012/4706a929/12OmNrIrPwG", "parentPublication": { "id": "proceedings/icbeb/2012/4706/0", "title": "Biomedical Engineering and Biotechnology, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/acii/2013/5048/0/5048a523", "title": "Stress Detection for PTSD via the StartleMart Game", "doi": null, "abstractUrl": "/proceedings-article/acii/2013/5048a523/12OmNvjyxuk", "parentPublication": { "id": "proceedings/acii/2013/5048/0", "title": "2013 Humaine Association Conference on Affective Computing and Intelligent Interaction (ACII)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/acii/2017/0563/0/08273624", "title": "Toward automatic detection of acute stress: Relevant nonverbal behaviors and impact of personality traits", "doi": null, "abstractUrl": "/proceedings-article/acii/2017/08273624/12OmNxvwoLP", "parentPublication": { "id": "proceedings/acii/2017/0563/0", "title": "2017 Seventh International Conference on Affective Computing and Intelligent Interaction (ACII)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ta/2015/02/06821267", "title": "Neuroticism, Extraversion, Conscientiousness and Stress: Physiological Correlates", "doi": null, "abstractUrl": 
"/journal/ta/2015/02/06821267/13rRUwjoNvd", "parentPublication": { "id": "trans/ta", "title": "IEEE Transactions on Affective Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ta/2018/02/07736040", "title": "ASCERTAIN: Emotion and Personality Recognition Using Commercial Sensors", "doi": null, "abstractUrl": "/journal/ta/2018/02/07736040/13rRUyY2938", "parentPublication": { "id": "trans/ta", "title": "IEEE Transactions on Affective Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/dasc-picom-datacom-cyberscitech/2018/7518/0/751800a046", "title": "Correlation Analyses Between Personality Traits and Personal Behaviors Under Specific Emotion States Using Physiological Data from Wearable Devices", "doi": null, "abstractUrl": "/proceedings-article/dasc-picom-datacom-cyberscitech/2018/751800a046/17D45Xq6dzr", "parentPublication": { "id": "proceedings/dasc-picom-datacom-cyberscitech/2018/7518/0", "title": "2018 IEEE 16th Intl Conf on Dependable, Autonomic and Secure Computing, 16th Intl Conf on Pervasive Intelligence and Computing, 4th Intl Conf on Big Data Intelligence and Computing and Cyber Science and Technology Congress(DASC/PiCom/DataCom/CyberSciTech)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/percom-workshops/2019/9151/0/08730779", "title": "Analysis of Personality Dependent Differences in Pupillary Response and its Relation to Stress Recovery Ability", "doi": null, "abstractUrl": "/proceedings-article/percom-workshops/2019/08730779/1aDSKxbLAwo", "parentPublication": { "id": "proceedings/percom-workshops/2019/9151/0", "title": "2019 IEEE International Conference on Pervasive Computing and Communications Workshops (PerCom Workshops)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bibe/2020/9574/0/957400a509", "title": "Stress Level 
Detection Using Physiological Sensors", "doi": null, "abstractUrl": "/proceedings-article/bibe/2020/957400a509/1pBMuOcDoIM", "parentPublication": { "id": "proceedings/bibe/2020/9574/0", "title": "2020 IEEE 20th International Conference on Bioinformatics and Bioengineering (BIBE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ta/2023/01/09417627", "title": "Incorporating Forthcoming Events and Personality Traits in Social Media Based Stress Prediction", "doi": null, "abstractUrl": "/journal/ta/2023/01/09417627/1taAGhGKixG", "parentPublication": { "id": "trans/ta", "title": "IEEE Transactions on Affective Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNxdDFCL", "title": "Intelligent Robots and Systems, IEEE/RSJ International Conference on", "acronym": "iros", "groupId": "1000393", "volume": "3", "displayVolume": "3", "year": "1995", "__typename": "ProceedingType" }, "article": { "id": "12OmNBDQblW", "doi": "10.1109/IROS.1995.525876", "title": "A constraint-based god-object method for haptic display", "normalizedTitle": "A constraint-based god-object method for haptic display", "abstract": "Haptic display is the process of applying forces to a human \"observer\" giving the sensation of touching and interacting with real physical objects. Touch is unique among the senses because it allows simultaneous exploration and manipulation of an environment. A haptic display system has three main components. The first is the haptic interface, or display device, generally some type of electro-mechanical system able to exert controllable forces on the user with one or more degrees of freedom. The second is the object model-a mathematical representation of the object containing its shape and other properties related to the way it feels. The third component, the haptic rendering algorithm, joins the first two components to compute, in real time, the model-based forces to give the user the sensation of touching the simulated objects. This paper focuses on a new haptic rendering algorithm for generating convincing interaction forces for objects modeled as rigid polyhedra. We create a virtual model of the haptic interface, called the god-object, which conforms to the virtual environment. The haptic interface can then be servo-ed to this virtual model. 
This algorithm is extensible to other functional descriptions and lays the groundwork for displaying not only shape information, but surface properties such as friction and compliance.", "abstracts": [ { "abstractType": "Regular", "content": "Haptic display is the process of applying forces to a human \"observer\" giving the sensation of touching and interacting with real physical objects. Touch is unique among the senses because it allows simultaneous exploration and manipulation of an environment. A haptic display system has three main components. The first is the haptic interface, or display device, generally some type of electro-mechanical system able to exert controllable forces on the user with one or more degrees of freedom. The second is the object model-a mathematical representation of the object containing its shape and other properties related to the way it feels. The third component, the haptic rendering algorithm, joins the first two components to compute, in real time, the model-based forces to give the user the sensation of touching the simulated objects. This paper focuses on a new haptic rendering algorithm for generating convincing interaction forces for objects modeled as rigid polyhedra. We create a virtual model of the haptic interface, called the god-object, which conforms to the virtual environment. The haptic interface can then be servo-ed to this virtual model. This algorithm is extensible to other functional descriptions and lays the groundwork for displaying not only shape information, but surface properties such as friction and compliance.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Haptic display is the process of applying forces to a human \"observer\" giving the sensation of touching and interacting with real physical objects. Touch is unique among the senses because it allows simultaneous exploration and manipulation of an environment. A haptic display system has three main components. 
The first is the haptic interface, or display device, generally some type of electro-mechanical system able to exert controllable forces on the user with one or more degrees of freedom. The second is the object model-a mathematical representation of the object containing its shape and other properties related to the way it feels. The third component, the haptic rendering algorithm, joins the first two components to compute, in real time, the model-based forces to give the user the sensation of touching the simulated objects. This paper focuses on a new haptic rendering algorithm for generating convincing interaction forces for objects modeled as rigid polyhedra. We create a virtual model of the haptic interface, called the god-object, which conforms to the virtual environment. The haptic interface can then be servo-ed to this virtual model. This algorithm is extensible to other functional descriptions and lays the groundwork for displaying not only shape information, but surface properties such as friction and compliance.", "fno": "71083146", "keywords": [ "Virtual Reality Interactive Devices Constraint Based God Object Method Haptic Display Haptic Interface Electro Mechanical System Controllable Forces Haptic Rendering Algorithm Rigid Polyhedra" ], "authors": [ { "affiliation": "Dept. of Mech. Eng., MIT, Cambridge, MA, USA", "fullName": "C.B. Zilles", "givenName": "C.B.", "surname": "Zilles", "__typename": "ArticleAuthorType" }, { "affiliation": "Dept. of Mech. Eng., MIT, Cambridge, MA, USA", "fullName": "J.K. 
Salisbury", "givenName": "J.K.", "surname": "Salisbury", "__typename": "ArticleAuthorType" } ], "idPrefix": "iros", "isOpenAccess": false, "showRecommendedArticles": false, "showBuyMe": true, "hasPdf": true, "pubDate": "1995-08-01T00:00:00", "pubType": "proceedings", "pages": "3146", "year": "1995", "issn": null, "isbn": "0-8186-7108-4", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "71083140", "articleId": "12OmNBB0bXW", "__typename": "AdjacentArticleType" }, "next": { "fno": "71083152", "articleId": "12OmNy6HQW4", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [], "articleVideos": [] }
{ "proceeding": { "id": "12OmNvAiSpZ", "title": "2015 IEEE Virtual Reality (VR)", "acronym": "vr", "groupId": "1000791", "volume": "0", "displayVolume": "0", "year": "2015", "__typename": "ProceedingType" }, "article": { "id": "12OmNCbU39s", "doi": "10.1109/VR.2015.7223425", "title": "MagicPot360: Free viewpoint shape display modifying the perception of shape", "normalizedTitle": "MagicPot360: Free viewpoint shape display modifying the perception of shape", "abstract": "In this paper we developed the free-viewpoint curved surface shape display, using the effect of visuo-haptic interaction. In our research, we aim to realize the simple mechanic visuo-haptic system with which we can touch various objects with our real hands. We proposed the free-viewpoint shape display system in which users can touch a virtual object with various shape through the tablet device, while walking around the object. The system figured the difference of shape between the real object and the virtual one from the current viewpoint, and calculates the amount of displacement of the touching hand image to fit to the virtual object, in real time. This modification of the movement of a hand image evokes the effect of the visuo-haptic interaction, and enable users to feel touching various shapes from various viewpoints, although actually users touch a physically static object.", "abstracts": [ { "abstractType": "Regular", "content": "In this paper we developed the free-viewpoint curved surface shape display, using the effect of visuo-haptic interaction. In our research, we aim to realize the simple mechanic visuo-haptic system with which we can touch various objects with our real hands. We proposed the free-viewpoint shape display system in which users can touch a virtual object with various shape through the tablet device, while walking around the object. 
The system figured the difference of shape between the real object and the virtual one from the current viewpoint, and calculates the amount of displacement of the touching hand image to fit to the virtual object, in real time. This modification of the movement of a hand image evokes the effect of the visuo-haptic interaction, and enable users to feel touching various shapes from various viewpoints, although actually users touch a physically static object.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In this paper we developed the free-viewpoint curved surface shape display, using the effect of visuo-haptic interaction. In our research, we aim to realize the simple mechanic visuo-haptic system with which we can touch various objects with our real hands. We proposed the free-viewpoint shape display system in which users can touch a virtual object with various shape through the tablet device, while walking around the object. The system figured the difference of shape between the real object and the virtual one from the current viewpoint, and calculates the amount of displacement of the touching hand image to fit to the virtual object, in real time. 
This modification of the movement of a hand image evokes the effect of the visuo-haptic interaction, and enable users to feel touching various shapes from various viewpoints, although actually users touch a physically static object.", "fno": "07223425", "keywords": [ "Shape", "Haptic Interfaces", "Distortion", "Visualization", "Image Analysis", "Tablet Computers", "Real Time Systems", "H 5 2 User Interface Haptic I O" ], "authors": [ { "affiliation": "Graduate School of Information Science and Technology, The University of Tokyo", "fullName": "Yuki Ban", "givenName": "Yuki", "surname": "Ban", "__typename": "ArticleAuthorType" }, { "affiliation": "Graduate School of Information Science and Technology, The University of Tokyo", "fullName": "Takuji Narumi", "givenName": "Takuji", "surname": "Narumi", "__typename": "ArticleAuthorType" }, { "affiliation": "Graduate School of Information Science and Technology, The University of Tokyo", "fullName": "Tomohiro Tanikawa", "givenName": "Tomohiro", "surname": "Tanikawa", "__typename": "ArticleAuthorType" }, { "affiliation": "Graduate School of Information Science and Technology, The University of Tokyo", "fullName": "Michitaka Hirose", "givenName": "Michitaka", "surname": "Hirose", "__typename": "ArticleAuthorType" } ], "idPrefix": "vr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2015-03-01T00:00:00", "pubType": "proceedings", "pages": "321-322", "year": "2015", "issn": null, "isbn": "978-1-4799-1727-3", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "07223424", "articleId": "12OmNyU63uL", "__typename": "AdjacentArticleType" }, "next": { "fno": "07223426", "articleId": "12OmNqBtiCC", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/iros/1995/7108/3/71083146", "title": "A constraint-based god-object method for haptic 
display", "doi": null, "abstractUrl": "/proceedings-article/iros/1995/71083146/12OmNBDQblW", "parentPublication": { "id": "proceedings/iros/1995/7108/3", "title": "Intelligent Robots and Systems, IEEE/RSJ International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2017/6067/0/08019519", "title": "ForceTab: Visuo-haptic interaction with a force-sensitive actuated tablet", "doi": null, "abstractUrl": "/proceedings-article/icme/2017/08019519/12OmNvA1hjt", "parentPublication": { "id": "proceedings/icme/2017/6067/0", "title": "2017 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2000/0478/0/04780233", "title": "Visuo-Haptic Display Using Head-Mounted Projector", "doi": null, "abstractUrl": "/proceedings-article/vr/2000/04780233/12OmNwHz00K", "parentPublication": { "id": "proceedings/vr/2000/0478/0", "title": "Virtual Reality Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/whc/2007/2738/0/27380409", "title": "A fingertip haptic display for improving local perception of shape cues", "doi": null, "abstractUrl": "/proceedings-article/whc/2007/27380409/12OmNwcl7KO", "parentPublication": { "id": "proceedings/whc/2007/2738/0", "title": "2007 2nd Joint EuroHaptics Conference and Symposium on Haptic Interfaces for Virtual Environments and Teleoperator Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2017/6327/0/6327a160", "title": "[POSTER] Further Experiments and Considerations on Weight Perception Caused by Visual Diminishing of Real Objects", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2017/6327a160/12OmNx76TWW", "parentPublication": { "id": "proceedings/ismar-adjunct/2017/6327/0", "title": "2017 IEEE International Symposium 
on Mixed and Augmented Reality (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2015/7660/0/7660a190", "title": "[POSTER] Manipulating Haptic Shape Perception by Visual Surface Deformation and Finger Displacement in Spatial Augmented Reality", "doi": null, "abstractUrl": "/proceedings-article/ismar/2015/7660a190/12OmNznkK1w", "parentPublication": { "id": "proceedings/ismar/2015/7660/0", "title": "2015 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2013/01/ttg2013010159", "title": "Visuo-Haptic Mixed Reality with Unobstructed Tool-Hand Integration", "doi": null, "abstractUrl": "/journal/tg/2013/01/ttg2013010159/13rRUyeTVi1", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/5555/01/09999325", "title": "Presenting Morphing Shape Illusion: Enhanced Sense of Morphing Virtual Object with Weight Shifting VR Controller by Computational Perception Model", "doi": null, "abstractUrl": "/magazine/cg/5555/01/09999325/1JqD9O3nz68", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2022/5325/0/532500a422", "title": "An Object Synthesis Method to Enhance Visuo-Haptic Consistency", "doi": null, "abstractUrl": "/proceedings-article/ismar/2022/532500a422/1JrRgd8SQ6c", "parentPublication": { "id": "proceedings/ismar/2022/5325/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/12/09117062", "title": "Augmenting Perceived Softness of Haptic Proxy Objects 
Through Transient Vibration and Visuo-Haptic Illusion in Virtual Reality", "doi": null, "abstractUrl": "/journal/tg/2021/12/09117062/1kGg69DDrFe", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNynsbCr", "title": "Haptic Interfaces for Virtual Environment and Teleoperator Systems, International Symposium on", "acronym": "haptic", "groupId": "1000312", "volume": "0", "displayVolume": "0", "year": "2006", "__typename": "ProceedingType" }, "article": { "id": "12OmNrGKev3", "doi": "10.1109/HAPTIC.2006.1627102", "title": "Portable Haptic Display for Large Immersive Virtual Environments", "normalizedTitle": "Portable Haptic Display for Large Immersive Virtual Environments", "abstract": "This paper introduces Portable Haptic Display (PHD), a novel platform-independent haptic rendering system that can be conveniently integrated into a large immersive virtual environment while maximizing the individual reusability of haptic interfaces and visual displays. The PHD has an architecture of distributed rendering where two computers (or clusters of computers) are in charge of all computations needed for rendering with corresponding displays and share necessary information via network. We report the architecture, implementation details, and performance evaluation results of the PHD in this paper. The PHD is especially useful in places that have multiple haptic interfaces and large immersive visual displays and that need to use them together as well as individually, such as research laboratories, companies, and hospitals.", "abstracts": [ { "abstractType": "Regular", "content": "This paper introduces Portable Haptic Display (PHD), a novel platform-independent haptic rendering system that can be conveniently integrated into a large immersive virtual environment while maximizing the individual reusability of haptic interfaces and visual displays. The PHD has an architecture of distributed rendering where two computers (or clusters of computers) are in charge of all computations needed for rendering with corresponding displays and share necessary information via network. 
We report the architecture, implementation details, and performance evaluation results of the PHD in this paper. The PHD is especially useful in places that have multiple haptic interfaces and large immersive visual displays and that need to use them together as well as individually, such as research laboratories, companies, and hospitals.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "This paper introduces Portable Haptic Display (PHD), a novel platform-independent haptic rendering system that can be conveniently integrated into a large immersive virtual environment while maximizing the individual reusability of haptic interfaces and visual displays. The PHD has an architecture of distributed rendering where two computers (or clusters of computers) are in charge of all computations needed for rendering with corresponding displays and share necessary information via network. We report the architecture, implementation details, and performance evaluation results of the PHD in this paper. The PHD is especially useful in places that have multiple haptic interfaces and large immersive visual displays and that need to use them together as well as individually, such as research laboratories, companies, and hospitals.", "fno": "01627102", "keywords": [ "Haptics", "Large Immersive Virtual Environment", "Portable Haptic Display", "Distributed Rendering", "Haptic Interfaces", "Computer Displays", "Rendering Computer Graphics", "Virtual Environment", "Distributed Computing", "Virtual Reality", "Computer Architecture", "Computer Networks", "Imaging Phantoms", "Computer Science", "Haptics", "Large Immersive Virtual Environment", "Portable Haptic Display", "Distributed Rendering" ], "authors": [ { "affiliation": "Envision Center for Data Perceptualization Purdue University, USA, e-mail: edorjgo@purdue.edu", "fullName": "E. 
Dorjgotov", "givenName": "E.", "surname": "Dorjgotov", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Seungmoon Choi", "givenName": null, "surname": "Seungmoon Choi", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "S.R. Dunlop", "givenName": "S.R.", "surname": "Dunlop", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "G.R. Bertoline", "givenName": "G.R.", "surname": "Bertoline", "__typename": "ArticleAuthorType" } ], "idPrefix": "haptic", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2006-01-01T00:00:00", "pubType": "proceedings", "pages": "321-327", "year": "2006", "issn": "2324-7347", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "01627101", "articleId": "12OmNAle6r2", "__typename": "AdjacentArticleType" }, "next": { "fno": "01627103", "articleId": "12OmNyOq4Ul", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/haptics/2006/0226/0/02260048", "title": "Portable Haptic Display for Large Immersive Virtual Environments", "doi": null, "abstractUrl": "/proceedings-article/haptics/2006/02260048/12OmNA0dMUo", "parentPublication": { "id": "proceedings/haptics/2006/0226/0", "title": "2006 14th Symposium on Haptic Interfaces for Virtual Environment and Teleoperator Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrais/1993/1363/0/00380793", "title": "Presence in immersive virtual environments", "doi": null, "abstractUrl": "/proceedings-article/vrais/1993/00380793/12OmNAPSMme", "parentPublication": { "id": "proceedings/vrais/1993/1363/0", "title": "Virtual Reality Annual International Symposium", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/cw/2007/3005/0/04390938", "title": "MHaptic : a Haptic Manipulation Library for Generic Virtual Environments", "doi": null, "abstractUrl": "/proceedings-article/cw/2007/04390938/12OmNASraZe", "parentPublication": { "id": "proceedings/cw/2007/3005/0", "title": "2007 International Conference on Cyberworlds (CW'07)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cw/2012/4814/0/4814a157", "title": "Stable Dynamic Algorithm Based on Virtual Coupling for 6-DOF Haptic Rendering", "doi": null, "abstractUrl": "/proceedings-article/cw/2012/4814a157/12OmNqJ8tk6", "parentPublication": { "id": "proceedings/cw/2012/4814/0", "title": "2012 International Conference on Cyberworlds", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cw/2004/2140/0/21400361", "title": "Immersive Multi-Projector Display on Hybrid Screens with Human-Scale Haptic and Locomotion Interfaces", "doi": null, "abstractUrl": "/proceedings-article/cw/2004/21400361/12OmNxGALbe", "parentPublication": { "id": "proceedings/cw/2004/2140/0", "title": "2004 International Conference on Cyberworlds", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2009/3943/0/04810990", "title": "Spatialized Haptic Rendering: Providing Impact Position Information in 6DOF Haptic Simulations Using Vibrations", "doi": null, "abstractUrl": "/proceedings-article/vr/2009/04810990/12OmNxw5BnV", "parentPublication": { "id": "proceedings/vr/2009/3943/0", "title": "2009 IEEE Virtual Reality Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/whc/2007/2738/0/04145225", "title": "Fast Rendering for a Multifinger Haptic Display", "doi": null, "abstractUrl": "/proceedings-article/whc/2007/04145225/12OmNzyp5Vq", "parentPublication": { "id": "proceedings/whc/2007/2738/0", "title": "2007 2nd Joint EuroHaptics Conference and 
Symposium on Haptic Interfaces for Virtual Environments and Teleoperator Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2018/3365/0/08446128", "title": "Rendering of Pressure and Textures Using Wearable Haptics in Immersive VR Environments", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08446128/13bd1eSlyt0", "parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/th/2011/04/tth2011040321", "title": "Collocation Accuracy of Visuo-Haptic System: Metrics and Calibration", "doi": null, "abstractUrl": "/journal/th/2011/04/tth2011040321/13rRUxASuhM", "parentPublication": { "id": "trans/th", "title": "IEEE Transactions on Haptics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/th/2011/02/tth2011020088", "title": "Exploration of Tactile Contact in a Haptic Display: Effects of Contact Velocity and Transient Vibrations", "doi": null, "abstractUrl": "/journal/th/2011/02/tth2011020088/13rRUxE04tK", "parentPublication": { "id": "trans/th", "title": "IEEE Transactions on Haptics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNxV4itF", "title": "2017 IEEE Virtual Reality (VR)", "acronym": "vr", "groupId": "1000791", "volume": "0", "displayVolume": "0", "year": "2017", "__typename": "ProceedingType" }, "article": { "id": "12OmNvA1hFe", "doi": "10.1109/VR.2017.7892308", "title": "A haptic three-dimensional shape display with three fingers grasping", "normalizedTitle": "A haptic three-dimensional shape display with three fingers grasping", "abstract": "The main goal of our research is to develop a haptic display that makes it possible to convey shapes, hardness, and textures of objects displayed on 3D TV. Our evolved device has three 5 mm diameter actuating spheres arranged in triangular geometry on each of three fingertips (thumb, index finger, middle finger). In this paper, we describe an overview of a novel haptic device and the first experimental results that twelve subjects had succeeded to recognize the size of cylinders and side geometry of a cuboid and a hexagonal prism.", "abstracts": [ { "abstractType": "Regular", "content": "The main goal of our research is to develop a haptic display that makes it possible to convey shapes, hardness, and textures of objects displayed on 3D TV. Our evolved device has three 5 mm diameter actuating spheres arranged in triangular geometry on each of three fingertips (thumb, index finger, middle finger). In this paper, we describe an overview of a novel haptic device and the first experimental results that twelve subjects had succeeded to recognize the size of cylinders and side geometry of a cuboid and a hexagonal prism.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "The main goal of our research is to develop a haptic display that makes it possible to convey shapes, hardness, and textures of objects displayed on 3D TV. Our evolved device has three 5 mm diameter actuating spheres arranged in triangular geometry on each of three fingertips (thumb, index finger, middle finger). 
In this paper, we describe an overview of a novel haptic device and the first experimental results that twelve subjects had succeeded to recognize the size of cylinders and side geometry of a cuboid and a hexagonal prism.", "fno": "07892308", "keywords": [ "Shape", "Three Dimensional Displays", "Haptic Interfaces", "Thumb", "Grippers", "Virtual Reality", "Haptic Display", "3 D Shape", "Universal Design" ], "authors": [ { "affiliation": "NHK Science and Technology Research Laboratories, Japan", "fullName": "Takuya Handa", "givenName": "Takuya", "surname": "Handa", "__typename": "ArticleAuthorType" }, { "affiliation": "NHK Science and Technology Research Laboratories, Japan", "fullName": "Kenji Murase", "givenName": "Kenji", "surname": "Murase", "__typename": "ArticleAuthorType" }, { "affiliation": "NHK Science and Technology Research Laboratories, Japan", "fullName": "Makiko Azuma", "givenName": "Makiko", "surname": "Azuma", "__typename": "ArticleAuthorType" }, { "affiliation": "NHK Science and Technology Research Laboratories, Japan", "fullName": "Toshihiro Shimizu", "givenName": "Toshihiro", "surname": "Shimizu", "__typename": "ArticleAuthorType" }, { "affiliation": "NHK Science and Technology Research Laboratories, Japan", "fullName": "Satoru Kondo", "givenName": "Satoru", "surname": "Kondo", "__typename": "ArticleAuthorType" }, { "affiliation": "The University of Tokyo, Japan", "fullName": "Hiroyuki Shinoda", "givenName": "Hiroyuki", "surname": "Shinoda", "__typename": "ArticleAuthorType" } ], "idPrefix": "vr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2017-01-01T00:00:00", "pubType": "proceedings", "pages": "325-326", "year": "2017", "issn": "2375-5334", "isbn": "978-1-5090-6647-6", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "07892307", "articleId": "12OmNClQ0yz", "__typename": "AdjacentArticleType" }, "next": { "fno": 
"07892309", "articleId": "12OmNqFrGve", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/whc/2007/2738/0/04145197", "title": "Role of vision on haptic length perception", "doi": null, "abstractUrl": "/proceedings-article/whc/2007/04145197/12OmNClQ0yd", "parentPublication": { "id": "proceedings/whc/2007/2738/0", "title": "2007 2nd Joint EuroHaptics Conference and Symposium on Haptic Interfaces for Virtual Environments and Teleoperator Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/whc/2007/2738/0/04145249", "title": "Piezoelectric Ultrasonic Actuator for a Haptic Display for Catheterisation", "doi": null, "abstractUrl": "/proceedings-article/whc/2007/04145249/12OmNwCaCpe", "parentPublication": { "id": "proceedings/whc/2007/2738/0", "title": "2007 2nd Joint EuroHaptics Conference and Symposium on Haptic Interfaces for Virtual Environments and Teleoperator Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2017/6327/0/6327a223", "title": "Development of a Haptic Device with Tactile and Proprioceptive Feedback for Spatial Design Tasks", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2017/6327a223/12OmNzCF4RA", "parentPublication": { "id": "proceedings/ismar-adjunct/2017/6327/0", "title": "2017 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2018/3365/0/08446403", "title": "Effect of Electrical Stimulation Haptic Feedback on Perceptions of Softness-Hardness and Stickiness While Touching a Virtual Object", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08446403/13bd1eSlytA", "parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces 
(VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/th/2017/04/07892978", "title": "Evaluation of Wearable Haptic Systems for the Fingers in Augmented Reality Applications", "doi": null, "abstractUrl": "/journal/th/2017/04/07892978/13rRUwInv4D", "parentPublication": { "id": "trans/th", "title": "IEEE Transactions on Haptics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/th/2013/01/tth2013010106", "title": "Using Postural Synergies to Animate a Low-Dimensional Hand Avatar in Haptic Simulation", "doi": null, "abstractUrl": "/journal/th/2013/01/tth2013010106/13rRUwInv4z", "parentPublication": { "id": "trans/th", "title": "IEEE Transactions on Haptics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/th/2013/03/tth2013030330", "title": "Mental Rotation of Tactile Stimuli: Using Directional Haptic Cues in Mobile Devices", "doi": null, "abstractUrl": "/journal/th/2013/03/tth2013030330/13rRUwbs210", "parentPublication": { "id": "trans/th", "title": "IEEE Transactions on Haptics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/th/2016/02/07398091", "title": "Multiple Fingers – One Gestalt", "doi": null, "abstractUrl": "/journal/th/2016/02/07398091/13rRUxD9h5k", "parentPublication": { "id": "trans/th", "title": "IEEE Transactions on Haptics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/th/2016/03/07444168", "title": "Comparison of Walking and Traveling-Wave Piezoelectric Motors as Actuators in Kinesthetic Haptic Devices", "doi": null, "abstractUrl": "/journal/th/2016/03/07444168/13rRUxDqS8t", "parentPublication": { "id": "trans/th", "title": "IEEE Transactions on Haptics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/11/09873981", "title": "TapeTouch: A Handheld Shape-changing 
Device for Haptic Display of Soft Objects", "doi": null, "abstractUrl": "/journal/tg/2022/11/09873981/1GjwN2tz3Gg", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNyPQ4vC", "title": "Virtual Reality Conference, IEEE", "acronym": "vr", "groupId": "1000791", "volume": "0", "displayVolume": "0", "year": "2000", "__typename": "ProceedingType" }, "article": { "id": "12OmNwHz00K", "doi": "10.1109/VR.2000.840503", "title": "Visuo-Haptic Display Using Head-Mounted Projector", "normalizedTitle": "Visuo-Haptic Display Using Head-Mounted Projector", "abstract": "This paper proposes a novel visuo-haptic display using a head-mounted projector (HMP) with X'tal Vision optics. Our goal is to develop a device which enables an observer to touch a virtual object just as it is seen.We describe in detail the design of an HMP with X'tal Vision which is very suitable for augmented reality. For instance, the HMP makes the occlusion relationship between the virtual and the real environments nearly correct. Accordingly, the user can observe his/her real hand with the virtual objects. Furthermore, the HMP reduces eye fatigue because of the low inconsistency of accommodation and convergence.Therefore, we applied HMP-model 2 to a visuo-haptic display using a camouflage technique. This technique, called optical camouflage, makes an obstacle object such as a haptic display become translucent.With this method, a user can observe a stereoscopic virtual object with a nearly correct occlusion relationship between the virtual and the real environments and can actually feel the object.", "abstracts": [ { "abstractType": "Regular", "content": "This paper proposes a novel visuo-haptic display using a head-mounted projector (HMP) with X'tal Vision optics. Our goal is to develop a device which enables an observer to touch a virtual object just as it is seen.We describe in detail the design of an HMP with X'tal Vision which is very suitable for augmented reality. For instance, the HMP makes the occlusion relationship between the virtual and the real environments nearly correct. 
Accordingly, the user can observe his/her real hand with the virtual objects. Furthermore, the HMP reduces eye fatigue because of the low inconsistency of accommodation and convergence.Therefore, we applied HMP-model 2 to a visuo-haptic display using a camouflage technique. This technique, called optical camouflage, makes an obstacle object such as a haptic display become translucent.With this method, a user can observe a stereoscopic virtual object with a nearly correct occlusion relationship between the virtual and the real environments and can actually feel the object.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "This paper proposes a novel visuo-haptic display using a head-mounted projector (HMP) with X'tal Vision optics. Our goal is to develop a device which enables an observer to touch a virtual object just as it is seen.We describe in detail the design of an HMP with X'tal Vision which is very suitable for augmented reality. For instance, the HMP makes the occlusion relationship between the virtual and the real environments nearly correct. Accordingly, the user can observe his/her real hand with the virtual objects. Furthermore, the HMP reduces eye fatigue because of the low inconsistency of accommodation and convergence.Therefore, we applied HMP-model 2 to a visuo-haptic display using a camouflage technique. 
This technique, called optical camouflage, makes an obstacle object such as a haptic display become translucent.With this method, a user can observe a stereoscopic virtual object with a nearly correct occlusion relationship between the virtual and the real environments and can actually feel the object.", "fno": "04780233", "keywords": [ "Visuo Haptic Display", "Head Mounted Projector", "Augmented Reality" ], "authors": [ { "affiliation": "University of Tokyo", "fullName": "Masahiko Inami", "givenName": "Masahiko", "surname": "Inami", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Tokyo", "fullName": "Naoki Kawakami", "givenName": "Naoki", "surname": "Kawakami", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Tokyo", "fullName": "Dairoku Sekiguchi", "givenName": "Dairoku", "surname": "Sekiguchi", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Tokyo", "fullName": "Yasuyuki Yanagida", "givenName": "Yasuyuki", "surname": "Yanagida", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Tokyo", "fullName": "Taro Maeda", "givenName": "Taro", "surname": "Maeda", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Tokyo", "fullName": "Susumu Tachi", "givenName": "Susumu", "surname": "Tachi", "__typename": "ArticleAuthorType" } ], "idPrefix": "vr", "isOpenAccess": false, "showRecommendedArticles": false, "showBuyMe": true, "hasPdf": true, "pubDate": "2000-03-01T00:00:00", "pubType": "proceedings", "pages": "233", "year": "2000", "issn": "1087-8270", "isbn": "0-7695-0478-7", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "04780225", "articleId": "12OmNvqmUKH", "__typename": "AdjacentArticleType" }, "next": { "fno": "04780241", "articleId": "12OmNzmclE3", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [], "articleVideos": [] }
{ "proceeding": { "id": "12OmNvRU0cM", "title": "2017 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)", "acronym": "ismar-adjunct", "groupId": "1810084", "volume": "0", "displayVolume": "0", "year": "2017", "__typename": "ProceedingType" }, "article": { "id": "12OmNzCF4RA", "doi": "10.1109/ISMAR-Adjunct.2017.74", "title": "Development of a Haptic Device with Tactile and Proprioceptive Feedback for Spatial Design Tasks", "normalizedTitle": "Development of a Haptic Device with Tactile and Proprioceptive Feedback for Spatial Design Tasks", "abstract": "We present a novel, low-cost haptic feedback device for spatial design tasks that provides proprioceptive and tactile feedback. It uses the Manus VR datagloves and a custom VR CAD environment. Here, tactile feedback is provided to the index finger through a vibrating motor, which helps users in identifying points on a grid. This grid allows for alignment during the creation and manipulation of geometric shapes. Models can be adjusted by pinching at a vertex of the shape with index finger and thumb, and moving this to a different point on the grid. Here, proprioceptive feedback is provided by a solenoid locking mechanism. The system was evaluated through preliminary user testing. Results indicate that the device leads to more natural and intuitive interactions for both the point grid and vertex adjustment, but that the ergonomics needs to be improved. Future challenges involve further integration of the physical device and datagloves and refined, multi-finger feedback.", "abstracts": [ { "abstractType": "Regular", "content": "We present a novel, low-cost haptic feedback device for spatial design tasks that provides proprioceptive and tactile feedback. It uses the Manus VR datagloves and a custom VR CAD environment. Here, tactile feedback is provided to the index finger through a vibrating motor, which helps users in identifying points on a grid. 
This grid allows for alignment during the creation and manipulation of geometric shapes. Models can be adjusted by pinching at a vertex of the shape with index finger and thumb, and moving this to a different point on the grid. Here, proprioceptive feedback is provided by a solenoid locking mechanism. The system was evaluated through preliminary user testing. Results indicate that the device leads to more natural and intuitive interactions for both the point grid and vertex adjustment, but that the ergonomics needs to be improved. Future challenges involve further integration of the physical device and datagloves and refined, multi-finger feedback.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We present a novel, low-cost haptic feedback device for spatial design tasks that provides proprioceptive and tactile feedback. It uses the Manus VR datagloves and a custom VR CAD environment. Here, tactile feedback is provided to the index finger through a vibrating motor, which helps users in identifying points on a grid. This grid allows for alignment during the creation and manipulation of geometric shapes. Models can be adjusted by pinching at a vertex of the shape with index finger and thumb, and moving this to a different point on the grid. Here, proprioceptive feedback is provided by a solenoid locking mechanism. The system was evaluated through preliminary user testing. Results indicate that the device leads to more natural and intuitive interactions for both the point grid and vertex adjustment, but that the ergonomics needs to be improved. 
Future challenges involve further integration of the physical device and datagloves and refined, multi-finger feedback.", "fno": "6327a223", "keywords": [ "CAD", "Force Feedback", "Haptic Interfaces", "Human Computer Interaction", "Virtual Reality", "Spatial Design Tasks", "Tactile Feedback", "Manus VR Datagloves", "Proprioceptive Feedback", "Multifinger Feedback", "Haptic Feedback Device", "VR CAD Environment", "Geometric Shape Manipulation", "Solenoid Locking Mechanism", "Ergonomics", "Thumb", "Shape", "Indexes", "Haptic Interfaces", "Solenoids", "Three Dimensional Displays", "Haptic Feedback", "Virtual Reality", "Dataglove", "Computer Aided Design", "Immersive Design" ], "authors": [ { "affiliation": "Fac. of Ind. Design, Tech. Univ. Delft, Delft, Netherlands", "fullName": "Tim Bakker", "givenName": "Tim", "surname": "Bakker", "__typename": "ArticleAuthorType" }, { "affiliation": "Fac. of Ind. Design, Tech. Univ. Delft, Delft, Netherlands", "fullName": "Jouke Verlinden", "givenName": "Jouke", "surname": "Verlinden", "__typename": "ArticleAuthorType" }, { "affiliation": "Fac. of Mech. Eng., Tech. Univ. 
Delft, Delft, Netherlands", "fullName": "David Abbink", "givenName": "David", "surname": "Abbink", "__typename": "ArticleAuthorType" }, { "affiliation": "Manus VR, Eindhoven, Netherlands", "fullName": "Roel van Deventer", "givenName": "Roel", "surname": "van Deventer", "__typename": "ArticleAuthorType" } ], "idPrefix": "ismar-adjunct", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2017-10-01T00:00:00", "pubType": "proceedings", "pages": "223-228", "year": "2017", "issn": null, "isbn": "978-0-7695-6327-5", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "6327a220", "articleId": "12OmNBghtty", "__typename": "AdjacentArticleType" }, "next": { "fno": "6327a229", "articleId": "12OmNyQphg0", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/vr/2015/1727/0/07223432", "title": "Various forms of tactile feedback displayed on the back of the tablet: Latency minimized by using audio signal to control actuators", "doi": null, "abstractUrl": "/proceedings-article/vr/2015/07223432/12OmNB9bvqr", "parentPublication": { "id": "proceedings/vr/2015/1727/0", "title": "2015 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2004/8415/0/84150027", "title": "Increasing the Effective Egocentric Field of View with Proprioceptive and Tactile Feedback", "doi": null, "abstractUrl": "/proceedings-article/vr/2004/84150027/12OmNCd2rt3", "parentPublication": { "id": "proceedings/vr/2004/8415/0", "title": "Virtual Reality Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/whc/2007/2738/0/27380342", "title": "Tilt Perception by Constant Tactile and Constant Proprioceptive Feedback through a Human System Interface", "doi": null, "abstractUrl": 
"/proceedings-article/whc/2007/27380342/12OmNro0IcJ", "parentPublication": { "id": "proceedings/whc/2007/2738/0", "title": "2007 2nd Joint EuroHaptics Conference and Symposium on Haptic Interfaces for Virtual Environments and Teleoperator Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/haptics/2010/6821/0/05444650", "title": "A finger attachment to generate tactile feedback and make 3D gesture detectable by touch panel sensor", "doi": null, "abstractUrl": "/proceedings-article/haptics/2010/05444650/12OmNxveNHK", "parentPublication": { "id": "proceedings/haptics/2010/6821/0", "title": "2010 IEEE Haptics Symposium (Formerly known as Symposium on Haptic Interfaces for Virtual Environment and Teleoperator Systems)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/haptics/2010/6821/0/05444646", "title": "Simplified design of haptic display by extending one-point kinesthetic feedback to multipoint tactile feedback", "doi": null, "abstractUrl": "/proceedings-article/haptics/2010/05444646/12OmNyvGylZ", "parentPublication": { "id": "proceedings/haptics/2010/6821/0", "title": "2010 IEEE Haptics Symposium (Formerly known as Symposium on Haptic Interfaces for Virtual Environment and Teleoperator Systems)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/isvri/2011/0054/0/05759662", "title": "Pseudo-haptic feedback augmented with visual and tactile vibrations", "doi": null, "abstractUrl": "/proceedings-article/isvri/2011/05759662/12OmNzvz6OE", "parentPublication": { "id": "proceedings/isvri/2011/0054/0", "title": "2011 IEEE International Symposium on VR Innovation (ISVRI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/th/2016/01/07234937", "title": "Rich Pinch: Perception of Object Movement with Tactile Illusion", "doi": null, "abstractUrl": 
"/journal/th/2016/01/07234937/13rRUEgarnR", "parentPublication": { "id": "trans/th", "title": "IEEE Transactions on Haptics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/th/2011/04/tth2011040253", "title": "Improved Tactile Shear Feedback: Tactor Design and an Aperture-Based Restraint", "doi": null, "abstractUrl": "/journal/th/2011/04/tth2011040253/13rRUwfZC0p", "parentPublication": { "id": "trans/th", "title": "IEEE Transactions on Haptics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/th/2015/04/07124519", "title": "Displaying Sensed Tactile Cues with a Fingertip Haptic Device", "doi": null, "abstractUrl": "/journal/th/2015/04/07124519/13rRUxC0SWk", "parentPublication": { "id": "trans/th", "title": "IEEE Transactions on Haptics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/th/2016/01/07270331", "title": "Electrotactile Augmentation for Carving Guidance", "doi": null, "abstractUrl": "/journal/th/2016/01/07270331/13rRUy3gn7H", "parentPublication": { "id": "trans/th", "title": "IEEE Transactions on Haptics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1pystLSz19C", "title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "acronym": "ismar", "groupId": "1000465", "volume": "0", "displayVolume": "0", "year": "2020", "__typename": "ProceedingType" }, "article": { "id": "1pysvf0EzfO", "doi": "10.1109/ISMAR50242.2020.00047", "title": "Haptic Handshank &#x2013; A Handheld Multimodal Haptic Feedback Controller for Virtual Reality", "normalizedTitle": "Haptic Handshank – A Handheld Multimodal Haptic Feedback Controller for Virtual Reality", "abstract": "Compared to wearable devices, handheld haptic devices are promising for large scale virtual reality applications because of their portability and capability of supporting large workspace haptic interaction. However, it remains a challenge to render multimodal haptic stimuli in handheld devices due to space confinement. In this paper, we present a modular approach to build a Multimodal Handheld Haptic Controller called &#x201C;Haptic Handshank&#x201D; that includes a thumb feedback component, a palm feedback component, and a motion tracking component. In the thumb feedback component, a compact pneumatically-driven silicone airbag is utilized to simulate softness, and a flexible membrane based on the electro-vibration principle which covers the top portion of the airbag for rendering virtual textures. In the palm feedback component, vibrational motors and Peltier devices are embedded into the device's body for rendering vibrotactile flow and distributing thermal stimuli. In the motion tracking component, an HTC-Vive tracker is mounted on the bottom of the controller's handle to enable 6-DOF palm motion tracking. 
The performance of the handheld device is evaluated through quantitative experimental studies, which validate the ability of the device to simulate multimodal haptic sensations in accordance with diverse hand manipulation gestures such as enclosure, static contact, rubbing, squeezing and shaking of a cup of cold drink in 3D virtual space.", "abstracts": [ { "abstractType": "Regular", "content": "Compared to wearable devices, handheld haptic devices are promising for large scale virtual reality applications because of their portability and capability of supporting large workspace haptic interaction. However, it remains a challenge to render multimodal haptic stimuli in handheld devices due to space confinement. In this paper, we present a modular approach to build a Multimodal Handheld Haptic Controller called &#x201C;Haptic Handshank&#x201D; that includes a thumb feedback component, a palm feedback component, and a motion tracking component. In the thumb feedback component, a compact pneumatically-driven silicone airbag is utilized to simulate softness, and a flexible membrane based on the electro-vibration principle which covers the top portion of the airbag for rendering virtual textures. In the palm feedback component, vibrational motors and Peltier devices are embedded into the device's body for rendering vibrotactile flow and distributing thermal stimuli. In the motion tracking component, an HTC-Vive tracker is mounted on the bottom of the controller's handle to enable 6-DOF palm motion tracking. 
The performance of the handheld device is evaluated through quantitative experimental studies, which validate the ability of the device to simulate multimodal haptic sensations in accordance with diverse hand manipulation gestures such as enclosure, static contact, rubbing, squeezing and shaking of a cup of cold drink in 3D virtual space.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Compared to wearable devices, handheld haptic devices are promising for large scale virtual reality applications because of their portability and capability of supporting large workspace haptic interaction. However, it remains a challenge to render multimodal haptic stimuli in handheld devices due to space confinement. In this paper, we present a modular approach to build a Multimodal Handheld Haptic Controller called “Haptic Handshank” that includes a thumb feedback component, a palm feedback component, and a motion tracking component. In the thumb feedback component, a compact pneumatically-driven silicone airbag is utilized to simulate softness, and a flexible membrane based on the electro-vibration principle which covers the top portion of the airbag for rendering virtual textures. In the palm feedback component, vibrational motors and Peltier devices are embedded into the device's body for rendering vibrotactile flow and distributing thermal stimuli. In the motion tracking component, an HTC-Vive tracker is mounted on the bottom of the controller's handle to enable 6-DOF palm motion tracking. 
The performance of the handheld device is evaluated through quantitative experimental studies, which validate the ability of the device to simulate multimodal haptic sensations in accordance with diverse hand manipulation gestures such as enclosure, static contact, rubbing, squeezing and shaking of a cup of cold drink in 3D virtual space.", "fno": "850800a239", "keywords": [ "Haptic Interfaces", "Image Motion Analysis", "Rendering Computer Graphics", "Vibrations", "Virtual Reality", "HTC Vive Tracker", "Haptic Handshank", "Handheld Multimodal Haptic Feedback Controller", "6 DOF Palm Motion Tracking", "3 D Virtual Space", "Multimodal Haptic Sensations", "Distributing Thermal Stimuli", "Rendering Vibrotactile Flow", "Peltier Devices", "Virtual Textures", "Electro Vibration Principle", "Motion Tracking Component", "Palm Feedback Component", "Thumb Feedback Component", "Multimodal Haptic Stimuli", "Workspace Haptic Interaction", "Wearable Devices", "Tracking", "Handheld Computers", "Wearable Computers", "Thumb", "Aerospace Electronics", "Rendering Computer Graphics", "Haptic Interfaces", "Multimodal", "Handheld Device", "Haptic Feedback", "Controller", "Virtual Reality" ], "authors": [ { "affiliation": "Beihang University,State Key Laboratory of Virtual Reality Technology and Systems,Beijing,China,100191", "fullName": "K M Arafat Aziz", "givenName": "K M Arafat", "surname": "Aziz", "__typename": "ArticleAuthorType" }, { "affiliation": "Beihang University,State Key Laboratory of Virtual Reality Technology and Systems,Beijing,China,100191", "fullName": "Hu Luo", "givenName": "Hu", "surname": "Luo", "__typename": "ArticleAuthorType" }, { "affiliation": "Beihang University,State Key Laboratory of Virtual Reality Technology and Systems,Beijing,China,100191", "fullName": "Lehiany Asma", "givenName": "Lehiany", "surname": "Asma", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Auckland,Department of Mechanical Engineering,Auckland,New Zealand,1142", 
"fullName": "Weiliang Xu", "givenName": "Weiliang", "surname": "Xu", "__typename": "ArticleAuthorType" }, { "affiliation": "Beihang University,State Key Laboratory of Virtual Reality Technology and Systems,Beijing,China,100191", "fullName": "Yuru Zhang", "givenName": "Yuru", "surname": "Zhang", "__typename": "ArticleAuthorType" }, { "affiliation": "Beihang University,State Key Laboratory of Virtual Reality Technology and Systems,Beijing,China,100191", "fullName": "Dangxiao Wang", "givenName": "Dangxiao", "surname": "Wang", "__typename": "ArticleAuthorType" } ], "idPrefix": "ismar", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2020-11-01T00:00:00", "pubType": "proceedings", "pages": "239-250", "year": "2020", "issn": "1554-7868", "isbn": "978-1-7281-8508-8", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "850800a227", "articleId": "1pyswn4avD2", "__typename": "AdjacentArticleType" }, "next": { "fno": "850800a251", "articleId": "1pysu61NEBy", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/acii/2013/5048/0/5048a786", "title": "Reach Out and Touch Somebody's Virtual Hand: Affectively Connected through Mediated Touch", "doi": null, "abstractUrl": "/proceedings-article/acii/2013/5048a786/12OmNAq3hLn", "parentPublication": { "id": "proceedings/acii/2013/5048/0", "title": "2013 Humaine Association Conference on Affective Computing and Intelligent Interaction (ACII)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/case/2012/0430/0/06386463", "title": "Design of a new miniature haptic button based on magneto-rheological fluids", "doi": null, "abstractUrl": "/proceedings-article/case/2012/06386463/12OmNrYlmLy", "parentPublication": { "id": "proceedings/case/2012/0430/0", "title": "2012 IEEE International 
Conference on Automation Science and Engineering (CASE 2012)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/haptics/2002/1489/0/14890311", "title": "Handheld Haptics: A USB Media Controller with Force Sensing", "doi": null, "abstractUrl": "/proceedings-article/haptics/2002/14890311/12OmNwE9OBi", "parentPublication": { "id": "proceedings/haptics/2002/1489/0", "title": "Haptic Interfaces for Virtual Environment and Teleoperator Systems, International Symposium on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/criwg/2000/0828/0/08280052", "title": "Supporting Handheld Collaboration through COMAL", "doi": null, "abstractUrl": "/proceedings-article/criwg/2000/08280052/12OmNyQ7G8s", "parentPublication": { "id": "proceedings/criwg/2000/0828/0", "title": "Groupware, International Workshop on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2013/2869/0/06671793", "title": "Passive Deformable Haptic glove to support 3D interactions in mobile augmented reality environments", "doi": null, "abstractUrl": "/proceedings-article/ismar/2013/06671793/12OmNz5JBPu", "parentPublication": { "id": "proceedings/ismar/2013/2869/0", "title": "2013 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/th/2013/03/tth2013030330", "title": "Mental Rotation of Tactile Stimuli: Using Directional Haptic Cues in Mobile Devices", "doi": null, "abstractUrl": "/journal/th/2013/03/tth2013030330/13rRUwbs210", "parentPublication": { "id": "trans/th", "title": "IEEE Transactions on Haptics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/11/09873981", "title": "TapeTouch: A Handheld Shape-changing Device for Haptic Display of Soft Objects", "doi": null, "abstractUrl": 
"/journal/tg/2022/11/09873981/1GjwN2tz3Gg", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/10041940", "title": "PalmEx: Adding Palmar Force-Feedback for 3D Manipulation with Haptic Exoskeleton Gloves", "doi": null, "abstractUrl": "/journal/tg/5555/01/10041940/1KEtpYenAVW", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2023/05/10049705", "title": "Dynamic Redirection for VR Haptics with a Handheld Stick", "doi": null, "abstractUrl": "/journal/tg/2023/05/10049705/1KYovqncdKo", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icvrv/2019/4752/0/09212865", "title": "Identification of Vibrotactile Flow Patterns on a Handheld Haptic Device", "doi": null, "abstractUrl": "/proceedings-article/icvrv/2019/09212865/1nHRQWVTfMc", "parentPublication": { "id": "proceedings/icvrv/2019/4752/0", "title": "2019 International Conference on Virtual Reality and Visualization (ICVRV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNxdm4Is", "title": "Eighth International Symposium on Wearable Computers", "acronym": "iswc", "groupId": "1000810", "volume": "0", "displayVolume": "1", "year": "2004", "__typename": "ProceedingType" }, "article": { "id": "12OmNsd6vrl", "doi": "10.1109/ISWC.2004.1", "title": "A Comparative Investigation into Two Pointing Systems for Use with Wearable Computers While Mobile", "normalizedTitle": "A Comparative Investigation into Two Pointing Systems for Use with Wearable Computers While Mobile", "abstract": "Target selection is a task carried out by many wearable computer users. Conventional desktop pointing devices such as mice are not appropriate for the wearable user as they are designed for use within the conventional desktop paradigm. Although many pointing systems have been devised for use with wearable computers, little empirical research has been carried out. This research investigates two different target selection systems: a touch screen stylus and an off-table mouse. This research takes a novel approach and evaluates users while moving and stationary. Twenty participants wore a wearable computer and selected targets while stationary and while mobile, input times and the participants' task load were recorded.", "abstracts": [ { "abstractType": "Regular", "content": "Target selection is a task carried out by many wearable computer users. Conventional desktop pointing devices such as mice are not appropriate for the wearable user as they are designed for use within the conventional desktop paradigm. Although many pointing systems have been devised for use with wearable computers, little empirical research has been carried out. This research investigates two different target selection systems: a touch screen stylus and an off-table mouse. This research takes a novel approach and evaluates users while moving and stationary. 
Twenty participants wore a wearable computer and selected targets while stationary and while mobile, input times and the participants' task load were recorded.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Target selection is a task carried out by many wearable computer users. Conventional desktop pointing devices such as mice are not appropriate for the wearable user as they are designed for use within the conventional desktop paradigm. Although many pointing systems have been devised for use with wearable computers, little empirical research has been carried out. This research investigates two different target selection systems: a touch screen stylus and an off-table mouse. This research takes a novel approach and evaluates users while moving and stationary. Twenty participants wore a wearable computer and selected targets while stationary and while mobile, input times and the participants' task load were recorded.", "fno": "21860110", "keywords": [ "Wearable Computers", "Human Computer Interaction", "Input Devices", "Evaluation", "Interfaces" ], "authors": [ { "affiliation": "Loughborough University, UK", "fullName": "Alan Chamberlain", "givenName": "Alan", "surname": "Chamberlain", "__typename": "ArticleAuthorType" }, { "affiliation": "Loughborough University, UK", "fullName": "Roy Kalawsky", "givenName": "Roy", "surname": "Kalawsky", "__typename": "ArticleAuthorType" } ], "idPrefix": "iswc", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2004-10-01T00:00:00", "pubType": "proceedings", "pages": "110-117", "year": "2004", "issn": "1530-0811", "isbn": "0-7695-2186-X", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "21860102", "articleId": "12OmNqBbHPe", "__typename": "AdjacentArticleType" }, "next": { "fno": "21860120", "articleId": "12OmNBOlleF", "__typename": "AdjacentArticleType" }, "__typename": 
"AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/ismar/2014/6184/0/06948461", "title": "[Poster] Social panoramas using wearable computers", "doi": null, "abstractUrl": "/proceedings-article/ismar/2014/06948461/12OmNB0nWbG", "parentPublication": { "id": "proceedings/ismar/2014/6184/0", "title": "2014 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iswc/2009/3779/0/3779a063", "title": "A Comparison of Menu Configurations and Pointing Devices for Use with Wearable Computers while Mobile and Stationary", "doi": null, "abstractUrl": "/proceedings-article/iswc/2009/3779a063/12OmNB7LvG6", "parentPublication": { "id": "proceedings/iswc/2009/3779/0", "title": "2009 International Symposium on Wearable Computers", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iswc/2006/0597/0/04067722", "title": "Evaluation of Four Wearable Computer Pointing Devices for Drag and Drop Tasks when Stationary and Walking", "doi": null, "abstractUrl": "/proceedings-article/iswc/2006/04067722/12OmNCxtyNw", "parentPublication": { "id": "proceedings/iswc/2006/0597/0", "title": "2006 10th IEEE International Symposium on Wearable Computers", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/kse/2011/4567/0/4567a211", "title": "Gazing and Frowning to Computers Can Be Enjoyable", "doi": null, "abstractUrl": "/proceedings-article/kse/2011/4567a211/12OmNsdo6qK", "parentPublication": { "id": "proceedings/kse/2011/4567/0", "title": "Knowledge and Systems Engineering, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/hics/1998/8341/0/83410118", "title": "Hands-Free Input Devices for Wearable Computers", "doi": null, "abstractUrl": "/proceedings-article/hics/1998/83410118/12OmNxuFBpG", 
"parentPublication": { "id": "proceedings/hics/1998/8341/0", "title": "Human Interaction with Complex Systems, Annual Symposium on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iswc/1997/8192/0/81920002", "title": "Evaluation of Three Input Mechanisms for Wearable Computers", "doi": null, "abstractUrl": "/proceedings-article/iswc/1997/81920002/12OmNy3RRFe", "parentPublication": { "id": "proceedings/iswc/1997/8192/0", "title": "Digest of Papers. First International Symposium on Wearable Computers", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iswc/1999/0428/0/04280151", "title": "\"Where Are You Pointing At?\" A Study of Remote Collaboration in a Wearable Videoconference System", "doi": null, "abstractUrl": "/proceedings-article/iswc/1999/04280151/12OmNzdoN7p", "parentPublication": { "id": "proceedings/iswc/1999/0428/0", "title": "Digest of Papers. Third International Symposium on Wearable Computers", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/pc/2003/01/b1071", "title": "Wearable Communities: Augmenting Social Networks with Wearable Computers", "doi": null, "abstractUrl": "/magazine/pc/2003/01/b1071/13rRUx0xQ5f", "parentPublication": { "id": "mags/pc", "title": "IEEE Pervasive Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/pc/2002/04/01158283", "title": "Attention, memory, and wearable interfaces", "doi": null, "abstractUrl": "/magazine/pc/2002/04/01158283/13rRUxBa5kR", "parentPublication": { "id": "mags/pc", "title": "IEEE Pervasive Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vs-games/2019/4540/0/08864589", "title": "The Golden Bullet: A Comparative Study for Target Acquisition, Pointing and Shooting", "doi": null, "abstractUrl": 
"/proceedings-article/vs-games/2019/08864589/1e5ZtZ7mbZK", "parentPublication": { "id": "proceedings/vs-games/2019/4540/0", "title": "2019 11th International Conference on Virtual Worlds and Games for Serious Applications (VS-Games)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNBC8AAD", "title": "2010 IEEE Virtual Reality Conference (VR)", "acronym": "vr", "groupId": "1000791", "volume": "0", "displayVolume": "0", "year": "2010", "__typename": "ProceedingType" }, "article": { "id": "12OmNyRPgrg", "doi": "10.1109/VR.2010.5444753", "title": "Evaluating haptic feedback in virtual environments using ISO 9241 -- 9", "normalizedTitle": "Evaluating haptic feedback in virtual environments using ISO 9241 -- 9", "abstract": "The ISO 9241 Part 9 standard pointing task is used to evaluate passive haptic feedback in target selection in a virtual environment (VE). Participants performed a tapping task using a tracked stylus in a CAVE both with, and without passive haptic feedback provided by a plastic panel co-located with the targets. Pointing throughput (but not speed nor accuracy alone) was significantly higher with haptic feedback than without it, confirming previous results using an alternative experimental paradigm.", "abstracts": [ { "abstractType": "Regular", "content": "The ISO 9241 Part 9 standard pointing task is used to evaluate passive haptic feedback in target selection in a virtual environment (VE). Participants performed a tapping task using a tracked stylus in a CAVE both with, and without passive haptic feedback provided by a plastic panel co-located with the targets. Pointing throughput (but not speed nor accuracy alone) was significantly higher with haptic feedback than without it, confirming previous results using an alternative experimental paradigm.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "The ISO 9241 Part 9 standard pointing task is used to evaluate passive haptic feedback in target selection in a virtual environment (VE). Participants performed a tapping task using a tracked stylus in a CAVE both with, and without passive haptic feedback provided by a plastic panel co-located with the targets. 
Pointing throughput (but not speed nor accuracy alone) was significantly higher with haptic feedback than without it, confirming previous results using an alternative experimental paradigm.", "fno": "05444753", "keywords": [ "Tapping Task", "Virtual Environment", "Tracked Stylus", "CAVE", "Pointing Throughput", "ISO 9241 9", "Passive Haptic Feedback", "Target Selection" ], "authors": [ { "affiliation": null, "fullName": "Robert J Teather", "givenName": "Robert J", "surname": "Teather", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Daniel Natapov", "givenName": "Daniel", "surname": "Natapov", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Michael Jenkin", "givenName": "Michael", "surname": "Jenkin", "__typename": "ArticleAuthorType" } ], "idPrefix": "vr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2010-03-01T00:00:00", "pubType": "proceedings", "pages": "307-308", "year": "2010", "issn": null, "isbn": "978-1-4244-6237-7", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "05444755", "articleId": "12OmNyen1xM", "__typename": "AdjacentArticleType" }, "next": { "fno": "05444750", "articleId": "12OmNx5GU0r", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/cca/2000/6562/0/00897525", "title": "Torque feedback control of dry friction clutches for a dissipative passive haptic interface", "doi": null, "abstractUrl": "/proceedings-article/cca/2000/00897525/12OmNBbaH5N", "parentPublication": { "id": "proceedings/cca/2000/6562/0", "title": "Proceedings of the 2000 IEEE International Conference on Control Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/haptics/2004/2112/0/21120208", "title": "Can Haptic Feedback Improve the Perception of Self-Motion in 
Virtual Reality?", "doi": null, "abstractUrl": "/proceedings-article/haptics/2004/21120208/12OmNBzRNsV", "parentPublication": { "id": "proceedings/haptics/2004/2112/0", "title": "Haptic Interfaces for Virtual Environment and Teleoperator Systems, International Symposium on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2011/0039/0/05759467", "title": "Effects of sensory feedback while interacting with graphical menus in virtual environments", "doi": null, "abstractUrl": "/proceedings-article/vr/2011/05759467/12OmNvoFjQv", "parentPublication": { "id": "proceedings/vr/2011/0039/0", "title": "2011 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2000/0478/0/04780083", "title": "Pseudo-Haptic Feedback: Can Isometric Input Devices Simulate Force Feedback?", "doi": null, "abstractUrl": "/proceedings-article/vr/2000/04780083/12OmNzVoBut", "parentPublication": { "id": "proceedings/vr/2000/0478/0", "title": "Virtual Reality Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/isvri/2011/0054/0/05759662", "title": "Pseudo-haptic feedback augmented with visual and tactile vibrations", "doi": null, "abstractUrl": "/proceedings-article/isvri/2011/05759662/12OmNzvz6OE", "parentPublication": { "id": "proceedings/isvri/2011/0054/0", "title": "2011 IEEE International Symposium on VR Innovation (ISVRI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/th/2016/03/07457685", "title": "Pseudo-Haptic Feedback in Teleoperation", "doi": null, "abstractUrl": "/journal/th/2016/03/07457685/13rRUyYjK5o", "parentPublication": { "id": "trans/th", "title": "IEEE Transactions on Haptics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2022/5325/0/532500a538", "title": "CardsVR: A Two-Person 
VR Experience with Passive Haptic Feedback from a Deck of Playing Cards", "doi": null, "abstractUrl": "/proceedings-article/ismar/2022/532500a538/1JrRaySJ7So", "parentPublication": { "id": "proceedings/ismar/2022/5325/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wevr/2019/4050/0/08809589", "title": "Passive Haptic Menus for Desk-Based and HMD-Projected Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/wevr/2019/08809589/1cI61Rx4b9m", "parentPublication": { "id": "proceedings/wevr/2019/4050/0", "title": "2019 IEEE 5th Workshop on Everyday Virtual Reality (WEVR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08797918", "title": "Virtual Reality Training with Passive Haptic Feedback for CryoEM Sample Preparation", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08797918/1cJ14ZjqmCQ", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2019/4765/0/476500a042", "title": "Smart Haproxy: A Novel Vibrotactile Feedback Prototype Combining Passive and Active Haptic in AR Interaction", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2019/476500a042/1gysov56h20", "parentPublication": { "id": "proceedings/ismar-adjunct/2019/4765/0", "title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "19F1LC52tjO", "title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "acronym": "ismar-adjunct", "groupId": "1810084", "volume": "0", "displayVolume": "0", "year": "2018", "__typename": "ProceedingType" }, "article": { "id": "19F1UrKVyhy", "doi": "10.1109/ISMAR-Adjunct.2018.00061", "title": "Mid-Air Fingertip-Based User Interaction in Mixed Reality", "normalizedTitle": "Mid-Air Fingertip-Based User Interaction in Mixed Reality", "abstract": "With data growing at a huge rate, there arises a need for advanced data visualization techniques. Visualizing these data sets in Mixed Reality(MR) mode provides an immersive experience to the user in the context of the real world applications. Most of the existing works can only be used with inordinately priced devices such as Microsoft HoloLens, Meta Glass that use proprietary hardware for data visualization and user interaction through hand gestures. In this paper, we demonstrate a cost-effective solution for data visualization using frugal devices such as Google Cardboard, VR Box etc. in MR mode. However, these devices still employ only primitive modes of interaction such as the magnetic trigger, conductive lever and have a limited user-input capability. To interact with visualizations and facilitate rich user experience, we propose the use of intuitive pointing fingertip gestural interface in the user's Field of View(FoV). The proposed pointing hand gesture recognition framework is driven by cascade of state-of-the-art deep learning model - Faster RCNN for localizing the hand followed by a proposed regression CNN for fingertip localization. We conducted both objective and subjective evaluation to demonstrate the performance of our proposed method. Objective metrics are fingertip recognition accuracy and computational time. 
The subjective evaluation includes user comfort and effectiveness of fingertip interaction that is proposed.", "abstracts": [ { "abstractType": "Regular", "content": "With data growing at a huge rate, there arises a need for advanced data visualization techniques. Visualizing these data sets in Mixed Reality(MR) mode provides an immersive experience to the user in the context of the real world applications. Most of the existing works can only be used with inordinately priced devices such as Microsoft HoloLens, Meta Glass that use proprietary hardware for data visualization and user interaction through hand gestures. In this paper, we demonstrate a cost-effective solution for data visualization using frugal devices such as Google Cardboard, VR Box etc. in MR mode. However, these devices still employ only primitive modes of interaction such as the magnetic trigger, conductive lever and have a limited user-input capability. To interact with visualizations and facilitate rich user experience, we propose the use of intuitive pointing fingertip gestural interface in the user's Field of View(FoV). The proposed pointing hand gesture recognition framework is driven by cascade of state-of-the-art deep learning model - Faster RCNN for localizing the hand followed by a proposed regression CNN for fingertip localization. We conducted both objective and subjective evaluation to demonstrate the performance of our proposed method. Objective metrics are fingertip recognition accuracy and computational time. The subjective evaluation includes user comfort and effectiveness of fingertip interaction that is proposed.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "With data growing at a huge rate, there arises a need for advanced data visualization techniques. Visualizing these data sets in Mixed Reality(MR) mode provides an immersive experience to the user in the context of the real world applications. 
Most of the existing works can only be used with inordinately priced devices such as Microsoft HoloLens, Meta Glass that use proprietary hardware for data visualization and user interaction through hand gestures. In this paper, we demonstrate a cost-effective solution for data visualization using frugal devices such as Google Cardboard, VR Box etc. in MR mode. However, these devices still employ only primitive modes of interaction such as the magnetic trigger, conductive lever and have a limited user-input capability. To interact with visualizations and facilitate rich user experience, we propose the use of intuitive pointing fingertip gestural interface in the user's Field of View(FoV). The proposed pointing hand gesture recognition framework is driven by cascade of state-of-the-art deep learning model - Faster RCNN for localizing the hand followed by a proposed regression CNN for fingertip localization. We conducted both objective and subjective evaluation to demonstrate the performance of our proposed method. Objective metrics are fingertip recognition accuracy and computational time. 
The subjective evaluation includes user comfort and effectiveness of fingertip interaction that is proposed.", "fno": "08699224", "keywords": [ "Augmented Reality", "Convolutional Neural Nets", "Data Visualisation", "Gesture Recognition", "Human Computer Interaction", "Learning Artificial Intelligence", "Recurrent Neural Nets", "Regression Analysis", "Frugal Devices", "MR Mode", "User Input Capability", "Intuitive Pointing Fingertip Gestural Interface", "Pointing Hand Gesture Recognition Framework", "Fingertip Localization", "User Comfort", "Fingertip Interaction", "Mid Air Fingertip Based User Interaction", "Data Sets", "Immersive Experience", "Hand Gestures", "User Experience", "Deep Learning Model", "Data Visualization Techniques", "Mixed Reality Mode", "Faster RCNN", "Field Of View", "Regression CNN", "Data Visualization", "Google", "Gesture Recognition", "Cameras", "Computer Architecture", "Virtual Reality", "User Experience", "Fingertip Gestures", "Deep Learning", "Mixed Reality", "Google Cardboard", "Diectic Interaction", "H 5 1 Information Interfaces And Presentation Multimedia Information Systems X 2014 Artificial Augmented And Virtual Realities", "H 5 2 Information Interfaces And Presentation User Interfaces X 2014 Input Devices And Strategies", "I 2 10 Artificial Intelligence Vision And Scene Understanding X 2014 Video Analysis", "I 4 8 Image Processing And Computer Vision Scene Analysis X 2014 Time Varying Imagery" ], "authors": [ { "affiliation": "IIIT-Delhi, TCS Research-Delhi", "fullName": "Meghal Dani", "givenName": "Meghal", "surname": "Dani", "__typename": "ArticleAuthorType" }, { "affiliation": "IIIT-Delhi, TCS Research-Delhi", "fullName": "Gaurav Garg", "givenName": "Gaurav", "surname": "Garg", "__typename": "ArticleAuthorType" }, { "affiliation": "IIIT-Delhi, TCS Research-Delhi", "fullName": "Ramakrishna Perla", "givenName": "Ramakrishna", "surname": "Perla", "__typename": "ArticleAuthorType" }, { "affiliation": "IIIT-Delhi, TCS 
Research-Delhi", "fullName": "Ramya Hebbalaguppe", "givenName": "Ramya", "surname": "Hebbalaguppe", "__typename": "ArticleAuthorType" } ], "idPrefix": "ismar-adjunct", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2018-10-01T00:00:00", "pubType": "proceedings", "pages": "174-178", "year": "2018", "issn": null, "isbn": "978-1-5386-7592-2", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "08699333", "articleId": "19F1UPr0V1e", "__typename": "AdjacentArticleType" }, "next": { "fno": "08699172", "articleId": "19F1MoBSw5G", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/3dui/2013/6097/0/06550212", "title": "Poster: Markerless fingertip-based 3D interaction for handheld augmented reality in a small workspace", "doi": null, "abstractUrl": "/proceedings-article/3dui/2013/06550212/12OmNBsue2b", "parentPublication": { "id": "proceedings/3dui/2013/6097/0", "title": "2013 IEEE Symposium on 3D User Interfaces (3DUI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2016/8942/0/8942a242", "title": "Categorizing Issues in Mid-air InfoVis Interaction", "doi": null, "abstractUrl": "/proceedings-article/iv/2016/8942a242/12OmNyKrH2A", "parentPublication": { "id": "proceedings/iv/2016/8942/0", "title": "2016 20th International Conference Information Visualisation (IV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2018/7592/0/08699185", "title": "A Fingertip Gestural User Interface Without Depth Data for Mixed Reality Applications", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699185/19F1SPiUHm0", "parentPublication": { "id": "proceedings/ismar-adjunct/2018/7592/0", "title": "2018 IEEE International Symposium on Mixed and 
Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2022/5325/0/532500a178", "title": "NailRing: An Intelligent Ring for Recognizing Micro-gestures in Mixed Reality", "doi": null, "abstractUrl": "/proceedings-article/ismar/2022/532500a178/1JrQTEHcxXy", "parentPublication": { "id": "proceedings/ismar/2022/5325/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2022/5325/0/532500a414", "title": "Evaluating the Object-Centered User Interface in Head-Worn Mixed Reality Environment", "doi": null, "abstractUrl": "/proceedings-article/ismar/2022/532500a414/1JrRiVjEd44", "parentPublication": { "id": "proceedings/ismar/2022/5325/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2022/5325/0/532500a637", "title": "Blending On-Body and Mid-Air Interaction in Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/ismar/2022/532500a637/1JrRmvhGko0", "parentPublication": { "id": "proceedings/ismar/2022/5325/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aivr/2022/5725/0/572500a149", "title": "Active Visualization of Visual Cues on Hand for Better User Interface Design Generalization in Mixed Reality", "doi": null, "abstractUrl": "/proceedings-article/aivr/2022/572500a149/1KmFaKoZWhy", "parentPublication": { "id": "proceedings/aivr/2022/5725/0", "title": "2022 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/vr/2019/1377/0/08798128", "title": "Supporting Visual Annotation Cues in a Live 360 Panorama-based Mixed Reality Remote Collaboration", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08798128/1cJ1aXJnUyI", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2021/1838/0/255600a778", "title": "Evaluating Object Manipulation Interaction Techniques in Mixed Reality: Tangible User Interfaces and Gesture", "doi": null, "abstractUrl": "/proceedings-article/vr/2021/255600a778/1tuBngWRAC4", "parentPublication": { "id": "proceedings/vr/2021/1838/0", "title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2021/1298/0/129800a075", "title": "Immersive Experience Prototyping: Using Mixed Reality to Integrate Real Devices in Virtual Simulated Contexts to Prototype Experiences with Mobile Apps", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2021/129800a075/1yfxIU5uhR6", "parentPublication": { "id": "proceedings/ismar-adjunct/2021/1298/0", "title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1CJcAaH6aYg", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "acronym": "vrw", "groupId": "1836626", "volume": "0", "displayVolume": "0", "year": "2022", "__typename": "ProceedingType" }, "article": { "id": "1CJdE39vY9G", "doi": "10.1109/VRW55335.2022.00188", "title": "A Comparison of Input Devices for Precise Interaction Tasks in VR-based Surgical Planning and Training", "normalizedTitle": "A Comparison of Input Devices for Precise Interaction Tasks in VR-based Surgical Planning and Training", "abstract": "We present a comparison of input devices for common interaction tasks in medical VR training and planning based on two relevant applications. The chosen devices, VR controllers, VR Ink, data gloves, and a real medical instrument, differ in their degree of spe-cialization and their grip. The conducted user study shows that the controllers and VR Ink performed significantly better than the other devices regarding precision. Concerning questionnaire results, no device stands out but most participants preferred the VR Ink for both applications. These results can serve as a guide to identify an appropriate device for future medical VR applications.", "abstracts": [ { "abstractType": "Regular", "content": "We present a comparison of input devices for common interaction tasks in medical VR training and planning based on two relevant applications. The chosen devices, VR controllers, VR Ink, data gloves, and a real medical instrument, differ in their degree of spe-cialization and their grip. The conducted user study shows that the controllers and VR Ink performed significantly better than the other devices regarding precision. Concerning questionnaire results, no device stands out but most participants preferred the VR Ink for both applications. 
These results can serve as a guide to identify an appropriate device for future medical VR applications.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We present a comparison of input devices for common interaction tasks in medical VR training and planning based on two relevant applications. The chosen devices, VR controllers, VR Ink, data gloves, and a real medical instrument, differ in their degree of spe-cialization and their grip. The conducted user study shows that the controllers and VR Ink performed significantly better than the other devices regarding precision. Concerning questionnaire results, no device stands out but most participants preferred the VR Ink for both applications. These results can serve as a guide to identify an appropriate device for future medical VR applications.", "fno": "840200a674", "keywords": [ "Data Gloves", "Medical Computing", "Surgery", "Virtual Reality", "Future Medical VR Applications", "Appropriate Device", "Medical Instrument", "VR Ink", "VR Controllers", "Chosen Devices", "Common Interaction Tasks", "VR Based Surgical Planning", "Precise Interaction Tasks", "Input Devices", "Training", "Performance Evaluation", "Conferences", "Surgery", "Input Devices", "Ink", "Virtual Reality", "Computing Methodologies Computer Graphics Graphics Systems And Interfaces Virtual Reality", "Human Centered Computing Human Computer Interaction HCI Interaction Devices", "Human Centered Computing Human Computer Interaction HCI HCI Design And Evaluation Methods Usability Testing" ], "authors": [ { "affiliation": "Otto-von-Guericke University Magdeburg,Department of Simulation and Graphics,Germany", "fullName": "Mareen Allgaier", "givenName": "Mareen", "surname": "Allgaier", "__typename": "ArticleAuthorType" }, { "affiliation": "Otto-von-Guericke University Magdeburg,Department of Simulation and Graphics,Germany", "fullName": "Vuthea Chheang", "givenName": "Vuthea", "surname": "Chheang", "__typename": "ArticleAuthorType" }, { 
"affiliation": "Otto-von-Guericke University Magdeburg,Department of Simulation and Graphics,Germany", "fullName": "Patrick Saalfeld", "givenName": "Patrick", "surname": "Saalfeld", "__typename": "ArticleAuthorType" }, { "affiliation": "Otto-von-Guericke University Magdeburg,Department of Simulation and Graphics,Germany", "fullName": "Vikram Apilla", "givenName": "Vikram", "surname": "Apilla", "__typename": "ArticleAuthorType" }, { "affiliation": "Visceral and Transplant Surgery, University Medicine of the Johannes Gutenberg-University,Department of General,Mainz,Germany", "fullName": "Tobias Huber", "givenName": "Tobias", "surname": "Huber", "__typename": "ArticleAuthorType" }, { "affiliation": "Visceral and Transplant Surgery, University Medicine of the Johannes Gutenberg-University,Department of General,Mainz,Germany", "fullName": "Florentine Huettl", "givenName": "Florentine", "surname": "Huettl", "__typename": "ArticleAuthorType" }, { "affiliation": "University Hospital,Department of Neurosurgery,Magdeburg,Germany", "fullName": "Belal Neyazi", "givenName": "Belal", "surname": "Neyazi", "__typename": "ArticleAuthorType" }, { "affiliation": "University Hospital,Department of Neurosurgery,Magdeburg,Germany", "fullName": "I. Erol Sandalcioglu", "givenName": "I. 
Erol", "surname": "Sandalcioglu", "__typename": "ArticleAuthorType" }, { "affiliation": "Otto-von-Guericke University Magdeburg,Department of Simulation and Graphics,Germany", "fullName": "Christian Hansen", "givenName": "Christian", "surname": "Hansen", "__typename": "ArticleAuthorType" }, { "affiliation": "Otto-von-Guericke University Magdeburg,Department of Simulation and Graphics,Germany", "fullName": "Bernhard Preim", "givenName": "Bernhard", "surname": "Preim", "__typename": "ArticleAuthorType" }, { "affiliation": "Otto-von-Guericke University Magdeburg,Department of Simulation and Graphics,Germany", "fullName": "Sylvia Saalfeld", "givenName": "Sylvia", "surname": "Saalfeld", "__typename": "ArticleAuthorType" } ], "idPrefix": "vrw", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2022-03-01T00:00:00", "pubType": "proceedings", "pages": "674-675", "year": "2022", "issn": null, "isbn": "978-1-6654-8402-2", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "840200a672", "articleId": "1CJdlUeTTlC", "__typename": "AdjacentArticleType" }, "next": { "fno": "840200a676", "articleId": "1CJfr9wrq1i", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icmu/2017/31/0/08330085", "title": "A Japanese input method using leap motion in virtual reality", "doi": null, "abstractUrl": "/proceedings-article/icmu/2017/08330085/12OmNAXPyk6", "parentPublication": { "id": "proceedings/icmu/2017/31/0", "title": "2017 Tenth International Conference on Mobile Computing and Ubiquitous Network (ICMU)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2018/3365/0/08446469", "title": "Use of Virtual Reality to Teach Teamwork and Patient Safety in Surgical Education", "doi": null, "abstractUrl": 
"/proceedings-article/vr/2018/08446469/13bd1eSlysL", "parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2018/3365/0/08446053", "title": "High-Fidelity Interaction for Virtual and Augmented Reality", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08446053/13bd1tl2omt", "parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2006/02/mcg2006020015", "title": "On 3D Input Devices", "doi": null, "abstractUrl": "/magazine/cg/2006/02/mcg2006020015/13rRUytF43D", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2022/8402/0/840200a968", "title": "Asymmetric interfaces with stylus and gesture for VR sketching", "doi": null, "abstractUrl": "/proceedings-article/vrw/2022/840200a968/1CJdzTRQ9s4", "parentPublication": { "id": "proceedings/vrw/2022/8402/0", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aivr/2022/5725/0/572500a064", "title": "Smart Motion Trails for Animating in VR", "doi": null, "abstractUrl": "/proceedings-article/aivr/2022/572500a064/1KmFbVCEHxm", "parentPublication": { "id": "proceedings/aivr/2022/5725/0", "title": "2022 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2020/6532/0/09090491", "title": "Precision vs. 
Power Grip: A Comparison of Pen Grip Styles for Selection in Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/vrw/2020/09090491/1jIxqBC6XqU", "parentPublication": { "id": "proceedings/vrw/2020/6532/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2021/4057/0/405700a434", "title": "A-Visor and A-Camera: Arduino-based Cardboard Head-Mounted Controllers for VR Games", "doi": null, "abstractUrl": "/proceedings-article/vrw/2021/405700a434/1tnWy6iYjMk", "parentPublication": { "id": "proceedings/vrw/2021/4057/0", "title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2021/4057/0/405700a768", "title": "Collaborative VR for Liver Surgery Planning using Wearable Data Gloves: An Interactive Demonstration", "doi": null, "abstractUrl": "/proceedings-article/vrw/2021/405700a768/1tnXmv1kxNe", "parentPublication": { "id": "proceedings/vrw/2021/4057/0", "title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2021/4057/0/405700a214", "title": "\"Lend Me a Hand\" &#x2013; Extending the Reach of Seated VR Players in Unmodified Games Through Remote Co-Piloting", "doi": null, "abstractUrl": "/proceedings-article/vrw/2021/405700a214/1tnXoU9ycMM", "parentPublication": { "id": "proceedings/vrw/2021/4057/0", "title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1J7W6LmbCw0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "acronym": "ismar-adjunct", "groupId": "9973799", "volume": "0", "displayVolume": "0", "year": "2022", "__typename": "ProceedingType" }, "article": { "id": "1J7WnZV8Mo0", "doi": "10.1109/ISMAR-Adjunct57072.2022.00110", "title": "Multimodal lnteraction with Gaze and Controller Gesture", "normalizedTitle": "Multimodal lnteraction with Gaze and Controller Gesture", "abstract": "Dragging tasks are a common Human Computer Interaction (HCI) activity observed in VR. In conjunction with the growing availability of eye-tracking devices, the team proceeded to investigate the performance of eye tracking-based multimodal interactions for dragging tasks for VR applications. Thirty-one participants were recruited for the study which evaluated various combinations of input interactions involving eye tracking. The evaluation approach took into consideration assessment metrics defined within ISO 9241&#x2013;9 to design and develop an immersive 3D dragging task testbed to facilitate the experiment. An additional categorical task was implemented and administered in tandem with the dragging task to investigate concurrent task performance. Based on experiment data, the VR handheld controller-only condition performed tasks fastest, achieved the highest throughput, accuracy and precision, and was the most preferred modality. Certain eye tracking-based interactions achieved similar speed to the VR handheld controller-only condition and were preferred by participants, albeit having displayed imprecisions in the dragging task. Based on the study findings and the interaction implementation approach, we recommend VR handheld controller as a primary modality for the immersive 3D dragging task.", "abstracts": [ { "abstractType": "Regular", "content": "Dragging tasks are a common Human Computer Interaction (HCI) activity observed in VR. 
In conjunction with the growing availability of eye-tracking devices, the team proceeded to investigate the performance of eye tracking-based multimodal interactions for dragging tasks for VR applications. Thirty-one participants were recruited for the study which evaluated various combinations of input interactions involving eye tracking. The evaluation approach took into consideration assessment metrics defined within ISO 9241&#x2013;9 to design and develop an immersive 3D dragging task testbed to facilitate the experiment. An additional categorical task was implemented and administered in tandem with the dragging task to investigate concurrent task performance. Based on experiment data, the VR handheld controller-only condition performed tasks fastest, achieved the highest throughput, accuracy and precision, and was the most preferred modality. Certain eye tracking-based interactions achieved similar speed to the VR handheld controller-only condition and were preferred by participants, albeit having displayed imprecisions in the dragging task. Based on the study findings and the interaction implementation approach, we recommend VR handheld controller as a primary modality for the immersive 3D dragging task.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Dragging tasks are a common Human Computer Interaction (HCI) activity observed in VR. In conjunction with the growing availability of eye-tracking devices, the team proceeded to investigate the performance of eye tracking-based multimodal interactions for dragging tasks for VR applications. Thirty-one participants were recruited for the study which evaluated various combinations of input interactions involving eye tracking. The evaluation approach took into consideration assessment metrics defined within ISO 9241–9 to design and develop an immersive 3D dragging task testbed to facilitate the experiment. 
An additional categorical task was implemented and administered in tandem with the dragging task to investigate concurrent task performance. Based on experiment data, the VR handheld controller-only condition performed tasks fastest, achieved the highest throughput, accuracy and precision, and was the most preferred modality. Certain eye tracking-based interactions achieved similar speed to the VR handheld controller-only condition and were preferred by participants, albeit having displayed imprecisions in the dragging task. Based on the study findings and the interaction implementation approach, we recommend VR handheld controller as a primary modality for the immersive 3D dragging task.", "fno": "536500a518", "keywords": [ "Gaze Tracking", "Gesture Recognition", "Human Computer Interaction", "Interactive Devices", "Mouse Controllers Computers", "User Interfaces", "Virtual Reality", "Additional Categorical Task", "Common Human Computer Interaction Activity", "Concurrent Task Performance", "Dragging Task", "Eye Tracking Based Interactions", "Eye Tracking Based Multimodal Interactions", "Eye Tracking Devices", "VR Handheld Controller Only", "Performance Evaluation", "Visualization", "Three Dimensional Displays", "ISO Standards", "Gaze Tracking", "Throughput", "Task Analysis", "Dragging", "ISO 9241 X 2013 9", "Eye Tracking", "Gaze Interaction" ], "authors": [ { "affiliation": "Nanyang Technological University", "fullName": "Wen Han Chia", "givenName": "Wen Han", "surname": "Chia", "__typename": "ArticleAuthorType" }, { "affiliation": "Nanyang Technological University", "fullName": "Yiyu Cai", "givenName": "Yiyu", "surname": "Cai", "__typename": "ArticleAuthorType" }, { "affiliation": "Nanyang Technological University", "fullName": "Andrew Ho", "givenName": "Andrew", "surname": "Ho", "__typename": "ArticleAuthorType" } ], "idPrefix": "ismar-adjunct", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": 
"2022-10-01T00:00:00", "pubType": "proceedings", "pages": "518-523", "year": "2022", "issn": null, "isbn": "978-1-6654-5365-3", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "536500a512", "articleId": "1J7Wx5q1ySI", "__typename": "AdjacentArticleType" }, "next": { "fno": "536500a524", "articleId": "1J7WaFB7xNC", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/vr/2018/3365/0/08446215", "title": "Gaze Guidance in Immersive Environments", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08446215/13bd1gJ1v0y", "parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2022/8402/0/840200a310", "title": "My Eyes Hurt: Effects of Jitter in 3D Gaze Tracking", "doi": null, "abstractUrl": "/proceedings-article/vrw/2022/840200a310/1CJdbzCNHUc", "parentPublication": { "id": "proceedings/vrw/2022/8402/0", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2022/5325/0/532500a082", "title": "Real-time Gaze Tracking with Head-eye Coordination for Head-mounted Displays", "doi": null, "abstractUrl": "/proceedings-article/ismar/2022/532500a082/1JrQQ8dsLKM", "parentPublication": { "id": "proceedings/ismar/2022/5325/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2022/5325/0/532500a787", "title": "VRDoc: Gaze-based Interactions for VR Reading Experience", "doi": null, "abstractUrl": "/proceedings-article/ismar/2022/532500a787/1JrRgFp6G2s", 
"parentPublication": { "id": "proceedings/ismar/2022/5325/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2023/05/10049755", "title": "Leveling the Playing Field: A Comparative Reevaluation of Unmodified Eye Tracking as an Input and Interaction Modality for VR", "doi": null, "abstractUrl": "/journal/tg/2023/05/10049755/1KYoozDk3v2", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08797902", "title": "Gaze and Motion-aware Real-Time Dome Projection System", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08797902/1cJ0TDtBm0w", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2020/5608/0/09089578", "title": "Exploring Eye Gaze Visualization Techniques for Identifying Distracted Students in Educational VR", "doi": null, "abstractUrl": "/proceedings-article/vr/2020/09089578/1jIxfimnIaY", "parentPublication": { "id": "proceedings/vr/2020/5608/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2020/6532/0/09090461", "title": "Front Camera Eye Tracking For Mobile VR", "doi": null, "abstractUrl": "/proceedings-article/vrw/2020/09090461/1jIxzvZw4YU", "parentPublication": { "id": "proceedings/vrw/2020/6532/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/vrw/2021/4057/0/405700a707", "title": "[DC] Eye Fixation Forecasting in Task-Oriented Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/vrw/2021/405700a707/1tnWQmeJsZi", "parentPublication": { "id": "proceedings/vrw/2021/4057/0", "title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1e5ZpIoqcVi", "title": "2019 11th International Conference on Virtual Worlds and Games for Serious Applications (VS-Games)", "acronym": "vs-games", "groupId": "1002788", "volume": "0", "displayVolume": "0", "year": "2019", "__typename": "ProceedingType" }, "article": { "id": "1e5ZtZ7mbZK", "doi": "10.1109/VS-Games.2019.8864589", "title": "The Golden Bullet: A Comparative Study for Target Acquisition, Pointing and Shooting", "normalizedTitle": "The Golden Bullet: A Comparative Study for Target Acquisition, Pointing and Shooting", "abstract": "In this study, we evaluate an interaction sequence performed by six modalities consisting of desktop-based (DB) and virtual reality (VR) environments using different input devices. For the given study, we implemented a vertical prototype of a first person shooter (FPS) game scenario, focusing on the genre-defining point-and-shoot mechanic. We introduce measures to evaluate the success of the according interaction sequence (times for target acquisition, pointing, shooting, overall net time, and number of shots) and conduct experiments to record and compare the users' performances. We show that interacting using head-tracking for landscape-rotation is performing similarly to the input of a screen-centered mouse and also yielded shortest times in target acquisition and pointing. Although using head-tracking for target acquisition and pointing was most efficient, subjects rated the modality using head-tracking for target acquisition and a 3DOF Controller for pointing best. Eye-tracking (ET) yields promising results, but calibration issues need to be resolved to enhance reliability and overall user experience.", "abstracts": [ { "abstractType": "Regular", "content": "In this study, we evaluate an interaction sequence performed by six modalities consisting of desktop-based (DB) and virtual reality (VR) environments using different input devices. 
For the given study, we implemented a vertical prototype of a first person shooter (FPS) game scenario, focusing on the genre-defining point-and-shoot mechanic. We introduce measures to evaluate the success of the according interaction sequence (times for target acquisition, pointing, shooting, overall net time, and number of shots) and conduct experiments to record and compare the users' performances. We show that interacting using head-tracking for landscape-rotation is performing similarly to the input of a screen-centered mouse and also yielded shortest times in target acquisition and pointing. Although using head-tracking for target acquisition and pointing was most efficient, subjects rated the modality using head-tracking for target acquisition and a 3DOF Controller for pointing best. Eye-tracking (ET) yields promising results, but calibration issues need to be resolved to enhance reliability and overall user experience.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In this study, we evaluate an interaction sequence performed by six modalities consisting of desktop-based (DB) and virtual reality (VR) environments using different input devices. For the given study, we implemented a vertical prototype of a first person shooter (FPS) game scenario, focusing on the genre-defining point-and-shoot mechanic. We introduce measures to evaluate the success of the according interaction sequence (times for target acquisition, pointing, shooting, overall net time, and number of shots) and conduct experiments to record and compare the users' performances. We show that interacting using head-tracking for landscape-rotation is performing similarly to the input of a screen-centered mouse and also yielded shortest times in target acquisition and pointing. Although using head-tracking for target acquisition and pointing was most efficient, subjects rated the modality using head-tracking for target acquisition and a 3DOF Controller for pointing best. 
Eye-tracking (ET) yields promising results, but calibration issues need to be resolved to enhance reliability and overall user experience.", "fno": "08864589", "keywords": [ "Computer Games", "Gaze Tracking", "Human Computer Interaction", "Interactive Devices", "Mouse Controllers Computers", "User Interfaces", "Virtual Reality", "Target Acquisition", "Target Pointing", "Input Devices", "Interaction Sequence", "Head Tracking", "Target Shooting", "Point And Shoot Mechanic", "First Person Shooter Game Scenario", "Virtual Reality Environment", "Desktop Based Environment", "Eye Tracking", "Screen Centered Mouse", "User Interfaces", "Mice", "Task Analysis", "Keyboards", "Input Devices", "Presses", "Performance Evaluation", "Human Computer Interaction", "Head Mounted Device", "Modality", "Ray Casting", "Eye Tracking", "Virtual Reality", "User Centered Design" ], "authors": [ { "affiliation": "Deggendorf Institute of Technology, University of Applied Sciences, Deggendorf, Germany", "fullName": "Katharina Anna Maria Heydn", "givenName": "Katharina Anna Maria", "surname": "Heydn", "__typename": "ArticleAuthorType" }, { "affiliation": "Deggendorf Institute of Technology, University of Applied Sciences, Deggendorf, Germany", "fullName": "Marc Philipp Dietrich", "givenName": "Marc Philipp", "surname": "Dietrich", "__typename": "ArticleAuthorType" }, { "affiliation": "Deggendorf Institute of Technology, University of Applied Sciences, Deggendorf, Germany", "fullName": "Marcus Barkowsky", "givenName": "Marcus", "surname": "Barkowsky", "__typename": "ArticleAuthorType" }, { "affiliation": "Deggendorf Institute of Technology, University of Applied Sciences, Deggendorf, Germany", "fullName": "Götz Winterfeldt", "givenName": "Götz", "surname": "Winterfeldt", "__typename": "ArticleAuthorType" }, { "affiliation": "Games Engineering, Julius-Maximilians University of Würzburg, University of Würzburg, Würzburg, Germany", "fullName": "Sebastian von Mammen", "givenName": "Sebastian", 
"surname": "von Mammen", "__typename": "ArticleAuthorType" }, { "affiliation": "Robotics and Telematics, University of Würzburg, Julius-Maximilians University of Würzburg, Würzburg, Germany", "fullName": "Andreas Nüchter", "givenName": "Andreas", "surname": "Nüchter", "__typename": "ArticleAuthorType" } ], "idPrefix": "vs-games", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2019-09-01T00:00:00", "pubType": "proceedings", "pages": "1-8", "year": "2019", "issn": null, "isbn": "978-1-7281-4540-2", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "08864538", "articleId": "1e5Zt3fsrSg", "__typename": "AdjacentArticleType" }, "next": { "fno": "08864532", "articleId": "1e5ZqQhIACc", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/vr/2009/3943/0/04811029", "title": "Effects of Latency and Spatial Jitter on 2D and 3D Pointing", "doi": null, "abstractUrl": "/proceedings-article/vr/2009/04811029/12OmNBqv2g2", "parentPublication": { "id": "proceedings/vr/2009/3943/0", "title": "2009 IEEE Virtual Reality Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/gocict/2015/2314/0/2314a041", "title": "Predictive Pointing in Cascading Pull-Down Menus", "doi": null, "abstractUrl": "/proceedings-article/gocict/2015/2314a041/12OmNrEL2zZ", "parentPublication": { "id": "proceedings/gocict/2015/2314/0", "title": "2015 Annual Global Online Conference on Information and Computer Technology (GOCICT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iswc/2004/2186/0/21860110", "title": "A Comparative Investigation into Two Pointing Systems for Use with Wearable Computers While Mobile", "doi": null, "abstractUrl": "/proceedings-article/iswc/2004/21860110/12OmNsd6vrl", 
"parentPublication": { "id": "proceedings/iswc/2004/2186/0", "title": "Eighth International Symposium on Wearable Computers", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/csci/2015/9795/0/9795a849", "title": "Pointing Gesture Recognition Using Robot Head Control", "doi": null, "abstractUrl": "/proceedings-article/csci/2015/9795a849/12OmNz4SOqV", "parentPublication": { "id": "proceedings/csci/2015/9795/0", "title": "2015 International Conference on Computational Science and Computational Intelligence (CSCI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ism/2017/2937/0/2937a268", "title": "A Gaze Tracking Based, Multi Modal Human Computer Interaction Concept for Efficient Input", "doi": null, "abstractUrl": "/proceedings-article/ism/2017/2937a268/12OmNzSyCjo", "parentPublication": { "id": "proceedings/ism/2017/2937/0", "title": "2017 IEEE International Symposium on Multimedia (ISM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/fg/2008/2153/0/04813448", "title": "Real-time 3D pointing gesture recognition in mobile space", "doi": null, "abstractUrl": "/proceedings-article/fg/2008/04813448/12OmNzahc3t", "parentPublication": { "id": "proceedings/fg/2008/2153/0", "title": "2008 8th IEEE International Conference on Automatic Face & Gesture Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dui/2006/0225/0/02250087", "title": "Toward Disambiguating Multiple Selections for Frustum-Based Pointing", "doi": null, "abstractUrl": "/proceedings-article/3dui/2006/02250087/12OmNzayN6A", "parentPublication": { "id": "proceedings/3dui/2006/0225/0", "title": "3D User Interfaces (3DUI'06)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2011/01/ttg2011010003", "title": "JanusVF: Accurate Navigation Using SCAAT 
and Virtual Fiducials", "doi": null, "abstractUrl": "/journal/tg/2011/01/ttg2011010003/13rRUwbs2aY", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icfeict/2022/5476/0/547600a457", "title": "Pointing Accuracy Test Practice of Airborne Active Phased Array Radar", "doi": null, "abstractUrl": "/proceedings-article/icfeict/2022/547600a457/1IFK1vGfOMg", "parentPublication": { "id": "proceedings/icfeict/2022/5476/0", "title": "2022 2nd International Conference on Frontiers of Electronics, Information and Computation Technologies (ICFEICT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icispc/2022/5480/0/548000a127", "title": "Research on Pointing Acquisition Tracking Technology of Shipborne Laser Communication", "doi": null, "abstractUrl": "/proceedings-article/icispc/2022/548000a127/1KExFruNNHq", "parentPublication": { "id": "proceedings/icispc/2022/5480/0", "title": "2022 6th International Conference on Imaging, Signal Processing and Communications (ICISPC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1tnWwqMuCzu", "title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "acronym": "vrw", "groupId": "1836626", "volume": "0", "displayVolume": "0", "year": "2021", "__typename": "ProceedingType" }, "article": { "id": "1tnWFBgcYAo", "doi": "10.1109/VRW52623.2021.00042", "title": "Effects of Different Auditory Feedback Frequencies in Virtual Reality 3D Pointing Tasks", "normalizedTitle": "Effects of Different Auditory Feedback Frequencies in Virtual Reality 3D Pointing Tasks", "abstract": "Auditory error feedback is commonly used in 3D Virtual Reality (VR) pointing experiments to increase participants' awareness of their misses. However, few papers describe the parameters of the auditory feedback, such as the frequency. In this study, we asked 15 participants to perform an ISO 9241-411 pointing task in a distributed remote experiment. In our study, we used three forms of auditory feedback, i.e., C4 (262 Hz), C8 (4186 Hz) and none. According to the results, we observed a speed-accuracy trade-off for the C8 tones compared to C4 ones: subjects were slower, and their throughput performance decreased with the C8 while their error rate decreased. Still, for larger targets there was no speed-accuracy trade-off, and subjects were only slower with C8 tones. Overall, the frequency of the feedback had a significant impact on the user's performance. We thus suggest that practitioners, developers, and designers report the frequency they used in their VR applications.", "abstracts": [ { "abstractType": "Regular", "content": "Auditory error feedback is commonly used in 3D Virtual Reality (VR) pointing experiments to increase participants' awareness of their misses. However, few papers describe the parameters of the auditory feedback, such as the frequency. In this study, we asked 15 participants to perform an ISO 9241-411 pointing task in a distributed remote experiment. 
In our study, we used three forms of auditory feedback, i.e., C4 (262 Hz), C8 (4186 Hz) and none. According to the results, we observed a speed-accuracy trade-off for the C8 tones compared to C4 ones: subjects were slower, and their throughput performance decreased with the C8 while their error rate decreased. Still, for larger targets there was no speed-accuracy trade-off, and subjects were only slower with C8 tones. Overall, the frequency of the feedback had a significant impact on the user's performance. We thus suggest that practitioners, developers, and designers report the frequency they used in their VR applications.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Auditory error feedback is commonly used in 3D Virtual Reality (VR) pointing experiments to increase participants' awareness of their misses. However, few papers describe the parameters of the auditory feedback, such as the frequency. In this study, we asked 15 participants to perform an ISO 9241-411 pointing task in a distributed remote experiment. In our study, we used three forms of auditory feedback, i.e., C4 (262 Hz), C8 (4186 Hz) and none. According to the results, we observed a speed-accuracy trade-off for the C8 tones compared to C4 ones: subjects were slower, and their throughput performance decreased with the C8 while their error rate decreased. Still, for larger targets there was no speed-accuracy trade-off, and subjects were only slower with C8 tones. Overall, the frequency of the feedback had a significant impact on the user's performance. 
We thus suggest that practitioners, developers, and designers report the frequency they used in their VR applications.", "fno": "405700a189", "keywords": [ "Hearing", "Human Computer Interaction", "Virtual Reality", "Auditory Error Feedback", "Participants", "ISO 9241 411 Pointing Task", "Distributed Remote Experiment", "Throughput Performance", "Error Rate", "Speed Accuracy Trade Off", "Virtual Reality 3 D Pointing Tasks", "Auditory Feedback Frequencies", "Three Dimensional Displays", "Error Analysis", "Conferences", "ISO Standards", "Virtual Reality", "User Interfaces", "Throughput", "Human Centered Computing", "Human Computer Interaction HCI", "Virtual Reality", "Pointing" ], "authors": [ { "affiliation": "Simon Fraser University,BC,Canada", "fullName": "Anil Ufuk Batmaz", "givenName": "Anil Ufuk", "surname": "Batmaz", "__typename": "ArticleAuthorType" }, { "affiliation": "Simon Fraser University,BC,Canada", "fullName": "Wolfgang Stuerzlinger", "givenName": "Wolfgang", "surname": "Stuerzlinger", "__typename": "ArticleAuthorType" } ], "idPrefix": "vrw", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2021-03-01T00:00:00", "pubType": "proceedings", "pages": "189-194", "year": "2021", "issn": null, "isbn": "978-1-6654-4057-8", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "405700a183", "articleId": "1tnX9YpX3Nu", "__typename": "AdjacentArticleType" }, "next": { "fno": "405700a195", "articleId": "1tnXJfM0Bzi", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/vr/2015/1727/0/07223341", "title": "Touching sounds: Perception of the curvature of auditory virtual surfaces", "doi": null, "abstractUrl": "/proceedings-article/vr/2015/07223341/12OmNxuo0jm", "parentPublication": { "id": "proceedings/vr/2015/1727/0", "title": "2015 IEEE Virtual Reality (VR)", 
"__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/mu/2015/01/mmu2015010024", "title": "The Effects of Ecological Auditory Feedback on Rhythmic Walking Interaction", "doi": null, "abstractUrl": "/magazine/mu/2015/01/mmu2015010024/13rRUIJcWtD", "parentPublication": { "id": "mags/mu", "title": "IEEE MultiMedia", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/th/2017/01/07801928", "title": "Haptics in Music: The Effects of Vibrotactile Stimulus in Low Frequency Auditory Difference Detection Tasks", "doi": null, "abstractUrl": "/journal/th/2017/01/07801928/13rRUxNEqQ8", "parentPublication": { "id": "trans/th", "title": "IEEE Transactions on Haptics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/th/2017/04/07906610", "title": "Design and Implementation of a Tactile Stimulation Device to Increase Auditory Discrimination", "doi": null, "abstractUrl": "/journal/th/2017/04/07906610/13rRUxYrbUQ", "parentPublication": { "id": "trans/th", "title": "IEEE Transactions on Haptics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bibe/2022/8487/0/848700a051", "title": "Real-time Auditory Feedback System for Bow-tilt Correction while Aiming in Archery", "doi": null, "abstractUrl": "/proceedings-article/bibe/2022/848700a051/1J6hEwB72mc", "parentPublication": { "id": "proceedings/bibe/2022/8487/0", "title": "2022 IEEE 22nd International Conference on Bioinformatics and Bioengineering (BIBE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2022/5325/0/532500a847", "title": "Auditory Feedback to Make Walking in Virtual Reality More Accessible", "doi": null, "abstractUrl": "/proceedings-article/ismar/2022/532500a847/1JrR8Ihk9Tq", "parentPublication": { "id": "proceedings/ismar/2022/5325/0", "title": "2022 IEEE International Symposium on Mixed 
and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2020/5608/0/09089548", "title": "The Impact of Multi-sensory Stimuli on Confidence Levels for Perceptual-cognitive Tasks in VR", "doi": null, "abstractUrl": "/proceedings-article/vr/2020/09089548/1jIxbiGNwCQ", "parentPublication": { "id": "proceedings/vr/2020/5608/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2021/4057/0/405700a577", "title": "Evaluating Presence in VR with Self-Representing Auditory-Vibrotactile Input", "doi": null, "abstractUrl": "/proceedings-article/vrw/2021/405700a577/1tnY3LmpKwg", "parentPublication": { "id": "proceedings/vrw/2021/4057/0", "title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2021/1838/0/255600a085", "title": "The Effect of Pitch in Auditory Error Feedback for Fitts&#x0027; Tasks in Virtual Reality Training Systems", "doi": null, "abstractUrl": "/proceedings-article/vr/2021/255600a085/1tuB3I1bc9G", "parentPublication": { "id": "proceedings/vr/2021/1838/0", "title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2021/1298/0/129800a510", "title": "MusiKeys: Investigating Auditory-Physical Feedback Replacement Technique for Mid-air Typing", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2021/129800a510/1yeQWHyOQes", "parentPublication": { "id": "proceedings/ismar-adjunct/2021/1298/0", "title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], 
"articleVideos": [] }
{ "proceeding": { "id": "1tuAeQeDJja", "title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)", "acronym": "vr", "groupId": "1000791", "volume": "0", "displayVolume": "0", "year": "2021", "__typename": "ProceedingType" }, "article": { "id": "1tuB3I1bc9G", "doi": "10.1109/VR50410.2021.00029", "title": "The Effect of Pitch in Auditory Error Feedback for Fitts&#x0027; Tasks in Virtual Reality Training Systems", "normalizedTitle": "The Effect of Pitch in Auditory Error Feedback for Fitts' Tasks in Virtual Reality Training Systems", "abstract": "Fitts' law and the associated throughput measure characterize user pointing performance in virtual reality (VR) training systems and simulators well. Yet, pointing performance can be affected by the feedback users receive from a VR application. This work examines the effect of the pitch of auditory error feedback on user performance in a Fitts' task through a distributed experiment. In our first study, we used middle- and high-frequency sound feedback and demonstrated that high-pitch error feedback significantly decreases user performance in terms of time and throughput. In the second study, we used adaptive sound feedback, where we increased the frequency with the error rate, while asking subjects to execute the task &#x201C;as fast/as precise/as fast and precise as possible&#x201D;. Results showed that adaptive sound feedback decreases the error rate for &#x201C;as fast as possible&#x201D; task execution without affecting the time. The results can be used to enhance and design various VR systems.", "abstracts": [ { "abstractType": "Regular", "content": "Fitts' law and the associated throughput measure characterize user pointing performance in virtual reality (VR) training systems and simulators well. Yet, pointing performance can be affected by the feedback users receive from a VR application. 
This work examines the effect of the pitch of auditory error feedback on user performance in a Fitts' task through a distributed experiment. In our first study, we used middle- and high-frequency sound feedback and demonstrated that high-pitch error feedback significantly decreases user performance in terms of time and throughput. In the second study, we used adaptive sound feedback, where we increased the frequency with the error rate, while asking subjects to execute the task &#x201C;as fast/as precise/as fast and precise as possible&#x201D;. Results showed that adaptive sound feedback decreases the error rate for &#x201C;as fast as possible&#x201D; task execution without affecting the time. The results can be used to enhance and design various VR systems.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Fitts' law and the associated throughput measure characterize user pointing performance in virtual reality (VR) training systems and simulators well. Yet, pointing performance can be affected by the feedback users receive from a VR application. This work examines the effect of the pitch of auditory error feedback on user performance in a Fitts' task through a distributed experiment. In our first study, we used middle- and high-frequency sound feedback and demonstrated that high-pitch error feedback significantly decreases user performance in terms of time and throughput. In the second study, we used adaptive sound feedback, where we increased the frequency with the error rate, while asking subjects to execute the task “as fast/as precise/as fast and precise as possible”. Results showed that adaptive sound feedback decreases the error rate for “as fast as possible” task execution without affecting the time. 
The results can be used to enhance and design various VR systems.", "fno": "255600a085", "keywords": [ "Computer Based Training", "Feedback", "Virtual Reality", "Fitts Task", "VR Systems", "Task Execution", "Error Rate", "Adaptive Sound Feedback", "High Pitch Error Feedback", "High Frequency Sound Feedback", "User Performance", "Auditory Error Feedback", "VR Application", "Feedback Users", "User Pointing Performance", "Virtual Reality Training Systems", "Training", "Three Dimensional Displays", "Error Analysis", "Virtual Reality", "User Interfaces", "Throughput", "Task Analysis", "Human Centered Computing Human Computer Interaction HCI", "Human Centered Computing Virtual Reality", "Human Centered Computing Pointing" ], "authors": [ { "affiliation": "Simon Fraser University,BC,Canada", "fullName": "Anil Ufuk Batmaz", "givenName": "Anil Ufuk", "surname": "Batmaz", "__typename": "ArticleAuthorType" }, { "affiliation": "Simon Fraser University,BC,Canada", "fullName": "Wolfgang Stuerzlinger", "givenName": "Wolfgang", "surname": "Stuerzlinger", "__typename": "ArticleAuthorType" } ], "idPrefix": "vr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2021-03-01T00:00:00", "pubType": "proceedings", "pages": "85-94", "year": "2021", "issn": null, "isbn": "978-1-6654-1838-6", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [ { "id": "1tuB3ySBQwU", "name": "pvr202118380-09417777s1-mm_255600a085.zip", "size": "19.6 MB", "location": "https://www.computer.org/csdl/api/v1/extra/pvr202118380-09417777s1-mm_255600a085.zip", "__typename": "WebExtraType" } ], "adjacentArticles": { "previous": { "fno": "255600a075", "articleId": "1tuAtVjRLUc", "__typename": "AdjacentArticleType" }, "next": { "fno": "255600a095", "articleId": "1tuAwxIGXQI", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/3dui/2012/1204/0/06184214", "title": 
"Poster: Investigating one-eyed and stereo cursors for 3D pointing tasks", "doi": null, "abstractUrl": "/proceedings-article/3dui/2012/06184214/12OmNC8dggJ", "parentPublication": { "id": "proceedings/3dui/2012/1204/0", "title": "2012 IEEE Symposium on 3D User Interfaces (3DUI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2018/3365/0/08446399", "title": "Three Haptic Shape-Feedback Controllers for Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08446399/13bd1fHrlRF", "parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2018/3365/0/08446317", "title": "Analysis of Proximity-Based Multimodal Feedback for 3D Selection in Immersive Virtual Environments", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08446317/13bd1sx4Zta", "parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2022/8402/0/840200a590", "title": "Augmented Reality Fitts&#x0027; Law Input Comparison Between Touchpad, Pointing Gesture, and Raycast", "doi": null, "abstractUrl": "/proceedings-article/vrw/2022/840200a590/1CJcB3cmKuk", "parentPublication": { "id": "proceedings/vrw/2022/8402/0", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2022/5325/0/532500a847", "title": "Auditory Feedback to Make Walking in Virtual Reality More Accessible", "doi": null, "abstractUrl": "/proceedings-article/ismar/2022/532500a847/1JrR8Ihk9Tq", "parentPublication": { "id": 
"proceedings/ismar/2022/5325/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08798287", "title": "Integrating Tactile Feedback in an Acetabular Reamer for Surgical VR-Training", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08798287/1cJ0U048bMQ", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2020/5608/0/09089548", "title": "The Impact of Multi-sensory Stimuli on Confidence Levels for Perceptual-cognitive Tasks in VR", "doi": null, "abstractUrl": "/proceedings-article/vr/2020/09089548/1jIxbiGNwCQ", "parentPublication": { "id": "proceedings/vr/2020/5608/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iiai-aai/2020/7397/0/739700a456", "title": "Development of Touch Valve UI with pseudo-haptics feedback based on vibration of tablet PC", "doi": null, "abstractUrl": "/proceedings-article/iiai-aai/2020/739700a456/1tGcjlaxzMs", "parentPublication": { "id": "proceedings/iiai-aai/2020/7397/0", "title": "2020 9th International Congress on Advanced Applied Informatics (IIAI-AAI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2021/4057/0/405700a189", "title": "Effects of Different Auditory Feedback Frequencies in Virtual Reality 3D Pointing Tasks", "doi": null, "abstractUrl": "/proceedings-article/vrw/2021/405700a189/1tnWFBgcYAo", "parentPublication": { "id": "proceedings/vrw/2021/4057/0", "title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, 
"__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2021/1838/0/255600a582", "title": "Head Up Visualization of Spatial Sound Sources in Virtual Reality for Deaf and Hard-of-Hearing People", "doi": null, "abstractUrl": "/proceedings-article/vr/2021/255600a582/1tuAPlsZnMc", "parentPublication": { "id": "proceedings/vr/2021/1838/0", "title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNBTawna", "title": "2014 IEEE 17th International Conference on Computational Science and Engineering (CSE)", "acronym": "cse", "groupId": "1002115", "volume": "0", "displayVolume": "0", "year": "2014", "__typename": "ProceedingType" }, "article": { "id": "12OmNx3q6Yv", "doi": "10.1109/CSE.2014.111", "title": "Eye Detection for Gaze Tracker with Near Infrared Illuminator", "normalizedTitle": "Eye Detection for Gaze Tracker with Near Infrared Illuminator", "abstract": "In this paper, an eye detection method is proposed based on the features of eye images generated in the gaze tracking environment. By configuring the corneal specular reflection detector, pupil detector, and eyelid detector in a cascade format, in actual applications reliable eye detection can be achieved even when an image of only one eye is captured because a part of the user moves out of camera range or when changes occur in the brightness and focus because the user moves back and forth. For the corneal specular reflection detector, a block setting method and a thresholding method are proposed to ensure the corneal specular reflection is not missed. Furthermore, a method of suppressing the effects of eyelashes and corneal specular reflection is proposed that allows more accurate determination of the pupil in the pupil detection process. The eyelid detector determines the presence/absence of an eyelid by using a vertical integral projection function. The eyes were detected reliably when the proposed methods were applied to the captured images in an actual gaze tracking system.", "abstracts": [ { "abstractType": "Regular", "content": "In this paper, an eye detection method is proposed based on the features of eye images generated in the gaze tracking environment. 
By configuring the corneal specular reflection detector, pupil detector, and eyelid detector in a cascade format, in actual applications reliable eye detection can be achieved even when an image of only one eye is captured because a part of the user moves out of camera range or when changes occur in the brightness and focus because the user moves back and forth. For the corneal specular reflection detector, a block setting method and a thresholding method are proposed to ensure the corneal specular reflection is not missed. Furthermore, a method of suppressing the effects of eyelashes and corneal specular reflection is proposed that allows more accurate determination of the pupil in the pupil detection process. The eyelid detector determines the presence/absence of an eyelid by using a vertical integral projection function. The eyes were detected reliably when the proposed methods were applied to the captured images in an actual gaze tracking system.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In this paper, an eye detection method is proposed based on the features of eye images generated in the gaze tracking environment. By configuring the corneal specular reflection detector, pupil detector, and eyelid detector in a cascade format, in actual applications reliable eye detection can be achieved even when an image of only one eye is captured because a part of the user moves out of camera range or when changes occur in the brightness and focus because the user moves back and forth. For the corneal specular reflection detector, a block setting method and a thresholding method are proposed to ensure the corneal specular reflection is not missed. Furthermore, a method of suppressing the effects of eyelashes and corneal specular reflection is proposed that allows more accurate determination of the pupil in the pupil detection process. The eyelid detector determines the presence/absence of an eyelid by using a vertical integral projection function. 
The eyes were detected reliably when the proposed methods were applied to the captured images in an actual gaze tracking system.", "fno": "7981a458", "keywords": [ "Eyelids", "Reflection", "Detectors", "Iris", "Eyelashes", "Shape", "Face", "Circularity", "Eye Detection", "Gaze Tracking", "Eyelash" ], "authors": [ { "affiliation": null, "fullName": "Hyun-Cheol Kim", "givenName": "Hyun-Cheol", "surname": "Kim", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Jihun Cha", "givenName": "Jihun", "surname": "Cha", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Won Don Lee", "givenName": "Won Don", "surname": "Lee", "__typename": "ArticleAuthorType" } ], "idPrefix": "cse", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2014-12-01T00:00:00", "pubType": "proceedings", "pages": "458-464", "year": "2014", "issn": null, "isbn": "978-1-4799-7981-3", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "7981a450", "articleId": "12OmNvjgWEe", "__typename": "AdjacentArticleType" }, "next": { "fno": "7981a465", "articleId": "12OmNvA1hwK", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/gcis/2009/3571/2/3571b133", "title": "Key Techniques of Eye Gaze Tracking Based on Pupil Corneal Reflection", "doi": null, "abstractUrl": "/proceedings-article/gcis/2009/3571b133/12OmNA0vo1q", "parentPublication": { "id": "proceedings/gcis/2009/3571/2", "title": "2009 WRI Global Congress on Intelligent Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sitis/2016/5698/0/07907439", "title": "Eye Shape and Corners Detection in Periocular Images Using Particle Filters", "doi": null, "abstractUrl": "/proceedings-article/sitis/2016/07907439/12OmNAObbHf", "parentPublication": { "id": 
"proceedings/sitis/2016/5698/0", "title": "2016 12th International Conference on Signal-Image Technology & Internet-Based Systems (SITIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icisa/2014/4443/0/06847398", "title": "Eye Detection for near Infrared Based Gaze Tracking System", "doi": null, "abstractUrl": "/proceedings-article/icisa/2014/06847398/12OmNrJAdSv", "parentPublication": { "id": "proceedings/icisa/2014/4443/0", "title": "2014 International Conference on Information Science and Applications (ICISA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2008/2174/0/04761343", "title": "3D gaze estimation with a single camera without IR illumination", "doi": null, "abstractUrl": "/proceedings-article/icpr/2008/04761343/12OmNvvLi4R", "parentPublication": { "id": "proceedings/icpr/2008/2174/0", "title": "ICPR 2008 19th International Conference on Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2014/5209/0/5209a351", "title": "A Performance Comparison between Circular and Spline-Based Methods for Iris Segmentation", "doi": null, "abstractUrl": "/proceedings-article/icpr/2014/5209a351/12OmNzRZpTB", "parentPublication": { "id": "proceedings/icpr/2014/5209/0", "title": "2014 22nd International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2017/4822/0/07926709", "title": "Fast and Robust Eyelid Outline and Aperture Detection in Real-World Scenarios", "doi": null, "abstractUrl": "/proceedings-article/wacv/2017/07926709/12OmNzZEAtB", "parentPublication": { "id": "proceedings/wacv/2017/4822/0", "title": "2017 IEEE Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/wacv/2022/0915/0/091500d937", "title": "Event-Based Kilohertz Eye Tracking using Coded Differential Lighting", "doi": null, "abstractUrl": "/proceedings-article/wacv/2022/091500d937/1B13uiL4IUM", "parentPublication": { "id": "proceedings/wacv/2022/0915/0", "title": "2022 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icnlp/2022/9544/0/954400a001", "title": "A Two-stage Algorithm for Automatic Diagnosis of Keratitis", "doi": null, "abstractUrl": "/proceedings-article/icnlp/2022/954400a001/1GNtmSxkybu", "parentPublication": { "id": "proceedings/icnlp/2022/9544/0", "title": "2022 4th International Conference on Natural Language Processing (ICNLP)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/11/09872121", "title": "From real infrared eye-images to synthetic sequences of gaze behavior", "doi": null, "abstractUrl": "/journal/tg/2022/11/09872121/1GhRV18KGvC", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ta/5555/01/10076795", "title": "A High-Quality Landmarked Infrared Eye Video Dataset (IREye4Task): Eye Behaviors, Insights and Benchmarks for Wearable Mental State Analysis", "doi": null, "abstractUrl": "/journal/ta/5555/01/10076795/1LFOIJwV6KI", "parentPublication": { "id": "trans/ta", "title": "IEEE Transactions on Affective Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNCbCrVT", "title": "2014 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "acronym": "cvpr", "groupId": "1000147", "volume": "0", "displayVolume": "0", "year": "2014", "__typename": "ProceedingType" }, "article": { "id": "12OmNyKJipS", "doi": "10.1109/CVPR.2014.229", "title": "Geometric Generative Gaze Estimation (G3E) for Remote RGB-D Cameras", "normalizedTitle": "Geometric Generative Gaze Estimation (G3E) for Remote RGB-D Cameras", "abstract": "We propose a head pose invariant gaze estimation model for distant RGB-D cameras. It relies on a geometric understanding of the 3D gaze action and generation of eye images. By introducing a semantic segmentation of the eye region within a generative process, the model (i) avoids the critical feature tracking of geometrical approaches requiring high resolution images, (ii) decouples the person dependent geometry from the ambient conditions, allowing adaptation to different conditions without retraining. Priors in the generative framework are adequate for training from few samples. In addition, the model is capable of gaze extrapolation allowing for less restrictive training schemes. Comparisons with state of the art methods validate these properties which make our method highly valuable for addressing many diverse tasks in sociology, HRI and HCI.", "abstracts": [ { "abstractType": "Regular", "content": "We propose a head pose invariant gaze estimation model for distant RGB-D cameras. It relies on a geometric understanding of the 3D gaze action and generation of eye images. By introducing a semantic segmentation of the eye region within a generative process, the model (i) avoids the critical feature tracking of geometrical approaches requiring high resolution images, (ii) decouples the person dependent geometry from the ambient conditions, allowing adaptation to different conditions without retraining. 
Priors in the generative framework are adequate for training from few samples. In addition, the model is capable of gaze extrapolation allowing for less restrictive training schemes. Comparisons with state of the art methods validate these properties which make our method highly valuable for addressing many diverse tasks in sociology, HRI and HCI.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We propose a head pose invariant gaze estimation model for distant RGB-D cameras. It relies on a geometric understanding of the 3D gaze action and generation of eye images. By introducing a semantic segmentation of the eye region within a generative process, the model (i) avoids the critical feature tracking of geometrical approaches requiring high resolution images, (ii) decouples the person dependent geometry from the ambient conditions, allowing adaptation to different conditions without retraining. Priors in the generative framework are adequate for training from few samples. In addition, the model is capable of gaze extrapolation allowing for less restrictive training schemes. Comparisons with state of the art methods validate these properties which make our method highly valuable for addressing many diverse tasks in sociology, HRI and HCI.", "fno": "5118b773", "keywords": [ "Eyelids", "Head", "Image Color Analysis", "Three Dimensional Displays", "Image Segmentation", "Training", "Visualization", "HHI", "Gaze Estimation", "Generative Model", "Segmentation", "RGB D", "HRI", "HCI" ], "authors": [ { "affiliation": "Res. 
Inst., Martigny, Switzerland", "fullName": "Kenneth Alberto Funes Mora", "givenName": "Kenneth Alberto", "surname": "Funes Mora", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Jean-Marc Odobez", "givenName": "Jean-Marc", "surname": "Odobez", "__typename": "ArticleAuthorType" } ], "idPrefix": "cvpr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2014-06-01T00:00:00", "pubType": "proceedings", "pages": "1773-1780", "year": "2014", "issn": "1063-6919", "isbn": "978-1-4799-5118-5", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "5118b765", "articleId": "12OmNxFsmw2", "__typename": "AdjacentArticleType" }, "next": { "fno": "5118b781", "articleId": "12OmNvCRgji", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icpr/2012/2216/0/06460306", "title": "Head pose-free appearance-based gaze sensing via eye image synthesis", "doi": null, "abstractUrl": "/proceedings-article/icpr/2012/06460306/12OmNrMHOcV", "parentPublication": { "id": "proceedings/icpr/2012/2216/0", "title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cw/2015/9403/0/9403a351", "title": "Gaze Estimation Using Human Joint Rotation Angel", "doi": null, "abstractUrl": "/proceedings-article/cw/2015/9403a351/12OmNx57HJj", "parentPublication": { "id": "proceedings/cw/2015/9403/0", "title": "2015 International Conference on Cyberworlds (CW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2014/5209/0/5209b869", "title": "Appearance-Based Gaze Tracking with Free Head Movement", "doi": null, "abstractUrl": "/proceedings-article/icpr/2014/5209b869/12OmNyo1nKa", "parentPublication": { "id": 
"proceedings/icpr/2014/5209/0", "title": "2014 22nd International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2018/6420/0/642000a440", "title": "A Hierarchical Generative Model for Eye Image Synthesis and Eye Gaze Estimation", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2018/642000a440/17D45VUZMX5", "parentPublication": { "id": "proceedings/cvpr/2018/6420/0", "title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2018/3788/0/08545162", "title": "Automatic Eye Gaze Estimation using Geometric & Texture-based Networks", "doi": null, "abstractUrl": "/proceedings-article/icpr/2018/08545162/17D45WrVg95", "parentPublication": { "id": "proceedings/icpr/2018/3788/0", "title": "2018 24th International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/11/09872121", "title": "From real infrared eye-images to synthetic sequences of gaze behavior", "doi": null, "abstractUrl": "/journal/tg/2022/11/09872121/1GhRV18KGvC", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2021/03/08920005", "title": "A Differential Approach for Gaze Estimation", "doi": null, "abstractUrl": "/journal/tp/2021/03/08920005/1fsFnejO2IM", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2019/4803/0/480300g931", "title": "Photo-Realistic Monocular Gaze Redirection Using Generative Adversarial Networks", "doi": null, "abstractUrl": 
"/proceedings-article/iccv/2019/480300g931/1hVloxAEVA4", "parentPublication": { "id": "proceedings/iccv/2019/4803/0", "title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2020/6553/0/09093433", "title": "EyeGAN: Gaze&#x2013;Preserving, Mask&#x2013;Mediated Eye Image Synthesis", "doi": null, "abstractUrl": "/proceedings-article/wacv/2020/09093433/1jPbyYn40WA", "parentPublication": { "id": "proceedings/wacv/2020/6553/0", "title": "2020 IEEE Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2021/0477/0/047700a011", "title": "Subject Guided Eye Image Synthesis with Application to Gaze Redirection", "doi": null, "abstractUrl": "/proceedings-article/wacv/2021/047700a011/1uqGyw32uVq", "parentPublication": { "id": "proceedings/wacv/2021/0477/0", "title": "2021 IEEE Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "14tNJlStZmx", "title": "2018 10th International Conference on Virtual Worlds and Games for Serious Applications (VS-Games)", "acronym": "vs-games", "groupId": "1002788", "volume": "0", "displayVolume": "0", "year": "2018", "__typename": "ProceedingType" }, "article": { "id": "14tNJoD4Uxi", "doi": "10.1109/VS-Games.2018.8493406", "title": "A Model for Eye and Head Motion for Virtual Agents", "normalizedTitle": "A Model for Eye and Head Motion for Virtual Agents", "abstract": "In this paper we propose a model for generating head and eye movements during gaze shifts of virtual characters, including eyelid and eyebrow motion. A user study with 30 participants was conducted to evaluate the communicative accuracy and perceived naturalness of the model. Results showed that the model communicates gaze targets with an accuracy that closely matches that of a human confederate, and participants subjectively rated the head and eye movements as natural (as opposed to artificial). The implementation can be used as-is in applications where virtual characters act as idle bystanders or observers, or it can be paired with a lip synchronization solution.", "abstracts": [ { "abstractType": "Regular", "content": "In this paper we propose a model for generating head and eye movements during gaze shifts of virtual characters, including eyelid and eyebrow motion. A user study with 30 participants was conducted to evaluate the communicative accuracy and perceived naturalness of the model. Results showed that the model communicates gaze targets with an accuracy that closely matches that of a human confederate, and participants subjectively rated the head and eye movements as natural (as opposed to artificial). 
The implementation can be used as-is in applications where virtual characters act as idle bystanders or observers, or it can be paired with a lip synchronization solution.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In this paper we propose a model for generating head and eye movements during gaze shifts of virtual characters, including eyelid and eyebrow motion. A user study with 30 participants was conducted to evaluate the communicative accuracy and perceived naturalness of the model. Results showed that the model communicates gaze targets with an accuracy that closely matches that of a human confederate, and participants subjectively rated the head and eye movements as natural (as opposed to artificial). The implementation can be used as-is in applications where virtual characters act as idle bystanders or observers, or it can be paired with a lip synchronization solution.", "fno": "08493406", "keywords": [ "Face", "Eyelids", "Eyebrows", "Facial Animation", "Shape", "Task Analysis" ], "authors": [ { "affiliation": null, "fullName": "Jan Krejsa", "givenName": "Jan", "surname": "Krejsa", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Bojan Kerou", "givenName": "Bojan", "surname": "Kerou", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Fotis Liarokapis", "givenName": "Fotis", "surname": "Liarokapis", "__typename": "ArticleAuthorType" } ], "idPrefix": "vs-games", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2018-09-01T00:00:00", "pubType": "proceedings", "pages": "1-4", "year": "2018", "issn": null, "isbn": "978-1-5386-7123-8", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "08493405", "articleId": "14tNJosRlQY", "__typename": "AdjacentArticleType" }, "next": { "fno": "08493407", "articleId": "14tNJq72WGc", "__typename": "AdjacentArticleType" }, "__typename": 
"AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/3dui/2017/6716/0/07893315", "title": "Exploring natural eye-gaze-based interaction for immersive virtual reality", "doi": null, "abstractUrl": "/proceedings-article/3dui/2017/07893315/12OmNApcuh1", "parentPublication": { "id": "proceedings/3dui/2017/6716/0", "title": "2017 IEEE Symposium on 3D User Interfaces (3DUI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2011/0529/0/05981691", "title": "Using eye gaze, head pose, and facial expression for personalized non-player character interaction", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2011/05981691/12OmNvT2oTP", "parentPublication": { "id": "proceedings/cvprw/2011/0529/0", "title": "CVPR 2011 WORKSHOPS", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2017/4822/0/07926684", "title": "A Statistical Approach to Continuous Self-Calibrating Eye Gaze Tracking for Head-Mounted Virtual Reality Systems", "doi": null, "abstractUrl": "/proceedings-article/wacv/2017/07926684/12OmNvlxJrb", "parentPublication": { "id": "proceedings/wacv/2017/4822/0", "title": "2017 IEEE Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/acii/2009/4800/0/05349573", "title": "Relations between facial display, eye gaze and head tilt: Dominance perception variations of virtual agents", "doi": null, "abstractUrl": "/proceedings-article/acii/2009/05349573/12OmNxHJ9qY", "parentPublication": { "id": "proceedings/acii/2009/4800/0", "title": "2009 3rd International Conference on Affective Computing and Intelligent Interaction and Workshops (ACII 2009)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vs-games/2010/6331/0/05460171", "title": "Animating Gaze Shifts for Virtual Characters Based on 
Head Movement Propensity", "doi": null, "abstractUrl": "/proceedings-article/vs-games/2010/05460171/12OmNyKa67Q", "parentPublication": { "id": "proceedings/vs-games/2010/6331/0", "title": "2010 2nd International Conference on Games and Virtual Worlds for Serious Applications (VS-GAMES 2010)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2009/3943/0/04811014", "title": "Natural Eye Motion Synthesis by Modeling Gaze-Head Coupling", "doi": null, "abstractUrl": "/proceedings-article/vr/2009/04811014/12OmNzC5T34", "parentPublication": { "id": "proceedings/vr/2009/3943/0", "title": "2009 IEEE Virtual Reality Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2005/02/mcg2005020024", "title": "Automated Eye Motion Using Texture Synthesis", "doi": null, "abstractUrl": "/magazine/cg/2005/02/mcg2005020024/13rRUwInvLR", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2012/11/ttg2012111902", "title": "Live Speech Driven Head-and-Eye Motion Generators", "doi": null, "abstractUrl": "/journal/tg/2012/11/ttg2012111902/13rRUyv53Fo", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/05/08643434", "title": "SGaze: A Data-Driven Eye-Head Coordination Model for Realtime Gaze Prediction", "doi": null, "abstractUrl": "/journal/tg/2019/05/08643434/18K0lRIKi7m", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2023/04/09664291", "title": "EHTask: Recognizing User Tasks From Eye and Head Movements in Immersive Virtual Reality", 
"doi": null, "abstractUrl": "/journal/tg/2023/04/09664291/1zHDIPIlNBe", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1i5mkDyiIUg", "title": "2019 IEEE/CVF International Conference on Computer Vision Workshop (ICCVW)", "acronym": "iccvw", "groupId": "1800041", "volume": "0", "displayVolume": "0", "year": "2019", "__typename": "ProceedingType" }, "article": { "id": "1i5msGHDmec", "doi": "10.1109/ICCVW.2019.00542", "title": "Weakly-Supervised Degree of Eye-Closeness Estimation", "normalizedTitle": "Weakly-Supervised Degree of Eye-Closeness Estimation", "abstract": "Following recent technological advances there is a growing interest in building non-intrusive methods that help us communicate with computing devices. In this regard, accurate information from eye is a promising input medium between a user and computing devices. In this paper we propose a method that captures the degree of eye closeness. Although many methods exist for detection of eyelid openness, they are inherently unable to satisfactorily perform in real world applications. Detailed eye state estimation is more important, in extracting meaningful information, than estimating whether eyes are open or closed. However, learning reliable eye state estimator requires accurate annotations which is cost prohibitive. In this work, we leverage synthetic face images which can be generated via computer graphics rendering techniques and automatically annotated with different levels of eye openness. These synthesized training data images, however, have a domain shift from real-world data. To alleviate this issue, we propose a weakly-supervised method which utilizes the accurate annotation from the synthetic data set, to learn accurate degree of eye openness, and the weakly labeled (open or closed) real world eye data set to control the domain shift. We introduce a data set of 1.3M synthetic face images with detail eye openness and eye gaze information, and 21k real-world images with open/closed annotation. The dataset will be released online upon acceptance. 
Extensive experiments validate the effectiveness of the proposed approach.", "abstracts": [ { "abstractType": "Regular", "content": "Following recent technological advances there is a growing interest in building non-intrusive methods that help us communicate with computing devices. In this regard, accurate information from eye is a promising input medium between a user and computing devices. In this paper we propose a method that captures the degree of eye closeness. Although many methods exist for detection of eyelid openness, they are inherently unable to satisfactorily perform in real world applications. Detailed eye state estimation is more important, in extracting meaningful information, than estimating whether eyes are open or closed. However, learning reliable eye state estimator requires accurate annotations which is cost prohibitive. In this work, we leverage synthetic face images which can be generated via computer graphics rendering techniques and automatically annotated with different levels of eye openness. These synthesized training data images, however, have a domain shift from real-world data. To alleviate this issue, we propose a weakly-supervised method which utilizes the accurate annotation from the synthetic data set, to learn accurate degree of eye openness, and the weakly labeled (open or closed) real world eye data set to control the domain shift. We introduce a data set of 1.3M synthetic face images with detail eye openness and eye gaze information, and 21k real-world images with open/closed annotation. The dataset will be released online upon acceptance. Extensive experiments validate the effectiveness of the proposed approach.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Following recent technological advances there is a growing interest in building non-intrusive methods that help us communicate with computing devices. 
In this regard, accurate information from eye is a promising input medium between a user and computing devices. In this paper we propose a method that captures the degree of eye closeness. Although many methods exist for detection of eyelid openness, they are inherently unable to satisfactorily perform in real world applications. Detailed eye state estimation is more important, in extracting meaningful information, than estimating whether eyes are open or closed. However, learning reliable eye state estimator requires accurate annotations which is cost prohibitive. In this work, we leverage synthetic face images which can be generated via computer graphics rendering techniques and automatically annotated with different levels of eye openness. These synthesized training data images, however, have a domain shift from real-world data. To alleviate this issue, we propose a weakly-supervised method which utilizes the accurate annotation from the synthetic data set, to learn accurate degree of eye openness, and the weakly labeled (open or closed) real world eye data set to control the domain shift. We introduce a data set of 1.3M synthetic face images with detail eye openness and eye gaze information, and 21k real-world images with open/closed annotation. The dataset will be released online upon acceptance. 
Extensive experiments validate the effectiveness of the proposed approach.", "fno": "502300e416", "keywords": [ "Computer Graphics", "Estimation Theory", "Image Processing", "Reliability", "State Estimation", "Supervised Learning", "Computer Graphics Rendering Techniques", "Eyelid Openness Detection", "Synthetic Face Image Synthesis", "Training Data Image Synthesis", "Reliable Eye State Estimator", "Eye Closeness Estimation", "Eye Gaze Information", "Weakly Supervised Method", "Face", "Feature Extraction", "Eyelids", "Machine Learning", "Training", "Computer Vision", "Training Data", "Degree Of Eye Openess", "Weakly Supervised" ], "authors": [ { "affiliation": "Qualcomm AI Research, USA", "fullName": "Eyasu Mequanint", "givenName": "Eyasu", "surname": "Mequanint", "__typename": "ArticleAuthorType" }, { "affiliation": "Qualcomm AI Research, USA", "fullName": "Shuai Zhang", "givenName": "Shuai", "surname": "Zhang", "__typename": "ArticleAuthorType" }, { "affiliation": "Qualcomm AI Research, USA", "fullName": "Bijan Forutanpour", "givenName": "Bijan", "surname": "Forutanpour", "__typename": "ArticleAuthorType" }, { "affiliation": "Qualcomm AI Research, USA", "fullName": "Yingyong Qi", "givenName": "Yingyong", "surname": "Qi", "__typename": "ArticleAuthorType" }, { "affiliation": "Qualcomm AI Research, USA", "fullName": "Ning Bi", "givenName": "Ning", "surname": "Bi", "__typename": "ArticleAuthorType" } ], "idPrefix": "iccvw", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2019-10-01T00:00:00", "pubType": "proceedings", "pages": "4416-4424", "year": "2019", "issn": null, "isbn": "978-1-7281-5023-9", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "502300e406", "articleId": "1i5mMNtQdjy", "__typename": "AdjacentArticleType" }, "next": { "fno": "502300e425", "articleId": "1i5mufNRPWw", "__typename": "AdjacentArticleType" }, "__typename": 
"AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icpr/2008/2174/0/04761409", "title": "Semantic feature extraction for accurate eye corner detection", "doi": null, "abstractUrl": "/proceedings-article/icpr/2008/04761409/12OmNBVIUso", "parentPublication": { "id": "proceedings/icpr/2008/2174/0", "title": "ICPR 2008 19th International Conference on Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sitis/2016/5698/0/07907537", "title": "Driver Drowsiness Detection Using Eye-Closeness Detection", "doi": null, "abstractUrl": "/proceedings-article/sitis/2016/07907537/12OmNrMZpG4", "parentPublication": { "id": "proceedings/sitis/2016/5698/0", "title": "2016 12th International Conference on Signal-Image Technology & Internet-Based Systems (SITIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cse/2014/7981/0/7981a458", "title": "Eye Detection for Gaze Tracker with Near Infrared Illuminator", "doi": null, "abstractUrl": "/proceedings-article/cse/2014/7981a458/12OmNx3q6Yv", "parentPublication": { "id": "proceedings/cse/2014/7981/0", "title": "2014 IEEE 17th International Conference on Computational Science and Engineering (CSE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2008/2174/0/04761186", "title": "Automatic eye state recognition and closed-eye photo correction", "doi": null, "abstractUrl": "/proceedings-article/icpr/2008/04761186/12OmNzGlRzk", "parentPublication": { "id": "proceedings/icpr/2008/2174/0", "title": "ICPR 2008 19th International Conference on Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iai/1994/6250/0/00336666", "title": "Head/eye calibration of a binocular head by use of single calibration point", "doi": null, "abstractUrl": 
"/proceedings-article/iai/1994/00336666/12OmNzTH0G2", "parentPublication": { "id": "proceedings/iai/1994/6250/0", "title": "Proceedings of the IEEE Southwest Symposium on Image Analysis and Interpretation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vs-games/2018/7123/0/08493406", "title": "A Model for Eye and Head Motion for Virtual Agents", "doi": null, "abstractUrl": "/proceedings-article/vs-games/2018/08493406/14tNJoD4Uxi", "parentPublication": { "id": "proceedings/vs-games/2018/7123/0", "title": "2018 10th International Conference on Virtual Worlds and Games for Serious Applications (VS-Games)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/fg/2021/3176/0/09667031", "title": "Landmark-aware Self-supervised Eye Semantic Segmentation", "doi": null, "abstractUrl": "/proceedings-article/fg/2021/09667031/1A6BnhB8xu8", "parentPublication": { "id": "proceedings/fg/2021/3176/0", "title": "2021 16th IEEE International Conference on Automatic Face and Gesture Recognition (FG 2021)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ta/5555/01/10076795", "title": "A High-Quality Landmarked Infrared Eye Video Dataset (IREye4Task): Eye Behaviors, Insights and Benchmarks for Wearable Mental State Analysis", "doi": null, "abstractUrl": "/journal/ta/5555/01/10076795/1LFOIJwV6KI", "parentPublication": { "id": "trans/ta", "title": "IEEE Transactions on Affective Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccvw/2019/5023/0/502300d858", "title": "Semi-Supervised Eye Makeup Transfer by Swapping Learned Representation", "doi": null, "abstractUrl": "/proceedings-article/iccvw/2019/502300d858/1i5mCnsvLfW", "parentPublication": { "id": "proceedings/iccvw/2019/5023/0", "title": "2019 IEEE/CVF International Conference on Computer Vision Workshop (ICCVW)", 
"__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccvw/2019/5023/0/502300d660", "title": "U2Eyes: A Binocular Dataset for Eye Tracking and Gaze Estimation", "doi": null, "abstractUrl": "/proceedings-article/iccvw/2019/502300d660/1i5mrEVhtbq", "parentPublication": { "id": "proceedings/iccvw/2019/5023/0", "title": "2019 IEEE/CVF International Conference on Computer Vision Workshop (ICCVW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1jPbbHBGDHq", "title": "2020 IEEE Winter Conference on Applications of Computer Vision (WACV)", "acronym": "wacv", "groupId": "1000040", "volume": "0", "displayVolume": "0", "year": "2020", "__typename": "ProceedingType" }, "article": { "id": "1jPbyYn40WA", "doi": "10.1109/WACV45572.2020.9093433", "title": "EyeGAN: Gaze&#x2013;Preserving, Mask&#x2013;Mediated Eye Image Synthesis", "normalizedTitle": "EyeGAN: Gaze–Preserving, Mask–Mediated Eye Image Synthesis", "abstract": "Automatic synthesis of realistic eye images with prescribed gaze direction is important for multiple application domains. We introduce EyeGAN, an algorithm to generate eye images in the style of a desired target domain, that inherit annotations available in images from a source domain. EyeGAN takes in input ternary masks, which are used as domain-independent proxies for gaze direction. We evaluate EyeGAN against competing eye image synthesis algorithms by measuring a specific gaze consistency index. In addition, we present results from multiple experiments (involving eye region segmentation, pupil localization, and gaze direction estimation) showing that the use of EyeGANgenerated images with inherited annotations for network training leads to superior performances compared to other domain transfer algorithms.", "abstracts": [ { "abstractType": "Regular", "content": "Automatic synthesis of realistic eye images with prescribed gaze direction is important for multiple application domains. We introduce EyeGAN, an algorithm to generate eye images in the style of a desired target domain, that inherit annotations available in images from a source domain. EyeGAN takes in input ternary masks, which are used as domain-independent proxies for gaze direction. We evaluate EyeGAN against competing eye image synthesis algorithms by measuring a specific gaze consistency index. 
In addition, we present results from multiple experiments (involving eye region segmentation, pupil localization, and gaze direction estimation) showing that the use of EyeGANgenerated images with inherited annotations for network training leads to superior performances compared to other domain transfer algorithms.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Automatic synthesis of realistic eye images with prescribed gaze direction is important for multiple application domains. We introduce EyeGAN, an algorithm to generate eye images in the style of a desired target domain, that inherit annotations available in images from a source domain. EyeGAN takes in input ternary masks, which are used as domain-independent proxies for gaze direction. We evaluate EyeGAN against competing eye image synthesis algorithms by measuring a specific gaze consistency index. In addition, we present results from multiple experiments (involving eye region segmentation, pupil localization, and gaze direction estimation) showing that the use of EyeGANgenerated images with inherited annotations for network training leads to superior performances compared to other domain transfer algorithms.", "fno": "09093433", "keywords": [ "Eye", "Feature Extraction", "Gaze Tracking", "Image Segmentation", "Learning Artificial Intelligence", "Domain Transfer Algorithms", "Inherited Annotations", "Eye GA Ngenerated Images", "Gaze Direction Estimation", "Eye Region Segmentation", "Multiple Experiments", "Specific Gaze Consistency Index", "Eye Image Synthesis Algorithms", "Domain Independent Proxies", "Input Ternary Masks", "Source Domain", "Multiple Application Domains", "Prescribed Gaze Direction", "Realistic Eye Images", "Automatic Synthesis", "Mask Mediated Eye Image Synthesis", "Gaze Preserving", "Eye GAN", "Training", "Head", "Generators", "Image Segmentation", "Gallium Nitride", "Labeling", "Image Generation" ], "authors": [ { "affiliation": "University of California,Santa Cruz", 
"fullName": "Harsimran Kaur", "givenName": "Harsimran", "surname": "Kaur", "__typename": "ArticleAuthorType" }, { "affiliation": "University of California,Santa Cruz", "fullName": "Roberto Manduchi", "givenName": "Roberto", "surname": "Manduchi", "__typename": "ArticleAuthorType" } ], "idPrefix": "wacv", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2020-03-01T00:00:00", "pubType": "proceedings", "pages": "299-308", "year": "2020", "issn": null, "isbn": "978-1-7281-6553-0", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "09093568", "articleId": "1jPbwRjMiBO", "__typename": "AdjacentArticleType" }, "next": { "fno": "09093326", "articleId": "1jPbmiIdXFe", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icpr/2012/2216/0/06460306", "title": "Head pose-free appearance-based gaze sensing via eye image synthesis", "doi": null, "abstractUrl": "/proceedings-article/icpr/2012/06460306/12OmNrMHOcV", "parentPublication": { "id": "proceedings/icpr/2012/2216/0", "title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/snpd/2007/2909/1/290910220", "title": "IR Image Based Eye Gaze Estimation", "doi": null, "abstractUrl": "/proceedings-article/snpd/2007/290910220/12OmNvqmUJv", "parentPublication": { "id": "proceedings/snpd/2007/2909/1", "title": "Eighth ACIS International Conference on Software Engineering, Artificial Intelligence, Networking, and Parallel/Distributed Computing (SNPD 2007)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cw/2015/9403/0/9403a351", "title": "Gaze Estimation Using Human Joint Rotation Angel", "doi": null, "abstractUrl": 
"/proceedings-article/cw/2015/9403a351/12OmNx57HJj", "parentPublication": { "id": "proceedings/cw/2015/9403/0", "title": "2015 International Conference on Cyberworlds (CW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/acii/2009/4800/0/05349573", "title": "Relations between facial display, eye gaze and head tilt: Dominance perception variations of virtual agents", "doi": null, "abstractUrl": "/proceedings-article/acii/2009/05349573/12OmNxHJ9qY", "parentPublication": { "id": "proceedings/acii/2009/4800/0", "title": "2009 3rd International Conference on Affective Computing and Intelligent Interaction and Workshops (ACII 2009)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacvw/2017/4941/0/07912208", "title": "Gaze Estimation Based on Eyeball-Head Dynamics", "doi": null, "abstractUrl": "/proceedings-article/wacvw/2017/07912208/12OmNzWfoVQ", "parentPublication": { "id": "proceedings/wacvw/2017/4941/0", "title": "2017 IEEE Winter Applications of Computer Vision Workshops (WACVW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2002/1858/0/18580191", "title": "Appearance-based Eye Gaze Estimation", "doi": null, "abstractUrl": "/proceedings-article/wacv/2002/18580191/12OmNzYwcdp", "parentPublication": { "id": "proceedings/wacv/2002/1858/0", "title": "Applications of Computer Vision, IEEE Workshop on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/irc/2017/6724/0/07926555", "title": "Gaze Tracking and Object Recognition from Eye Images", "doi": null, "abstractUrl": "/proceedings-article/irc/2017/07926555/12OmNzvz6Lc", "parentPublication": { "id": "proceedings/irc/2017/6724/0", "title": "2017 First IEEE International Conference on Robotic Computing (IRC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/crv/2018/6481/0/648101a118", "title": "WAYLA - Generating Images from Eye Movements", "doi": null, "abstractUrl": "/proceedings-article/crv/2018/648101a118/17D45Wt3Ewe", "parentPublication": { "id": "proceedings/crv/2018/6481/0", "title": "2018 15th Conference on Computer and Robot Vision (CRV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2019/4803/0/480300g931", "title": "Photo-Realistic Monocular Gaze Redirection Using Generative Adversarial Networks", "doi": null, "abstractUrl": "/proceedings-article/iccv/2019/480300g931/1hVloxAEVA4", "parentPublication": { "id": "proceedings/iccv/2019/4803/0", "title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2021/0477/0/047700a011", "title": "Subject Guided Eye Image Synthesis with Application to Gaze Redirection", "doi": null, "abstractUrl": "/proceedings-article/wacv/2021/047700a011/1uqGyw32uVq", "parentPublication": { "id": "proceedings/wacv/2021/0477/0", "title": "2021 IEEE Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNrAdstn", "title": "Computer Graphics and Applications, Pacific Conference on", "acronym": "pg", "groupId": "1000130", "volume": "0", "displayVolume": "0", "year": "2003", "__typename": "ProceedingType" }, "article": { "id": "12OmNxGSm2n", "doi": "10.1109/PCCGA.2003.1238246", "title": "Efficient Rendering of Local Subsurface Scattering", "normalizedTitle": "Efficient Rendering of Local Subsurface Scattering", "abstract": "A novel approach is presented to efficiently render local subsurface scattering effects. We introduce an importance sampling scheme for a practical subsurface scattering model. It leads to a simple and efficient rendering algorithm, which operates in image-space, and which is even amenable for implementation on graphics hardware. We demonstrate the applicability of our technique to the problem of skin rendering, for which the subsurface transport of light typically remains local. Our implementation shows that plausible images can be rendered interactively using hardware acceleration.", "abstracts": [ { "abstractType": "Regular", "content": "A novel approach is presented to efficiently render local subsurface scattering effects. We introduce an importance sampling scheme for a practical subsurface scattering model. It leads to a simple and efficient rendering algorithm, which operates in image-space, and which is even amenable for implementation on graphics hardware. We demonstrate the applicability of our technique to the problem of skin rendering, for which the subsurface transport of light typically remains local. Our implementation shows that plausible images can be rendered interactively using hardware acceleration.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "A novel approach is presented to efficiently render local subsurface scattering effects. We introduce an importance sampling scheme for a practical subsurface scattering model. 
It leads to a simple and efficient rendering algorithm, which operates in image-space, and which is even amenable for implementation on graphics hardware. We demonstrate the applicability of our technique to the problem of skin rendering, for which the subsurface transport of light typically remains local. Our implementation shows that plausible images can be rendered interactively using hardware acceleration.", "fno": "20280051", "keywords": [], "authors": [ { "affiliation": "Limburgs Universitair Centrum", "fullName": "Tom Mertens", "givenName": "Tom", "surname": "Mertens", "__typename": "ArticleAuthorType" }, { "affiliation": "MPI Informatik", "fullName": "Jan Kautz", "givenName": "Jan", "surname": "Kautz", "__typename": "ArticleAuthorType" }, { "affiliation": "Limburgs Universitair Centrum", "fullName": "Philippe Bekaert", "givenName": "Philippe", "surname": "Bekaert", "__typename": "ArticleAuthorType" }, { "affiliation": "Limburgs Universitair Centrum", "fullName": "Frank Van Reeth", "givenName": "Frank", "surname": "Van Reeth", "__typename": "ArticleAuthorType" }, { "affiliation": "MPI Informatik", "fullName": "Hans-Peter Seidel", "givenName": "Hans-Peter", "surname": "Seidel", "__typename": "ArticleAuthorType" } ], "idPrefix": "pg", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2003-10-01T00:00:00", "pubType": "proceedings", "pages": "51", "year": "2003", "issn": null, "isbn": "0-7695-2028-6", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "20280041", "articleId": "12OmNCwlagV", "__typename": "AdjacentArticleType" }, "next": { "fno": "20280059", "articleId": "12OmNA1mbeZ", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icpr/2006/2521/2/252120207", "title": "Separating Subsurface Scattering from Photometric Image", "doi": null, "abstractUrl": 
"/proceedings-article/icpr/2006/252120207/12OmNxYbT3w", "parentPublication": { "id": "proceedings/icpr/2006/2521/2", "title": "Pattern Recognition, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iwcse/2009/3881/1/3881a302", "title": "Real-Time Realistic Rendering of Tissue Surface with Mucous Layer", "doi": null, "abstractUrl": "/proceedings-article/iwcse/2009/3881a302/12OmNxxNbYW", "parentPublication": { "id": "proceedings/iwcse/2009/3881/1", "title": "Computer Science and Engineering, International Workshop on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/pg/2007/3009/0/30090403", "title": "Real-Time Approximate Subsurface Scattering on Graphics Hardware", "doi": null, "abstractUrl": "/proceedings-article/pg/2007/30090403/12OmNybfr3s", "parentPublication": { "id": "proceedings/pg/2007/3009/0", "title": "Computer Graphics and Applications, Pacific Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vg/2005/26/0/01500525", "title": "A simplified model for inhomogeneous subsurface scattering", "doi": null, "abstractUrl": "/proceedings-article/vg/2005/01500525/12OmNzRZq0C", "parentPublication": { "id": "proceedings/vg/2005/26/0", "title": "Volume Graphics 2005", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2008/2174/0/04761892", "title": "Analysis of subsurface scattering under generic illumination", "doi": null, "abstractUrl": "/proceedings-article/icpr/2008/04761892/12OmNzd7bV9", "parentPublication": { "id": "proceedings/icpr/2008/2174/0", "title": "ICPR 2008 19th International Conference on Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2014/07/06658758", "title": "Translucent Radiosity: Efficiently CombiningDiffuse Inter-Reflection 
andSubsurface Scattering", "doi": null, "abstractUrl": "/journal/tg/2014/07/06658758/13rRUwInvJg", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2011/07/ttg2011070956", "title": "Heterogeneous Subsurface Scattering Using the Finite Element Method", "doi": null, "abstractUrl": "/journal/tg/2011/07/ttg2011070956/13rRUwInvys", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2009/01/mcg2009010066", "title": "Image-Space Subsurface Scattering for Interactive Rendering of Deformable Translucent Objects", "doi": null, "abstractUrl": "/magazine/cg/2009/01/mcg2009010066/13rRUwjoNzF", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccp/2020/5230/0/09105209", "title": "Towards Learning-based Inverse Subsurface Scattering", "doi": null, "abstractUrl": "/proceedings-article/iccp/2020/09105209/1kkJX7XnNRe", "parentPublication": { "id": "proceedings/iccp/2020/5230/0", "title": "2020 IEEE International Conference on Computational Photography (ICCP)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccvw/2021/0191/0/019100c328", "title": "Fine-Grain Prediction of Strawberry Freshness using Subsurface Scattering", "doi": null, "abstractUrl": "/proceedings-article/iccvw/2021/019100c328/1yNhlKKHvNu", "parentPublication": { "id": "proceedings/iccvw/2021/0191/0", "title": "2021 IEEE/CVF International Conference on Computer Vision Workshops (ICCVW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNxE2mTG", "title": "2007 International Conference on Multimedia & Expo", "acronym": "icme", "groupId": "1000477", "volume": "0", "displayVolume": "0", "year": "2007", "__typename": "ProceedingType" }, "article": { "id": "12OmNxWLTvj", "doi": "10.1109/ICME.2007.4285007", "title": "3-D Display using Motion Parallax for Extended-Depth Perception", "normalizedTitle": "3-D Display using Motion Parallax for Extended-Depth Perception", "abstract": "This paper proposes a new 3D display that can express differences between depths at extended distances of over tens of meters to meet new requirements for outdoor use. We attempted to use motion parallax for observers to perceive depth because this works as a cue at longer distances where binocular parallax, which is used in conventional 3D displays to perceive depth, does not work. We conducted subjective tests using a moving car in which observers viewed a test pattern overlapping the real view ahead of the car seen through the windshield to examine the feasibility of the 3D display we propose. The experimental results revealed that the perceived depth of a pattern could be controlled by changing its rate of expansion, demonstrating the feasibility of a 3D display for extended-depth perception using motion parallax.", "abstracts": [ { "abstractType": "Regular", "content": "This paper proposes a new 3D display that can express differences between depths at extended distances of over tens of meters to meet new requirements for outdoor use. We attempted to use motion parallax for observers to perceive depth because this works as a cue at longer distances where binocular parallax, which is used in conventional 3D displays to perceive depth, does not work. We conducted subjective tests using a moving car in which observers viewed a test pattern overlapping the real view ahead of the car seen through the windshield to examine the feasibility of the 3D display we propose. 
The experimental results revealed that the perceived depth of a pattern could be controlled by changing its rate of expansion, demonstrating the feasibility of a 3D display for extended-depth perception using motion parallax.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "This paper proposes a new 3D display that can express differences between depths at extended distances of over tens of meters to meet new requirements for outdoor use. We attempted to use motion parallax for observers to perceive depth because this works as a cue at longer distances where binocular parallax, which is used in conventional 3D displays to perceive depth, does not work. We conducted subjective tests using a moving car in which observers viewed a test pattern overlapping the real view ahead of the car seen through the windshield to examine the feasibility of the 3D display we propose. The experimental results revealed that the perceived depth of a pattern could be controlled by changing its rate of expansion, demonstrating the feasibility of a 3D display for extended-depth perception using motion parallax.", "fno": "04285007", "keywords": [ "Image Motion Analysis", "Three Dimensional Displays", "3 D Display", "Extended Depth Perception", "Observer Motion Parallax", "Binocular Parallax", "Three Dimensional Displays", "Retina", "Testing", "Motion Control", "Navigation", "Automotive Components", "Ubiquitous Computing", "Intelligent Systems", "Augmented Reality", "Size Control" ], "authors": [ { "affiliation": "Kanagawa Institute of Technology", "fullName": "Kazutake Uehira", "givenName": "Kazutake", "surname": "Uehira", "__typename": "ArticleAuthorType" }, { "affiliation": "Kanagawa Institute of Technology", "fullName": "Masahiro Suzuki", "givenName": "Masahiro", "surname": "Suzuki", "__typename": "ArticleAuthorType" }, { "affiliation": "Kanagawa Institute of Technology", "fullName": "Takuya Abekawa", "givenName": "Takuya", "surname": "Abekawa", "__typename": 
"ArticleAuthorType" } ], "idPrefix": "icme", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2007-07-01T00:00:00", "pubType": "proceedings", "pages": "", "year": "2007", "issn": "1945-7871", "isbn": "1-4244-1016-9", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "04285006", "articleId": "12OmNB0X8uH", "__typename": "AdjacentArticleType" }, "next": { "fno": "04285008", "articleId": "12OmNzwHvrN", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/vr/2008/1971/0/04480794", "title": "The Effects of Virtual Reality, Augmented Reality, and Motion Parallax on Egocentric Depth Perception", "doi": null, "abstractUrl": "/proceedings-article/vr/2008/04480794/12OmNrAMERg", "parentPublication": { "id": "proceedings/vr/2008/1971/0", "title": "IEEE Virtual Reality 2008", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismarw/2016/3740/0/07836523", "title": "Human Attention and fatigue for AR Head-Up Displays", "doi": null, "abstractUrl": "/proceedings-article/ismarw/2016/07836523/12OmNwFidbp", "parentPublication": { "id": "proceedings/ismarw/2016/3740/0", "title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2012/1247/0/06180899", "title": "Evaluating depth perception of volumetric data in semi-immersive VR", "doi": null, "abstractUrl": "/proceedings-article/vr/2012/06180899/12OmNx6g6eF", "parentPublication": { "id": "proceedings/vr/2012/1247/0", "title": "Virtual Reality Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dui/2008/2047/0/04476619", "title": "Poster: 3-D Display Using Motion Parallax for Outdoor User 
Interface", "doi": null, "abstractUrl": "/proceedings-article/3dui/2008/04476619/12OmNx8OuEv", "parentPublication": { "id": "proceedings/3dui/2008/2047/0", "title": "2008 IEEE Symposium on 3D User Interfaces", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icicic/2006/2616/1/26160302", "title": "Thin-Type Three-dimensional Display Based on the Reconstruction of Parallax Rays", "doi": null, "abstractUrl": "/proceedings-article/icicic/2006/26160302/12OmNxWuih1", "parentPublication": { "id": "proceedings/icicic/2006/2616/1", "title": "Innovative Computing ,Information and Control, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icicic/2007/2882/0/28820304", "title": "Three-dimensional Camera System for the Thin Three-dimensional Display Based on the Reconstruction of Parallax Rays", "doi": null, "abstractUrl": "/proceedings-article/icicic/2007/28820304/12OmNxYbT09", "parentPublication": { "id": "proceedings/icicic/2007/2882/0", "title": "2007 Second International Conference on Innovative Computing, Information and Control", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvvrhc/1998/8283/0/82830035", "title": "Displaying Motion Parallax by Occlusion Detectable Stereo", "doi": null, "abstractUrl": "/proceedings-article/cvvrhc/1998/82830035/12OmNzkuKJt", "parentPublication": { "id": "proceedings/cvvrhc/1998/8283/0", "title": "Computer Vision for Virtual Reality Based Human Communications, Workshop on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmcs/1996/7436/0/74360349", "title": "3D Image Display with Motion Parallax by Camera Matrix Stereo", "doi": null, "abstractUrl": "/proceedings-article/icmcs/1996/74360349/12OmNzn38KE", "parentPublication": { "id": "proceedings/icmcs/1996/7436/0", "title": "Multimedia Computing and 
Systems, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2007/0905/0/04161018", "title": "Dynallax: Solid State Dynamic Parallax Barrier Autostereoscopic VR Display", "doi": null, "abstractUrl": "/proceedings-article/vr/2007/04161018/12OmNzn38Sd", "parentPublication": { "id": "proceedings/vr/2007/0905/0", "title": "2007 IEEE Virtual Reality Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/05/08642347", "title": "An Evaluation of Depth and Size Perception on a Spherical Fish Tank Virtual Reality Display", "doi": null, "abstractUrl": "/journal/tg/2019/05/08642347/17PYEjbrJk7", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNs4S8vE", "title": "2008 IEEE Symposium on 3D User Interfaces", "acronym": "3dui", "groupId": "1001623", "volume": "0", "displayVolume": "0", "year": "2008", "__typename": "ProceedingType" }, "article": { "id": "12OmNzT7Oz9", "doi": "10.1109/3DUI.2008.4476617", "title": "Poster: Image-Based 3D Display with Motion Parallax using Face Tracking", "normalizedTitle": "Poster: Image-Based 3D Display with Motion Parallax using Face Tracking", "abstract": "We propose an image-based 3D display with motion parallax using face tracking. Multi-view images of target objects are recorded in advance by utilizing a camera mounted on a 6 DOF manipulator. The 3D viewpoint of the user is measured by using a real-time non-contact face measurement system. One of the multi-view images is projected according as the camera position corresponding to the viewpoint of the user. The user can obtain 3D information of the object through a standard monitor by moving his/her head. A prototype system is developed and the consistency of the proposed method is confirmed by comparing generated images with captured images at the same viewpoints.", "abstracts": [ { "abstractType": "Regular", "content": "We propose an image-based 3D display with motion parallax using face tracking. Multi-view images of target objects are recorded in advance by utilizing a camera mounted on a 6 DOF manipulator. The 3D viewpoint of the user is measured by using a real-time non-contact face measurement system. One of the multi-view images is projected according as the camera position corresponding to the viewpoint of the user. The user can obtain 3D information of the object through a standard monitor by moving his/her head. 
A prototype system is developed and the consistency of the proposed method is confirmed by comparing generated images with captured images at the same viewpoints.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We propose an image-based 3D display with motion parallax using face tracking. Multi-view images of target objects are recorded in advance by utilizing a camera mounted on a 6 DOF manipulator. The 3D viewpoint of the user is measured by using a real-time non-contact face measurement system. One of the multi-view images is projected according as the camera position corresponding to the viewpoint of the user. The user can obtain 3D information of the object through a standard monitor by moving his/her head. A prototype system is developed and the consistency of the proposed method is confirmed by comparing generated images with captured images at the same viewpoints.", "fno": "04476617", "keywords": [ "Cameras", "Computer Displays", "Face Recognition", "Image Motion Analysis", "Image Based 3 D Display", "Motion Parallax", "Face Tracking", "Cameras", "6 DOF Manipulator", "Real Time Noncontact Face Measurement System", "Three Dimensional Displays", "Cameras", "Image Generation", "Target Tracking", "Computerized Monitoring", "Head", "Computer Graphics", "Lenses", "Character Generation", "Real Time Systems", "H 5 1 INFORMATION INTERFACES AND PRESENTATION E G", "HCI Multimedia Information Systems Artificial", "Augmented", "And Virtual Realities", "I 3 7 COMPUTER GRAPHICS Three Dimensional Graphics And Realism Virtual Reality", "I 4 9 IMAGE PROCESSING AND COMPUTER VISION Applications" ], "authors": [ { "affiliation": "Nara Institute of Science and Technology, 8916-5 Takayama-cho, Ikoma-shi, Nara, Japan, e-mail: tsuyo-s@is.naist.jp", "fullName": "Tsuyoshi SUENAGA", "givenName": "Tsuyoshi", "surname": "SUENAGA", "__typename": "ArticleAuthorType" }, { "affiliation": "Nara Institute of Science and Technology, 8916-5 Takayama-cho, Ikoma-shi, Nara, 
Japan, e-mail: yasuyuki-t@is.naist.jp", "fullName": "Yasuyuki TAMAI", "givenName": "Yasuyuki", "surname": "TAMAI", "__typename": "ArticleAuthorType" }, { "affiliation": "Nara Institute of Science and Technology, 8916-5 Takayama-cho, Ikoma-shi, Nara, Japan, e-mail: kurita@is.naist.jp", "fullName": "Yuichi KURITA", "givenName": "Yuichi", "surname": "KURITA", "__typename": "ArticleAuthorType" }, { "affiliation": "Nara Institute of Science and Technology, 8916-5 Takayama-cho, Ikoma-shi, Nara, Japan, e-mail: yoshio@is.naist.jp", "fullName": "Yoshio MATSUMOTO", "givenName": "Yoshio", "surname": "MATSUMOTO", "__typename": "ArticleAuthorType" }, { "affiliation": "Nara Institute of Science and Technology, 8916-5 Takayama-cho, Ikoma-shi, Nara, Japan, e-mail: ogasawar@is.naist.jp", "fullName": "Tsukasa OGASAWARA", "givenName": "Tsukasa", "surname": "OGASAWARA", "__typename": "ArticleAuthorType" } ], "idPrefix": "3dui", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2008-03-01T00:00:00", "pubType": "proceedings", "pages": "161-162", "year": "2008", "issn": null, "isbn": "978-1-4244-2047-6", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "04476615", "articleId": "12OmNzn393s", "__typename": "AdjacentArticleType" }, "next": { "fno": "04476602", "articleId": "12OmNxdVgVU", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/isvri/2011/0054/0/05759601", "title": "RePro3D: full-parallax 3D display with haptic feedback using retro-reflective projection technology", "doi": null, "abstractUrl": "/proceedings-article/isvri/2011/05759601/12OmNApcui8", "parentPublication": { "id": "proceedings/isvri/2011/0054/0", "title": "2011 IEEE International Symposium on VR Innovation (ISVRI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/ismarw/2016/3740/0/07836472", "title": "Motion Parallax Representation for Indirect Augmented Reality", "doi": null, "abstractUrl": "/proceedings-article/ismarw/2016/07836472/12OmNBTs7Cu", "parentPublication": { "id": "proceedings/ismarw/2016/3740/0", "title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2015/7660/0/7660a168", "title": "[POSTER] Overlaying Navigation Signs on a Road Surface Using a Head-Up Display", "doi": null, "abstractUrl": "/proceedings-article/ismar/2015/7660a168/12OmNBhZ4i1", "parentPublication": { "id": "proceedings/ismar/2015/7660/0", "title": "2015 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2006/0224/0/01667679", "title": "Long Visualization Depth Autostereoscopic Display using Light Field Rendering based Integral Videography", "doi": null, "abstractUrl": "/proceedings-article/vr/2006/01667679/12OmNvDZEZe", "parentPublication": { "id": "proceedings/vr/2006/0224/0", "title": "IEEE Virtual Reality Conference (VR 2006)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccvw/2011/0063/0/06130278", "title": "MoPaCo: High telepresence video communication system using motion parallax with monocular camera", "doi": null, "abstractUrl": "/proceedings-article/iccvw/2011/06130278/12OmNvT2oLu", "parentPublication": { "id": "proceedings/iccvw/2011/0063/0", "title": "2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icicic/2007/2882/0/28820304", "title": "Three-dimensional Camera System for the Thin Three-dimensional Display Based on the Reconstruction of Parallax Rays", "doi": null, 
"abstractUrl": "/proceedings-article/icicic/2007/28820304/12OmNxYbT09", "parentPublication": { "id": "proceedings/icicic/2007/2882/0", "title": "2007 Second International Conference on Innovative Computing, Information and Control", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2012/1611/0/06238891", "title": "Head-tracking virtual 3-D display for mobile devices", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2012/06238891/12OmNzC5SVX", "parentPublication": { "id": "proceedings/cvprw/2012/1611/0", "title": "2012 IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmcs/1996/7436/0/74360349", "title": "3D Image Display with Motion Parallax by Camera Matrix Stereo", "doi": null, "abstractUrl": "/proceedings-article/icmcs/1996/74360349/12OmNzn38KE", "parentPublication": { "id": "proceedings/icmcs/1996/7436/0", "title": "Multimedia Computing and Systems, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dui/2008/2047/0/04476615", "title": "Poster: Toward an Interactive Box-shaped 3D Display: Study of the Requirements for Wide Field of View", "doi": null, "abstractUrl": "/proceedings-article/3dui/2008/04476615/12OmNzn393s", "parentPublication": { "id": "proceedings/3dui/2008/2047/0", "title": "2008 IEEE Symposium on 3D User Interfaces", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08797791", "title": "A 6-DOF Telexistence Drone Controlled by a Head Mounted Display", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08797791/1cJ12HLy2ac", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, 
"__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "17PYEiwhTsD", "title": "2018 1st Annual International Conference on Information and Sciences (AiCIS)", "acronym": "aicis", "groupId": "1830467", "volume": "0", "displayVolume": "0", "year": "2018", "__typename": "ProceedingType" }, "article": { "id": "17PYEm2zquf", "doi": "10.1109/AiCIS.2018.00054", "title": "Overview: 3D Video from Capture to Display", "normalizedTitle": "Overview: 3D Video from Capture to Display", "abstract": "In recent years, Two Dimensions (2D) video has been quite developed, in many aspects particularly in their display device, the quality of their content, and enter the interactive element with the end user. All these enhancements have shifted the attention from 2D to Three Dimensions (3D) video. A novel feature called depth is added to the 2D video given it better immersive experiences to viewers than normal 2D video. Stereoscopy term refers to two classical 2D videos that are captured and delivered simultaneously to the users. This procedure is considered the first generation of 3D video. A 3D video has been applied in many areas that covered a wide range of personal and social activities, where it is obtainable in Television (TV), cinemas, mobile phones, games, laptops, Personal Digital Assistants (PDAs), etc. Art Production of the 3D video is difficult, where many different skills are required whether technical, creative, and psychological. The perception and display capabilities are taken into account also. In this paper, a general overview of the 3D video processing steps are introduced presenting its content creation techniques, representation methods, coding techniques, rendering procedures, and finally display devices. All these steps are discussed in this paper in more details. 
In addition to a simple comparison between imaging techniques is presented where the final Three Dimensions video is produced for the end users.", "abstracts": [ { "abstractType": "Regular", "content": "In recent years, Two Dimensions (2D) video has been quite developed, in many aspects particularly in their display device, the quality of their content, and enter the interactive element with the end user. All these enhancements have shifted the attention from 2D to Three Dimensions (3D) video. A novel feature called depth is added to the 2D video given it better immersive experiences to viewers than normal 2D video. Stereoscopy term refers to two classical 2D videos that are captured and delivered simultaneously to the users. This procedure is considered the first generation of 3D video. A 3D video has been applied in many areas that covered a wide range of personal and social activities, where it is obtainable in Television (TV), cinemas, mobile phones, games, laptops, Personal Digital Assistants (PDAs), etc. Art Production of the 3D video is difficult, where many different skills are required whether technical, creative, and psychological. The perception and display capabilities are taken into account also. In this paper, a general overview of the 3D video processing steps are introduced presenting its content creation techniques, representation methods, coding techniques, rendering procedures, and finally display devices. All these steps are discussed in this paper in more details. In addition to a simple comparison between imaging techniques is presented where the final Three Dimensions video is produced for the end users.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In recent years, Two Dimensions (2D) video has been quite developed, in many aspects particularly in their display device, the quality of their content, and enter the interactive element with the end user. 
All these enhancements have shifted the attention from 2D to Three Dimensions (3D) video. A novel feature called depth is added to the 2D video given it better immersive experiences to viewers than normal 2D video. Stereoscopy term refers to two classical 2D videos that are captured and delivered simultaneously to the users. This procedure is considered the first generation of 3D video. A 3D video has been applied in many areas that covered a wide range of personal and social activities, where it is obtainable in Television (TV), cinemas, mobile phones, games, laptops, Personal Digital Assistants (PDAs), etc. Art Production of the 3D video is difficult, where many different skills are required whether technical, creative, and psychological. The perception and display capabilities are taken into account also. In this paper, a general overview of the 3D video processing steps are introduced presenting its content creation techniques, representation methods, coding techniques, rendering procedures, and finally display devices. All these steps are discussed in this paper in more details. In addition to a simple comparison between imaging techniques is presented where the final Three Dimensions video is produced for the end users.", "fno": "918800a259", "keywords": [ "Stereo Image Processing", "Video Signal Processing", "Rendering Procedures", "Coding Techniques", "Representation Methods", "Content Creation Techniques", "2 D Video", "Display Device", "3 D Video Processing Steps", "Display Capabilities", "Three Dimensional Displays", "Cameras", "Two Dimensional Displays", "Rendering Computer Graphics", "Streaming Media", "Sensors", "Stereo Image Processing", "Multimedia Stereoscopic Video Multiview Video Video Plus Depth Depth Image Based Rendering Anaglyph" ], "authors": [ { "affiliation": null, "fullName": "Abeer D. Salman", "givenName": "Abeer D.", "surname": "Salman", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Hala B. 
Abdulwahab", "givenName": "Hala B.", "surname": "Abdulwahab", "__typename": "ArticleAuthorType" } ], "idPrefix": "aicis", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2018-11-01T00:00:00", "pubType": "proceedings", "pages": "259-268", "year": "2018", "issn": null, "isbn": "978-1-5386-9188-5", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "918800a254", "articleId": "17PYEkGy9Z6", "__typename": "AdjacentArticleType" }, "next": { "fno": "918800a269", "articleId": "17PYEm5YC33", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icvrv/2015/7673/0/7673a318", "title": "Real-Time 3D Video Acquisition and Auto-Stereoscopic Display End-to-End Algorithm Based on Tiled Multi-projectors", "doi": null, "abstractUrl": "/proceedings-article/icvrv/2015/7673a318/12OmNBO3Kkf", "parentPublication": { "id": "proceedings/icvrv/2015/7673/0", "title": "2015 International Conference on Virtual Reality and Visualization (ICVRV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2012/1611/0/06238904", "title": "The measurement of eyestrain caused from diverse binocular disparities, viewing time and display sizes in watching stereoscopic 3D content", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2012/06238904/12OmNqJHFuT", "parentPublication": { "id": "proceedings/cvprw/2012/1611/0", "title": "2012 IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2017/1032/0/1032b707", "title": "Learning Gaze Transitions from Depth to Improve Video Saliency Estimation", "doi": null, "abstractUrl": "/proceedings-article/iccv/2017/1032b707/12OmNwwMf0f", "parentPublication": { "id": 
"proceedings/iccv/2017/1032/0", "title": "2017 IEEE International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/csa/2015/9961/0/9961a006", "title": "Learning Stereoscopic Visual Attention Model for 3D Video", "doi": null, "abstractUrl": "/proceedings-article/csa/2015/9961a006/12OmNyKJiyr", "parentPublication": { "id": "proceedings/csa/2015/9961/0", "title": "2015 International Conference on Computer Science and Applications (CSA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dui/2014/3624/0/06798842", "title": "HybridSpace: Integrating 3D freehand input and stereo viewing into traditional desktop applications", "doi": null, "abstractUrl": "/proceedings-article/3dui/2014/06798842/12OmNyUnEGq", "parentPublication": { "id": "proceedings/3dui/2014/3624/0", "title": "2014 IEEE Symposium on 3D User Interfaces (3DUI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2018/01/08019876", "title": "The Hologram in My Hand: How Effective is Interactive Exploration of 3D Visualizations in Immersive Tangible Augmented Reality?", "doi": null, "abstractUrl": "/journal/tg/2018/01/08019876/13rRUzp02oy", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aemcse/2022/8474/0/847400a167", "title": "3D Communication System Integrating 3D Reconstruction and Rendering Display", "doi": null, "abstractUrl": "/proceedings-article/aemcse/2022/847400a167/1IlObcruRxK", "parentPublication": { "id": "proceedings/aemcse/2022/8474/0", "title": "2022 5th International Conference on Advanced Electronic Materials, Computers and Software Engineering (AEMCSE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/searis/2018/6272/0/09180229", "title": "Realtime Interactive Hybrid 2D and 3D Visual Analytics on Large High Resolution Display and Immersive Virtual Environment", "doi": null, "abstractUrl": "/proceedings-article/searis/2018/09180229/1mK7jikqxkQ", "parentPublication": { "id": "proceedings/searis/2018/6272/0", "title": "2018 IEEE 11th Workshop on Software Engineering and Architectures for Real-time Interactive Systems (SEARIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icvrv/2019/4752/0/09212946", "title": "Extract Accurate 3D Human Skeleton from Video", "doi": null, "abstractUrl": "/proceedings-article/icvrv/2019/09212946/1nHRSx20hQA", "parentPublication": { "id": "proceedings/icvrv/2019/4752/0", "title": "2019 International Conference on Virtual Reality and Visualization (ICVRV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/10/09366825", "title": "Conditions of a Multi-View 3D Display for Accurate Reproduction of Perceived Glossiness", "doi": null, "abstractUrl": "/journal/tg/2022/10/09366825/1rDR1ZpCQ6Y", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1LRl0szGl3i", "title": "2022 4th International Conference on Frontiers Technology of Information and Computer (ICFTIC)", "acronym": "icftic", "groupId": "10073966", "volume": "0", "displayVolume": "0", "year": "2022", "__typename": "ProceedingType" }, "article": { "id": "1LRl1LvUNBS", "doi": "10.1109/ICFTIC57696.2022.10075271", "title": "Holographic 3D display method based on iterative layer-oriented angular spectrum optimization", "normalizedTitle": "Holographic 3D display method based on iterative layer-oriented angular spectrum optimization", "abstract": "In order to improve the reconstruction quality of hologram, the iterative layer-oriented angular spectrum optimization (ILASO) algorithm is proposed in this paper, which combines non-iterative and iterative methods to achieve full color holographic three-dimensional (3D) display. In this paper, the hologram is generated by layer-oriented angular spectrum diffraction, and then the hologram is optimized by error feedback iteration, so as to reduce the speckle noise and improve the reconstruction quality of the hologram. Simulation results show that the holographic reconstructed image has strong detail representation ability, weak speckle noise and small crosstalk, which is suitable for complex 3D scenes and has excellent holographic 3D display ability.", "abstracts": [ { "abstractType": "Regular", "content": "In order to improve the reconstruction quality of hologram, the iterative layer-oriented angular spectrum optimization (ILASO) algorithm is proposed in this paper, which combines non-iterative and iterative methods to achieve full color holographic three-dimensional (3D) display. In this paper, the hologram is generated by layer-oriented angular spectrum diffraction, and then the hologram is optimized by error feedback iteration, so as to reduce the speckle noise and improve the reconstruction quality of the hologram. 
Simulation results show that the holographic reconstructed image has strong detail representation ability, weak speckle noise and small crosstalk, which is suitable for complex 3D scenes and has excellent holographic 3D display ability.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In order to improve the reconstruction quality of hologram, the iterative layer-oriented angular spectrum optimization (ILASO) algorithm is proposed in this paper, which combines non-iterative and iterative methods to achieve full color holographic three-dimensional (3D) display. In this paper, the hologram is generated by layer-oriented angular spectrum diffraction, and then the hologram is optimized by error feedback iteration, so as to reduce the speckle noise and improve the reconstruction quality of the hologram. Simulation results show that the holographic reconstructed image has strong detail representation ability, weak speckle noise and small crosstalk, which is suitable for complex 3D scenes and has excellent holographic 3D display ability.", "fno": "10075271", "keywords": [ "Holography", "Image Reconstruction", "Iterative Methods", "Speckle", "Three Dimensional Displays", "Color Holographic Three Dimensional", "Complex 3 D Scenes", "Error Feedback Iteration", "Excellent Holographic 3 D Display Ability", "Hologram", "Holographic 3 D Display Method", "Holographic Reconstructed Image", "Iterative Layer Oriented Angular Spectrum Optimization Algorithm", "Iterative Methods", "Layer Oriented Angular Spectrum Diffraction", "Noniterative", "Reconstruction Quality", "Three Dimensional Displays", "Diffraction", "Metaverse", "Image Color Analysis", "Simulation", "Crosstalk", "Speckle", "Holographic 3 D Display", "Computer Generated Holograms", "Fienup Algorithm", "Angular Spectrum Diffraction" ], "authors": [ { "affiliation": "Beijing Technology Research Branch, TIAN DI Science & Technology Co., Ltd,Beijing,China,100013", "fullName": "Taihui Wu", "givenName": 
"Taihui", "surname": "Wu", "__typename": "ArticleAuthorType" }, { "affiliation": "Beijing Technology Research Branch, TIAN DI Science & Technology Co., Ltd,Beijing,China,100013", "fullName": "Xin Fu", "givenName": "Xin", "surname": "Fu", "__typename": "ArticleAuthorType" }, { "affiliation": "Beijing Technology Research Branch, TIAN DI Science & Technology Co., Ltd,Beijing,China,100013", "fullName": "Jiacheng Li", "givenName": "Jiacheng", "surname": "Li", "__typename": "ArticleAuthorType" } ], "idPrefix": "icftic", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2022-12-01T00:00:00", "pubType": "proceedings", "pages": "256-260", "year": "2022", "issn": null, "isbn": "979-8-3503-2195-1", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "10075291", "articleId": "1LRlbsX5r1e", "__typename": "AdjacentArticleType" }, "next": { "fno": "10075199", "articleId": "1LRl2OoG10c", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icig/2013/5050/0/5050a761", "title": "Holographic Projection Using Converging Spherical Wave Illumination", "doi": null, "abstractUrl": "/proceedings-article/icig/2013/5050a761/12OmNASraPv", "parentPublication": { "id": "proceedings/icig/2013/5050/0", "title": "2013 Seventh International Conference on Image and Graphics (ICIG)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2012/1611/0/06239344", "title": "Geometry-corrected light field rendering for creating a holographic stereogram", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2012/06239344/12OmNBh8gW6", "parentPublication": { "id": "proceedings/cvprw/2012/1611/0", "title": "2012 IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "proceedings/icicta/2008/3357/2/3357c087", "title": "A Novel DMD Display of Phase-Only Coding Hologram", "doi": null, "abstractUrl": "/proceedings-article/icicta/2008/3357c087/12OmNBr4exO", "parentPublication": { "id": "icicta/2008/3357/2", "title": "Intelligent Computation Technology and Automation, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icassp/1992/0532/3/00226262", "title": "A novel approach to 3-dimensional holographic television display: principles and simulations", "doi": null, "abstractUrl": "/proceedings-article/icassp/1992/00226262/12OmNqIzhao", "parentPublication": { "id": "proceedings/icassp/1992/0532/3", "title": "Acoustics, Speech, and Signal Processing, IEEE International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/2003/2030/0/20300078", "title": "Holographic Video Display of Time-Series Volumetric Medical Data", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2003/20300078/12OmNx76TAX", "parentPublication": { "id": "proceedings/ieee-vis/2003/2030/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icassp/1995/2431/4/00480125", "title": "Prediction of sound pressure fields by Picard-iterative BEM based on holographic interferometry", "doi": null, "abstractUrl": "/proceedings-article/icassp/1995/00480125/12OmNxuXcwF", "parentPublication": { "id": "proceedings/icassp/1995/2431/4", "title": "Acoustics, Speech, and Signal Processing, IEEE International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2001/1198/0/11980027", "title": "A Simple Method of Color electro-holographic display system Using a white light source and three LCD Panels", "doi": null, "abstractUrl": 
"/proceedings-article/icme/2001/11980027/12OmNzxyiNi", "parentPublication": { "id": "proceedings/icme/2001/1198/0", "title": "IEEE International Conference on Multimedia and Expo, 2001. ICME 2001.", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccp/2022/5851/0/09887757", "title": "Analyzing phase masks for wide &#x00E9;tendue holographic displays", "doi": null, "abstractUrl": "/proceedings-article/iccp/2022/09887757/1GZivOBcOnS", "parentPublication": { "id": "proceedings/iccp/2022/5851/0", "title": "2022 IEEE International Conference on Computational Photography (ICCP)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2023/4815/0/481500a237", "title": "A Compact Photochromic Occlusion Capable See-through Display with Holographic Lenses", "doi": null, "abstractUrl": "/proceedings-article/vr/2023/481500a237/1MNgTZ7ZNLO", "parentPublication": { "id": "proceedings/vr/2023/4815/0", "title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/11/09523842", "title": "Gaze-Contingent Retinal Speckle Suppression for Perceptually-Matched Foveated Holographic Displays", "doi": null, "abstractUrl": "/journal/tg/2021/11/09523842/1wpqr1B6wA8", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1kecGUxjQC4", "title": "2020 15th IEEE International Conference on Automatic Face and Gesture Recognition (FG 2020) (FG)", "acronym": "fg", "groupId": "1002160", "volume": "0", "displayVolume": "1", "year": "2020", "__typename": "ProceedingType" }, "article": { "id": "1kecHZrgamQ", "doi": "10.1109/FG47880.2020.00025", "title": "Recognizing Perceived Emotions from Facial Expressions", "normalizedTitle": "Recognizing Perceived Emotions from Facial Expressions", "abstract": "Expression recognition has seen an increase in research in past years, however, little work has been on recognizing perceived emotion (i.e. subject self-reporting of emotion). Considering this, we investigate the perceived emotion of subjects that perform tasks meant to elicit emotion. To facilitate this investigation, we use the BP4D+ multimodal spontaneous emotion corpus. We first statistically analyze the subject's perceived emotions across 10 tasks available in BP4D+. We show the percentage of subjects that felt specific emotions for each of the tasks. This is done across all tested subjects, as well as male and female subjects independently. Along with our statistical analysis, we also propose a 3D convolutional neural network (CNN) architecture to recognize multiple emotions felt for each task sequence. We report accuracy, Fl-binary and AUC for all subjects, as well as male and female subjects.", "abstracts": [ { "abstractType": "Regular", "content": "Expression recognition has seen an increase in research in past years, however, little work has been on recognizing perceived emotion (i.e. subject self-reporting of emotion). Considering this, we investigate the perceived emotion of subjects that perform tasks meant to elicit emotion. To facilitate this investigation, we use the BP4D+ multimodal spontaneous emotion corpus. We first statistically analyze the subject's perceived emotions across 10 tasks available in BP4D+. 
We show the percentage of subjects that felt specific emotions for each of the tasks. This is done across all tested subjects, as well as male and female subjects independently. Along with our statistical analysis, we also propose a 3D convolutional neural network (CNN) architecture to recognize multiple emotions felt for each task sequence. We report accuracy, Fl-binary and AUC for all subjects, as well as male and female subjects.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Expression recognition has seen an increase in research in past years, however, little work has been on recognizing perceived emotion (i.e. subject self-reporting of emotion). Considering this, we investigate the perceived emotion of subjects that perform tasks meant to elicit emotion. To facilitate this investigation, we use the BP4D+ multimodal spontaneous emotion corpus. We first statistically analyze the subject's perceived emotions across 10 tasks available in BP4D+. We show the percentage of subjects that felt specific emotions for each of the tasks. This is done across all tested subjects, as well as male and female subjects independently. Along with our statistical analysis, we also propose a 3D convolutional neural network (CNN) architecture to recognize multiple emotions felt for each task sequence. 
We report accuracy, Fl-binary and AUC for all subjects, as well as male and female subjects.", "fno": "307900a179", "keywords": [ "Convolutional Neural Nets", "Emotion Recognition", "Face Recognition", "Neural Net Architecture", "Statistical Analysis", "Male Subjects", "Female Subjects", "Facial Expressions", "Expression Recognition", "BP 4 D", "Multimodal Spontaneous Emotion Corpus", "Perceived Emotion Recognition", "Statistical Analysis", "CNN", "3 D Convolutional Neural Network Architecture", "F 1 Binary", "AUC", "Task Analysis", "Face Recognition", "Emotion Recognition", "Three Dimensional Displays", "Pain", "Two Dimensional Displays", "Artificial Intelligence", "Perceived Emotion", "Affective Computing", "Expressions" ], "authors": [ { "affiliation": "University of South Florida,Department of Computer Science and Engineering,Tampa,Florida", "fullName": "Saurabh Hinduja", "givenName": "Saurabh", "surname": "Hinduja", "__typename": "ArticleAuthorType" }, { "affiliation": "University of South Florida,Department of Computer Science and Engineering,Tampa,Florida", "fullName": "Shaun Canavan", "givenName": "Shaun", "surname": "Canavan", "__typename": "ArticleAuthorType" }, { "affiliation": "Binghamton University,Department of Computer Science,Binghamton,New York", "fullName": "Lijun Yin", "givenName": "Lijun", "surname": "Yin", "__typename": "ArticleAuthorType" } ], "idPrefix": "fg", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2020-11-01T00:00:00", "pubType": "proceedings", "pages": "236-240", "year": "2020", "issn": null, "isbn": "978-1-7281-3079-8", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "307900a200", "articleId": "1kecI1IU0h2", "__typename": "AdjacentArticleType" }, "next": { "fno": "307900a762", "articleId": "1kecIWq8r60", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, 
"recommendedArticles": [ { "id": "proceedings/acii/2015/9953/0/07344583", "title": "Decoupling facial expressions and head motions in complex emotions", "doi": null, "abstractUrl": "/proceedings-article/acii/2015/07344583/12OmNB9t6qd", "parentPublication": { "id": "proceedings/acii/2015/9953/0", "title": "2015 International Conference on Affective Computing and Intelligent Interaction (ACII)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/acii/2015/9953/0/07344660", "title": "Expression training for complex emotions using facial expressions and head movements", "doi": null, "abstractUrl": "/proceedings-article/acii/2015/07344660/12OmNqFa5mI", "parentPublication": { "id": "proceedings/acii/2015/9953/0", "title": "2015 International Conference on Affective Computing and Intelligent Interaction (ACII)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/acii/2017/0563/0/08273575", "title": "Recognizing induced emotions of movie audiences: Are induced and perceived emotions the same?", "doi": null, "abstractUrl": "/proceedings-article/acii/2017/08273575/12OmNyen1lN", "parentPublication": { "id": "proceedings/acii/2017/0563/0", "title": "2017 Seventh International Conference on Affective Computing and Intelligent Interaction (ACII)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/acii/2015/9953/0/07344697", "title": "Perception of congruent facial and kinesthetic expressions of emotions", "doi": null, "abstractUrl": "/proceedings-article/acii/2015/07344697/12OmNynJMEK", "parentPublication": { "id": "proceedings/acii/2015/9953/0", "title": "2015 International Conference on Affective Computing and Intelligent Interaction (ACII)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ta/2021/02/08493613", "title": "Automatic Recognition of Facial Displays of Unfelt Emotions", 
"doi": null, "abstractUrl": "/journal/ta/2021/02/08493613/14qdcQ8jsry", "parentPublication": { "id": "trans/ta", "title": "IEEE Transactions on Affective Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ta/2021/01/08653953", "title": "Recognizing Induced Emotions of Movie Audiences from Multimodal Information", "doi": null, "abstractUrl": "/journal/ta/2021/01/08653953/180h189Er28", "parentPublication": { "id": "trans/ta", "title": "IEEE Transactions on Affective Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/acii/2019/3888/0/08925486", "title": "Emotion Recognition Using Fused Physiological Signals", "doi": null, "abstractUrl": "/proceedings-article/acii/2019/08925486/1fHGFQrA3Ys", "parentPublication": { "id": "proceedings/acii/2019/3888/0", "title": "2019 8th International Conference on Affective Computing and Intelligent Interaction (ACII)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2019/4765/0/476500a395", "title": "Learning Perceived Emotion Using Affective and Deep Features for Mental Health Applications", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2019/476500a395/1gyskQ3YBeU", "parentPublication": { "id": "proceedings/ismar-adjunct/2019/4765/0", "title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icse/2020/7121/0/712100a666", "title": "Recognizing Developers&#x0027; Emotions while Programming", "doi": null, "abstractUrl": "/proceedings-article/icse/2020/712100a666/1pK5fZtKi76", "parentPublication": { "id": "proceedings/icse/2020/7121/0", "title": "2020 IEEE/ACM 42nd International Conference on Software Engineering (ICSE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { 
"id": "trans/ta/5555/01/09484732", "title": "Data Augmentation via Face Morphing for Recognizing Intensities of Facial Emotions", "doi": null, "abstractUrl": "/journal/ta/5555/01/09484732/1veoidGiXcY", "parentPublication": { "id": "trans/ta", "title": "IEEE Transactions on Affective Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNASrawz", "title": "2009 IEEE Virtual Reality Conference", "acronym": "vr", "groupId": "1000791", "volume": "0", "displayVolume": "0", "year": "2009", "__typename": "ProceedingType" }, "article": { "id": "1t0I54T7Gi4", "doi": "10.1109/VR.2009.4811010", "title": "Deskotheque: Improved Spatial Awareness in Multi-Display Environments", "normalizedTitle": "Deskotheque: Improved Spatial Awareness in Multi-Display Environments", "abstract": "In this paper we present the multi-display environment Deskotheque, which combines personal and tiled projected displays into a continuous teamspace. Its main distinguishing factor is a fine-grained spatial (i. e., both geometric and topological) model of the display layout. Using this model, Deskotheque allows seamless mouse pointer navigation and application window sharing across the multi-display environment. Geometric compensation of casually aligned multi-projector displays supports a wide range of display configurations. Mouse pointer redirection and window migration are tightly integrated into the windowing system, while geometric compensation of projected imagery is accomplished by a 3D compositing window manager. Thus, Deskotheque provides sharing of unmodified desktop application windows across display and workstation boundaries without compromising hardware-accelerated rendering of 2D or 3D content on projected tiled displays with geometric compensation.", "abstracts": [ { "abstractType": "Regular", "content": "In this paper we present the multi-display environment Deskotheque, which combines personal and tiled projected displays into a continuous teamspace. Its main distinguishing factor is a fine-grained spatial (i. e., both geometric and topological) model of the display layout. Using this model, Deskotheque allows seamless mouse pointer navigation and application window sharing across the multi-display environment. 
Geometric compensation of casually aligned multi-projector displays supports a wide range of display configurations. Mouse pointer redirection and window migration are tightly integrated into the windowing system, while geometric compensation of projected imagery is accomplished by a 3D compositing window manager. Thus, Deskotheque provides sharing of unmodified desktop application windows across display and workstation boundaries without compromising hardware-accelerated rendering of 2D or 3D content on projected tiled displays with geometric compensation.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In this paper we present the multi-display environment Deskotheque, which combines personal and tiled projected displays into a continuous teamspace. Its main distinguishing factor is a fine-grained spatial (i. e., both geometric and topological) model of the display layout. Using this model, Deskotheque allows seamless mouse pointer navigation and application window sharing across the multi-display environment. Geometric compensation of casually aligned multi-projector displays supports a wide range of display configurations. Mouse pointer redirection and window migration are tightly integrated into the windowing system, while geometric compensation of projected imagery is accomplished by a 3D compositing window manager. 
Thus, Deskotheque provides sharing of unmodified desktop application windows across display and workstation boundaries without compromising hardware-accelerated rendering of 2D or 3D content on projected tiled displays with geometric compensation.", "fno": "04811010", "keywords": [ "Model Driven Engineering", "Mice", "Navigation", "Solid Modeling", "Computer Displays", "Two Dimensional Displays", "Three Dimensional Displays", "Collaborative Work", "Electronic Mail", "User Interfaces", "Multi Display Environment", "Geometric Display Compensation", "Collaboration", "Mouse Pointer Navigation", "H 5 3 Information Interfaces And Presentation Group And Organization Interfaces Collaborative Computing", "I 3 3 Computer Graphics Picture Image Generation Display Algorithms", "H 5 2 Information Interfaces And Presentation User Interfaces Windowing Systems" ], "authors": [ { "affiliation": "Graz University of Technology e-mail: pirchheim@icg.tugraz.at", "fullName": "Christian Pirchheim", "givenName": "Christian", "surname": "Pirchheim", "__typename": "ArticleAuthorType" }, { "affiliation": "Graz University of Technology e-mail: waldner@icg.tugraz.at", "fullName": "Manuela Waldner", "givenName": "Manuela", "surname": "Waldner", "__typename": "ArticleAuthorType" }, { "affiliation": "Graz University of Technology e-mail: schmalstieg@icg.tugraz.at", "fullName": "Dieter Schmalstieg", "givenName": "Dieter", "surname": "Schmalstieg", "__typename": "ArticleAuthorType" } ], "idPrefix": "vr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2009-03-01T00:00:00", "pubType": "proceedings", "pages": "123-126", "year": "2009", "issn": "1087-8270", "isbn": "978-1-4244-3943-0", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "04811009", "articleId": "12OmNB8CiYX", "__typename": "AdjacentArticleType" }, "next": { "fno": "04811011", "articleId": "12OmNyO8tMm", 
"__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/wacv/2017/4822/0/07926707", "title": "Automatic Calibration of a Multiple-Projector Spherical Fish Tank VR Display", "doi": null, "abstractUrl": "/proceedings-article/wacv/2017/07926707/12OmNAoDhTe", "parentPublication": { "id": "proceedings/wacv/2017/4822/0", "title": "2017 IEEE Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2009/3994/0/05204316", "title": "A projector-camera system for creating a display with water drops", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2009/05204316/12OmNqH9hk4", "parentPublication": { "id": "proceedings/cvprw/2009/3994/0", "title": "2009 IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/pacificvis/2008/1966/0/04475472", "title": "Pixelplexing: Gaining Display Resolution Through Time", "doi": null, "abstractUrl": "/proceedings-article/pacificvis/2008/04475472/12OmNqyUUID", "parentPublication": { "id": "proceedings/pacificvis/2008/1966/0", "title": "IEEE Pacific Visualization Symposium 2008", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2017/6067/0/08019517", "title": "Improved motion compensation for 360° video projected to polytopes", "doi": null, "abstractUrl": "/proceedings-article/icme/2017/08019517/12OmNvD8Ruv", "parentPublication": { "id": "proceedings/icme/2017/6067/0", "title": "2017 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dui/2015/6886/0/07131755", "title": "Mapping 2D input to 3D immersive spatial augmented reality", "doi": null, "abstractUrl": 
"/proceedings-article/3dui/2015/07131755/12OmNwAKCNT", "parentPublication": { "id": "proceedings/3dui/2015/6886/0", "title": "2015 IEEE Symposium on 3D User Interfaces (3DUI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icat/2007/3056/0/30560063", "title": "LUMAR: A Hybrid Spatial Display System for 2D and 3D Handheld Augmented Reality", "doi": null, "abstractUrl": "/proceedings-article/icat/2007/30560063/12OmNwpoFDq", "parentPublication": { "id": "proceedings/icat/2007/3056/0", "title": "17th International Conference on Artificial Reality and Telexistence (ICAT 2007)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/1992/2925/0/00202170", "title": "An object oriented image display system", "doi": null, "abstractUrl": "/proceedings-article/icpr/1992/00202170/12OmNz2kqkK", "parentPublication": { "id": "proceedings/icpr/1992/2925/0", "title": "11th IAPR International Conference on Pattern Recognition. Vol. IV. 
Conference D: Architectures for Vision and Pattern Recognition,", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2017/6647/0/07892378", "title": "mpCubee: Towards a mobile perspective cubic display using mobile phones", "doi": null, "abstractUrl": "/proceedings-article/vr/2017/07892378/12OmNzkuKKg", "parentPublication": { "id": "proceedings/vr/2017/6647/0", "title": "2017 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2018/6420/0/642000c031", "title": "Salience Guided Depth Calibration for Perceptually Optimized Compressive Light Field 3D Display", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2018/642000c031/17D45VsBTZA", "parentPublication": { "id": "proceedings/cvpr/2018/6420/0", "title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/mu/2020/01/08889397", "title": "Glasses-Free 3-D and Augmented Reality Display Advances: From Theory to Implementation", "doi": null, "abstractUrl": "/magazine/mu/2020/01/08889397/1ezPlyZdxeM", "parentPublication": { "id": "mags/mu", "title": "IEEE MultiMedia", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNvTBBcg", "title": "2014 Eleventh International Conference on Information Technology: New Generations (ITNG)", "acronym": "itng", "groupId": "1001685", "volume": "0", "displayVolume": "0", "year": "2014", "__typename": "ProceedingType" }, "article": { "id": "12OmNAndigu", "doi": "10.1109/ITNG.2014.93", "title": "Empirically Derived Guidelines for Multimodal Interaction in Knowledge-Based Environments", "normalizedTitle": "Empirically Derived Guidelines for Multimodal Interaction in Knowledge-Based Environments", "abstract": "The development of multimodal user interfaces is a complex process that needs useful guidelines and hints. This paper summarizes the overall results from a previous experimental program and then suggests useful guidelines for creating usable and acceptable knowledge-based system (KBS) interfaces. The summary of the overall experimental results ranks three experimental conditions of multimodal interaction, according to five evaluation domains. These domains were: ability to recall, ability to use knowledge effectively, ability to use knowledge efficiently, extended usability attitudes and user acceptance. The proposed guidelines offer a set of useful hints while also describing the sources of the variance between the experimental conditions. The empirically derived and proposed guidelines were: knowledge communication from a single point of contact, audio-visual metaphors to tackle information overload, task complexity and knowledge-intensity as key factors, and socially rich presence which allowed for the first impression to last longer. These guidelines have presented a roadmap for both researchers and practitioners working within the field of knowledge-based software engineering (KBSE).", "abstracts": [ { "abstractType": "Regular", "content": "The development of multimodal user interfaces is a complex process that needs useful guidelines and hints. 
This paper summarizes the overall results from a previous experimental program and then suggests useful guidelines for creating usable and acceptable knowledge-based system (KBS) interfaces. The summary of the overall experimental results ranks three experimental conditions of multimodal interaction, according to five evaluation domains. These domains were: ability to recall, ability to use knowledge effectively, ability to use knowledge efficiently, extended usability attitudes and user acceptance. The proposed guidelines offer a set of useful hints while also describing the sources of the variance between the experimental conditions. The empirically derived and proposed guidelines were: knowledge communication from a single point of contact, audio-visual metaphors to tackle information overload, task complexity and knowledge-intensity as key factors, and socially rich presence which allowed for the first impression to last longer. These guidelines have presented a roadmap for both researchers and practitioners working within the field of knowledge-based software engineering (KBSE).", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "The development of multimodal user interfaces is a complex process that needs useful guidelines and hints. This paper summarizes the overall results from a previous experimental program and then suggests useful guidelines for creating usable and acceptable knowledge-based system (KBS) interfaces. The summary of the overall experimental results ranks three experimental conditions of multimodal interaction, according to five evaluation domains. These domains were: ability to recall, ability to use knowledge effectively, ability to use knowledge efficiently, extended usability attitudes and user acceptance. The proposed guidelines offer a set of useful hints while also describing the sources of the variance between the experimental conditions. 
The empirically derived and proposed guidelines were: knowledge communication from a single point of contact, audio-visual metaphors to tackle information overload, task complexity and knowledge-intensity as key factors, and socially rich presence which allowed for the first impression to last longer. These guidelines have presented a roadmap for both researchers and practitioners working within the field of knowledge-based software engineering (KBSE).", "fno": "06822253", "keywords": [ "Guidelines", "Usability", "Speech", "Knowledge Based Systems", "Complexity Theory", "Avatars", "Visualization", "Interaction", "Knowledge", "Multimodal", "Interface", "Guidelines", "Avatar", "Sound" ], "authors": [ { "affiliation": null, "fullName": "Mutlaq B. Alotaibi", "givenName": "Mutlaq B.", "surname": "Alotaibi", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Dimitrios I. Rigas", "givenName": "Dimitrios I.", "surname": "Rigas", "__typename": "ArticleAuthorType" } ], "idPrefix": "itng", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2014-04-01T00:00:00", "pubType": "proceedings", "pages": "539-544", "year": "2014", "issn": null, "isbn": "978-1-4799-3187-3", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "06822252", "articleId": "12OmNAolGJb", "__typename": "AdjacentArticleType" }, "next": { "fno": "06822254", "articleId": "12OmNBZYToX", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/rew/2016/3694/0/3694a080", "title": "Security Guidelines: Requirements Engineering for Verifying Code Quality", "doi": null, "abstractUrl": "/proceedings-article/rew/2016/3694a080/12OmNrJiCQv", "parentPublication": { "id": "proceedings/rew/2016/3694/0", "title": "2016 IEEE 24th International Requirements Engineering Conference Workshops (REW)", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/dexa/2010/8049/0/05590760", "title": "A Multimodal Dialogue Interface", "doi": null, "abstractUrl": "/proceedings-article/dexa/2010/05590760/12OmNxvNZXb", "parentPublication": { "id": "proceedings/dexa/2010/8049/0", "title": "2010 Workshops on Database and Expert Systems Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aina/2009/3638/0/3638a945", "title": "Utilising Multimodal Interaction Metaphors in E-learning Applications: An Experimental Study", "doi": null, "abstractUrl": "/proceedings-article/aina/2009/3638a945/12OmNyeWdKy", "parentPublication": { "id": "proceedings/aina/2009/3638/0", "title": "2009 International Conference on Advanced Information Networking and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/soca/2014/6833/0/6833a183", "title": "Supporting Doctors through Mobile Multimodal Interaction and Process-Aware Execution of Clinical Guidelines", "doi": null, "abstractUrl": "/proceedings-article/soca/2014/6833a183/12OmNylKAOZ", "parentPublication": { "id": "proceedings/soca/2014/6833/0", "title": "2014 IEEE 7th International Conference on Service-Oriented Computing and Applications (SOCA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/csci/2015/9795/0/9795a595", "title": "Game-Based Learning Guidelines: Designing for Learning and Fun", "doi": null, "abstractUrl": "/proceedings-article/csci/2015/9795a595/12OmNzVXNKF", "parentPublication": { "id": "proceedings/csci/2015/9795/0", "title": "2015 International Conference on Computational Science and Computational Intelligence (CSCI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tk/2007/06/k0832", "title": "Verification of Medical Guidelines Using Background Knowledge in Task Networks", 
"doi": null, "abstractUrl": "/journal/tk/2007/06/k0832/13rRUx0xPip", "parentPublication": { "id": "trans/tk", "title": "IEEE Transactions on Knowledge & Data Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tk/1993/05/k0895", "title": "A Set of Design Guidelines for Object-Oriented Deductive Systems", "doi": null, "abstractUrl": "/journal/tk/1993/05/k0895/13rRUxDqS8y", "parentPublication": { "id": "trans/tk", "title": "IEEE Transactions on Knowledge & Data Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/t4e/2018/1143/0/114300a146", "title": "Usability and Usefulness of ADVIcE Tool Experiment Design Guidelines for Virtual Laboratories", "doi": null, "abstractUrl": "/proceedings-article/t4e/2018/114300a146/17D45WaTkmV", "parentPublication": { "id": "proceedings/t4e/2018/1143/0", "title": "2018 IEEE Ninth International Conference on Technology for Education (T4E)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/pc/2019/02/08794686", "title": "Revolution or Evolution? Speech Interaction and HCI Design Guidelines", "doi": null, "abstractUrl": "/magazine/pc/2019/02/08794686/1cplXYDY06Y", "parentPublication": { "id": "mags/pc", "title": "IEEE Pervasive Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/10/09385921", "title": "Declutter and Focus: Empirically Evaluating Design Guidelines for Effective Data Communication", "doi": null, "abstractUrl": "/journal/tg/2022/10/09385921/1seipuzsKis", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNylsZKk", "title": "2007 11th International Conference Information Visualization (IV '07)", "acronym": "iv", "groupId": "1000370", "volume": "0", "displayVolume": "0", "year": "2007", "__typename": "ProceedingType" }, "article": { "id": "12OmNAoDhQU", "doi": "10.1109/IV.2007.114", "title": "Towards a Model of Information Aesthetics in Information Visualization", "normalizedTitle": "Towards a Model of Information Aesthetics in Information Visualization", "abstract": "This paper proposes a model of information aesthetics in the context of information visualization. It addresses the need to acknowledge a recently emerging number of visualization projects that combine information visualization techniques with principles of creative design. The proposed model contributes to a better understanding of information aesthetics as a potentially independent research field within visualization that specifically focuses on the experience of aesthetics, dataset interpretation and interaction. The proposed model is based on analysing existing visualization techniques by their interpretative intent and data mapping inspiration. It reveals information aesthetics as the conceptual link between information visualization and visualization art, and includes the fields of social and ambient visualization. This model is unique in its focus on aesthetics as the artistic influence on the technical implementation and intended purpose of a visualization technique, rather than subjective aesthetic judgments of the visualization outcome. This research provides a framework for understanding aesthetics in visualization, and allows for new design guidelines and reviewing criteria.", "abstracts": [ { "abstractType": "Regular", "content": "This paper proposes a model of information aesthetics in the context of information visualization. 
It addresses the need to acknowledge a recently emerging number of visualization projects that combine information visualization techniques with principles of creative design. The proposed model contributes to a better understanding of information aesthetics as a potentially independent research field within visualization that specifically focuses on the experience of aesthetics, dataset interpretation and interaction. The proposed model is based on analysing existing visualization techniques by their interpretative intent and data mapping inspiration. It reveals information aesthetics as the conceptual link between information visualization and visualization art, and includes the fields of social and ambient visualization. This model is unique in its focus on aesthetics as the artistic influence on the technical implementation and intended purpose of a visualization technique, rather than subjective aesthetic judgments of the visualization outcome. This research provides a framework for understanding aesthetics in visualization, and allows for new design guidelines and reviewing criteria.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "This paper proposes a model of information aesthetics in the context of information visualization. It addresses the need to acknowledge a recently emerging number of visualization projects that combine information visualization techniques with principles of creative design. The proposed model contributes to a better understanding of information aesthetics as a potentially independent research field within visualization that specifically focuses on the experience of aesthetics, dataset interpretation and interaction. The proposed model is based on analysing existing visualization techniques by their interpretative intent and data mapping inspiration. 
It reveals information aesthetics as the conceptual link between information visualization and visualization art, and includes the fields of social and ambient visualization. This model is unique in its focus on aesthetics as the artistic influence on the technical implementation and intended purpose of a visualization technique, rather than subjective aesthetic judgments of the visualization outcome. This research provides a framework for understanding aesthetics in visualization, and allows for new design guidelines and reviewing criteria.", "fno": "29000087", "keywords": [ "Data Visualisation", "Information Aesthetics", "Information Visualization", "Visualization Projects", "Creative Design", "Data Mapping", "Conceptual Link", "Data Visualization", "Art", "Cognition", "Context Modeling", "Displays", "Computer Interfaces", "Humans", "Guidelines", "Reflection", "Bridges" ], "authors": [ { "affiliation": "Key Centre of Design Computing & Cognition, University of Sydney, Australia", "fullName": "Andrea Lau", "givenName": "Andrea", "surname": "Lau", "__typename": "ArticleAuthorType" }, { "affiliation": "Key Centre of Design Computing & Cognition, University of Sydney, Australia", "fullName": "Andrew Vande Moere", "givenName": "Andrew", "surname": "Vande Moere", "__typename": "ArticleAuthorType" } ], "idPrefix": "iv", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2007-07-01T00:00:00", "pubType": "proceedings", "pages": "87-92", "year": "2007", "issn": "1550-6037", "isbn": "0-7695-2900-3", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "29000003", "articleId": "12OmNvAiSFI", "__typename": "AdjacentArticleType" }, "next": { "fno": "29000011", "articleId": "12OmNyqiaQq", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/fie/2015/8454/0/07344231", "title": 
"Aesthetics and emotional engagement: Why it matters to our students, why it matters to our professions", "doi": null, "abstractUrl": "/proceedings-article/fie/2015/07344231/12OmNwDSdzK", "parentPublication": { "id": "proceedings/fie/2015/8454/0", "title": "2015 IEEE Frontiers in Education Conference (FIE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2009/3733/0/3733a579", "title": "Measuring Aesthetics for Information Visualization", "doi": null, "abstractUrl": "/proceedings-article/iv/2009/3733a579/12OmNxFJXDP", "parentPublication": { "id": "proceedings/iv/2009/3733/0", "title": "2009 13th International Conference Information Visualisation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2004/2177/0/21770245", "title": "Aesthetics and Inspiration for Visualization Design: Bridging the Gap between Art and Science", "doi": null, "abstractUrl": "/proceedings-article/iv/2004/21770245/12OmNyXMQhm", "parentPublication": { "id": "proceedings/iv/2004/2177/0", "title": "Proceedings. Eighth International Conference on Information Visualisation, 2004. 
IV 2004.", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/isdea/2013/4893/0/06456244", "title": "Research on Performance Evaluation of Architectural Aesthetics with the AHP Theory", "doi": null, "abstractUrl": "/proceedings-article/isdea/2013/06456244/12OmNzTH0I0", "parentPublication": { "id": "proceedings/isdea/2013/4893/0", "title": "2013 Third International Conference on Intelligent System Design and Engineering Applications (ISDEA 2013)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-infovis/2003/2055/0/20550030", "title": "Between Aesthetics and Utility: Designing Ambient Information Visualizations", "doi": null, "abstractUrl": "/proceedings-article/ieee-infovis/2003/20550030/12OmNzTYBNQ", "parentPublication": { "id": "proceedings/ieee-infovis/2003/2055/0", "title": "Information Visualization, IEEE Symposium on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2015/01/mcg2015010056", "title": "Teaching Information Aesthetics as a Research Class in China", "doi": null, "abstractUrl": "/magazine/cg/2015/01/mcg2015010056/13rRUB6SpUI", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ci/2012/03/06185648", "title": "Adapting models of visual aesthetics for personalized content creation", "doi": null, "abstractUrl": "/journal/ci/2012/03/06185648/13rRUwh80Jd", "parentPublication": { "id": "trans/ci", "title": "IEEE Transactions on Computational Intelligence and AI in Games", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2020/6553/0/09093412", "title": "Composition-Aware Image Aesthetics Assessment", "doi": null, "abstractUrl": "/proceedings-article/wacv/2020/09093412/1jPbiliRCmY", "parentPublication": { "id": 
"proceedings/wacv/2020/6553/0", "title": "2020 IEEE Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2022/01/09204797", "title": "Learning Perceptual Aesthetics of 3-D Shapes From Multiple Views", "doi": null, "abstractUrl": "/magazine/cg/2022/01/09204797/1nme2tJ6cCs", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/02/09229135", "title": "Towards Modeling Visualization Processes as Dynamic Bayesian Networks", "doi": null, "abstractUrl": "/journal/tg/2021/02/09229135/1o3npNp56Vi", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "17D45VtKiqH", "title": "2018 International Symposium on Big Data Visual and Immersive Analytics (BDVA)", "acronym": "bdva", "groupId": "1809805", "volume": "0", "displayVolume": "0", "year": "2018", "__typename": "ProceedingType" }, "article": { "id": "17D45WgziPO", "doi": "10.1109/BDVA.2018.8534029", "title": "VISupply: A Supply-Chain Process Model for Visualization Guidelines", "normalizedTitle": "VISupply: A Supply-Chain Process Model for Visualization Guidelines", "abstract": "Visualization is widely accepted as an effective medium to communicate complex data to a human observer. To do this effectively, visualizations have to be carefully designed to achieve a certain intent. Visualization guidelines are proposed by the academic research community and practitioners to facilitate effective visualization design. A few guidelines have been received a fair amount of attention, and effort has been made to study, discuss, validate, falsify, adopt, adapt, or extend them. However, many guidelines have not received adequate exposure or have not had the opportunities to undergone a similar level of scrutiny. When some of these guidelines managed to emerge or resurface, it is often not clear about their scientific rationale and the state of play in their validation. In this paper, we juxtapose the development and consumption of visualization guidelines with that of consumer products. We outline a conceptual model for a Visualization Guidelines Supply Chain, VISupply. It describes an idealized loop of actions for formulating, curating, using, and improving guidelines systematically. By enabling an ecosystem for visualization guidelines, the community can collectively optimize these guidelines and adopt them with confidence in a given context. 
We examine the current and potential roles of different stakeholders in this ecosystem.", "abstracts": [ { "abstractType": "Regular", "content": "Visualization is widely accepted as an effective medium to communicate complex data to a human observer. To do this effectively, visualizations have to be carefully designed to achieve a certain intent. Visualization guidelines are proposed by the academic research community and practitioners to facilitate effective visualization design. A few guidelines have been received a fair amount of attention, and effort has been made to study, discuss, validate, falsify, adopt, adapt, or extend them. However, many guidelines have not received adequate exposure or have not had the opportunities to undergone a similar level of scrutiny. When some of these guidelines managed to emerge or resurface, it is often not clear about their scientific rationale and the state of play in their validation. In this paper, we juxtapose the development and consumption of visualization guidelines with that of consumer products. We outline a conceptual model for a Visualization Guidelines Supply Chain, VISupply. It describes an idealized loop of actions for formulating, curating, using, and improving guidelines systematically. By enabling an ecosystem for visualization guidelines, the community can collectively optimize these guidelines and adopt them with confidence in a given context. We examine the current and potential roles of different stakeholders in this ecosystem.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Visualization is widely accepted as an effective medium to communicate complex data to a human observer. To do this effectively, visualizations have to be carefully designed to achieve a certain intent. Visualization guidelines are proposed by the academic research community and practitioners to facilitate effective visualization design. 
A few guidelines have been received a fair amount of attention, and effort has been made to study, discuss, validate, falsify, adopt, adapt, or extend them. However, many guidelines have not received adequate exposure or have not had the opportunities to undergone a similar level of scrutiny. When some of these guidelines managed to emerge or resurface, it is often not clear about their scientific rationale and the state of play in their validation. In this paper, we juxtapose the development and consumption of visualization guidelines with that of consumer products. We outline a conceptual model for a Visualization Guidelines Supply Chain, VISupply. It describes an idealized loop of actions for formulating, curating, using, and improving guidelines systematically. By enabling an ecosystem for visualization guidelines, the community can collectively optimize these guidelines and adopt them with confidence in a given context. We examine the current and potential roles of different stakeholders in this ecosystem.", "fno": "08534029", "keywords": [ "Consumer Products", "Data Visualisation", "Supply Chain Management", "Supply Chain Process Model", "Visualization Guidelines Supply Chain", "Visualization Design", "VI Supply", "Consumer Products", "Guidelines", "Visualization", "Supply Chains", "Data Visualization", "Automotive Engineering", "Automobiles" ], "authors": [ { "affiliation": null, "fullName": "Ulrich Engelke", "givenName": "Ulrich", "surname": "Engelke", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Alfie Abdul-Rahman", "givenName": "Alfie", "surname": "Abdul-Rahman", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Min Chen", "givenName": "Min", "surname": "Chen", "__typename": "ArticleAuthorType" } ], "idPrefix": "bdva", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2018-10-01T00:00:00", "pubType": "proceedings", "pages": "1-9", "year": "2018", 
"issn": null, "isbn": "978-1-5386-9194-6", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "08534028", "articleId": "17D45WXIkC4", "__typename": "AdjacentArticleType" }, "next": null, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/ismar/2015/7660/0/7660a120", "title": "[POSTER] Design Guidelines for Generating Augmented Reality Instructions", "doi": null, "abstractUrl": "/proceedings-article/ismar/2015/7660a120/12OmNAle6zC", "parentPublication": { "id": "proceedings/ismar/2015/7660/0", "title": "2015 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vs-games/2017/5812/0/08056610", "title": "Guidelines for interactive digital storytelling presentations of cultural heritage", "doi": null, "abstractUrl": "/proceedings-article/vs-games/2017/08056610/12OmNC3FG5s", "parentPublication": { "id": "proceedings/vs-games/2017/5812/0", "title": "2017 9th International Conference on Virtual Worlds and Games for Serious Applications (VS-Games)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cit-iucc-dasc-picom/2015/0154/0/07363376", "title": "Pervasive Information Visualization: Toward an Information Visualization Design Methodology for Multi-device Co-located Synchronous Collaboration", "doi": null, "abstractUrl": "/proceedings-article/cit-iucc-dasc-picom/2015/07363376/12OmNyNQSOX", "parentPublication": { "id": "proceedings/cit-iucc-dasc-picom/2015/0154/0", "title": "2015 IEEE International Conference on Computer and Information Technology; Ubiquitous Computing and Communications; Dependable, Autonomic and Secure Computing; Pervasive Intelligence and Computing (CIT/IUCC/DASC/PICOM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"mags/cg/2010/06/mcg2010060067", "title": "Using Visualization to Debug Visualization Software", "doi": null, "abstractUrl": "/magazine/cg/2010/06/mcg2010060067/13rRUB6SpOa", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2017/04/mcg2017040103", "title": "Pathways for Theoretical Advances in Visualization", "doi": null, "abstractUrl": "/magazine/cg/2017/04/mcg2017040103/13rRUzpzeDD", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2019/2838/0/283800a044", "title": "Once Upon a Time in a Land Far Away: Guidelines for Spatio-Temporal Narrative Visualization", "doi": null, "abstractUrl": "/proceedings-article/iv/2019/283800a044/1cMF8rgW5na", "parentPublication": { "id": "proceedings/iv/2019/2838/0", "title": "2019 23rd International Conference Information Visualisation (IV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icuems/2020/8832/0/09151635", "title": "Research on the compilation of guidelines for rail transit connection in Mountainous Cities", "doi": null, "abstractUrl": "/proceedings-article/icuems/2020/09151635/1lRlOO32ZK8", "parentPublication": { "id": "proceedings/icuems/2020/8832/0", "title": "2020 International Conference on Urban Engineering and Management Science (ICUEMS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/02/09246308", "title": "Guidelines For Pursuing and Revealing Data Abstractions", "doi": null, "abstractUrl": "/journal/tg/2021/02/09246308/1olDVqD8b0A", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/iv/2020/9134/0/913400a746", "title": "Literature Review on Visualization in Supply Chain &#x0026; Decision Making", "doi": null, "abstractUrl": "/proceedings-article/iv/2020/913400a746/1rSRaK4pgt2", "parentPublication": { "id": "proceedings/iv/2020/9134/0", "title": "2020 24th International Conference Information Visualisation (IV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/10/09385921", "title": "Declutter and Focus: Empirically Evaluating Design Guidelines for Effective Data Communication", "doi": null, "abstractUrl": "/journal/tg/2022/10/09385921/1seipuzsKis", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1sEXmtsgBs4", "title": "2021 IEEE/ACM 43rd International Conference on Software Engineering (ICSE)", "acronym": "icse", "groupId": null, "volume": "0", "displayVolume": null, "year": "2021", "__typename": "ProceedingType" }, "article": { "id": "1sEXovAYJdC", "doi": "10.1109/ICSE43902.2021.00075", "title": "Don&#x2019;t Do That! Hunting Down Visual Design Smells in Complex UIs Against Design Guidelines", "normalizedTitle": "Don’t Do That! Hunting Down Visual Design Smells in Complex UIs Against Design Guidelines", "abstract": "Just like code smells in source code, UI design has visual design smells. We study 93 don't-do-that guidelines in the Material Design, a complex design system created by Google. We find that these don't-guidelines go far beyond UI aesthetics, and involve seven general design dimensions (layout, typography, iconography, navigation, communication, color, and shape) and four component design aspects (anatomy, placement, behavior, and usage). Violating these guidelines results in visual design smells in UIs (or UI design smells). In a study of 60,756 UIs of 9,286 Android apps, we find that 7,497 UIs of 2,587 apps have at least one violation of some Material Design guidelines. This reveals the lack of developer training and tool support to avoid UI design smells. To fill this gap, we design an automated UI design smell detector (UIS-Hunter) that extracts and validates multi-modal UI information (component metadata, typography, iconography, color, and edge) for detecting the violation of diverse don't-guidelines in Material Design. The detection accuracy of UIS-Hunter is high (precision=0.81, recall=0.90) on the 60,756 UIs of 9,286 apps. We build a guideline gallery with real-world UI design smells that UIS-Hunter detects for developers to learn the best Material Design practices. 
Our user studies show that UIS-Hunter is more effective than manual detection of UI design smells, and the UI design smells that are detected by UIS-Hunter have severely negative impacts on app users.", "abstracts": [ { "abstractType": "Regular", "content": "Just like code smells in source code, UI design has visual design smells. We study 93 don't-do-that guidelines in the Material Design, a complex design system created by Google. We find that these don't-guidelines go far beyond UI aesthetics, and involve seven general design dimensions (layout, typography, iconography, navigation, communication, color, and shape) and four component design aspects (anatomy, placement, behavior, and usage). Violating these guidelines results in visual design smells in UIs (or UI design smells). In a study of 60,756 UIs of 9,286 Android apps, we find that 7,497 UIs of 2,587 apps have at least one violation of some Material Design guidelines. This reveals the lack of developer training and tool support to avoid UI design smells. To fill this gap, we design an automated UI design smell detector (UIS-Hunter) that extracts and validates multi-modal UI information (component metadata, typography, iconography, color, and edge) for detecting the violation of diverse don't-guidelines in Material Design. The detection accuracy of UIS-Hunter is high (precision=0.81, recall=0.90) on the 60,756 UIs of 9,286 apps. We build a guideline gallery with real-world UI design smells that UIS-Hunter detects for developers to learn the best Material Design practices. Our user studies show that UIS-Hunter is more effective than manual detection of UI design smells, and the UI design smells that are detected by UIS-Hunter have severely negative impacts on app users.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Just like code smells in source code, UI design has visual design smells. We study 93 don't-do-that guidelines in the Material Design, a complex design system created by Google. 
We find that these don't-guidelines go far beyond UI aesthetics, and involve seven general design dimensions (layout, typography, iconography, navigation, communication, color, and shape) and four component design aspects (anatomy, placement, behavior, and usage). Violating these guidelines results in visual design smells in UIs (or UI design smells). In a study of 60,756 UIs of 9,286 Android apps, we find that 7,497 UIs of 2,587 apps have at least one violation of some Material Design guidelines. This reveals the lack of developer training and tool support to avoid UI design smells. To fill this gap, we design an automated UI design smell detector (UIS-Hunter) that extracts and validates multi-modal UI information (component metadata, typography, iconography, color, and edge) for detecting the violation of diverse don't-guidelines in Material Design. The detection accuracy of UIS-Hunter is high (precision=0.81, recall=0.90) on the 60,756 UIs of 9,286 apps. We build a guideline gallery with real-world UI design smells that UIS-Hunter detects for developers to learn the best Material Design practices. 
Our user studies show that UIS-Hunter is more effective than manual detection of UI design smells, and the UI design smells that are detected by UIS-Hunter have severely negative impacts on app users.", "fno": "029600a761", "keywords": [ "Graphical User Interfaces", "Software Maintenance", "Visual Design Smells", "Complex Design System", "Material Design Guidelines", "Automated UI Design Smell Detector", "UIS Hunter", "Real World UI Design", "Training", "Visualization", "Design Methodology", "Color", "Tools", "Guidelines", "Software Engineering", "GUI Testing UI Design Smell Violation Detection Material Design" ], "authors": [ { "affiliation": "Zhejiang University, China", "fullName": "Bo Yang", "givenName": "Bo", "surname": "Yang", "__typename": "ArticleAuthorType" }, { "affiliation": "Australian National University, Australia", "fullName": "Zhenchang Xing", "givenName": "Zhenchang", "surname": "Xing", "__typename": "ArticleAuthorType" }, { "affiliation": "Monash University, Australia", "fullName": "Xin Xia", "givenName": "Xin", "surname": "Xia", "__typename": "ArticleAuthorType" }, { "affiliation": "Monash University, Australia", "fullName": "Chunyang Chen", "givenName": "Chunyang", "surname": "Chen", "__typename": "ArticleAuthorType" }, { "affiliation": "Tencent AI Lab, China", "fullName": "Deheng Ye", "givenName": "Deheng", "surname": "Ye", "__typename": "ArticleAuthorType" }, { "affiliation": "Zhejiang University, China", "fullName": "Shanping Li", "givenName": "Shanping", "surname": "Li", "__typename": "ArticleAuthorType" } ], "idPrefix": "icse", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2021-05-01T00:00:00", "pubType": "proceedings", "pages": "761-772", "year": "2021", "issn": "1558-1225", "isbn": "978-1-6654-0296-5", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "029600a748", "articleId": "1sEXouuzoS4", "__typename": 
"AdjacentArticleType" }, "next": { "fno": "029600a773", "articleId": "1sEXoxlYHao", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/esem/2017/4039/0/4039a424", "title": "House of Cards: Code Smells in Open-Source C# Repositories", "doi": null, "abstractUrl": "/proceedings-article/esem/2017/4039a424/12OmNAFFdIb", "parentPublication": { "id": "proceedings/esem/2017/4039/0", "title": "2017 ACM/IEEE International Symposium on Empirical Software Engineering and Measurement (ESEM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icsaw/2017/4793/0/07958506", "title": "Arcan: A Tool for Architectural Smells Detection", "doi": null, "abstractUrl": "/proceedings-article/icsaw/2017/07958506/12OmNApLGUi", "parentPublication": { "id": "proceedings/icsaw/2017/4793/0", "title": "2017 IEEE International Conference on Software Architecture Workshops (ICSAW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ichi/2014/5701/0/5701a343", "title": "Integrating Universal Design (UD) Principles and Mobile Design Guidelines to Improve Design of Mobile Health Applications for Older Adults", "doi": null, "abstractUrl": "/proceedings-article/ichi/2014/5701a343/12OmNwswfZi", "parentPublication": { "id": "proceedings/ichi/2014/5701/0", "title": "2014 IEEE International Conference on Healthcare Informatics (ICHI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/so/2019/03/08501942", "title": "What We Know About Smells in Software Test Code", "doi": null, "abstractUrl": "/magazine/so/2019/03/08501942/14ArjyEjBXW", "parentPublication": { "id": "mags/so", "title": "IEEE Software", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2022/5325/0/532500a414", "title": "Evaluating the Object-Centered User Interface in 
Head-Worn Mixed Reality Environment", "doi": null, "abstractUrl": "/proceedings-article/ismar/2022/532500a414/1JrRiVjEd44", "parentPublication": { "id": "proceedings/ismar/2022/5325/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/techdebt/2019/3371/0/337100a088", "title": "Architectural Smells Detected by Tools: a Catalogue Proposal", "doi": null, "abstractUrl": "/proceedings-article/techdebt/2019/337100a088/1cdP75LezCw", "parentPublication": { "id": "proceedings/techdebt/2019/3371/0", "title": "2019 IEEE/ACM International Conference on Technical Debt (TechDebt)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tq/2021/05/08888213", "title": "Understanding Illicit UI in iOS Apps Through Hidden UI Analysis", "doi": null, "abstractUrl": "/journal/tq/2021/05/08888213/1ezPszjc8Ja", "parentPublication": { "id": "trans/tq", "title": "IEEE Transactions on Dependable and Secure Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icse-companion/2021/1219/0/121900a089", "title": "UIS-Hunter: Detecting UI Design Smells in Android Apps", "doi": null, "abstractUrl": "/proceedings-article/icse-companion/2021/121900a089/1sET5YXPYnC", "parentPublication": { "id": "proceedings/icse-companion/2021/1219/0/", "title": "2021 IEEE/ACM 43rd International Conference on Software Engineering: Companion Proceedings (ICSE-Companion)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icse-seis/2021/0139/0/013900a077", "title": "Understanding Community Smells Variability: A Statistical Approach", "doi": null, "abstractUrl": "/proceedings-article/icse-seis/2021/013900a077/1sEXqZXmiKQ", "parentPublication": { "id": "proceedings/icse-seis/2021/0139/0/", "title": "2021 IEEE/ACM 43rd International 
Conference on Software Engineering: Software Engineering in Society (ICSE-SEIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ts/2022/10/09519532", "title": "Dependency Smells in JavaScript Projects", "doi": null, "abstractUrl": "/journal/ts/2022/10/09519532/1wc8U3FaaNq", "parentPublication": { "id": "trans/ts", "title": "IEEE Transactions on Software Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNyfdOIQ", "title": "Multimedia Information Networking and Security, International Conference on", "acronym": "mines", "groupId": "1003021", "volume": "0", "displayVolume": "0", "year": "2010", "__typename": "ProceedingType" }, "article": { "id": "12OmNAndimG", "doi": "10.1109/MINES.2010.15", "title": "Fast Texture Segmentation Based on Semi-local Region Descriptor and Active Contour Driven by the Bhattacharyya Distance", "normalizedTitle": "Fast Texture Segmentation Based on Semi-local Region Descriptor and Active Contour Driven by the Bhattacharyya Distance", "abstract": "Based on a texture descriptor which intrinsically defines the geometry of textures using semi-local image information and tools from differential geometry, a fast active contour segmentation model for color texture image is proposed. In this model, we use the popular Bhattacharyya distance between the probability density function (pdf) to design the data fitting term which distinguishes the background and textures of interest. Then, a fast algorithm based on the Split-Bregman method is introduced to extract meaningful objects. Finally, some examples on some challenging images are illustrated to verify the possibility of the proposed model.", "abstracts": [ { "abstractType": "Regular", "content": "Based on a texture descriptor which intrinsically defines the geometry of textures using semi-local image information and tools from differential geometry, a fast active contour segmentation model for color texture image is proposed. In this model, we use the popular Bhattacharyya distance between the probability density function (pdf) to design the data fitting term which distinguishes the background and textures of interest. Then, a fast algorithm based on the Split-Bregman method is introduced to extract meaningful objects. 
Finally, some examples on some challenging images are illustrated to verify the possibility of the proposed model.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Based on a texture descriptor which intrinsically defines the geometry of textures using semi-local image information and tools from differential geometry, a fast active contour segmentation model for color texture image is proposed. In this model, we use the popular Bhattacharyya distance between the probability density function (pdf) to design the data fitting term which distinguishes the background and textures of interest. Then, a fast algorithm based on the Split-Bregman method is introduced to extract meaningful objects. Finally, some examples on some challenging images are illustrated to verify the possibility of the proposed model.", "fno": "4258a035", "keywords": [ "Image Segmentation Active Contour Geometry Of Textures Bhattacharyya Flow Split Bregman Method" ], "authors": [ { "affiliation": null, "fullName": "Shanqing Zhang", "givenName": "Shanqing", "surname": "Zhang", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Weibin Xin", "givenName": "Weibin", "surname": "Xin", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Guixu Zhang", "givenName": "Guixu", "surname": "Zhang", "__typename": "ArticleAuthorType" } ], "idPrefix": "mines", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2010-11-01T00:00:00", "pubType": "proceedings", "pages": "35-38", "year": "2010", "issn": null, "isbn": "978-0-7695-4258-4", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "4258a032", "articleId": "12OmNvA1h6V", "__typename": "AdjacentArticleType" }, "next": { "fno": "4258a039", "articleId": "12OmNwDj1j2", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": 
"proceedings/isvd/2010/4112/0/4112a056", "title": "Jensen-Bregman Voronoi Diagrams and Centroidal Tessellations", "doi": null, "abstractUrl": "/proceedings-article/isvd/2010/4112a056/12OmNB8Cj0H", "parentPublication": { "id": "proceedings/isvd/2010/4112/0", "title": "2010 International Symposium on Voronoi Diagrams in Science and Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/1999/5897/0/58970038", "title": "Interactive Exploration of Volume Line Integral Convolution Based on 3D-Texture Mapping", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/1999/58970038/12OmNCdk2MV", "parentPublication": { "id": "proceedings/ieee-vis/1999/5897/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdh/2012/4899/0/4899a220", "title": "Image Decomposition via Generalized Morphological Component Analysis and Split Bregman Algorithm", "doi": null, "abstractUrl": "/proceedings-article/icdh/2012/4899a220/12OmNCu4nar", "parentPublication": { "id": "proceedings/icdh/2012/4899/0", "title": "4th International Conference on Digital Home (ICDH)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/esiat/2009/3682/2/3682b575", "title": "Rapid Texture-based Volume Rendering", "doi": null, "abstractUrl": "/proceedings-article/esiat/2009/3682b575/12OmNx7G5VW", "parentPublication": { "id": "esiat/2009/3682/2", "title": "Environmental Science and Information Application Technology, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/1996/3673/0/36730101", "title": "Visualization of Complex Models Using Dynamic Texture-based Simplification", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/1996/36730101/12OmNzE54xC", "parentPublication": { "id": 
"proceedings/ieee-vis/1996/3673/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2008/2174/0/04761331", "title": "Region-based active contours and sparse representations for texture segmentation", "doi": null, "abstractUrl": "/proceedings-article/icpr/2008/04761331/12OmNzUgdbx", "parentPublication": { "id": "proceedings/icpr/2008/2174/0", "title": "ICPR 2008 19th International Conference on Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/2004/8788/0/87880219", "title": "Adaptive 4-8 Texture Hierarchies", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2004/87880219/12OmNzXFozi", "parentPublication": { "id": "proceedings/ieee-vis/2004/8788/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/2005/2766/0/27660028", "title": "View-Dependent Rendering of Multiresolution Texture-Atlases", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2005/27660028/12OmNzXWZDD", "parentPublication": { "id": "proceedings/ieee-vis/2005/2766/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icig/2011/4541/0/4541a061", "title": "Distribution-Based Active Contour Model for Medical Image Segmentation", "doi": null, "abstractUrl": "/proceedings-article/icig/2011/4541a061/12OmNzwpUqr", "parentPublication": { "id": "proceedings/icig/2011/4541/0", "title": "Image and Graphics, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2001/02/v0120", "title": "Texture Mixing and Texture Movie Synthesis Using Statistical Learning", "doi": null, "abstractUrl": "/journal/tg/2001/02/v0120/13rRUwbaqLn", "parentPublication": { "id": 
"trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNqzcvOE", "title": "2017 International Conference on Machine Vision and Information Technology (CMVIT)", "acronym": "cmvit", "groupId": "1818944", "volume": "0", "displayVolume": "0", "year": "2017", "__typename": "ProceedingType" }, "article": { "id": "12OmNzVoBtf", "doi": "10.1109/CMVIT.2017.21", "title": "Preliminary Investigation on Stationarity of Dynamic Smoke Texture and Dynamic Fire Texture Based on Motion Coherent Metric", "normalizedTitle": "Preliminary Investigation on Stationarity of Dynamic Smoke Texture and Dynamic Fire Texture Based on Motion Coherent Metric", "abstract": "Motion coherence index is a metric to evaluate the coherent motion among vectors. In this article, we propose a coherent motion indicator to describe coherent motion among four adjacent vectors. For the preliminary investigation on stationarity of dynamic texture, the coherent motion indicator is proposed as an attribute of vector. The coherent motion indicator is calculated by averaging the angular distances of four adjacent vectors. Motion coherence index is computed as an average of coherent motion indicators in a video frame. The stationarity is evaluated through the covariance stationary series of motion coherence index in a video scene. Our experiments show that, there is significant difference between dynamic fire texture and dynamic smoke texture, evaluated by that covariance stationary series. Fire textures preserve their covariance stationary series. Conversely, smoke textures demonstrate their covariance non-stationary series. The difference among covariance stationary series models is useful and can be defined as a distinct characteristic among dynamic fire textures and dynamic smoke textures. 
Consequently, such difference can be brought to leverage in several disciplines of computer vision involved with dynamic textures, such as dynamic texture synthesis and fire calamity surveillance system.", "abstracts": [ { "abstractType": "Regular", "content": "Motion coherence index is a metric to evaluate the coherent motion among vectors. In this article, we propose a coherent motion indicator to describe coherent motion among four adjacent vectors. For the preliminary investigation on stationarity of dynamic texture, the coherent motion indicator is proposed as an attribute of vector. The coherent motion indicator is calculated by averaging the angular distances of four adjacent vectors. Motion coherence index is computed as an average of coherent motion indicators in a video frame. The stationarity is evaluated through the covariance stationary series of motion coherence index in a video scene. Our experiments show that, there is significant difference between dynamic fire texture and dynamic smoke texture, evaluated by that covariance stationary series. Fire textures preserve their covariance stationary series. Conversely, smoke textures demonstrate their covariance non-stationary series. The difference among covariance stationary series models is useful and can be defined as a distinct characteristic among dynamic fire textures and dynamic smoke textures. Consequently, such difference can be brought to leverage in several disciplines of computer vision involved with dynamic textures, such as dynamic texture synthesis and fire calamity surveillance system.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Motion coherence index is a metric to evaluate the coherent motion among vectors. In this article, we propose a coherent motion indicator to describe coherent motion among four adjacent vectors. For the preliminary investigation on stationarity of dynamic texture, the coherent motion indicator is proposed as an attribute of vector. 
The coherent motion indicator is calculated by averaging the angular distances of four adjacent vectors. Motion coherence index is computed as an average of coherent motion indicators in a video frame. The stationarity is evaluated through the covariance stationary series of motion coherence index in a video scene. Our experiments show that, there is significant difference between dynamic fire texture and dynamic smoke texture, evaluated by that covariance stationary series. Fire textures preserve their covariance stationary series. Conversely, smoke textures demonstrate their covariance non-stationary series. The difference among covariance stationary series models is useful and can be defined as a distinct characteristic among dynamic fire textures and dynamic smoke textures. Consequently, such difference can be brought to leverage in several disciplines of computer vision involved with dynamic textures, such as dynamic texture synthesis and fire calamity surveillance system.", "fno": "07878722", "keywords": [ "Dynamics", "Coherence", "Indexes", "Fires", "Computer Vision", "Measurement", "Image Motion Analysis", "Motion Coherent Indicator", "Covariance Stationary Series", "Stationarity", "Dynamic Textures", "Motion Coherence" ], "authors": [ { "affiliation": null, "fullName": "Kanoksak Wattanachote", "givenName": "Kanoksak", "surname": "Wattanachote", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Kuan Li", "givenName": "Kuan", "surname": "Li", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Yong Wang", "givenName": "Yong", "surname": "Wang", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Timothy K. 
Shih", "givenName": "Timothy K.", "surname": "Shih", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Wenyin Liu", "givenName": "Wenyin", "surname": "Liu", "__typename": "ArticleAuthorType" } ], "idPrefix": "cmvit", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2017-02-01T00:00:00", "pubType": "proceedings", "pages": "99-104", "year": "2017", "issn": null, "isbn": "978-1-5090-4993-6", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "07878721", "articleId": "12OmNqGA5aA", "__typename": "AdjacentArticleType" }, "next": { "fno": "07878723", "articleId": "12OmNyKrHns", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/iccvw/2009/4442/0/05457666", "title": "Learning mixed-state Markov models for statistical motion texture tracking", "doi": null, "abstractUrl": "/proceedings-article/iccvw/2009/05457666/12OmNBVrjp9", "parentPublication": { "id": "proceedings/iccvw/2009/4442/0", "title": "2009 IEEE 12th International Conference on Computer Vision Workshops, ICCV Workshops", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bliss/2009/3754/0/3754a058", "title": "Recognition of Dynamic Texture Patterns Using CHLAC Features", "doi": null, "abstractUrl": "/proceedings-article/bliss/2009/3754a058/12OmNBWzHPp", "parentPublication": { "id": "proceedings/bliss/2009/3754/0", "title": "2009 Symposium on Bio-inspired Learning and Intelligent Systems for Security", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bigmm/2015/8688/0/8688a371", "title": "Extracting Recurrent Motion Flows from Crowded Scene Videos: A Coherent Motion-Based Approach", "doi": null, "abstractUrl": "/proceedings-article/bigmm/2015/8688a371/12OmNwMFMlk", "parentPublication": { "id": 
"proceedings/bigmm/2015/8688/0", "title": "2015 IEEE International Conference on Multimedia Big Data (BigMM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2017/0457/0/0457f583", "title": "Semantically Coherent Co-Segmentation and Reconstruction of Dynamic Scenes", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2017/0457f583/12OmNweBUJH", "parentPublication": { "id": "proceedings/cvpr/2017/0457/0", "title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aina/2017/6029/0/6029a205", "title": "Dynamic Textures and Covariance Stationary Series Analysis Using Strategic Motion Coherence", "doi": null, "abstractUrl": "/proceedings-article/aina/2017/6029a205/12OmNyQGSld", "parentPublication": { "id": "proceedings/aina/2017/6029/0", "title": "2017 IEEE 31st International Conference on Advanced Information Networking and Applications (AINA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2010/7491/0/05582585", "title": "Video coding using dynamic texture synthesis", "doi": null, "abstractUrl": "/proceedings-article/icme/2010/05582585/12OmNzt0IMl", "parentPublication": { "id": "proceedings/icme/2010/7491/0", "title": "2010 IEEE International Conference on Multimedia and Expo", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2015/10/07010973", "title": "Spatiotemporal Directional Number Transitional Graph for Dynamic Texture Recognition", "doi": null, "abstractUrl": "/journal/tp/2015/10/07010973/13rRUwbs2ca", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2013/09/ttg2013091476", "title": "Image-Space Texture-Based 
Output-Coherent Surface Flow Visualization", "doi": null, "abstractUrl": "/journal/tg/2013/09/ttg2013091476/13rRUwghd98", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2008/05/ttp2008050909", "title": "Modeling, Clustering, and Segmenting Video with Mixtures of Dynamic Textures", "doi": null, "abstractUrl": "/journal/tp/2008/05/ttp2008050909/13rRUyYjKbs", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2018/3788/0/08545370", "title": "Dynamic Texture Similarity Criterion", "doi": null, "abstractUrl": "/proceedings-article/icpr/2018/08545370/17D45WGGoLX", "parentPublication": { "id": "proceedings/icpr/2018/3788/0", "title": "2018 24th International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNxwWorB", "title": "Proceedings of Seventh Annual IEEE Visualization '96", "acronym": "visual", "groupId": "1000796", "volume": "0", "displayVolume": "0", "year": "1996", "__typename": "ProceedingType" }, "article": { "id": "12OmNzcxZjO", "doi": "10.1109/VISUAL.1996.567774", "title": "Visualization of complex models using dynamic texture-based simplification", "normalizedTitle": "Visualization of complex models using dynamic texture-based simplification", "abstract": "We are investigating methods for simplifying complex models for interactive visualizations using texture based representations. The paper presents a simplification method which dynamically \"caches\" distant geometry into textures and trades off accurate rendering of the distant geometry for performance. Smooth transitions and continuous borders are defined between the geometry and textures thus the representations can be switched without sudden jumps (as is the case with many current texturing techniques). All the computations for the transitions can be done a priori without the need to change the textures each frame thereafter.", "abstracts": [ { "abstractType": "Regular", "content": "We are investigating methods for simplifying complex models for interactive visualizations using texture based representations. The paper presents a simplification method which dynamically \"caches\" distant geometry into textures and trades off accurate rendering of the distant geometry for performance. Smooth transitions and continuous borders are defined between the geometry and textures thus the representations can be switched without sudden jumps (as is the case with many current texturing techniques). 
All the computations for the transitions can be done a priori without the need to change the textures each frame thereafter.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We are investigating methods for simplifying complex models for interactive visualizations using texture based representations. The paper presents a simplification method which dynamically \"caches\" distant geometry into textures and trades off accurate rendering of the distant geometry for performance. Smooth transitions and continuous borders are defined between the geometry and textures thus the representations can be switched without sudden jumps (as is the case with many current texturing techniques). All the computations for the transitions can be done a priori without the need to change the textures each frame thereafter.", "fno": "00567774", "keywords": [ "Data Visualisation", "Complex Model Visualization", "Dynamic Texture Based Simplification", "Interactive Visualizations", "Texture Based Representations", "Simplification Method", "Distant Geometry", "Rendering", "Continuous Borders", "Smooth Transitions", "Texturing Techniques", "Visualization", "Geometry", "Solid Modeling", "Layout", "Computer Science", "Partitioning Algorithms", "Image Quality", "Rendering Computer Graphics", "Displays", "Performance Gain" ], "authors": [ { "affiliation": "Dept. of Comput. Sci., North Carolina Univ., Chapel Hill, NC, USA", "fullName": "D.G. 
Aliaga", "givenName": "D.G.", "surname": "Aliaga", "__typename": "ArticleAuthorType" } ], "idPrefix": "visual", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "1996-01-01T00:00:00", "pubType": "proceedings", "pages": "101-106", "year": "1996", "issn": null, "isbn": "0-89791-864-9", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "00567752", "articleId": "12OmNAkWvfC", "__typename": "AdjacentArticleType" }, "next": { "fno": "00568113", "articleId": "12OmNwpoFGX", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/dagstuhl/1997/0503/0/05030151", "title": "Visualization of Complex Physical Phenomena and Mathematical Objects in Virtual Environment", "doi": null, "abstractUrl": "/proceedings-article/dagstuhl/1997/05030151/12OmNAWYKJc", "parentPublication": { "id": "proceedings/dagstuhl/1997/0503/0", "title": "Dagstuhl '97 - Scientific Visualization Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/2002/7498/0/7498deussen", "title": "Interactive Visualization of Complex Plant Ecosystems", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2002/7498deussen/12OmNyRPgti", "parentPublication": { "id": "proceedings/ieee-vis/2002/7498/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sibgrapi/2015/7962/0/7962a001", "title": "Meta-Relief Texture Mapping with Dynamic Texture-Space Ambient Occlusion", "doi": null, "abstractUrl": "/proceedings-article/sibgrapi/2015/7962a001/12OmNyp9MiX", "parentPublication": { "id": "proceedings/sibgrapi/2015/7962/0", "title": "2015 28th SIBGRAPI Conference on Graphics, Patterns and Images (SIBGRAPI)", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "proceedings/ieee-vis/1996/3673/0/36730101", "title": "Visualization of Complex Models Using Dynamic Texture-based Simplification", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/1996/36730101/12OmNzE54xC", "parentPublication": { "id": "proceedings/ieee-vis/1996/3673/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2002/03/mcg2002030056", "title": "Evaluating Graphics Displays for Complex 3D Models", "doi": null, "abstractUrl": "/magazine/cg/2002/03/mcg2002030056/13rRUx0xPvw", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/02/08010336", "title": "Improved Alpha Testing Using Hashed Sampling", "doi": null, "abstractUrl": "/journal/tg/2019/02/08010336/17D45VTRoxw", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600f555", "title": "IRON: Inverse Rendering by Optimizing Neural SDFs and Materials from Photometric Images", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600f555/1H0NwfbmSeA", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/1999/5897/0/00809908", "title": "Multiresolution techniques for interactive texture-based volume visualization", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/1999/00809908/1h0KNaivmHm", "parentPublication": { "id": "proceedings/ieee-vis/1999/5897/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "proceedings/cvpr/2021/4509/0/450900h115", "title": "NeuTex: Neural Texture Mapping for Volumetric Neural Rendering", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2021/450900h115/1yeLdyIKnV6", "parentPublication": { "id": "proceedings/cvpr/2021/4509/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2021/4509/0/450900o4519", "title": "DeepSurfels: Learning Online Appearance Fusion", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2021/450900o4519/1yeM0fscjSw", "parentPublication": { "id": "proceedings/cvpr/2021/4509/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "17D45VtKir6", "title": "2018 24th International Conference on Pattern Recognition (ICPR)", "acronym": "icpr", "groupId": "1000545", "volume": "0", "displayVolume": "0", "year": "2018", "__typename": "ProceedingType" }, "article": { "id": "17D45WGGoLX", "doi": "10.1109/ICPR.2018.8545370", "title": "Dynamic Texture Similarity Criterion", "normalizedTitle": "Dynamic Texture Similarity Criterion", "abstract": "Dynamic texture similarity ranking is a challenging and still unsolved problem. Evaluation of how well are various dynamic textures similar to humans perception view is extremely difficult even for static textures and requires tedious psychophysical experiments. Human perception principles are largely not understood yet and the dynamic texture perception is further complicated with a distinct way of perceiving spatial and temporal domains, which complicates any similarity criterion definition. We propose a novel dynamic texture criterion based on the Fourier transformation and properties of dynamic texture spatio-temporal frequencies. The presented criterion correlates well with performed psycho-physical tests while maintaining sufficient diversity and descriptiveness.", "abstracts": [ { "abstractType": "Regular", "content": "Dynamic texture similarity ranking is a challenging and still unsolved problem. Evaluation of how well are various dynamic textures similar to humans perception view is extremely difficult even for static textures and requires tedious psychophysical experiments. Human perception principles are largely not understood yet and the dynamic texture perception is further complicated with a distinct way of perceiving spatial and temporal domains, which complicates any similarity criterion definition. We propose a novel dynamic texture criterion based on the Fourier transformation and properties of dynamic texture spatio-temporal frequencies. 
The presented criterion correlates well with performed psycho-physical tests while maintaining sufficient diversity and descriptiveness.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Dynamic texture similarity ranking is a challenging and still unsolved problem. Evaluation of how well are various dynamic textures similar to humans perception view is extremely difficult even for static textures and requires tedious psychophysical experiments. Human perception principles are largely not understood yet and the dynamic texture perception is further complicated with a distinct way of perceiving spatial and temporal domains, which complicates any similarity criterion definition. We propose a novel dynamic texture criterion based on the Fourier transformation and properties of dynamic texture spatio-temporal frequencies. The presented criterion correlates well with performed psycho-physical tests while maintaining sufficient diversity and descriptiveness.", "fno": "08545370", "keywords": [ "Harmonic Analysis", "Visualization", "Frequency Synthesizers", "Biological System Modeling", "Testing", "Databases", "Dynamics" ], "authors": [ { "affiliation": "Institute of Information Theory and Automation of the CAS", "fullName": "Radek Richtr", "givenName": "Radek", "surname": "Richtr", "__typename": "ArticleAuthorType" }, { "affiliation": "Institute of Information Theory and Automation of the CAS", "fullName": "Michal Haindl", "givenName": "Michal", "surname": "Haindl", "__typename": "ArticleAuthorType" } ], "idPrefix": "icpr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2018-08-01T00:00:00", "pubType": "proceedings", "pages": "904-909", "year": "2018", "issn": "1051-4651", "isbn": "978-1-5386-3788-3", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "08545287", "articleId": "17D45VW8bry", "__typename": "AdjacentArticleType" }, 
"next": { "fno": "08545812", "articleId": "17D45VWpMzi", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/cvpr/2001/1272/2/127220058", "title": "Dynamic Texture Recognition", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2001/127220058/12OmNAle6tF", "parentPublication": { "id": "proceedings/cvpr/2001/1272/2", "title": "Proceedings of the 2001 IEEE Computer Society Conference on Computer Vision and Pattern Recognition. CVPR 2001", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2006/2521/2/252120211", "title": "Local Binary Pattern Descriptors for Dynamic Texture Recognition", "doi": null, "abstractUrl": "/proceedings-article/icpr/2006/252120211/12OmNB9KHs1", "parentPublication": { "id": "proceedings/icpr/2006/2521/2", "title": "Pattern Recognition, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bliss/2009/3754/0/3754a058", "title": "Recognition of Dynamic Texture Patterns Using CHLAC Features", "doi": null, "abstractUrl": "/proceedings-article/bliss/2009/3754a058/12OmNBWzHPp", "parentPublication": { "id": "proceedings/bliss/2009/3754/0", "title": "2009 Symposium on Bio-inspired Learning and Intelligent Systems for Security", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aina/2017/6029/0/6029a205", "title": "Dynamic Textures and Covariance Stationary Series Analysis Using Strategic Motion Coherence", "doi": null, "abstractUrl": "/proceedings-article/aina/2017/6029a205/12OmNyQGSld", "parentPublication": { "id": "proceedings/aina/2017/6029/0", "title": "2017 IEEE 31st International Conference on Advanced Information Networking and Applications (AINA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iciap/1999/0040/0/00401055", "title": 
"Texture Extraction from Photographs and Rendering with Dynamic Texture Mapping", "doi": null, "abstractUrl": "/proceedings-article/iciap/1999/00401055/12OmNz61drx", "parentPublication": { "id": "proceedings/iciap/1999/0040/0", "title": "Image Analysis and Processing, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cmvit/2017/4993/0/07878722", "title": "Preliminary Investigation on Stationarity of Dynamic Smoke Texture and Dynamic Fire Texture Based on Motion Coherent Metric", "doi": null, "abstractUrl": "/proceedings-article/cmvit/2017/07878722/12OmNzVoBtf", "parentPublication": { "id": "proceedings/cmvit/2017/4993/0", "title": "2017 International Conference on Machine Vision and Information Technology (CMVIT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2015/10/07010973", "title": "Spatiotemporal Directional Number Transitional Graph for Dynamic Texture Recognition", "doi": null, "abstractUrl": "/journal/tp/2015/10/07010973/13rRUwbs2ca", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2018/3788/0/08545560", "title": "Delving into the Synthesizability of Dynamic Texture Samples", "doi": null, "abstractUrl": "/proceedings-article/icpr/2018/08545560/17D45X7VTeU", "parentPublication": { "id": "proceedings/icpr/2018/3788/0", "title": "2018 24th International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2018/6420/0/642000g703", "title": "Two-Stream Convolutional Networks for Dynamic Texture Synthesis", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2018/642000g703/17D45XH89po", "parentPublication": { "id": "proceedings/cvpr/2018/6420/0", "title": "2018 
IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/10/09374106", "title": "DTexFusion: Dynamic Texture Fusion Using a Consumer RGBD Sensor", "doi": null, "abstractUrl": "/journal/tg/2022/10/09374106/1rPtmp3tBSM", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1m3n9N02qgE", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "acronym": "cvpr", "groupId": "1000147", "volume": "0", "displayVolume": "0", "year": "2020", "__typename": "ProceedingType" }, "article": { "id": "1m3onF36nBe", "doi": "10.1109/CVPR42600.2020.00163", "title": "Adversarial Texture Optimization From RGB-D Scans", "normalizedTitle": "Adversarial Texture Optimization From RGB-D Scans", "abstract": "Realistic color texture generation is an important step in RGB-D surface reconstruction, but remains challenging in practice due to inaccuracies in reconstructed geometry, misaligned camera poses, and view-dependent imaging artifacts. In this work, we present a novel approach for color texture generation using a conditional adversarial loss obtained from weakly-supervised views. Specifically, we propose an approach to produce photorealistic textures for approximate surfaces, even from misaligned images, by learning an objective function that is robust to these errors. The key idea of our approach is to learn a patch-based conditional discriminator which guides the texture optimization to be tolerant to misalignments. Our discriminator takes a synthesized view and a real image, and evaluates whether the synthesized one is realistic, under a broadened definition of realism. We train the discriminator by providing as `real' examples pairs of input views and their misaligned versions - so that the learned adversarial loss will tolerate errors from the scans. Experiments on synthetic and real data under quantitative or qualitative evaluation demonstrate the advantage of our approach in comparison to state of the art (see Figure 1, right). 
Our code is publicly available<sup>1</sup> with video demonstration<sup>2</sup>.", "abstracts": [ { "abstractType": "Regular", "content": "Realistic color texture generation is an important step in RGB-D surface reconstruction, but remains challenging in practice due to inaccuracies in reconstructed geometry, misaligned camera poses, and view-dependent imaging artifacts. In this work, we present a novel approach for color texture generation using a conditional adversarial loss obtained from weakly-supervised views. Specifically, we propose an approach to produce photorealistic textures for approximate surfaces, even from misaligned images, by learning an objective function that is robust to these errors. The key idea of our approach is to learn a patch-based conditional discriminator which guides the texture optimization to be tolerant to misalignments. Our discriminator takes a synthesized view and a real image, and evaluates whether the synthesized one is realistic, under a broadened definition of realism. We train the discriminator by providing as `real' examples pairs of input views and their misaligned versions - so that the learned adversarial loss will tolerate errors from the scans. Experiments on synthetic and real data under quantitative or qualitative evaluation demonstrate the advantage of our approach in comparison to state of the art (see Figure 1, right). Our code is publicly available<sup>1</sup> with video demonstration<sup>2</sup>.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Realistic color texture generation is an important step in RGB-D surface reconstruction, but remains challenging in practice due to inaccuracies in reconstructed geometry, misaligned camera poses, and view-dependent imaging artifacts. In this work, we present a novel approach for color texture generation using a conditional adversarial loss obtained from weakly-supervised views. 
Specifically, we propose an approach to produce photorealistic textures for approximate surfaces, even from misaligned images, by learning an objective function that is robust to these errors. The key idea of our approach is to learn a patch-based conditional discriminator which guides the texture optimization to be tolerant to misalignments. Our discriminator takes a synthesized view and a real image, and evaluates whether the synthesized one is realistic, under a broadened definition of realism. We train the discriminator by providing as `real' examples pairs of input views and their misaligned versions - so that the learned adversarial loss will tolerate errors from the scans. Experiments on synthetic and real data under quantitative or qualitative evaluation demonstrate the advantage of our approach in comparison to state of the art (see Figure 1, right). Our code is publicly available1 with video demonstration2.", "fno": "716800b556", "keywords": [ "Image Colour Analysis", "Image Reconstruction", "Image Texture", "Learning Artificial Intelligence", "Realistic Images", "Learned Adversarial Loss", "Adversarial Texture Optimization", "Realistic Color Texture Generation", "RGB D Surface Reconstruction", "Reconstructed Geometry", "View Dependent Imaging Artifacts", "Conditional Adversarial Loss", "Photorealistic Textures", "Misaligned Images", "Objective Function", "Patch Based Conditional Discriminator", "Misaligned Camera Poses", "Image Color Analysis", "Cameras", "Optimization", "Geometry", "Measurement", "Image Reconstruction", "Surface Reconstruction" ], "authors": [ { "affiliation": "Stanford University; Google Research", "fullName": "Jingwei Huang", "givenName": "Jingwei", "surname": "Huang", "__typename": "ArticleAuthorType" }, { "affiliation": "Technical University of Munich", "fullName": "Justus Thies", "givenName": "Justus", "surname": "Thies", "__typename": "ArticleAuthorType" }, { "affiliation": "Technical University of Munich", "fullName": "Angela 
Dai", "givenName": "Angela", "surname": "Dai", "__typename": "ArticleAuthorType" }, { "affiliation": "Google Research", "fullName": "Abhijit Kundu", "givenName": "Abhijit", "surname": "Kundu", "__typename": "ArticleAuthorType" }, { "affiliation": "Google Research; UC Berkeley", "fullName": "Chiyu Jiang", "givenName": "Chiyu", "surname": "Jiang", "__typename": "ArticleAuthorType" }, { "affiliation": "Stanford University", "fullName": "Leonidas J. Guibas", "givenName": "Leonidas J.", "surname": "Guibas", "__typename": "ArticleAuthorType" }, { "affiliation": "Technical University of Munich", "fullName": "Matthias Nießner", "givenName": "Matthias", "surname": "Nießner", "__typename": "ArticleAuthorType" }, { "affiliation": "Google Research", "fullName": "Thomas Funkhouser", "givenName": "Thomas", "surname": "Funkhouser", "__typename": "ArticleAuthorType" } ], "idPrefix": "cvpr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2020-06-01T00:00:00", "pubType": "proceedings", "pages": "1556-1565", "year": "2020", "issn": null, "isbn": "978-1-7281-7168-5", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "716800b545", "articleId": "1m3nOcApYgU", "__typename": "AdjacentArticleType" }, "next": { "fno": "716800b566", "articleId": "1m3ojX99pRu", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/iccv/2009/4420/0/05459378", "title": "Superresolution texture maps for multiview reconstruction", "doi": null, "abstractUrl": "/proceedings-article/iccv/2009/05459378/12OmNxuFBnH", "parentPublication": { "id": "proceedings/iccv/2009/4420/0", "title": "2009 IEEE 12th International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2015/8391/0/8391a810", "title": "Intrinsic Scene Decomposition 
from RGB-D Images", "doi": null, "abstractUrl": "/proceedings-article/iccv/2015/8391a810/12OmNy4IF17", "parentPublication": { "id": "proceedings/iccv/2015/8391/0", "title": "2015 IEEE International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2018/6420/0/642000a175", "title": "Deep Depth Completion of a Single RGB-D Image", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2018/642000a175/17D45WaTkg5", "parentPublication": { "id": "proceedings/cvpr/2018/6420/0", "title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2018/8425/0/842500a533", "title": "Plane-Based Optimization of Geometry and Texture for RGB-D Reconstruction of Indoor Scenes", "doi": null, "abstractUrl": "/proceedings-article/3dv/2018/842500a533/17D45Wda7eK", "parentPublication": { "id": "proceedings/3dv/2018/8425/0", "title": "2018 International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2018/6420/0/642000e645", "title": "Texture Mapping for 3D Reconstruction with RGB-D Sensor", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2018/642000e645/17D45Wuc36V", "parentPublication": { "id": "proceedings/cvpr/2018/6420/0", "title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2019/1975/0/197500b413", "title": "3D Reconstruction and Texture Optimization Using a Sparse Set of RGB-D Cameras", "doi": null, "abstractUrl": "/proceedings-article/wacv/2019/197500b413/18j8FdScGbe", "parentPublication": { "id": "proceedings/wacv/2019/1975/0", "title": "2019 IEEE Winter Conference on Applications of Computer Vision (WACV)", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2019/3131/0/313100a533", "title": "Learned Multi-View Texture Super-Resolution", "doi": null, "abstractUrl": "/proceedings-article/3dv/2019/313100a533/1ezRB4XSC0E", "parentPublication": { "id": "proceedings/3dv/2019/3131/0", "title": "2019 International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2020/7168/0/716800b269", "title": "TextureFusion: High-Quality Texture Acquisition for Real-Time RGB-D Scanning", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2020/716800b269/1m3obd1zLG0", "parentPublication": { "id": "proceedings/cvpr/2020/7168/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2020/7168/0/716800f949", "title": "Joint Texture and Geometry Optimization for RGB-D Reconstruction", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2020/716800f949/1m3ogA88vw4", "parentPublication": { "id": "proceedings/cvpr/2020/7168/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2023/03/09645189", "title": "Seamless Texture Optimization for RGB-D Reconstruction", "doi": null, "abstractUrl": "/journal/tg/2023/03/09645189/1zc6CdFskcU", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "17D45VtKirt", "title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "acronym": "cvpr", "groupId": "1000147", "volume": "0", "displayVolume": "0", "year": "2018", "__typename": "ProceedingType" }, "article": { "id": "17D45VsBTWS", "doi": "10.1109/CVPR.2018.00986", "title": "CartoonGAN: Generative Adversarial Networks for Photo Cartoonization", "normalizedTitle": "CartoonGAN: Generative Adversarial Networks for Photo Cartoonization", "abstract": "In this paper, we propose a solution to transforming photos of real-world scenes into cartoon style images, which is valuable and challenging in computer vision and computer graphics. Our solution belongs to learning based methods, which have recently become popular to stylize images in artistic forms such as painting. However, existing methods do not produce satisfactory results for cartoonization, due to the fact that (1) cartoon styles have unique characteristics with high level simplification and abstraction, and (2) cartoon images tend to have clear edges, smooth color shading and relatively simple textures, which exhibit significant challenges for texture-descriptor-based loss functions used in existing methods. In this paper, we propose CartoonGAN, a generative adversarial network (GAN) framework for cartoon stylization. Our method takes unpaired photos and cartoon images for training, which is easy to use. Two novel losses suitable for cartoonization are proposed: (1) a semantic content loss, which is formulated as a sparse regularization in the high-level feature maps of the VGG network to cope with substantial style variation between photos and cartoons, and (2) an edge-promoting adversarial loss for preserving clear edges. We further introduce an initialization phase, to improve the convergence of the network to the target manifold. Our method is also much more efficient to train than existing methods. 
Experimental results show that our method is able to generate high-quality cartoon images from real-world photos (i.e., following specific artists' styles and with clear edges and smooth shading) and outperforms state-of-the-art methods.", "abstracts": [ { "abstractType": "Regular", "content": "In this paper, we propose a solution to transforming photos of real-world scenes into cartoon style images, which is valuable and challenging in computer vision and computer graphics. Our solution belongs to learning based methods, which have recently become popular to stylize images in artistic forms such as painting. However, existing methods do not produce satisfactory results for cartoonization, due to the fact that (1) cartoon styles have unique characteristics with high level simplification and abstraction, and (2) cartoon images tend to have clear edges, smooth color shading and relatively simple textures, which exhibit significant challenges for texture-descriptor-based loss functions used in existing methods. In this paper, we propose CartoonGAN, a generative adversarial network (GAN) framework for cartoon stylization. Our method takes unpaired photos and cartoon images for training, which is easy to use. Two novel losses suitable for cartoonization are proposed: (1) a semantic content loss, which is formulated as a sparse regularization in the high-level feature maps of the VGG network to cope with substantial style variation between photos and cartoons, and (2) an edge-promoting adversarial loss for preserving clear edges. We further introduce an initialization phase, to improve the convergence of the network to the target manifold. Our method is also much more efficient to train than existing methods. 
Experimental results show that our method is able to generate high-quality cartoon images from real-world photos (i.e., following specific artists' styles and with clear edges and smooth shading) and outperforms state-of-the-art methods.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In this paper, we propose a solution to transforming photos of real-world scenes into cartoon style images, which is valuable and challenging in computer vision and computer graphics. Our solution belongs to learning based methods, which have recently become popular to stylize images in artistic forms such as painting. However, existing methods do not produce satisfactory results for cartoonization, due to the fact that (1) cartoon styles have unique characteristics with high level simplification and abstraction, and (2) cartoon images tend to have clear edges, smooth color shading and relatively simple textures, which exhibit significant challenges for texture-descriptor-based loss functions used in existing methods. In this paper, we propose CartoonGAN, a generative adversarial network (GAN) framework for cartoon stylization. Our method takes unpaired photos and cartoon images for training, which is easy to use. Two novel losses suitable for cartoonization are proposed: (1) a semantic content loss, which is formulated as a sparse regularization in the high-level feature maps of the VGG network to cope with substantial style variation between photos and cartoons, and (2) an edge-promoting adversarial loss for preserving clear edges. We further introduce an initialization phase, to improve the convergence of the network to the target manifold. Our method is also much more efficient to train than existing methods. 
Experimental results show that our method is able to generate high-quality cartoon images from real-world photos (i.e., following specific artists' styles and with clear edges and smooth shading) and outperforms state-of-the-art methods.", "fno": "642000j465", "keywords": [ "Computer Animation", "Computer Graphics", "Computer Vision", "Feature Extraction", "Image Colour Analysis", "Image Enhancement", "Image Matching", "Image Resolution", "Image Segmentation", "Image Texture", "Learning Artificial Intelligence", "Cartoon Style Images", "Computer Vision", "Computer Graphics", "Based Methods", "Artistic Forms", "Clear Edges", "Smooth Color Shading", "Texture Descriptor Based Loss", "Cartoon GAN", "Generative Adversarial Network Framework", "Cartoon Stylization", "Unpaired Photos", "Semantic Content Loss", "VGG Network", "Edge Promoting Adversarial Loss", "Real World Photos", "Generative Adversarial Networks", "Photo Cartoonization", "Real World Scenes", "Feature Maps", "Photo Transformation", "Cartoon Images", "Cartoon Styles", "Training", "Gallium Nitride", "Generative Adversarial Networks", "Manifolds", "Image Edge Detection", "Automobiles", "Training Data" ], "authors": [ { "affiliation": null, "fullName": "Yang Chen", "givenName": "Yang", "surname": "Chen", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Yu-Kun Lai", "givenName": "Yu-Kun", "surname": "Lai", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Yong-Jin Liu", "givenName": "Yong-Jin", "surname": "Liu", "__typename": "ArticleAuthorType" } ], "idPrefix": "cvpr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2018-06-01T00:00:00", "pubType": "proceedings", "pages": "9465-9474", "year": "2018", "issn": null, "isbn": "978-1-5386-6420-9", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "642000j455", "articleId": "17D45XacGkf", 
"__typename": "AdjacentArticleType" }, "next": { "fno": "642000j475", "articleId": "17D45VsBTYY", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/iccv/2017/1032/0/1032f908", "title": "StackGAN: Text to Photo-Realistic Image Synthesis with Stacked Generative Adversarial Networks", "doi": null, "abstractUrl": "/proceedings-article/iccv/2017/1032f908/12OmNA0MZ6U", "parentPublication": { "id": "proceedings/iccv/2017/1032/0", "title": "2017 IEEE International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2017/0457/0/0457a105", "title": "Photo-Realistic Single Image Super-Resolution Using a Generative Adversarial Network", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2017/0457a105/12OmNwoPtoP", "parentPublication": { "id": "proceedings/cvpr/2017/0457/0", "title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2018/1737/0/08486440", "title": "Densely Stacked Generative Adversarial Networks", "doi": null, "abstractUrl": "/proceedings-article/icme/2018/08486440/14jQfSnkWGs", "parentPublication": { "id": "proceedings/icme/2018/1737/0", "title": "2018 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2018/3788/0/08545881", "title": "MMGAN: Manifold-Matching Generative Adversarial Networks", "doi": null, "abstractUrl": "/proceedings-article/icpr/2018/08545881/17D45WHONmN", "parentPublication": { "id": "proceedings/icpr/2018/3788/0", "title": "2018 24th International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/acpr/2017/3354/0/3354a115", "title": "Deep 
Feature Similarity for Generative Adversarial Networks", "doi": null, "abstractUrl": "/proceedings-article/acpr/2017/3354a115/17D45Wuc39X", "parentPublication": { "id": "proceedings/acpr/2017/3354/0", "title": "2017 4th IAPR Asian Conference on Pattern Recognition (ACPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2018/6420/0/642000j455", "title": "ST-GAN: Spatial Transformer Generative Adversarial Networks for Image Compositing", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2018/642000j455/17D45XacGkf", "parentPublication": { "id": "proceedings/cvpr/2018/6420/0", "title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2019/9552/0/955200a652", "title": "Everyone is a Cartoonist: Selfie Cartoonization with Attentive Adversarial Networks", "doi": null, "abstractUrl": "/proceedings-article/icme/2019/955200a652/1cdOKjcIyB2", "parentPublication": { "id": "proceedings/icme/2019/9552/0", "title": "2019 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2019/4803/0/480300g931", "title": "Photo-Realistic Monocular Gaze Redirection Using Generative Adversarial Networks", "doi": null, "abstractUrl": "/proceedings-article/iccv/2019/480300g931/1hVloxAEVA4", "parentPublication": { "id": "proceedings/iccv/2019/4803/0", "title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2022/01/09149832", "title": "Improving Generative Adversarial Networks With Local Coordinate Coding", "doi": null, "abstractUrl": "/journal/tp/2022/01/09149832/1lNXsekD4ha", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine 
Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/10/09382902", "title": "GAN-Based Multi-Style Photo Cartoonization", "doi": null, "abstractUrl": "/journal/tg/2022/10/09382902/1saZlQvlf0Y", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "17D45VtKirt", "title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "acronym": "cvpr", "groupId": "1000147", "volume": "0", "displayVolume": "0", "year": "2018", "__typename": "ProceedingType" }, "article": { "id": "17D45WXIkCO", "doi": "10.1109/CVPR.2018.00789", "title": "Multi-content GAN for Few-Shot Font Style Transfer", "normalizedTitle": "Multi-content GAN for Few-Shot Font Style Transfer", "abstract": "In this work, we focus on the challenge of taking partial observations of highly-stylized text and generalizing the observations to generate unobserved glyphs in the ornamented typeface. To generate a set of multi-content images following a consistent style from very few examples, we propose an end-to-end stacked conditional GAN model considering content along channels and style along network layers. Our proposed network transfers the style of given glyphs to the contents of unseen ones, capturing highly stylized fonts found in the real-world such as those on movie posters or infographics. We seek to transfer both the typographic stylization (ex. serifs and ears) as well as the textual stylization (ex. color gradients and effects.) We base our experiments on our collected data set including 10,000 fonts with different styles and demonstrate effective generalization from a very small number of observed glyphs.", "abstracts": [ { "abstractType": "Regular", "content": "In this work, we focus on the challenge of taking partial observations of highly-stylized text and generalizing the observations to generate unobserved glyphs in the ornamented typeface. To generate a set of multi-content images following a consistent style from very few examples, we propose an end-to-end stacked conditional GAN model considering content along channels and style along network layers. 
Our proposed network transfers the style of given glyphs to the contents of unseen ones, capturing highly stylized fonts found in the real-world such as those on movie posters or infographics. We seek to transfer both the typographic stylization (ex. serifs and ears) as well as the textual stylization (ex. color gradients and effects.) We base our experiments on our collected data set including 10,000 fonts with different styles and demonstrate effective generalization from a very small number of observed glyphs.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In this work, we focus on the challenge of taking partial observations of highly-stylized text and generalizing the observations to generate unobserved glyphs in the ornamented typeface. To generate a set of multi-content images following a consistent style from very few examples, we propose an end-to-end stacked conditional GAN model considering content along channels and style along network layers. Our proposed network transfers the style of given glyphs to the contents of unseen ones, capturing highly stylized fonts found in the real-world such as those on movie posters or infographics. We seek to transfer both the typographic stylization (ex. serifs and ears) as well as the textual stylization (ex. color gradients and effects.) 
We base our experiments on our collected data set including 10,000 fonts with different styles and demonstrate effective generalization from a very small number of observed glyphs.", "fno": "642000h564", "keywords": [ "Art", "Character Sets", "Computer Graphics", "Image Processing", "Neural Nets", "Word Processing", "End To End Stacked Conditional GAN Model", "Highly Stylized Fonts", "Movie Posters", "Infographics", "Typographic Stylization", "Multicontent GAN", "Few Shot Font Style Transfer", "Partial Observations", "Highly Stylized Text", "Unobserved Glyphs", "Ornamented Typeface", "Multicontent Images", "Gallium Nitride", "Shape", "Generators", "Image Color Analysis", "Generative Adversarial Networks", "Training", "Gray Scale" ], "authors": [ { "affiliation": null, "fullName": "Samaneh Azadi", "givenName": "Samaneh", "surname": "Azadi", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Matthew Fisher", "givenName": "Matthew", "surname": "Fisher", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Vladimir Kim", "givenName": "Vladimir", "surname": "Kim", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Zhaowen Wang", "givenName": "Zhaowen", "surname": "Wang", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Eli Shechtman", "givenName": "Eli", "surname": "Shechtman", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Trevor Darrell", "givenName": "Trevor", "surname": "Darrell", "__typename": "ArticleAuthorType" } ], "idPrefix": "cvpr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2018-06-01T00:00:00", "pubType": "proceedings", "pages": "7564-7573", "year": "2018", "issn": null, "isbn": "978-1-5386-6420-9", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "642000h553", "articleId": "17D45WXIkCN", "__typename": 
"AdjacentArticleType" }, "next": { "fno": "642000h574", "articleId": "17D45Xtvpav", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icdar/2017/3586/5/3586f051", "title": "Neural Font Style Transfer", "doi": null, "abstractUrl": "/proceedings-article/icdar/2017/3586f051/12OmNy68ELb", "parentPublication": { "id": "icdar/2017/3586/5", "title": "2017 14th IAPR International Conference on Document Analysis and Recognition (ICDAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/acpr/2017/3354/0/3354a232", "title": "Font Creation Using Class Discriminative Deep Convolutional Generative Adversarial Networks", "doi": null, "abstractUrl": "/proceedings-article/acpr/2017/3354a232/17D45Xh13v0", "parentPublication": { "id": "proceedings/acpr/2017/3354/0", "title": "2017 4th IAPR Asian Conference on Pattern Recognition (ACPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600h885", "title": "Few-Shot Font Generation by Learning Fine-Grained Local Styles", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600h885/1H0KWLAcGwo", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600h895", "title": "XMP-Font: Self-Supervised Cross-Modality Pre-training for Few-Shot Font Generation", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600h895/1H0LeHOghTG", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2019/9552/0/955200a910", "title": 
"Semantic GAN: Application for Cross-Domain Image Style Transfer", "doi": null, "abstractUrl": "/proceedings-article/icme/2019/955200a910/1cdOFuQjESA", "parentPublication": { "id": "proceedings/icme/2019/9552/0", "title": "2019 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2019/4803/0/480300j115", "title": "Large-Scale Tag-Based Font Retrieval With Generative Feature Learning", "doi": null, "abstractUrl": "/proceedings-article/iccv/2019/480300j115/1hQqq9UZc3u", "parentPublication": { "id": "proceedings/iccv/2019/4803/0", "title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2019/4803/0/480300e441", "title": "Controllable Artistic Text Style Transfer via Shape-Matching GAN", "doi": null, "abstractUrl": "/proceedings-article/iccv/2019/480300e441/1hVlRglbrk4", "parentPublication": { "id": "proceedings/iccv/2019/4803/0", "title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2022/07/09339900", "title": "Shape-Matching GAN++: Scale Controllable Dynamic Artistic Text Style Transfer", "doi": null, "abstractUrl": "/journal/tp/2022/07/09339900/1qL54N4119S", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2021/8808/0/09412254", "title": "Few-Shot Font Generation with Deep Metric Learning", "doi": null, "abstractUrl": "/proceedings-article/icpr/2021/09412254/1tmiOLMYDvy", "parentPublication": { "id": "proceedings/icpr/2021/8808/0", "title": "2020 25th International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, 
"__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2021/0477/0/047700a433", "title": "Few-shot Font Style Transfer between Different Languages", "doi": null, "abstractUrl": "/proceedings-article/wacv/2021/047700a433/1uqGGDV1zlS", "parentPublication": { "id": "proceedings/wacv/2021/0477/0", "title": "2021 IEEE Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "17D45VtKirt", "title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "acronym": "cvpr", "groupId": "1000147", "volume": "0", "displayVolume": "0", "year": "2018", "__typename": "ProceedingType" }, "article": { "id": "17D45Xh13uO", "doi": "10.1109/CVPR.2018.00881", "title": "Separating Style and Content for Generalized Style Transfer", "normalizedTitle": "Separating Style and Content for Generalized Style Transfer", "abstract": "Neural style transfer has drawn broad attention in recent years. However, most existing methods aim to explicitly model the transformation between different styles, and the learned model is thus not generalizable to new styles. We here attempt to separate the representations for styles and contents, and propose a generalized style transfer network consisting of style encoder, content encoder, mixer and decoder. The style encoder and content encoder are used to extract the style and content factors from the style reference images and content reference images, respectively. The mixer employs a bilinear model to integrate the above two factors and finally feeds it into a decoder to generate images with target style and content. To separate the style features and content features, we leverage the conditional dependence of styles and contents given an image. During training, the encoder network learns to extract styles and contents from two sets of reference images in limited size, one with shared style and the other with shared content. This learning framework allows simultaneous style transfer among multiple styles and can be deemed as a special 'multi-task' learning scenario. The encoders are expected to capture the underlying features for different styles and contents which is generalizable to new styles and contents. For validation, we applied the proposed algorithm to the Chinese Typeface transfer problem. 
Extensive experiment results on character generation have demonstrated the effectiveness and robustness of our method.", "abstracts": [ { "abstractType": "Regular", "content": "Neural style transfer has drawn broad attention in recent years. However, most existing methods aim to explicitly model the transformation between different styles, and the learned model is thus not generalizable to new styles. We here attempt to separate the representations for styles and contents, and propose a generalized style transfer network consisting of style encoder, content encoder, mixer and decoder. The style encoder and content encoder are used to extract the style and content factors from the style reference images and content reference images, respectively. The mixer employs a bilinear model to integrate the above two factors and finally feeds it into a decoder to generate images with target style and content. To separate the style features and content features, we leverage the conditional dependence of styles and contents given an image. During training, the encoder network learns to extract styles and contents from two sets of reference images in limited size, one with shared style and the other with shared content. This learning framework allows simultaneous style transfer among multiple styles and can be deemed as a special 'multi-task' learning scenario. The encoders are expected to capture the underlying features for different styles and contents which is generalizable to new styles and contents. For validation, we applied the proposed algorithm to the Chinese Typeface transfer problem. Extensive experiment results on character generation have demonstrated the effectiveness and robustness of our method.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Neural style transfer has drawn broad attention in recent years. 
However, most existing methods aim to explicitly model the transformation between different styles, and the learned model is thus not generalizable to new styles. We here attempt to separate the representations for styles and contents, and propose a generalized style transfer network consisting of style encoder, content encoder, mixer and decoder. The style encoder and content encoder are used to extract the style and content factors from the style reference images and content reference images, respectively. The mixer employs a bilinear model to integrate the above two factors and finally feeds it into a decoder to generate images with target style and content. To separate the style features and content features, we leverage the conditional dependence of styles and contents given an image. During training, the encoder network learns to extract styles and contents from two sets of reference images in limited size, one with shared style and the other with shared content. This learning framework allows simultaneous style transfer among multiple styles and can be deemed as a special 'multi-task' learning scenario. The encoders are expected to capture the underlying features for different styles and contents which is generalizable to new styles and contents. For validation, we applied the proposed algorithm to the Chinese Typeface transfer problem. 
Extensive experiment results on character generation have demonstrated the effectiveness and robustness of our method.", "fno": "642000i447", "keywords": [ "Decoding", "Feature Extraction", "Image Coding", "Image Representation", "Learning Artificial Intelligence", "Neural Nets", "Learned Model", "Decoder", "Bilinear Model", "Feature Extraction", "Multitask Learning Scenario", "Chinese Typeface Transfer Problem", "Content Reference Images", "Style Reference Images", "Style Encoder", "Content Encoder", "Generalized Style Transfer Network", "Neural Style Transfer", "Decoding", "Mixers", "Silicon", "Training", "Gallium Nitride", "Convolution", "Generators" ], "authors": [ { "affiliation": null, "fullName": "Yexun Zhang", "givenName": "Yexun", "surname": "Zhang", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Ya Zhang", "givenName": "Ya", "surname": "Zhang", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Wenbin Cai", "givenName": "Wenbin", "surname": "Cai", "__typename": "ArticleAuthorType" } ], "idPrefix": "cvpr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2018-06-01T00:00:00", "pubType": "proceedings", "pages": "8447-8455", "year": "2018", "issn": null, "isbn": "978-1-5386-6420-9", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "642000i437", "articleId": "17D45Vw15so", "__typename": "AdjacentArticleType" }, "next": { "fno": "642000i456", "articleId": "17D45Xtvpaw", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/wacv/2018/4886/0/488601b350", "title": "Balancing Content and Style with Two-Stream FCNs for Style Transfer", "doi": null, "abstractUrl": "/proceedings-article/wacv/2018/488601b350/12OmNrYlmNm", "parentPublication": { "id": "proceedings/wacv/2018/4886/0", "title": "2018 IEEE Winter Conference on 
Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2017/0457/0/0457c770", "title": "StyleBank: An Explicit Representation for Neural Image Style Transfer", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2017/0457c770/12OmNyKrHkQ", "parentPublication": { "id": "proceedings/cvpr/2017/0457/0", "title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2017/1032/0/1032b510", "title": "Arbitrary Style Transfer in Real-Time with Adaptive Instance Normalization", "doi": null, "abstractUrl": "/proceedings-article/iccv/2017/1032b510/12OmNzkMlWi", "parentPublication": { "id": "proceedings/iccv/2017/1032/0", "title": "2017 IEEE International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2018/6420/0/642000h564", "title": "Multi-content GAN for Few-Shot Font Style Transfer", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2018/642000h564/17D45WXIkCO", "parentPublication": { "id": "proceedings/cvpr/2018/6420/0", "title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2019/1975/0/197500a848", "title": "Style and Content Disentanglement in Generative Adversarial Networks", "doi": null, "abstractUrl": "/proceedings-article/wacv/2019/197500a848/18j8FazPuwM", "parentPublication": { "id": "proceedings/wacv/2019/1975/0", "title": "2019 IEEE Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmew/2019/9214/0/921400a078", "title": "Neural Style Transfer with Content Discrimination", "doi": null, "abstractUrl": 
"/proceedings-article/icmew/2019/921400a078/1cJ0yScItqw", "parentPublication": { "id": "proceedings/icmew/2019/9214/0", "title": "2019 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2021/07/08950233", "title": "Explicit Filterbank Learning for Neural Image Style Transfer and Image Processing", "doi": null, "abstractUrl": "/journal/tp/2021/07/08950233/1gKwLwPnyy4", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2019/4803/0/480300e421", "title": "Content and Style Disentanglement for Artistic Style Transfer", "doi": null, "abstractUrl": "/proceedings-article/iccv/2019/480300e421/1hVlS83UBB6", "parentPublication": { "id": "proceedings/iccv/2019/4803/0", "title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2020/7168/0/716800n3813", "title": "Two-Stage Peer-Regularized Feature Recombination for Arbitrary Image Style Transfer", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2020/716800n3813/1m3nNWitbtm", "parentPublication": { "id": "proceedings/cvpr/2020/7168/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccnea/2021/4486/0/448600a191", "title": "Image Style Transfer Based on Generative Adversarial Network", "doi": null, "abstractUrl": "/proceedings-article/iccnea/2021/448600a191/1yEZnAUl3qg", "parentPublication": { "id": "proceedings/iccnea/2021/4486/0", "title": "2021 International Conference on Computer Network, Electronic and Automation (ICCNEA)", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "18j8Ecq0jn2", "title": "2019 IEEE Winter Conference on Applications of Computer Vision (WACV)", "acronym": "wacv", "groupId": "1000040", "volume": "0", "displayVolume": "0", "year": "2019", "__typename": "ProceedingType" }, "article": { "id": "18j8FazPuwM", "doi": "10.1109/WACV.2019.00095", "title": "Style and Content Disentanglement in Generative Adversarial Networks", "normalizedTitle": "Style and Content Disentanglement in Generative Adversarial Networks", "abstract": "Disentangling factors of variation within data has become a very challenging problem for image generation tasks. Current frameworks for training a Generative Adversarial Network (GAN), learn to disentangle the representations of the data in an unsupervised fashion and capture the most significant factors of the data variations. However, these approaches ignore the principle of content and style disentanglement in image generation, which means their learned latent code may alter the content and style of the generated images at the same time. This paper describes the Style and Content Disentangled GAN (SC-GAN), a new unsupervised algorithm for training GANs that learns disentangled style and content representations of the data. We assume that the representation of an image can be decomposed into a content code that represents the geometrical information of the data, and a style code that captures textural properties. Consequently, by fixing the style portion of the latent representation, we can generate diverse images in a particular style. Reversely, we can set the content code and generate a specific scene in a variety of styles. The proposed SC-GAN has two components: a content code which is the input to the generator, and a style code which modifies the scene style through modification of the Adaptive Instance Normalization (AdaIN) layers' parameters. 
We evaluate the proposed SC-GAN framework on a set of baseline datasets.", "abstracts": [ { "abstractType": "Regular", "content": "Disentangling factors of variation within data has become a very challenging problem for image generation tasks. Current frameworks for training a Generative Adversarial Network (GAN), learn to disentangle the representations of the data in an unsupervised fashion and capture the most significant factors of the data variations. However, these approaches ignore the principle of content and style disentanglement in image generation, which means their learned latent code may alter the content and style of the generated images at the same time. This paper describes the Style and Content Disentangled GAN (SC-GAN), a new unsupervised algorithm for training GANs that learns disentangled style and content representations of the data. We assume that the representation of an image can be decomposed into a content code that represents the geometrical information of the data, and a style code that captures textural properties. Consequently, by fixing the style portion of the latent representation, we can generate diverse images in a particular style. Reversely, we can set the content code and generate a specific scene in a variety of styles. The proposed SC-GAN has two components: a content code which is the input to the generator, and a style code which modifies the scene style through modification of the Adaptive Instance Normalization (AdaIN) layers' parameters. We evaluate the proposed SC-GAN framework on a set of baseline datasets.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Disentangling factors of variation within data has become a very challenging problem for image generation tasks. Current frameworks for training a Generative Adversarial Network (GAN), learn to disentangle the representations of the data in an unsupervised fashion and capture the most significant factors of the data variations. 
However, these approaches ignore the principle of content and style disentanglement in image generation, which means their learned latent code may alter the content and style of the generated images at the same time. This paper describes the Style and Content Disentangled GAN (SC-GAN), a new unsupervised algorithm for training GANs that learns disentangled style and content representations of the data. We assume that the representation of an image can be decomposed into a content code that represents the geometrical information of the data, and a style code that captures textural properties. Consequently, by fixing the style portion of the latent representation, we can generate diverse images in a particular style. Reversely, we can set the content code and generate a specific scene in a variety of styles. The proposed SC-GAN has two components: a content code which is the input to the generator, and a style code which modifies the scene style through modification of the Adaptive Instance Normalization (AdaIN) layers' parameters. 
We evaluate the proposed SC-GAN framework on a set of baseline datasets.", "fno": "197500a848", "keywords": [ "Feature Extraction", "Geophysical Image Processing", "Image Classification", "Image Representation", "Image Texture", "Learning Artificial Intelligence", "Object Detection", "Unsupervised Learning", "Generative Adversarial Networks", "Image Generation Tasks", "Generative Adversarial Network", "Unsupervised Fashion", "Data Variations", "Content Style Disentanglement", "Learned Latent Code", "Style GAN", "Content Disentangled GAN", "Unsupervised Algorithm", "Training GA Ns", "Disentangled Style", "Content Representations", "Content Code", "Style Code", "Style Portion", "Latent Representation", "Scene Style", "SC GAN Framework", "Gallium Nitride", "Generative Adversarial Networks", "Generators", "Training", "Data Models", "Decoding", "Task Analysis" ], "authors": [ { "affiliation": null, "fullName": "Hadi Kazemi", "givenName": "Hadi", "surname": "Kazemi", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Seyed Mehdi Iranmanesh", "givenName": "Seyed Mehdi", "surname": "Iranmanesh", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Nasser Nasrabadi", "givenName": "Nasser", "surname": "Nasrabadi", "__typename": "ArticleAuthorType" } ], "idPrefix": "wacv", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2019-01-01T00:00:00", "pubType": "proceedings", "pages": "848-856", "year": "2019", "issn": "1550-5790", "isbn": "978-1-7281-1975-5", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "197500a839", "articleId": "18j8MgVDOk8", "__typename": "AdjacentArticleType" }, "next": { "fno": "197500a857", "articleId": "18j8IQyQwLu", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icpr/2018/3788/0/08545894", "title": "Data 
Augmentation with Improved Generative Adversarial Networks", "doi": null, "abstractUrl": "/proceedings-article/icpr/2018/08545894/17D45WKWnJc", "parentPublication": { "id": "proceedings/icpr/2018/3788/0", "title": "2018 24th International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2018/6420/0/642000h564", "title": "Multi-content GAN for Few-Shot Font Style Transfer", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2018/642000h564/17D45WXIkCO", "parentPublication": { "id": "proceedings/cvpr/2018/6420/0", "title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2018/6420/0/642000a021", "title": "Finding Tiny Faces in the Wild with Generative Adversarial Network", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2018/642000a021/17D45WwsQ4S", "parentPublication": { "id": "proceedings/cvpr/2018/6420/0", "title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2018/6100/0/610000c165", "title": "Generative Adversarial Style Transfer Networks for Face Aging", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2018/610000c165/17D45Xcttm9", "parentPublication": { "id": "proceedings/cvprw/2018/6100/0", "title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2021/2812/0/281200g363", "title": "DRB-GAN: A Dynamic ResBlock Generative Adversarial Network for Artistic Style Transfer", "doi": null, "abstractUrl": "/proceedings-article/iccv/2021/281200g363/1BmEIaAvaV2", "parentPublication": { "id": "proceedings/iccv/2021/2812/0", "title": "2021 IEEE/CVF 
International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2019/3131/0/313100a018", "title": "Structured Coupled Generative Adversarial Networks for Unsupervised Monocular Depth Estimation", "doi": null, "abstractUrl": "/proceedings-article/3dv/2019/313100a018/1ezRC2U52Vi", "parentPublication": { "id": "proceedings/3dv/2019/3131/0", "title": "2019 International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2020/6553/0/09093323", "title": "Semantic Consistency and Identity Mapping Multi-Component Generative Adversarial Network for Person Re-Identification", "doi": null, "abstractUrl": "/proceedings-article/wacv/2020/09093323/1jPbq7IInh6", "parentPublication": { "id": "proceedings/wacv/2020/6553/0", "title": "2020 IEEE Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cibda/2020/9837/0/983700a193", "title": "A Collaborative Filtering Framework Based on Variational Autoencoders and Generative Adversarial Networks", "doi": null, "abstractUrl": "/proceedings-article/cibda/2020/983700a193/1lO1JKZxurS", "parentPublication": { "id": "proceedings/cibda/2020/9837/0", "title": "2020 International Conference on Computer Information and Big Data Applications (CIBDA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2020/9360/0/09151087", "title": "Structure Preserving Compressive Sensing MRI Reconstruction using Generative Adversarial Networks", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2020/09151087/1lPH4HitLwI", "parentPublication": { "id": "proceedings/cvprw/2020/9360/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, 
"__typename": "RecommendedArticleType" }, { "id": "proceedings/sibgrapi/2020/9274/0/927400a001", "title": "Why are Generative Adversarial Networks so Fascinating and Annoying?", "doi": null, "abstractUrl": "/proceedings-article/sibgrapi/2020/927400a001/1p2VAkdrCXC", "parentPublication": { "id": "proceedings/sibgrapi/2020/9274/0", "title": "2020 33rd SIBGRAPI Conference on Graphics, Patterns and Images (SIBGRAPI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1BmEezmpGrm", "title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)", "acronym": "iccv", "groupId": "1000149", "volume": "0", "displayVolume": "0", "year": "2021", "__typename": "ProceedingType" }, "article": { "id": "1BmEIaAvaV2", "doi": "10.1109/ICCV48922.2021.00632", "title": "DRB-GAN: A Dynamic ResBlock Generative Adversarial Network for Artistic Style Transfer", "normalizedTitle": "DRB-GAN: A Dynamic ResBlock Generative Adversarial Network for Artistic Style Transfer", "abstract": "The paper proposes a Dynamic ResBlock Generative Adversarial Network (DRB-GAN) for artistic style transfer. The style code is modeled as the shared parameters for Dynamic ResBlocks connecting both the style encoding network and the style transfer network. In the style encoding network, a style class-aware attention mechanism is used to attend the style feature representation for generating the style codes. In the style transfer network, multiple Dynamic ResBlocks are designed to integrate the style code and the extracted CNN semantic feature and then feed into the spatial window Layer-Instance Normalization (SW-LIN) decoder, which enables high-quality synthetic images with artistic style transfer. Moreover, the style collection conditional discriminator is designed to equip our DRB-GAN model with abilities for both arbitrary style transfer and collection style transfer during the training stage. No matter for arbitrary style transfer or collection style transfer, extensive experiments strongly demonstrate that our proposed DRB-GAN outperforms state-of-the-art methods and exhibits its superior performance in terms of visual quality and efficiency. Our source code is available at https://github.com/xuwenju123/DRB-GAN.", "abstracts": [ { "abstractType": "Regular", "content": "The paper proposes a Dynamic ResBlock Generative Adversarial Network (DRB-GAN) for artistic style transfer. 
The style code is modeled as the shared parameters for Dynamic ResBlocks connecting both the style encoding network and the style transfer network. In the style encoding network, a style class-aware attention mechanism is used to attend the style feature representation for generating the style codes. In the style transfer network, multiple Dynamic ResBlocks are designed to integrate the style code and the extracted CNN semantic feature and then feed into the spatial window Layer-Instance Normalization (SW-LIN) decoder, which enables high-quality synthetic images with artistic style transfer. Moreover, the style collection conditional discriminator is designed to equip our DRB-GAN model with abilities for both arbitrary style transfer and collection style transfer during the training stage. No matter for arbitrary style transfer or collection style transfer, extensive experiments strongly demonstrate that our proposed DRB-GAN outperforms state-of-the-art methods and exhibits its superior performance in terms of visual quality and efficiency. Our source code is available at https://github.com/xuwenju123/DRB-GAN.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "The paper proposes a Dynamic ResBlock Generative Adversarial Network (DRB-GAN) for artistic style transfer. The style code is modeled as the shared parameters for Dynamic ResBlocks connecting both the style encoding network and the style transfer network. In the style encoding network, a style class-aware attention mechanism is used to attend the style feature representation for generating the style codes. In the style transfer network, multiple Dynamic ResBlocks are designed to integrate the style code and the extracted CNN semantic feature and then feed into the spatial window Layer-Instance Normalization (SW-LIN) decoder, which enables high-quality synthetic images with artistic style transfer. 
Moreover, the style collection conditional discriminator is designed to equip our DRB-GAN model with abilities for both arbitrary style transfer and collection style transfer during the training stage. No matter for arbitrary style transfer or collection style transfer, extensive experiments strongly demonstrate that our proposed DRB-GAN outperforms state-of-the-art methods and exhibits its superior performance in terms of visual quality and efficiency. Our source code is available at https://github.com/xuwenju123/DRB-GAN.", "fno": "281200g363", "keywords": [ "Training", "Visualization", "Codes", "Computational Modeling", "Semantics", "Generative Adversarial Networks", "Feature Extraction", "Neural Generative Models", "Adversarial Learning", "Image And Video Synthesis" ], "authors": [ { "affiliation": "InnoPeak Technology Inc,OPPO US Research Center,Palo Alto,CA,USA", "fullName": "Wenju Xu", "givenName": "Wenju", "surname": "Xu", "__typename": "ArticleAuthorType" }, { "affiliation": "JD Finance America Corporation,Mountain View,CA,USA", "fullName": "Chengjiang Long", "givenName": "Chengjiang", "surname": "Long", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Calgary,Department of Geomatics Engineering,Alberta,Canada", "fullName": "Ruisheng Wang", "givenName": "Ruisheng", "surname": "Wang", "__typename": "ArticleAuthorType" }, { "affiliation": "Ryerson University,Department of Computer Science,Toronto,ON,Canada", "fullName": "Guanghui Wang", "givenName": "Guanghui", "surname": "Wang", "__typename": "ArticleAuthorType" } ], "idPrefix": "iccv", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2021-10-01T00:00:00", "pubType": "proceedings", "pages": "6363-6372", "year": "2021", "issn": null, "isbn": "978-1-6654-2812-5", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "281200g353", "articleId": "1BmILJHle2A", 
"__typename": "AdjacentArticleType" }, "next": { "fno": "281200g373", "articleId": "1BmFhhduyVG", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/wacv/2019/1975/0/197500a848", "title": "Style and Content Disentanglement in Generative Adversarial Networks", "doi": null, "abstractUrl": "/proceedings-article/wacv/2019/197500a848/18j8FazPuwM", "parentPublication": { "id": "proceedings/wacv/2019/1975/0", "title": "2019 IEEE Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cacml/2022/8290/0/829000a181", "title": "Artistic Text Effect Transfer with Conditonal Generative Adversarial Network", "doi": null, "abstractUrl": "/proceedings-article/cacml/2022/829000a181/1FY1nhwXzOg", "parentPublication": { "id": "proceedings/cacml/2022/8290/0", "title": "2022 Asia Conference on Algorithms, Computing and Machine Learning (CACML)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmew/2022/7218/0/09859510", "title": "Tachiegan: Generative Adversarial Networks for Tachie Style Transfer", "doi": null, "abstractUrl": "/proceedings-article/icmew/2022/09859510/1G4F4aecPqE", "parentPublication": { "id": "proceedings/icmew/2022/7218/0", "title": "2022 IEEE International Conference on Multimedia and Expo Workshops (ICMEW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2019/9552/0/955200a910", "title": "Semantic GAN: Application for Cross-Domain Image Style Transfer", "doi": null, "abstractUrl": "/proceedings-article/icme/2019/955200a910/1cdOFuQjESA", "parentPublication": { "id": "proceedings/icme/2019/9552/0", "title": "2019 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/icdar/2019/3014/0/301400a178", "title": "TH-GAN: Generative Adversarial Network Based Transfer Learning for Historical Chinese Character Recognition", "doi": null, "abstractUrl": "/proceedings-article/icdar/2019/301400a178/1h81u6jDzSE", "parentPublication": { "id": "proceedings/icdar/2019/3014/0", "title": "2019 International Conference on Document Analysis and Recognition (ICDAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ictai/2020/9228/0/922800a694", "title": "Realistic Style-Transfer Generative Adversarial Network With a Weight-Sharing Strategy", "doi": null, "abstractUrl": "/proceedings-article/ictai/2020/922800a694/1pP3zTKL8xq", "parentPublication": { "id": "proceedings/ictai/2020/9228/0", "title": "2020 IEEE 32nd International Conference on Tools with Artificial Intelligence (ICTAI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2022/07/09339900", "title": "Shape-Matching GAN++: Scale Controllable Dynamic Artistic Text Style Transfer", "doi": null, "abstractUrl": "/journal/tp/2022/07/09339900/1qL54N4119S", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ifeea/2020/9627/0/962700a451", "title": "Artistic Text Style Transfer based on Generative Adversarial Networks", "doi": null, "abstractUrl": "/proceedings-article/ifeea/2020/962700a451/1rvCEvTEN7W", "parentPublication": { "id": "proceedings/ifeea/2020/9627/0", "title": "2020 7th International Forum on Electrical Engineering and Automation (IFEEA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/10/09382902", "title": "GAN-Based Multi-Style Photo Cartoonization", "doi": null, "abstractUrl": "/journal/tg/2022/10/09382902/1saZlQvlf0Y", "parentPublication": { "id": 
"trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/mipr/2021/1865/0/186500a063", "title": "Multi-Style Transfer Generative Adversarial Network for Text Images", "doi": null, "abstractUrl": "/proceedings-article/mipr/2021/186500a063/1xPsjXkDspq", "parentPublication": { "id": "proceedings/mipr/2021/1865/0", "title": "2021 IEEE 4th International Conference on Multimedia Information Processing and Retrieval (MIPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1KaHFgmCCXK", "title": "2022 IEEE International Symposium on Multimedia (ISM)", "acronym": "ism", "groupId": "1001094", "volume": "0", "displayVolume": "0", "year": "2022", "__typename": "ProceedingType" }, "article": { "id": "1KaHHXHrp7O", "doi": "10.1109/ISM55400.2022.00062", "title": "Conditional GAN for Small Datasets", "normalizedTitle": "Conditional GAN for Small Datasets", "abstract": "Generating high-quality images with Generative Adversarial Networks (GANs) generally requires 100k+ training data. The required data amount is too large when we consider using GANs to support professional art creators; they need to follow the specific art style while interactively controlling the results along with their theme. This research proposes Conditional FastGAN, which adds a condition vector to FastGAN to produce high-quality different domain images even on small datasets. In our experiments, the MUCT Face Database of images consisting of face photos in various orientations and manga face images extracted from Osamu Tezuka&#x2019;s works were used as a small-scale dataset. Fine-tuning with manga face images to a model pre-trained with photo-only face images enabled control of the generated images according to explicit conditions, such as photos and manga, for the same latent variables. In addition, the proposed method improved the FID score by 2.55 from the original FastGAN in the case of manga face generation.", "abstracts": [ { "abstractType": "Regular", "content": "Generating high-quality images with Generative Adversarial Networks (GANs) generally requires 100k+ training data. The required data amount is too large when we consider using GANs to support professional art creators; they need to follow the specific art style while interactively controlling the results along with their theme. 
This research proposes Conditional FastGAN, which adds a condition vector to FastGAN to produce high-quality different domain images even on small datasets. In our experiments, the MUCT Face Database of images consisting of face photos in various orientations and manga face images extracted from Osamu Tezuka&#x2019;s works were used as a small-scale dataset. Fine-tuning with manga face images to a model pre-trained with photo-only face images enabled control of the generated images according to explicit conditions, such as photos and manga, for the same latent variables. In addition, the proposed method improved the FID score by 2.55 from the original FastGAN in the case of manga face generation.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Generating high-quality images with Generative Adversarial Networks (GANs) generally requires 100k+ training data. The required data amount is too large when we consider using GANs to support professional art creators; they need to follow the specific art style while interactively controlling the results along with their theme. This research proposes Conditional FastGAN, which adds a condition vector to FastGAN to produce high-quality different domain images even on small datasets. In our experiments, the MUCT Face Database of images consisting of face photos in various orientations and manga face images extracted from Osamu Tezuka’s works were used as a small-scale dataset. Fine-tuning with manga face images to a model pre-trained with photo-only face images enabled control of the generated images according to explicit conditions, such as photos and manga, for the same latent variables. 
In addition, the proposed method improved the FID score by 2.55 from the original FastGAN in the case of manga face generation.", "fno": "717200a278", "keywords": [ "Art", "Face Recognition", "Feature Extraction", "Condition Vector", "Conditional Fast GAN", "Face Photos", "GA Ns", "Generative Adversarial Networks", "High Quality Different Domain Images", "High Quality Images", "Manga Face Generation", "Manga Face Images", "Original Fast GAN", "Photo Only Face Images", "Professional Art Creators", "Required Data Amount", "Small Scale Dataset", "Specific Art Style", "Training Data", "Training", "Art", "Image Synthesis", "Databases", "Training Data", "Generative Adversarial Networks", "Generators", "Conditional GA Ns", "Deep Generative Model", "Manga" ], "authors": [ { "affiliation": "Keio University,Graduate School of Science and Technology,Kanagawa,Japan", "fullName": "Komei Hiruta", "givenName": "Komei", "surname": "Hiruta", "__typename": "ArticleAuthorType" }, { "affiliation": "Keio University,Graduate School of Science and Technology,Kanagawa,Japan", "fullName": "Ryusuke Saito", "givenName": "Ryusuke", "surname": "Saito", "__typename": "ArticleAuthorType" }, { "affiliation": "Keio University,Graduate School of Science and Technology,Kanagawa,Japan", "fullName": "Taro Hatakeyama", "givenName": "Taro", "surname": "Hatakeyama", "__typename": "ArticleAuthorType" }, { "affiliation": "Keio University,Graduate School of Science and Technology,Kanagawa,Japan", "fullName": "Atsushi Hashimoto", "givenName": "Atsushi", "surname": "Hashimoto", "__typename": "ArticleAuthorType" }, { "affiliation": "Keio University,Faculty of Science and Technology,Kanagawa,Japan", "fullName": "Satoshi Kurihara", "givenName": "Satoshi", "surname": "Kurihara", "__typename": "ArticleAuthorType" } ], "idPrefix": "ism", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2022-12-01T00:00:00", "pubType": "proceedings", "pages": "278-281", "year": 
"2022", "issn": null, "isbn": "978-1-6654-7172-5", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "717200a273", "articleId": "1KaHN4jQBzy", "__typename": "AdjacentArticleType" }, "next": { "fno": "717200a283", "articleId": "1KaHJKlSxR6", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/cvpr/2018/6420/0/642000a821", "title": "FaceID-GAN: Learning a Symmetry Three-Player GAN for Identity-Preserving Face Synthesis", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2018/642000a821/17D45Xh13pk", "parentPublication": { "id": "proceedings/cvpr/2018/6420/0", "title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icbase/2021/2709/0/270900a601", "title": "The comparison between Conditional Generative Adversarial Nets and Deep Convolutional Generative Adversarial Network, and its GUI-related application", "doi": null, "abstractUrl": "/proceedings-article/icbase/2021/270900a601/1AH8fU796co", "parentPublication": { "id": "proceedings/icbase/2021/2709/0", "title": "2021 2nd International Conference on Big Data & Artificial Intelligence & Software Engineering (ICBASE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2021/2812/0/281200o4074", "title": "Towards Discovery and Attribution of Open-world GAN Generated Images", "doi": null, "abstractUrl": "/proceedings-article/iccv/2021/281200o4074/1BmKSlbvd0Q", "parentPublication": { "id": "proceedings/iccv/2021/2812/0", "title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdh/2022/5478/0/547800a050", "title": "VCL-GAN: A Variational Contrastive Learning Generative 
Adversarial Network for Image Synthesis", "doi": null, "abstractUrl": "/proceedings-article/icdh/2022/547800a050/1JeDs4zZhqE", "parentPublication": { "id": "proceedings/icdh/2022/5478/0", "title": "2022 9th International Conference on Digital Home (ICDH)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ipdps/2019/1246/0/124600a866", "title": "MD-GAN: Multi-Discriminator Generative Adversarial Networks for Distributed Datasets", "doi": null, "abstractUrl": "/proceedings-article/ipdps/2019/124600a866/1cYhReGG1YA", "parentPublication": { "id": "proceedings/ipdps/2019/1246/0", "title": "2019 IEEE International Parallel and Distributed Processing Symposium (IPDPS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2019/4803/0/480300e501", "title": "Seeing What a GAN Cannot Generate", "doi": null, "abstractUrl": "/proceedings-article/iccv/2019/480300e501/1hVlGKg2532", "parentPublication": { "id": "proceedings/iccv/2019/4803/0", "title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/mipr/2020/4272/0/427200a314", "title": "Face Aging with Conditional Generative Adversarial Network Guided by Ranking-CNN", "doi": null, "abstractUrl": "/proceedings-article/mipr/2020/427200a314/1mAa24QPsEU", "parentPublication": { "id": "proceedings/mipr/2020/4272/0", "title": "2020 IEEE Conference on Multimedia Information Processing and Retrieval (MIPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2021/0477/0/047700d188", "title": "LT-GAN: Self-Supervised GAN with Latent Transformation Detection", "doi": null, "abstractUrl": "/proceedings-article/wacv/2021/047700d188/1uqGqafzEOI", "parentPublication": { "id": "proceedings/wacv/2021/0477/0", "title": "2021 IEEE Winter Conference on Applications of 
Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2021/4509/0/450900m2162", "title": "Efficient Conditional GAN Transfer with Knowledge Propagation across Classes", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2021/450900m2162/1yeIW6wG7Ha", "parentPublication": { "id": "proceedings/cvpr/2021/4509/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2021/4509/0/450900a672", "title": "GAN Prior Embedded Network for Blind Face Restoration in the Wild", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2021/450900a672/1yeIXi2vdh6", "parentPublication": { "id": "proceedings/cvpr/2021/4509/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1cdOEoawzMQ", "title": "2019 IEEE International Conference on Multimedia and Expo (ICME)", "acronym": "icme", "groupId": "1000477", "volume": "0", "displayVolume": "0", "year": "2019", "__typename": "ProceedingType" }, "article": { "id": "1cdOFuQjESA", "doi": "10.1109/ICME.2019.00161", "title": "Semantic GAN: Application for Cross-Domain Image Style Transfer", "normalizedTitle": "Semantic GAN: Application for Cross-Domain Image Style Transfer", "abstract": "Image style transfer has attracted much attention from many fields and received promising performance. However, style transfer in the cross-domain field, e.g., the transfer between near-infrared and visible light images, is rarely studied. In the cross-domain image style transfer, one key issue is mismatching problem existing in the generated semantic regions. In this paper, we propose a novel model of Semantic GAN, which integrates the semantic guidance and the recent CycleGAN. In particular, we present a semantic style loss with Gram matrix to well preserve the semantic information in the generated images. The proposed Semantic GAN can control the transfer in the right way with semantic masks and solve the mismatching problem. We apply our approach to two outdoor scene datasets to evaluate the performance of all competing methods. The experimental results show that our approach outperforms previous methods in addressing the mismatching problem and providing a good quality result.", "abstracts": [ { "abstractType": "Regular", "content": "Image style transfer has attracted much attention from many fields and received promising performance. However, style transfer in the cross-domain field, e.g., the transfer between near-infrared and visible light images, is rarely studied. In the cross-domain image style transfer, one key issue is mismatching problem existing in the generated semantic regions. 
In this paper, we propose a novel model of Semantic GAN, which integrates the semantic guidance and the recent CycleGAN. In particular, we present a semantic style loss with Gram matrix to well preserve the semantic information in the generated images. The proposed Semantic GAN can control the transfer in the right way with semantic masks and solve the mismatching problem. We apply our approach to two outdoor scene datasets to evaluate the performance of all competing methods. The experimental results show that our approach outperforms previous methods in addressing the mismatching problem and providing a good quality result.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Image style transfer has attracted much attention from many fields and received promising performance. However, style transfer in the cross-domain field, e.g., the transfer between near-infrared and visible light images, is rarely studied. In the cross-domain image style transfer, one key issue is mismatching problem existing in the generated semantic regions. In this paper, we propose a novel model of Semantic GAN, which integrates the semantic guidance and the recent CycleGAN. In particular, we present a semantic style loss with Gram matrix to well preserve the semantic information in the generated images. The proposed Semantic GAN can control the transfer in the right way with semantic masks and solve the mismatching problem. We apply our approach to two outdoor scene datasets to evaluate the performance of all competing methods. 
The experimental results show that our approach outperforms previous methods in addressing the mismatching problem and providing a good quality result.", "fno": "955200a910", "keywords": [ "Feature Extraction", "Image Classification", "Image Fusion", "Image Motion Analysis", "Image Representation", "Image Segmentation", "Image Texture", "Neural Nets", "Object Detection", "Video Signal Processing", "Semantic GAN", "Cross Domain Image Style Transfer", "Cross Domain Field", "Near Infrared Images", "Visible Light Images", "Semantic Guidance", "Semantic Style Loss", "Semantic Information", "Semantic Masks", "Cycle GAN", "Semantics", "Gallium Nitride", "Image Segmentation", "Generative Adversarial Networks", "Generators", "Training", "Image Synthesis", "Semantic GAN", "Style Transfer", "Cross Domain", "Mismatching Problem" ], "authors": [ { "affiliation": "Sun Yat-sen University, China", "fullName": "Pengfei Li", "givenName": "Pengfei", "surname": "Li", "__typename": "ArticleAuthorType" }, { "affiliation": "Sun Yat-sen University, China; Key Laboratory of Machine Intelligence and Advanced Computing (SYSU)", "fullName": "Meng Yang", "givenName": "Meng", "surname": "Yang", "__typename": "ArticleAuthorType" } ], "idPrefix": "icme", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2019-07-01T00:00:00", "pubType": "proceedings", "pages": "910-915", "year": "2019", "issn": null, "isbn": "978-1-5386-9552-4", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "955200a904", "articleId": "1cdOVtOx8ha", "__typename": "AdjacentArticleType" }, "next": { "fno": "955200a916", "articleId": "1cdOGuvOsXC", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icpr/2018/3788/0/08546172", "title": "Artsy-GAN: A style transfer system with improved quality, diversity and performance", "doi": null, 
"abstractUrl": "/proceedings-article/icpr/2018/08546172/17D45VTRoxq", "parentPublication": { "id": "proceedings/icpr/2018/3788/0", "title": "2018 24th International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/acpr/2017/3354/0/3354a506", "title": "Style Transfer for Anime Sketches with Enhanced Residual U-net and Auxiliary Classifier GAN", "doi": null, "abstractUrl": "/proceedings-article/acpr/2017/3354a506/17D45WHONmU", "parentPublication": { "id": "proceedings/acpr/2017/3354/0", "title": "2017 4th IAPR Asian Conference on Pattern Recognition (ACPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2018/6420/0/642000h564", "title": "Multi-content GAN for Few-Shot Font Style Transfer", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2018/642000h564/17D45WXIkCO", "parentPublication": { "id": "proceedings/cvpr/2018/6420/0", "title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2018/6420/0/642000a040", "title": "PairedCycleGAN: Asymmetric Style Transfer for Applying and Removing Makeup", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2018/642000a040/17D45WnnFWY", "parentPublication": { "id": "proceedings/cvpr/2018/6420/0", "title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2021/2812/0/281200g363", "title": "DRB-GAN: A Dynamic ResBlock Generative Adversarial Network for Artistic Style Transfer", "doi": null, "abstractUrl": "/proceedings-article/iccv/2021/281200g363/1BmEIaAvaV2", "parentPublication": { "id": "proceedings/iccv/2021/2812/0", "title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bigmm/2019/5527/0/552700a068", "title": "User Input Based Style Transfer While Retaining Facial Attributes", "doi": null, "abstractUrl": "/proceedings-article/bigmm/2019/552700a068/1fHjJrdShOw", "parentPublication": { "id": "proceedings/bigmm/2019/5527/0", "title": "2019 IEEE Fifth International Conference on Multimedia Big Data (BigMM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdar/2019/3014/0/301400a178", "title": "TH-GAN: Generative Adversarial Network Based Transfer Learning for Historical Chinese Character Recognition", "doi": null, "abstractUrl": "/proceedings-article/icdar/2019/301400a178/1h81u6jDzSE", "parentPublication": { "id": "proceedings/icdar/2019/3014/0", "title": "2019 International Conference on Document Analysis and Recognition (ICDAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccvw/2019/5023/0/502300d169", "title": "Generative Modelling of Semantic Segmentation Data in the Fashion Domain", "doi": null, "abstractUrl": "/proceedings-article/iccvw/2019/502300d169/1i5mv3eCRl6", "parentPublication": { "id": "proceedings/iccvw/2019/5023/0", "title": "2019 IEEE/CVF International Conference on Computer Vision Workshop (ICCVW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2022/07/09339900", "title": "Shape-Matching GAN++: Scale Controllable Dynamic Artistic Text Style Transfer", "doi": null, "abstractUrl": "/journal/tp/2022/07/09339900/1qL54N4119S", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/10/09382902", "title": "GAN-Based Multi-Style Photo Cartoonization", "doi": null, "abstractUrl": "/journal/tg/2022/10/09382902/1saZlQvlf0Y", 
"parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }