data
dict
{ "proceeding": { "id": "12OmNxwENvc", "title": "2004 IEEE International Conference on Acoustics, Speech, and Signal Processing", "acronym": "icassp", "groupId": "1000002", "volume": "3", "displayVolume": "3", "year": "2004", "__typename": "ProceedingType" }, "article": { "id": "12OmNAXxXdU", "doi": "10.1109/ICASSP.2004.1326466", "title": "Robust two-camera tracking using homography", "normalizedTitle": "Robust two-camera tracking using homography", "abstract": "The paper introduces a two view tracking method which uses the homography relation between the two views to handle occlusions. An adaptive appearance-based model is incorporated in a particle filter to realize robust visual tracking. Occlusion is detected using robust statistics. When there is occlusion in one view, the homography from this view to other views is estimated from previous tracking results and used to infer the correct transformation for the occluded view. Experimental results show the robustness of the two view tracker.", "abstracts": [ { "abstractType": "Regular", "content": "The paper introduces a two view tracking method which uses the homography relation between the two views to handle occlusions. An adaptive appearance-based model is incorporated in a particle filter to realize robust visual tracking. Occlusion is detected using robust statistics. When there is occlusion in one view, the homography from this view to other views is estimated from previous tracking results and used to infer the correct transformation for the occluded view. Experimental results show the robustness of the two view tracker.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "The paper introduces a two view tracking method which uses the homography relation between the two views to handle occlusions. An adaptive appearance-based model is incorporated in a particle filter to realize robust visual tracking. Occlusion is detected using robust statistics. 
When there is occlusion in one view, the homography from this view to other views is estimated from previous tracking results and used to infer the correct transformation for the occluded view. Experimental results show the robustness of the two view tracker.", "fno": "01326466", "keywords": [ "Optical Tracking", "Nonlinear Filters", "Monte Carlo Methods", "Video Signal Processing", "Image Sequences", "Two Camera Tracking", "Homography", "Two View Tracking Method", "Occlusions", "Particle Filter", "Nonlinear Filter", "Visual Tracking", "Robust Statistics", "Sequential Monte Carlo Framework", "Video Frame Processing", "Robustness", "Target Tracking", "Cameras", "Statistics", "Maximum Likelihood Detection", "Bayesian Methods", "Collaboration", "Government", "Fuses", "Motion Estimation" ], "authors": [ { "affiliation": "Dept. of Electr. & Comput. Eng., Maryland Univ., College Park, MD, USA", "fullName": "Zhanfeng Yue", "givenName": null, "surname": "Zhanfeng Yue", "__typename": "ArticleAuthorType" }, { "affiliation": "Dept. of Electr. & Comput. Eng., Maryland Univ., College Park, MD, USA", "fullName": "S.K. Zhou", "givenName": "S.K.", "surname": "Zhou", "__typename": "ArticleAuthorType" }, { "affiliation": "Dept. of Electr. & Comput. Eng., Maryland Univ., College Park, MD, USA", "fullName": "R. 
Chellappa", "givenName": "R.", "surname": "Chellappa", "__typename": "ArticleAuthorType" } ], "idPrefix": "icassp", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2004-01-01T00:00:00", "pubType": "proceedings", "pages": "iii-1-4 vol.3", "year": "2004", "issn": "1520-6149", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "01326465", "articleId": "12OmNxYL5b9", "__typename": "AdjacentArticleType" }, "next": { "fno": "01326467", "articleId": "12OmNrHB1Xf", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icig/2009/3883/0/3883a349", "title": "A Novel Multi-planar Homography Constraint Algorithm for Robust Multi-people Location with Severe Occlusion", "doi": null, "abstractUrl": "/proceedings-article/icig/2009/3883a349/12OmNCd2rEx", "parentPublication": { "id": "proceedings/icig/2009/3883/0", "title": "Image and Graphics, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/avss/2010/4264/0/4264a294", "title": "Multi Camera-Based Person Tracking Using Region Covariance and Homography Constraint", "doi": null, "abstractUrl": "/proceedings-article/avss/2010/4264a294/12OmNCdk2Vm", "parentPublication": { "id": "proceedings/avss/2010/4264/0", "title": "2010 7th IEEE International Conference on Advanced Video and Signal Based Surveillance", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/1994/5825/0/00323889", "title": "A maximum likelihood N-camera stereo algorithm", "doi": null, "abstractUrl": "/proceedings-article/cvpr/1994/00323889/12OmNqGA5aB", "parentPublication": { "id": "proceedings/cvpr/1994/5825/0", "title": "Proceedings of IEEE Conference on Computer Vision and Pattern Recognition", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2011/0529/0/05981790", "title": "Occlusion robust multi-camera face tracking", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2011/05981790/12OmNqyUUFo", "parentPublication": { "id": "proceedings/cvprw/2011/0529/0", "title": "CVPR 2011 WORKSHOPS", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/avss/2005/9385/0/01577318", "title": "A modular multi-camera framework for team sports tracking", "doi": null, "abstractUrl": "/proceedings-article/avss/2005/01577318/12OmNrJiD04", "parentPublication": { "id": "proceedings/avss/2005/9385/0", "title": "IEEE Conference on Advanced Video and Signal Based Surveillance, 2005.", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/dicta/2010/4271/0/4271a480", "title": "Multiple Homography Estimation with Full Consistency Constraints", "doi": null, "abstractUrl": "/proceedings-article/dicta/2010/4271a480/12OmNvjQ8Qb", "parentPublication": { "id": "proceedings/dicta/2010/4271/0", "title": "2010 International Conference on Digital Image Computing: Techniques and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2006/0366/0/04036582", "title": "Decentralized Multiple Camera Multiple Object Tracking", "doi": null, "abstractUrl": "/proceedings-article/icme/2006/04036582/12OmNyqzLXB", "parentPublication": { "id": "proceedings/icme/2006/0366/0", "title": "2006 IEEE International Conference on Multimedia and Expo", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icece/2010/4031/0/4031a001", "title": "Extended Online HECOL Based Multi-camera Object Tracking", "doi": null, "abstractUrl": "/proceedings-article/icece/2010/4031a001/12OmNzC5TfY", "parentPublication": { "id": "proceedings/icece/2010/4031/0", "title": "Electrical 
and Control Engineering, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmlc/2003/7865/5/01260052", "title": "A new algorithm for 3D projective reconstruction based on infinite homography", "doi": null, "abstractUrl": "/proceedings-article/icmlc/2003/01260052/12OmNzuZUvD", "parentPublication": { "id": "proceedings/icmlc/2003/7865/1", "title": "Proceedings of the 2003 International Conference on Machine Learning and Cybernetics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2020/7168/0/716800g079", "title": "Robust Homography Estimation via Dual Principal Component Pursuit", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2020/716800g079/1m3ob6nHNWo", "parentPublication": { "id": "proceedings/cvpr/2020/7168/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNx8wTfL", "title": "ICPR 2008 19th International Conference on Pattern Recognition", "acronym": "icpr", "groupId": "1000545", "volume": "0", "displayVolume": "0", "year": "2008", "__typename": "ProceedingType" }, "article": { "id": "12OmNBtl1Ez", "doi": "10.1109/ICPR.2008.4761044", "title": "Robust tracking of spatial related components", "normalizedTitle": "Robust tracking of spatial related components", "abstract": "This paper introduces a hierarchical approach for multi-component tracking, where the object-to-be-tracked is modeled as a group of spatial related parts. We propose to use a robust particle filtering framework for tracking the individual components and outline how the spatial coherency between the parts can be efficiently integrated by analyzing a two-level hierarchy of particle filters. Including spatial information allows to handle common tracking problems like occlusions, clutter or blur. Furthermore, the dynamic calculation of particle set uncertainties allows a dynamic adaption of stiffness values for the spatial model to e. g. force occluded parts to stay in spatial relation. The experimental section proves the robustness of the proposed tracker on challenging sequences of the VIVID-PETS database.", "abstracts": [ { "abstractType": "Regular", "content": "This paper introduces a hierarchical approach for multi-component tracking, where the object-to-be-tracked is modeled as a group of spatial related parts. We propose to use a robust particle filtering framework for tracking the individual components and outline how the spatial coherency between the parts can be efficiently integrated by analyzing a two-level hierarchy of particle filters. Including spatial information allows to handle common tracking problems like occlusions, clutter or blur. Furthermore, the dynamic calculation of particle set uncertainties allows a dynamic adaption of stiffness values for the spatial model to e. g. 
force occluded parts to stay in spatial relation. The experimental section proves the robustness of the proposed tracker on challenging sequences of the VIVID-PETS database.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "This paper introduces a hierarchical approach for multi-component tracking, where the object-to-be-tracked is modeled as a group of spatial related parts. We propose to use a robust particle filtering framework for tracking the individual components and outline how the spatial coherency between the parts can be efficiently integrated by analyzing a two-level hierarchy of particle filters. Including spatial information allows to handle common tracking problems like occlusions, clutter or blur. Furthermore, the dynamic calculation of particle set uncertainties allows a dynamic adaption of stiffness values for the spatial model to e. g. force occluded parts to stay in spatial relation. The experimental section proves the robustness of the proposed tracker on challenging sequences of the VIVID-PETS database.", "fno": "04761044", "keywords": [ "Computer Vision", "Particle Filtering Numerical Methods", "Robust Tracking", "Spatial Related Components", "Multi Component Tracking", "Robust Particle Filtering Framework", "Particle Set Uncertainties", "Stiffness Values", "VIVID PETS Database", "Robustness", "Particle Filters", "Particle Tracking", "Filtering", "State Space Methods", "Spatial Coherence", "Uncertainty", "Computational Complexity", "Computer Graphics", "Spatial Databases" ], "authors": [ { "affiliation": "Institute for Computer Graphics and Vision, Graz University of Technology, Austria", "fullName": "Thomas Mauthner", "givenName": "Thomas", "surname": "Mauthner", "__typename": "ArticleAuthorType" }, { "affiliation": "Institute for Computer Graphics and Vision, Graz University of Technology, Austria", "fullName": "Michael Donoser", "givenName": "Michael", "surname": "Donoser", "__typename": "ArticleAuthorType" }, { 
"affiliation": "Institute for Computer Graphics and Vision, Graz University of Technology, Austria", "fullName": "Horst Bischof", "givenName": "Horst", "surname": "Bischof", "__typename": "ArticleAuthorType" } ], "idPrefix": "icpr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2008-12-01T00:00:00", "pubType": "proceedings", "pages": "", "year": "2008", "issn": "1051-4651", "isbn": "978-1-4244-2174-9", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "04761043", "articleId": "12OmNBA9oBJ", "__typename": "AdjacentArticleType" }, "next": { "fno": "04761045", "articleId": "12OmNAio71c", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icmlc/2003/7865/2/01259628", "title": "A new smoothing particle filter for tracking a maneuvering target", "doi": null, "abstractUrl": "/proceedings-article/icmlc/2003/01259628/12OmNAXglU5", "parentPublication": { "id": "proceedings/icmlc/2003/7865/2", "title": "Proceedings of the 2003 International Conference on Machine Learning and Cybernetics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2004/2159/5/215950070", "title": "A Particle Filter without Dynamics for Robust 3D Face Tracking", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2004/215950070/12OmNC8uRzn", "parentPublication": { "id": "proceedings/cvprw/2004/2159/5", "title": "2004 Conference on Computer Vision and Pattern Recognition Workshop", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icicta/2009/3804/1/3804a250", "title": "Covariance Tracking via Geometric Particle Filtering", "doi": null, "abstractUrl": "/proceedings-article/icicta/2009/3804a250/12OmNvnfkdK", "parentPublication": { "id": "proceedings/icicta/2009/3804/1", "title": "Intelligent 
Computation Technology and Automation, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icig/2004/2244/0/01410404", "title": "Robust color-based tracking", "doi": null, "abstractUrl": "/proceedings-article/icig/2004/01410404/12OmNwogh7J", "parentPublication": { "id": "proceedings/icig/2004/2244/0", "title": "Proceedings. Third International Conference on Image and Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2008/2174/0/04761153", "title": "SVD based Kalman particle filter for robust visual tracking", "doi": null, "abstractUrl": "/proceedings-article/icpr/2008/04761153/12OmNyRPgKl", "parentPublication": { "id": "proceedings/icpr/2008/2174/0", "title": "ICPR 2008 19th International Conference on Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/isda/2008/3382/2/3382b482", "title": "Robust Object Tracking via Hierarchical Particle Filter", "doi": null, "abstractUrl": "/proceedings-article/isda/2008/3382b482/12OmNyr8YrA", "parentPublication": { "id": "proceedings/isda/2008/3382/2", "title": "2008 Eighth International Conference on Intelligent Systems Design and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icfn/2010/3940/0/3940a020", "title": "A Robust Particle Filter for People Tracking", "doi": null, "abstractUrl": "/proceedings-article/icfn/2010/3940a020/12OmNz61cVP", "parentPublication": { "id": "proceedings/icfn/2010/3940/0", "title": "Future Networks, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2010/4109/0/4109d508", "title": "Receding Horizon Estimation for Hybrid Particle Filters and Application for Robust Visual Tracking", "doi": null, "abstractUrl": 
"/proceedings-article/icpr/2010/4109d508/12OmNz6iOjm", "parentPublication": { "id": "proceedings/icpr/2010/4109/0", "title": "Pattern Recognition, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2009/3992/0/05206661", "title": "Memory-based Particle Filter for face pose tracking robust under complex dynamics", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2009/05206661/12OmNzlD9sq", "parentPublication": { "id": "proceedings/cvpr/2009/3992/0", "title": "2009 IEEE Conference on Computer Vision and Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2007/12/i2157", "title": "Robust Object Tracking Via Online Dynamic Spatial Bias Appearance Models", "doi": null, "abstractUrl": "/journal/tp/2007/12/i2157/13rRUxcbnDD", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNC1GueH", "title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)", "acronym": "icpr", "groupId": "1000545", "volume": "0", "displayVolume": "0", "year": "2012", "__typename": "ProceedingType" }, "article": { "id": "12OmNvonIKs", "doi": "", "title": "A modified KLT multiple objects tracking framework based on global segmentation and adaptive template", "normalizedTitle": "A modified KLT multiple objects tracking framework based on global segmentation and adaptive template", "abstract": "This paper presents a modified Kanade-Lucas-Tomasi (KLT) tracking framework for multiple objects tracking applications. First, the framework includes a global pixel-level probabilistic model and an adaptive RGB template model to modify traditional KLT tracker more robust to track multiple objects and partial occlusions. Meanwhile, a Merge and Split algorithm is introduced in the proposed framework to track complete occlusions. The advantage of our method is demonstrated on a variety of challenging video sequences.", "abstracts": [ { "abstractType": "Regular", "content": "This paper presents a modified Kanade-Lucas-Tomasi (KLT) tracking framework for multiple objects tracking applications. First, the framework includes a global pixel-level probabilistic model and an adaptive RGB template model to modify traditional KLT tracker more robust to track multiple objects and partial occlusions. Meanwhile, a Merge and Split algorithm is introduced in the proposed framework to track complete occlusions. The advantage of our method is demonstrated on a variety of challenging video sequences.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "This paper presents a modified Kanade-Lucas-Tomasi (KLT) tracking framework for multiple objects tracking applications. 
First, the framework includes a global pixel-level probabilistic model and an adaptive RGB template model to modify traditional KLT tracker more robust to track multiple objects and partial occlusions. Meanwhile, a Merge and Split algorithm is introduced in the proposed framework to track complete occlusions. The advantage of our method is demonstrated on a variety of challenging video sequences.", "fno": "06460934", "keywords": [ "Hidden Feature Removal", "Image Colour Analysis", "Image Segmentation", "Image Sequences", "Object Tracking", "Probability", "Modified KLT Multiple Object Tracking Framework", "Global Segmentation", "Adaptive Template", "Kanade Lucas Tomasi Tracking Framework", "Global Pixel Level Probabilistic Model", "Adaptive RGB Template Model", "KLT Tracker", "Partial Occlusions", "Merge And Split Algorithm", "Video Sequences", "Target Tracking", "Adaptation Models", "Probabilistic Logic", "Mathematical Model", "Object Tracking", "Robustness", "Principal Component Analysis" ], "authors": [ { "affiliation": "Beijing Institute of Technology", "fullName": "Kang Xue", "givenName": "Kang", "surname": "Xue", "__typename": "ArticleAuthorType" }, { "affiliation": "Georgia Institute of Technology", "fullName": "Patricio A. 
Vela", "givenName": "Patricio A.", "surname": "Vela", "__typename": "ArticleAuthorType" }, { "affiliation": "Beijing Institute of Technology", "fullName": "Yue Liu", "givenName": "Yue", "surname": "Liu", "__typename": "ArticleAuthorType" }, { "affiliation": "Beijing Institute of Technology", "fullName": "Yongtian Wang", "givenName": "Yongtian", "surname": "Wang", "__typename": "ArticleAuthorType" } ], "idPrefix": "icpr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2012-11-01T00:00:00", "pubType": "proceedings", "pages": "3561-3564", "year": "2012", "issn": "1051-4651", "isbn": "978-1-4673-2216-4", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "06460933", "articleId": "12OmNviZlim", "__typename": "AdjacentArticleType" }, "next": { "fno": "06460935", "articleId": "12OmNy6qfRb", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/reconfig/2011/4551/0/4551a297", "title": "Improving KLT in Embedded Systems by Processing Oversampling Video Sequence in Real-Time", "doi": null, "abstractUrl": "/proceedings-article/reconfig/2011/4551a297/12OmNA1mbeB", "parentPublication": { "id": "proceedings/reconfig/2011/4551/0", "title": "Reconfigurable Computing and FPGAs, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccvw/2009/4442/0/05457465", "title": "Multitarget tracking with a corner-based particle filter", "doi": null, "abstractUrl": "/proceedings-article/iccvw/2009/05457465/12OmNBJNL21", "parentPublication": { "id": "proceedings/iccvw/2009/4442/0", "title": "2009 IEEE 12th International Conference on Computer Vision Workshops, ICCV Workshops", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icisce/2016/2535/0/2535a462", "title": 
"Motion-Based Feature Selection and Adaptive Template Update Strategy for Robust Visual Tracking", "doi": null, "abstractUrl": "/proceedings-article/icisce/2016/2535a462/12OmNqHqSqF", "parentPublication": { "id": "proceedings/icisce/2016/2535/0", "title": "2016 3rd International Conference on Information Science and Control Engineering (ICISCE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sibgrapi/2006/2686/0/26860187", "title": "Unscented KLT: nonlinear feature and uncertainty tracking", "doi": null, "abstractUrl": "/proceedings-article/sibgrapi/2006/26860187/12OmNwc3wso", "parentPublication": { "id": "proceedings/sibgrapi/2006/2686/0", "title": "2006 19th Brazilian Symposium on Computer Graphics and Image Processing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/dicta/2008/3456/0/3456a234", "title": "GPU-Accelerated KLT Tracking with Monte-Carlo-Based Feature Reselection", "doi": null, "abstractUrl": "/proceedings-article/dicta/2008/3456a234/12OmNx5pj2r", "parentPublication": { "id": "proceedings/dicta/2008/3456/0", "title": "2008 Digital Image Computing: Techniques and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccvw/2009/4442/0/05457608", "title": "Realtime affine-photometric KLT feature tracker on GPU in CUDA framework", "doi": null, "abstractUrl": "/proceedings-article/iccvw/2009/05457608/12OmNyKJiBe", "parentPublication": { "id": "proceedings/iccvw/2009/4442/0", "title": "2009 IEEE 12th International Conference on Computer Vision Workshops, ICCV Workshops", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2014/5118/0/5118b298", "title": "A Probabilistic Framework for Multitarget Tracking with Mutual Occlusions", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2014/5118b298/12OmNzEVRUm", "parentPublication": { "id": 
"proceedings/cvpr/2014/5118/0", "title": "2014 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/fg/2008/2153/0/04813464", "title": "Tracking facial features under occlusions and recognizing facial expressions in sign language", "doi": null, "abstractUrl": "/proceedings-article/fg/2008/04813464/12OmNzUPpCO", "parentPublication": { "id": "proceedings/fg/2008/2153/0", "title": "2008 8th IEEE International Conference on Automatic Face & Gesture Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ictai/2008/3440/1/3440a241", "title": "The Efficient Features for Tracking", "doi": null, "abstractUrl": "/proceedings-article/ictai/2008/3440a241/12OmNzXWZIF", "parentPublication": { "id": "proceedings/ictai/2008/3440/1", "title": "2008 20th IEEE International Conference on Tools with Artificial Intelligence (ICTAI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvidl/2020/9481/0/948100a044", "title": "Image stabilization algorithm based on KLT motion tracking", "doi": null, "abstractUrl": "/proceedings-article/cvidl/2020/948100a044/1pbe5VlXkti", "parentPublication": { "id": "proceedings/cvidl/2020/9481/0", "title": "2020 International Conference on Computer Vision, Image and Deep Learning (CVIDL)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNrNh0vs", "title": "2013 23rd International Conference on Artificial Reality and Telexistence (ICAT)", "acronym": "icat", "groupId": "1001485", "volume": "0", "displayVolume": "0", "year": "2013", "__typename": "ProceedingType" }, "article": { "id": "12OmNx2zjvN", "doi": "10.1109/ICAT.2013.6728915", "title": "Wide area optical user tracking in unconstrained indoor environments", "normalizedTitle": "Wide area optical user tracking in unconstrained indoor environments", "abstract": "In this paper, we present a robust infrared optical 3D position tracking system for wide area indoor environments up to 30m. The system consists of two shutter-synchronized cameras that track multiple targets, which are equipped with infrared light emitting diodes. Our system is able to learn targets as well as to perform extrinsic calibration and 3D position tracking in unconstrained environments, which exhibit occlusions and static as well as locomotive interfering infrared lights. Tracking targets can directly be used for calibration which minimizes the amount of necessary hardware. With the presented approach, limitations of state-of-the-art tracking systems in terms of volume coverage, sensitivity during training and calibration, setup complexity and hardware costs can be minimized. Preliminary results indicate interactive tracking with minimal jitter <; 0.0675mm and 3D point accuracy of <; 9.22mm throughout the entire tracking volume up to 30m.", "abstracts": [ { "abstractType": "Regular", "content": "In this paper, we present a robust infrared optical 3D position tracking system for wide area indoor environments up to 30m. The system consists of two shutter-synchronized cameras that track multiple targets, which are equipped with infrared light emitting diodes. 
Our system is able to learn targets as well as to perform extrinsic calibration and 3D position tracking in unconstrained environments, which exhibit occlusions and static as well as locomotive interfering infrared lights. Tracking targets can directly be used for calibration which minimizes the amount of necessary hardware. With the presented approach, limitations of state-of-the-art tracking systems in terms of volume coverage, sensitivity during training and calibration, setup complexity and hardware costs can be minimized. Preliminary results indicate interactive tracking with minimal jitter <; 0.0675mm and 3D point accuracy of <; 9.22mm throughout the entire tracking volume up to 30m.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In this paper, we present a robust infrared optical 3D position tracking system for wide area indoor environments up to 30m. The system consists of two shutter-synchronized cameras that track multiple targets, which are equipped with infrared light emitting diodes. Our system is able to learn targets as well as to perform extrinsic calibration and 3D position tracking in unconstrained environments, which exhibit occlusions and static as well as locomotive interfering infrared lights. Tracking targets can directly be used for calibration which minimizes the amount of necessary hardware. With the presented approach, limitations of state-of-the-art tracking systems in terms of volume coverage, sensitivity during training and calibration, setup complexity and hardware costs can be minimized. 
Preliminary results indicate interactive tracking with minimal jitter <; 0.0675mm and 3D point accuracy of <; 9.22mm throughout the entire tracking volume up to 30m.", "fno": "06728915", "keywords": [ "Target Tracking", "Cameras", "Calibration", "Three Dimensional Displays", "Robustness", "Accuracy", "Optical Imaging", "I 4 9 Image Processing And Computer Vision Applications", "I 3 7 Computer Graphics Three Dimensional Graphics And Realism Virtual Reality", "I 4 8 Image Processing And Computer Vision Scene Analysis Tracking", "Stereo" ], "authors": [ { "affiliation": "Interactive Media Syst. Group, Vienna Univ. of Technol., Vienna, Austria", "fullName": "Annette Mossel", "givenName": "Annette", "surname": "Mossel", "__typename": "ArticleAuthorType" }, { "affiliation": "Interactive Media Syst. Group, Vienna Univ. of Technol., Vienna, Austria", "fullName": "Hannes Kaufmann", "givenName": "Hannes", "surname": "Kaufmann", "__typename": "ArticleAuthorType" } ], "idPrefix": "icat", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2013-12-01T00:00:00", "pubType": "proceedings", "pages": "108-115", "year": "2013", "issn": null, "isbn": "978-4-904490-11-2", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "06728914", "articleId": "12OmNxwENCV", "__typename": "AdjacentArticleType" }, "next": { "fno": "06728916", "articleId": "12OmNvjyxGc", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/cvpr/2014/5118/0/5118b186", "title": "Persistent Tracking for Wide Area Aerial Surveillance", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2014/5118b186/12OmNAoDilJ", "parentPublication": { "id": "proceedings/cvpr/2014/5118/0", "title": "2014 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "proceedings/wacv/2017/4822/0/07926676", "title": "Exploring Local Context for Multi-target Tracking in Wide Area Aerial Surveillance", "doi": null, "abstractUrl": "/proceedings-article/wacv/2017/07926676/12OmNC0y5Ek", "parentPublication": { "id": "proceedings/wacv/2017/4822/0", "title": "2017 IEEE Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dui/2016/0842/0/07460047", "title": "Evaluation of user-centric optical see-through head-mounted display calibration using a leap motion controller", "doi": null, "abstractUrl": "/proceedings-article/3dui/2016/07460047/12OmNrJRPdz", "parentPublication": { "id": "proceedings/3dui/2016/0842/0", "title": "2016 IEEE Symposium on 3D User Interfaces (3DUI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2010/9343/0/05643601", "title": "Large area indoor tracking for industrial augmented reality", "doi": null, "abstractUrl": "/proceedings-article/ismar/2010/05643601/12OmNxvwoTv", "parentPublication": { "id": "proceedings/ismar/2010/9343/0", "title": "2010 IEEE International Symposium on Mixed and Augmented Reality", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/gcis/2010/4304/2/4304b370", "title": "Single Active Infrared Target Image Tracking System in the Indoor Environment", "doi": null, "abstractUrl": "/proceedings-article/gcis/2010/4304b370/12OmNyqRn6c", "parentPublication": { "id": "proceedings/gcis/2010/4304/2", "title": "2010 Second WRI Global Congress on Intelligent Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icftic/2022/2195/0/10075167", "title": "Infrared Patch-Image Model For Small Target Detection and Tracking in Space Activities", "doi": null, "abstractUrl": 
"/proceedings-article/icftic/2022/10075167/1LRl8NCAXPq", "parentPublication": { "id": "proceedings/icftic/2022/2195/0", "title": "2022 4th International Conference on Frontiers Technology of Information and Computer (ICFTIC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icsgea/2019/4463/0/446300a452", "title": "Issues on Infrared Dim Small Target Detection and Tracking", "doi": null, "abstractUrl": "/proceedings-article/icsgea/2019/446300a452/1eXatvuTl8Q", "parentPublication": { "id": "proceedings/icsgea/2019/4463/0", "title": "2019 International Conference on Smart Grid and Electrical Automation (ICSGEA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmtma/2020/7081/0/708100a610", "title": "Research on Infrared Small Target Tracking Method", "doi": null, "abstractUrl": "/proceedings-article/icmtma/2020/708100a610/1iERGxPRDoI", "parentPublication": { "id": "proceedings/icmtma/2020/7081/0", "title": "2020 12th International Conference on Measuring Technology and Mechatronics Automation (ICMTMA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2020/9360/0/09150868", "title": "A Real-time Robust Approach for Tracking UAVs in Infrared Videos", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2020/09150868/1lPH00WOTra", "parentPublication": { "id": "proceedings/cvprw/2020/9360/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icise/2020/2261/0/226100a277", "title": "Infrared Target Tracking Algorithm Based on Bernoulli Filter and Support Vector Machine", "doi": null, "abstractUrl": "/proceedings-article/icise/2020/226100a277/1tnYk6IFHYk", "parentPublication": { "id": "proceedings/icise/2020/2261/0", "title": "2020 International Conference on 
Information Science and Education (ICISE-IE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNAYoKmw", "title": "2013 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "acronym": "ismar", "groupId": "1000465", "volume": "0", "displayVolume": "0", "year": "2013", "__typename": "ProceedingType" }, "article": { "id": "12OmNxdVgZ2", "doi": "10.1109/ISMAR.2013.6671768", "title": "Simultaneous 3D tracking and reconstruction on a mobile phone", "normalizedTitle": "Simultaneous 3D tracking and reconstruction on a mobile phone", "abstract": "A novel framework for joint monocular 3D tracking and reconstruction is described that can handle untextured objects, occlusions, motion blur, changing background and imperfect lighting, and that can run at frame rate on a mobile phone. The method runs in parallel (i) level set based pose estimation and (ii) continuous max flow based shape optimisation. By avoiding a global computation of distance transforms typically used in level set methods, tracking rates here exceed 100Hz and 20Hz on a desktop and mobile phone, respectively, without needing a GPU. Tracking ambiguities are reduced by augmenting orientation information from the phone's inertial sensor. Reconstruction involves probabilistic integration of the 2D image statistics from keyframes into a 3D volume. Per-voxel posteriors are used instead of the standard likelihoods, giving increased accuracy and robustness. Shape coherency and compactness is then imposed using a total variational approach solved using globally optimal continuous max flow.", "abstracts": [ { "abstractType": "Regular", "content": "A novel framework for joint monocular 3D tracking and reconstruction is described that can handle untextured objects, occlusions, motion blur, changing background and imperfect lighting, and that can run at frame rate on a mobile phone. The method runs in parallel (i) level set based pose estimation and (ii) continuous max flow based shape optimisation. 
By avoiding a global computation of distance transforms typically used in level set methods, tracking rates here exceed 100Hz and 20Hz on a desktop and mobile phone, respectively, without needing a GPU. Tracking ambiguities are reduced by augmenting orientation information from the phone's inertial sensor. Reconstruction involves probabilistic integration of the 2D image statistics from keyframes into a 3D volume. Per-voxel posteriors are used instead of the standard likelihoods, giving increased accuracy and robustness. Shape coherency and compactness is then imposed using a total variational approach solved using globally optimal continuous max flow.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "A novel framework for joint monocular 3D tracking and reconstruction is described that can handle untextured objects, occlusions, motion blur, changing background and imperfect lighting, and that can run at frame rate on a mobile phone. The method runs in parallel (i) level set based pose estimation and (ii) continuous max flow based shape optimisation. By avoiding a global computation of distance transforms typically used in level set methods, tracking rates here exceed 100Hz and 20Hz on a desktop and mobile phone, respectively, without needing a GPU. Tracking ambiguities are reduced by augmenting orientation information from the phone's inertial sensor. Reconstruction involves probabilistic integration of the 2D image statistics from keyframes into a 3D volume. Per-voxel posteriors are used instead of the standard likelihoods, giving increased accuracy and robustness. Shape coherency and compactness is then imposed using a total variational approach solved using globally optimal continuous max flow.", "fno": "06671768", "keywords": [ "Three Dimensional Displays", "Shape", "Image Reconstruction", "Mobile Handsets", "Optimization", "Cameras", "Rendering Computer Graphics" ], "authors": [ { "affiliation": "Univ. 
of Oxford, Oxford, UK", "fullName": "Victor Adrian Prisacariu", "givenName": "Victor Adrian", "surname": "Prisacariu", "__typename": "ArticleAuthorType" }, { "affiliation": "Univ. of Oxford, Oxford, UK", "fullName": "Olaf Kahler", "givenName": "Olaf", "surname": "Kahler", "__typename": "ArticleAuthorType" }, { "affiliation": "Univ. of Oxford, Oxford, UK", "fullName": "David W. Murray", "givenName": "David W.", "surname": "Murray", "__typename": "ArticleAuthorType" }, { "affiliation": "Univ. of Adelaide, Adelaide, SA, Australia", "fullName": "Ian D. Reid", "givenName": "Ian D.", "surname": "Reid", "__typename": "ArticleAuthorType" } ], "idPrefix": "ismar", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2013-10-01T00:00:00", "pubType": "proceedings", "pages": "89-98", "year": "2013", "issn": null, "isbn": "978-1-4799-2869-9", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "06671767", "articleId": "12OmNCdk2Ok", "__typename": "AdjacentArticleType" }, "next": { "fno": "06671769", "articleId": "12OmNxymo5C", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/ecbs/2010/4005/0/4005a403", "title": "Visual Tracking Based on 3D Probabilistic Reconstruction", "doi": null, "abstractUrl": "/proceedings-article/ecbs/2010/4005a403/12OmNCbCrT7", "parentPublication": { "id": "proceedings/ecbs/2010/4005/0", "title": "Engineering of Computer-Based Systems, IEEE International Conference on the", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2014/4761/0/06890240", "title": "Efficient pose tracking on mobile phones with 3D points grouping", "doi": null, "abstractUrl": "/proceedings-article/icme/2014/06890240/12OmNs59JGX", "parentPublication": { "id": "proceedings/icme/2014/4761/0", "title": "2014 IEEE International 
Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccvw/2009/4442/0/05457495", "title": "Mobile phone-based 3D modeling framework for instant interaction", "doi": null, "abstractUrl": "/proceedings-article/iccvw/2009/05457495/12OmNvlPkGo", "parentPublication": { "id": "proceedings/iccvw/2009/4442/0", "title": "2009 IEEE 12th International Conference on Computer Vision Workshops, ICCV Workshops", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2012/4711/0/4711a711", "title": "On-line Object Reconstruction and Tracking for 3D Interaction", "doi": null, "abstractUrl": "/proceedings-article/icme/2012/4711a711/12OmNyGbIiN", "parentPublication": { "id": "proceedings/icme/2012/4711/0", "title": "2012 IEEE International Conference on Multimedia and Expo", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2016/0641/0/07477629", "title": "Mobile phone and cloud — A dream team for 3D reconstruction", "doi": null, "abstractUrl": "/proceedings-article/wacv/2016/07477629/12OmNyQYtp6", "parentPublication": { "id": "proceedings/wacv/2016/0641/0", "title": "2016 IEEE Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2015/11/07165662", "title": "MobileFusion: Real-Time Volumetric Surface Reconstruction and Dense Tracking on Mobile Phones", "doi": null, "abstractUrl": "/journal/tg/2015/11/07165662/13rRUwInv4q", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2015/05/06892950", "title": "Real-Time 3D Tracking and Reconstruction on Mobile Phones", "doi": null, "abstractUrl": "/journal/tg/2015/05/06892950/13rRUwInvyC", 
"parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2020/7168/0/716800b259", "title": "Through the Looking Glass: Neural 3D Reconstruction of Transparent Shapes", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2020/716800b259/1m3nkX25xyE", "parentPublication": { "id": "proceedings/cvpr/2020/7168/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/12/09201064", "title": "Mobile3DRecon: Real-time Monocular 3D Reconstruction on a Mobile Phone", "doi": null, "abstractUrl": "/journal/tg/2020/12/09201064/1niUpdweh2g", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/08/09314030", "title": "3D Curve Creation on and Around Physical Objects With Mobile AR", "doi": null, "abstractUrl": "/journal/tg/2022/08/09314030/1q8Ufya8xj2", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNxWuirq", "title": "2012 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "acronym": "ismar", "groupId": "1000465", "volume": "0", "displayVolume": "0", "year": "2012", "__typename": "ProceedingType" }, "article": { "id": "12OmNxdVh0E", "doi": "10.1109/ISMAR.2012.6402533", "title": "Kinectrack: Agile 6-DoF tracking using a projected dot pattern", "normalizedTitle": "Kinectrack: Agile 6-DoF tracking using a projected dot pattern", "abstract": "We present Kinectrack, a new six degree-of-freedom (6-DoF) tracker which allows real-time and low-cost pose estimation using only commodity hardware. We decouple the dot pattern emitter and IR camera of the Kinect. Keeping the camera fixed and moving the IR emitter in the environment, we recover the 6-DoF pose of the emitter by matching the observed dot pattern in the field-of-view of the camera to a pre-captured reference image. We propose a novel matching technique to obtain dot pattern correspondences efficiently in wide- and adaptive-baseline scenarios. We also propose an auto-calibration method to obtain the camera intrinsics and dot pattern reference image. The performance of Kinectrack is evaluated and the rotational and translational accuracy of the system is measured relative to ground truth for both planar and multi-planar scene geometry. Our system can simultaneously recover the 6-DoF pose of the device and also recover piecewise planar 3D scene structure, and can be used as a low-cost method for tracking a device without any on-board computation, with small size and only simple electronics.", "abstracts": [ { "abstractType": "Regular", "content": "We present Kinectrack, a new six degree-of-freedom (6-DoF) tracker which allows real-time and low-cost pose estimation using only commodity hardware. We decouple the dot pattern emitter and IR camera of the Kinect. 
Keeping the camera fixed and moving the IR emitter in the environment, we recover the 6-DoF pose of the emitter by matching the observed dot pattern in the field-of-view of the camera to a pre-captured reference image. We propose a novel matching technique to obtain dot pattern correspondences efficiently in wide- and adaptive-baseline scenarios. We also propose an auto-calibration method to obtain the camera intrinsics and dot pattern reference image. The performance of Kinectrack is evaluated and the rotational and translational accuracy of the system is measured relative to ground truth for both planar and multi-planar scene geometry. Our system can simultaneously recover the 6-DoF pose of the device and also recover piecewise planar 3D scene structure, and can be used as a low-cost method for tracking a device without any on-board computation, with small size and only simple electronics.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We present Kinectrack, a new six degree-of-freedom (6-DoF) tracker which allows real-time and low-cost pose estimation using only commodity hardware. We decouple the dot pattern emitter and IR camera of the Kinect. Keeping the camera fixed and moving the IR emitter in the environment, we recover the 6-DoF pose of the emitter by matching the observed dot pattern in the field-of-view of the camera to a pre-captured reference image. We propose a novel matching technique to obtain dot pattern correspondences efficiently in wide- and adaptive-baseline scenarios. We also propose an auto-calibration method to obtain the camera intrinsics and dot pattern reference image. The performance of Kinectrack is evaluated and the rotational and translational accuracy of the system is measured relative to ground truth for both planar and multi-planar scene geometry. 
Our system can simultaneously recover the 6-DoF pose of the device and also recover piecewise planar 3D scene structure, and can be used as a low-cost method for tracking a device without any on-board computation, with small size and only simple electronics.", "fno": "06402533", "keywords": [ "Cameras", "Calibration", "Pattern Matching", "Accuracy", "Runtime", "Table Lookup", "Robustness" ], "authors": [ { "affiliation": "University of Cambridge, Microsoft Research, UK", "fullName": "Paul McIlroy", "givenName": "Paul", "surname": "McIlroy", "__typename": "ArticleAuthorType" }, { "affiliation": "Microsoft Research, UK", "fullName": "Shahram Izadi", "givenName": "Shahram", "surname": "Izadi", "__typename": "ArticleAuthorType" }, { "affiliation": "Microsoft Research, UK", "fullName": "Andrew Fitzgibbon", "givenName": "Andrew", "surname": "Fitzgibbon", "__typename": "ArticleAuthorType" } ], "idPrefix": "ismar", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2012-11-01T00:00:00", "pubType": "proceedings", "pages": "23-29", "year": "2012", "issn": null, "isbn": "978-1-4673-4660-3", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "06402532", "articleId": "12OmNx3ZjnA", "__typename": "AdjacentArticleType" }, "next": { "fno": "06402535", "articleId": "12OmNz5s0RS", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/bigmm/2015/8688/0/8688a156", "title": "Fast Estimation of Relative Poses for 6-DOF Image Localization", "doi": null, "abstractUrl": "/proceedings-article/bigmm/2015/8688a156/12OmNCcbEh8", "parentPublication": { "id": "proceedings/bigmm/2015/8688/0", "title": "2015 IEEE International Conference on Multimedia Big Data (BigMM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dui/2016/0842/0/07460065", "title": 
"Smartwatch-assisted robust 6-DOF hand tracker for object manipulation in HMD-based augmented reality", "doi": null, "abstractUrl": "/proceedings-article/3dui/2016/07460065/12OmNzlUKES", "parentPublication": { "id": "proceedings/3dui/2016/0842/0", "title": "2016 IEEE Symposium on 3D User Interfaces (3DUI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/nanoarch/2012/1671/0/06464141", "title": "A conventional design for CLB implementation of a FPGA in Quantum-dot Cellular Automata (QCA)", "doi": null, "abstractUrl": "/proceedings-article/nanoarch/2012/06464141/12OmNzmLxRh", "parentPublication": { "id": "proceedings/nanoarch/2012/1671/0", "title": "2012 IEEE/ACM International Symposium on Nanoscale Architectures (NANOARCH 2012)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2014/06/06756711", "title": "Kinectrack: 3D Pose Estimation Using a Projected Dense Dot Pattern", "doi": null, "abstractUrl": "/journal/tg/2014/06/06756711/13rRUxNEqPU", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/fpl/2018/8517/0/851700a327", "title": "An Efficient Exact Fused Dot Product Processor in FPGA", "doi": null, "abstractUrl": "/proceedings-article/fpl/2018/851700a327/17D45WZZ7Ga", "parentPublication": { "id": "proceedings/fpl/2018/8517/0", "title": "2018 28th International Conference on Field Programmable Logic and Applications (FPL)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/09779957", "title": "Casual 6-DoF: free-viewpoint panorama using a handheld 360&#x00B0; camera", "doi": null, "abstractUrl": "/journal/tg/5555/01/09779957/1DBTD2uB4di", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", 
"__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08797791", "title": "A 6-DOF Telexistence Drone Controlled by a Head Mounted Display", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08797791/1cJ12HLy2ac", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccvw/2019/5023/0/502300d788", "title": "How to Improve CNN-Based 6-DoF Camera Pose Estimation", "doi": null, "abstractUrl": "/proceedings-article/iccvw/2019/502300d788/1i5mrqS8wZq", "parentPublication": { "id": "proceedings/iccvw/2019/5023/0", "title": "2019 IEEE/CVF International Conference on Computer Vision Workshop (ICCVW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNqzcvOG", "title": "2017 14th Conference on Computer and Robot Vision (CRV)", "acronym": "crv", "groupId": "1001794", "volume": "0", "displayVolume": "0", "year": "2017", "__typename": "ProceedingType" }, "article": { "id": "12OmNz61cYb", "doi": "10.1109/CRV.2017.41", "title": "4-DoF Tracking for Robot Fine Manipulation Tasks", "normalizedTitle": "4-DoF Tracking for Robot Fine Manipulation Tasks", "abstract": "This paper presents two visual trackers from the different paradigms of learning and registration based tracking and evaluates their application in image based visual servoing. They can track object motion with four degrees of freedom (DoF) which, as we will show here, is sufficient for many fine manipulation tasks. One of these trackers is a newly developed learning based tracker that relies on learning discriminative correlation filters while the other is a refinement of a recent 8 DoF RANSAC based tracker adapted with a new appearance model for tracking 4 DoF motion. Both trackers are shown to provide superior performance to several state of the art trackers on an existing dataset for manipulation tasks. Further, a new dataset with challenging sequences for fine manipulation tasks captured from robot mounted eye-in-hand (EIH) cameras is also presented. These sequences have a variety of challenges encountered during real tasks including jittery camera movement, motion blur, drastic scale changes and partial occlusions. Quantitative and qualitative results on these sequences are used to show that these two trackers are robust to failures while providing high precision that makes them suitable for such fine manipulation tasks.", "abstracts": [ { "abstractType": "Regular", "content": "This paper presents two visual trackers from the different paradigms of learning and registration based tracking and evaluates their application in image based visual servoing. 
They can track object motion with four degrees of freedom (DoF) which, as we will show here, is sufficient for many fine manipulation tasks. One of these trackers is a newly developed learning based tracker that relies on learning discriminative correlation filters while the other is a refinement of a recent 8 DoF RANSAC based tracker adapted with a new appearance model for tracking 4 DoF motion. Both trackers are shown to provide superior performance to several state of the art trackers on an existing dataset for manipulation tasks. Further, a new dataset with challenging sequences for fine manipulation tasks captured from robot mounted eye-in-hand (EIH) cameras is also presented. These sequences have a variety of challenges encountered during real tasks including jittery camera movement, motion blur, drastic scale changes and partial occlusions. Quantitative and qualitative results on these sequences are used to show that these two trackers are robust to failures while providing high precision that makes them suitable for such fine manipulation tasks.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "This paper presents two visual trackers from the different paradigms of learning and registration based tracking and evaluates their application in image based visual servoing. They can track object motion with four degrees of freedom (DoF) which, as we will show here, is sufficient for many fine manipulation tasks. One of these trackers is a newly developed learning based tracker that relies on learning discriminative correlation filters while the other is a refinement of a recent 8 DoF RANSAC based tracker adapted with a new appearance model for tracking 4 DoF motion. Both trackers are shown to provide superior performance to several state of the art trackers on an existing dataset for manipulation tasks. 
Further, a new dataset with challenging sequences for fine manipulation tasks captured from robot mounted eye-in-hand (EIH) cameras is also presented. These sequences have a variety of challenges encountered during real tasks including jittery camera movement, motion blur, drastic scale changes and partial occlusions. Quantitative and qualitative results on these sequences are used to show that these two trackers are robust to failures while providing high precision that makes them suitable for such fine manipulation tasks.", "fno": "2818a329", "keywords": [ "Cameras", "Image Motion Analysis", "Image Registration", "Image Sequences", "Learning Artificial Intelligence", "Manipulators", "Object Detection", "Object Tracking", "Robot Vision", "Visual Servoing", "Robot Fine Manipulation Tasks", "Visual Trackers", "Learning Registration Based Tracking", "Image Based Visual Servoing", "Newly Developed Learning Based Tracker", "8 Do F RANSAC Based Tracker", "Tracking", "Task Analysis", "Correlation", "Feature Extraction", "Robots", "Cameras", "Two Dimensional Displays", "Visual Tracking", "Visual Servoing", "Robot Manipulation" ], "authors": [ { "affiliation": null, "fullName": "Mennatullah Siam", "givenName": "Mennatullah", "surname": "Siam", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Abhineet Singh", "givenName": "Abhineet", "surname": "Singh", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Camilo Perez", "givenName": "Camilo", "surname": "Perez", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Martin Jagersand", "givenName": "Martin", "surname": "Jagersand", "__typename": "ArticleAuthorType" } ], "idPrefix": "crv", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2017-05-01T00:00:00", "pubType": "proceedings", "pages": "329-336", "year": "2017", "issn": null, "isbn": "978-1-5386-2818-8", "notes": null, "notesType": null, "__typename": 
"ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "2818a321", "articleId": "12OmNzhnaa2", "__typename": "AdjacentArticleType" }, "next": { "fno": "2818a337", "articleId": "12OmNvSKNVk", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/isatp/2003/7770/0/01217208", "title": "Passing manipulation by 1 degree-of-freedom manipulator - catching manipulation of tossed object without impact", "doi": null, "abstractUrl": "/proceedings-article/isatp/2003/01217208/12OmNCbU2PO", "parentPublication": { "id": "proceedings/isatp/2003/7770/0", "title": "ISATP'03: 5th IEEE International Symposium on Assembly and Task Planning", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/crv/2015/1986/0/1986a070", "title": "RKLT: 8 DOF Real-Time Robust Video Tracking Combing Coarse Ransac Features and Accurate Fast Template Registration", "doi": null, "abstractUrl": "/proceedings-article/crv/2015/1986a070/12OmNCcKQiS", "parentPublication": { "id": "proceedings/crv/2015/1986/0", "title": "2015 12th Conference on Computer and Robot Vision (CRV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dui/2013/6097/0/06550216", "title": "Poster: A pilot study on stepwise 6-DoF manipulation of virtual 3D objects using smartphone in wearable augmented reality environment", "doi": null, "abstractUrl": "/proceedings-article/3dui/2013/06550216/12OmNvw2TdB", "parentPublication": { "id": "proceedings/3dui/2013/6097/0", "title": "2013 IEEE Symposium on 3D User Interfaces (3DUI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icat/2006/2754/0/27540662", "title": "Techniques for Selecting and Manipulating Object in Virtual Environment Based on 3-DOF Trackers and Data Glove", "doi": null, "abstractUrl": "/proceedings-article/icat/2006/27540662/12OmNzd7brA", 
"parentPublication": { "id": "proceedings/icat/2006/2754/0", "title": "16th International Conference on Artificial Reality and Telexistence--Workshops (ICAT'06)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dui/2016/0842/0/07460065", "title": "Smartwatch-assisted robust 6-DOF hand tracker for object manipulation in HMD-based augmented reality", "doi": null, "abstractUrl": "/proceedings-article/3dui/2016/07460065/12OmNzlUKES", "parentPublication": { "id": "proceedings/3dui/2016/0842/0", "title": "2016 IEEE Symposium on 3D User Interfaces (3DUI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2022/8402/0/840200a538", "title": "Effects of Clutching Mechanism on Remote Object Manipulation Tasks", "doi": null, "abstractUrl": "/proceedings-article/vrw/2022/840200a538/1CJf9GYjHMc", "parentPublication": { "id": "proceedings/vrw/2022/8402/0", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2020/1331/0/09102852", "title": "Fine-Grained Expression Manipulation Via Structured Latent Space", "doi": null, "abstractUrl": "/proceedings-article/icme/2020/09102852/1kwr2Gvfr9K", "parentPublication": { "id": "proceedings/icme/2020/1331/0", "title": "2020 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/02/09144483", "title": "Evaluating the Effects of Non-Isomorphic Rotation on 3D Manipulation Tasks in Mixed Reality Simulation", "doi": null, "abstractUrl": "/journal/tg/2022/02/09144483/1lClltCZfOg", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/aivr/2020/7463/0/746300a358", "title": "Workload, Presence and Task Performance of Virtual Object Manipulation on WebVR", "doi": null, "abstractUrl": "/proceedings-article/aivr/2020/746300a358/1qpzAYILRRe", "parentPublication": { "id": "proceedings/aivr/2020/7463/0", "title": "2020 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/csci/2020/7624/0/762400a504", "title": "Transfer of Hierarchical Reinforcement Learning Structures for Robotic Manipulation Tasks", "doi": null, "abstractUrl": "/proceedings-article/csci/2020/762400a504/1uGYTLXrXqg", "parentPublication": { "id": "proceedings/csci/2020/7624/0", "title": "2020 International Conference on Computational Science and Computational Intelligence (CSCI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNyoiYVp", "title": "2016 Fourth International Conference on 3D Vision (3DV)", "acronym": "3dv", "groupId": "1800494", "volume": "0", "displayVolume": "0", "year": "2016", "__typename": "ProceedingType" }, "article": { "id": "12OmNzC5TfM", "doi": "10.1109/3DV.2016.54", "title": "Robust Real-Time 3D Face Tracking from RGBD Videos under Extreme Pose, Depth, and Expression Variation", "normalizedTitle": "Robust Real-Time 3D Face Tracking from RGBD Videos under Extreme Pose, Depth, and Expression Variation", "abstract": "We introduce a novel end-to-end real-time pose-robust 3D face tracking framework from RGBD videos, which is capable of tracking head pose and facial actions simultaneously in unconstrained environment without intervention or pre-calibration from a user. In particular, we emphasize tracking the head pose from profile to profile and improving tracking performance in challenging instances, where the tracked subject is at a considerably large distance from the camera and the quality of data deteriorates severely. To achieve these goals, the tracker is guided by an efficient multi-view 3D shape regressor, trained upon generic RGB datasets, which is able to predict model parameters despite large head rotations or tracking range. Specifically, the shape regressor is made aware of the head pose by inferring the possibility of particular facial landmarks being visible through a joint regression-classification local random forest framework, and piecewise linear regression models effectively map visibility features into shape parameters. In addition, the regressor is combined with a joint 2D+3D optimization that sparsely exploits depth information to further refine shape parameters to maintain tracking accuracy over time. 
The result is a robust on-line RGBD 3D face tracker that can model extreme head poses and facial expressions accurately in challenging scenes, which are demonstrated in our extensive experiments.", "abstracts": [ { "abstractType": "Regular", "content": "We introduce a novel end-to-end real-time pose-robust 3D face tracking framework from RGBD videos, which is capable of tracking head pose and facial actions simultaneously in unconstrained environment without intervention or pre-calibration from a user. In particular, we emphasize tracking the head pose from profile to profile and improving tracking performance in challenging instances, where the tracked subject is at a considerably large distance from the camera and the quality of data deteriorates severely. To achieve these goals, the tracker is guided by an efficient multi-view 3D shape regressor, trained upon generic RGB datasets, which is able to predict model parameters despite large head rotations or tracking range. Specifically, the shape regressor is made aware of the head pose by inferring the possibility of particular facial landmarks being visible through a joint regression-classification local random forest framework, and piecewise linear regression models effectively map visibility features into shape parameters. In addition, the regressor is combined with a joint 2D+3D optimization that sparsely exploits depth information to further refine shape parameters to maintain tracking accuracy over time. 
The result is a robust on-line RGBD 3D face tracker that can model extreme head poses and facial expressions accurately in challenging scenes, which are demonstrated in our extensive experiments.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We introduce a novel end-to-end real-time pose-robust 3D face tracking framework from RGBD videos, which is capable of tracking head pose and facial actions simultaneously in unconstrained environment without intervention or pre-calibration from a user. In particular, we emphasize tracking the head pose from profile to profile and improving tracking performance in challenging instances, where the tracked subject is at a considerably large distance from the camera and the quality of data deteriorates severely. To achieve these goals, the tracker is guided by an efficient multi-view 3D shape regressor, trained upon generic RGB datasets, which is able to predict model parameters despite large head rotations or tracking range. Specifically, the shape regressor is made aware of the head pose by inferring the possibility of particular facial landmarks being visible through a joint regression-classification local random forest framework, and piecewise linear regression models effectively map visibility features into shape parameters. In addition, the regressor is combined with a joint 2D+3D optimization that sparsely exploits depth information to further refine shape parameters to maintain tracking accuracy over time. The result is a robust on-line RGBD 3D face tracker that can model extreme head poses and facial expressions accurately in challenging scenes, which are demonstrated in our extensive experiments.", "fno": "5407a441", "keywords": [ "Three Dimensional Displays", "Face", "Shape", "Solid Modeling", "Robustness", "Tracking", "Two Dimensional Displays", "Pose Robust", "3 D Face Tracking", "Blendshape", "Multi View" ], "authors": [ { "affiliation": null, "fullName": "Hai X. 
Pham", "givenName": "Hai X.", "surname": "Pham", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Vladimir Pavlovic", "givenName": "Vladimir", "surname": "Pavlovic", "__typename": "ArticleAuthorType" } ], "idPrefix": "3dv", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2016-10-01T00:00:00", "pubType": "proceedings", "pages": "441-449", "year": "2016", "issn": null, "isbn": "978-1-5090-5407-7", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "5407a435", "articleId": "12OmNxR5UUp", "__typename": "AdjacentArticleType" }, "next": { "fno": "5407a450", "articleId": "12OmNyTwRff", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icpr/2014/5209/0/5209c525", "title": "A Pose-Adaptive Constrained Local Model for Accurate Head Pose Tracking", "doi": null, "abstractUrl": "/proceedings-article/icpr/2014/5209c525/12OmNA0dMIA", "parentPublication": { "id": "proceedings/icpr/2014/5209/0", "title": "2014 22nd International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2018/4886/0/488601a075", "title": "3D Head Pose Estimation Enhanced Through SURF-Based Key-Frames", "doi": null, "abstractUrl": "/proceedings-article/wacv/2018/488601a075/12OmNBOll5V", "parentPublication": { "id": "proceedings/wacv/2018/4886/0", "title": "2018 IEEE Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2016/8851/0/07780823", "title": "Large-Pose Face Alignment via CNN-Based Dense 3D Model Fitting", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2016/07780823/12OmNqBbHze", "parentPublication": { "id": "proceedings/cvpr/2016/8851/0", "title": "2016 IEEE 
Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2017/0733/0/0733c060", "title": "3D-Assisted Coarse-to-Fine Extreme-Pose Facial Landmark Detection", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2017/0733c060/12OmNvStcth", "parentPublication": { "id": "proceedings/cvprw/2017/0733/0", "title": "2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccvw/2017/1034/0/1034b545", "title": "Improved Strategies for HPE Employing Learning-by-Synthesis Approaches", "doi": null, "abstractUrl": "/proceedings-article/iccvw/2017/1034b545/12OmNxX3uuz", "parentPublication": { "id": "proceedings/iccvw/2017/1034/0", "title": "2017 IEEE International Conference on Computer Vision Workshop (ICCVW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2018/4886/0/488601c028", "title": "Fusion of Keypoint Tracking and Facial Landmark Detection for Real-Time Head Pose Estimation", "doi": null, "abstractUrl": "/proceedings-article/wacv/2018/488601c028/12OmNyQGShc", "parentPublication": { "id": "proceedings/wacv/2018/4886/0", "title": "2018 IEEE Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2015/8332/0/8332a500", "title": "A Combined Generalized and Subject-Specific 3D Head Pose Estimation", "doi": null, "abstractUrl": "/proceedings-article/3dv/2015/8332a500/12OmNyk2ZXr", "parentPublication": { "id": "proceedings/3dv/2015/8332/0", "title": "2015 International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2016/1437/0/1437a792", "title": "Person-Independent 3D Gaze 
Estimation Using Face Frontalization", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2016/1437a792/12OmNzYwbWh", "parentPublication": { "id": "proceedings/cvprw/2016/1437/0", "title": "2016 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2020/7168/0/716800h707", "title": "Deep 3D Portrait From a Single Image", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2020/716800h707/1m3nijIcYta", "parentPublication": { "id": "proceedings/cvpr/2020/7168/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2020/7168/0/716800i333", "title": "RGBD-Dog: Predicting Canine Pose from RGBD Sensors", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2020/716800i333/1m3npw1SCKA", "parentPublication": { "id": "proceedings/cvpr/2020/7168/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1BmEezmpGrm", "title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)", "acronym": "iccv", "groupId": "1000149", "volume": "0", "displayVolume": "0", "year": "2021", "__typename": "ProceedingType" }, "article": { "id": "1BmK0oMbvk4", "doi": "10.1109/ICCV48922.2021.01055", "title": "DepthTrack: Unveiling the Power of RGBD Tracking", "normalizedTitle": "DepthTrack: Unveiling the Power of RGBD Tracking", "abstract": "RGBD (RGB plus depth) object tracking is gaining momentum as RGBD sensors have become popular in many application fields such as robotics. However, the best RGBD trackers are extensions of the state-of-the-art deep RGB trackers. They are trained with RGB data and the depth channel is used as a sidekick for subtleties such as occlusion detection. This can be explained by the fact that there are no sufficiently large RGBD datasets to 1) train \"deep depth trackers\" and to 2) challenge RGB trackers with sequences for which the depth cue is essential. This work introduces a new RGBD tracking dataset - Depth-Track - that has twice as many sequences (200) and scene types (40) than in the largest existing dataset, and three times more objects (90). In addition, the average length of the sequences (1473), the number of deformable objects (16) and the number of annotated tracking attributes (15) have been increased. Furthermore, by running the SotA RGB and RGBD trackers on DepthTrack, we propose a new RGBD tracking baseline, namely DeT, which reveals that deep RGBD tracking indeed benefits from genuine training data. The code and dataset is available at https://github.com/xiaozai/DeT.", "abstracts": [ { "abstractType": "Regular", "content": "RGBD (RGB plus depth) object tracking is gaining momentum as RGBD sensors have become popular in many application fields such as robotics. However, the best RGBD trackers are extensions of the state-of-the-art deep RGB trackers. 
They are trained with RGB data and the depth channel is used as a sidekick for subtleties such as occlusion detection. This can be explained by the fact that there are no sufficiently large RGBD datasets to 1) train \"deep depth trackers\" and to 2) challenge RGB trackers with sequences for which the depth cue is essential. This work introduces a new RGBD tracking dataset - Depth-Track - that has twice as many sequences (200) and scene types (40) than in the largest existing dataset, and three times more objects (90). In addition, the average length of the sequences (1473), the number of deformable objects (16) and the number of annotated tracking attributes (15) have been increased. Furthermore, by running the SotA RGB and RGBD trackers on DepthTrack, we propose a new RGBD tracking baseline, namely DeT, which reveals that deep RGBD tracking indeed benefits from genuine training data. The code and dataset is available at https://github.com/xiaozai/DeT.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "RGBD (RGB plus depth) object tracking is gaining momentum as RGBD sensors have become popular in many application fields such as robotics. However, the best RGBD trackers are extensions of the state-of-the-art deep RGB trackers. They are trained with RGB data and the depth channel is used as a sidekick for subtleties such as occlusion detection. This can be explained by the fact that there are no sufficiently large RGBD datasets to 1) train \"deep depth trackers\" and to 2) challenge RGB trackers with sequences for which the depth cue is essential. This work introduces a new RGBD tracking dataset - Depth-Track - that has twice as many sequences (200) and scene types (40) than in the largest existing dataset, and three times more objects (90). In addition, the average length of the sequences (1473), the number of deformable objects (16) and the number of annotated tracking attributes (15) have been increased. 
Furthermore, by running the SotA RGB and RGBD trackers on DepthTrack, we propose a new RGBD tracking baseline, namely DeT, which reveals that deep RGBD tracking indeed benefits from genuine training data. The code and dataset is available at https://github.com/xiaozai/DeT.", "fno": "281200k0705", "keywords": [ "Training", "Computer Vision", "Codes", "Training Data", "Benchmark Testing", "Robot Sensing Systems", "Sensors", "Datasets And Evaluation", "Motion And Tracking" ], "authors": [ { "affiliation": "Tampere University", "fullName": "Song Yan", "givenName": "Song", "surname": "Yan", "__typename": "ArticleAuthorType" }, { "affiliation": "Southern University of Science and Technology", "fullName": "Jinyu Yang", "givenName": "Jinyu", "surname": "Yang", "__typename": "ArticleAuthorType" }, { "affiliation": "Tampere University", "fullName": "Jani Käpylä", "givenName": "Jani", "surname": "Käpylä", "__typename": "ArticleAuthorType" }, { "affiliation": "Southern University of Science and Technology", "fullName": "Feng Zheng", "givenName": "Feng", "surname": "Zheng", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Birmingham", "fullName": "Aleš Leonardis", "givenName": "Aleš", "surname": "Leonardis", "__typename": "ArticleAuthorType" }, { "affiliation": "Tampere University", "fullName": "Joni-Kristian Kämäräinen", "givenName": "Joni-Kristian", "surname": "Kämäräinen", "__typename": "ArticleAuthorType" } ], "idPrefix": "iccv", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2021-10-01T00:00:00", "pubType": "proceedings", "pages": "10705-10713", "year": "2021", "issn": null, "isbn": "978-1-6654-2812-5", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "281200k0695", "articleId": "1BmJRYVvF6g", "__typename": "AdjacentArticleType" }, "next": { "fno": "281200k0714", "articleId": "1BmECdgFmcE", "__typename": 
"AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/iccv/2013/2840/0/2840a233", "title": "Tracking Revisited Using RGBD Camera: Unified Benchmark and Baselines", "doi": null, "abstractUrl": "/proceedings-article/iccv/2013/2840a233/12OmNqFrGI8", "parentPublication": { "id": "proceedings/iccv/2013/2840/0", "title": "2013 IEEE International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2016/8851/0/8851b439", "title": "3D Part-Based Sparse Tracker with Automatic Synchronization and Registration", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2016/8851b439/12OmNqJq4s3", "parentPublication": { "id": "proceedings/cvpr/2016/8851/0", "title": "2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/crv/2017/2818/0/2818a285", "title": "Towards Transferring Grasping from Human to Robot with RGBD Hand Detection", "doi": null, "abstractUrl": "/proceedings-article/crv/2017/2818a285/12OmNyQphga", "parentPublication": { "id": "proceedings/crv/2017/2818/0", "title": "2017 14th Conference on Computer and Robot Vision (CRV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2016/1437/0/1437a243", "title": "Simultaneous Semi-Coupled Dictionary Learning for Matching RGBD Data", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2016/1437a243/12OmNzxPTKB", "parentPublication": { "id": "proceedings/cvprw/2016/1437/0", "title": "2016 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2015/6964/0/07298653", "title": "Layered RGBD scene flow estimation", "doi": null, "abstractUrl": 
"/proceedings-article/cvpr/2015/07298653/12OmNzxgHGD", "parentPublication": { "id": "proceedings/cvpr/2015/6964/0", "title": "2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2019/4803/0/480300k0012", "title": "CDTB: A Color and Depth Visual Object Tracking Dataset and Benchmark", "doi": null, "abstractUrl": "/proceedings-article/iccv/2019/480300k0012/1hVlTXWKy9W", "parentPublication": { "id": "proceedings/iccv/2019/4803/0", "title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccvw/2019/5023/0/502300c206", "title": "The Seventh Visual Object Tracking VOT2019 Challenge Results", "doi": null, "abstractUrl": "/proceedings-article/iccvw/2019/502300c206/1i5mQ8487AI", "parentPublication": { "id": "proceedings/iccvw/2019/5023/0", "title": "2019 IEEE/CVF International Conference on Computer Vision Workshop (ICCVW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2020/1331/0/09102848", "title": "Rgbd-Fg: A Large-Scale Rgb-D Dataset For Fine-Grained Categorization", "doi": null, "abstractUrl": "/proceedings-article/icme/2020/09102848/1kwr8ajEwyk", "parentPublication": { "id": "proceedings/icme/2020/1331/0", "title": "2020 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2021/8808/0/09412984", "title": "DAL: A Deep Depth-Aware Long-term Tracker", "doi": null, "abstractUrl": "/proceedings-article/icpr/2021/09412984/1tmhGgDZ4ic", "parentPublication": { "id": "proceedings/icpr/2021/8808/0", "title": "2020 25th International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/iccvw/2021/0191/0/019100c711", "title": "The Ninth Visual Object Tracking VOT2021 Challenge Results", "doi": null, "abstractUrl": "/proceedings-article/iccvw/2021/019100c711/1yNikp8SJUc", "parentPublication": { "id": "proceedings/iccvw/2021/0191/0", "title": "2021 IEEE/CVF International Conference on Computer Vision Workshops (ICCVW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNApu5n9", "title": "2016 Sixth International Conference on Instrumentation & Measurement, Computer, Communication and Control (IMCCC)", "acronym": "imccc", "groupId": "1800575", "volume": "0", "displayVolume": "0", "year": "2016", "__typename": "ProceedingType" }, "article": { "id": "12OmNwoxSaK", "doi": "10.1109/IMCCC.2016.158", "title": "A New Positioning Method for Indoor Laser Navigation on Under-Determined Condition", "normalizedTitle": "A New Positioning Method for Indoor Laser Navigation on Under-Determined Condition", "abstract": "This study presents the indoor automated guided vehicles (AGV) laser navigation method on under-determined environments. Typical laser navigation system working well based on at least three reflectors. However, for the complex environment, dependent on obstacles shelves and goods, it is hard to install reflectors in expected points result in the laser scanner probably receive reflecting signal from less than three reflectors. This under-determined positioning condition will lead to the abnormal work situation of laser navigation system. In order to figure out it, this paper presents a new method when the number is less than three reflectors to output expected location information. It combines the known roadmap with two-axis accelerometer for different observation condition when the number of available reflectors is less than three. The scheme showed that the output accuracy is better than 8cm at the worst and more robust than single laser navigation system.", "abstracts": [ { "abstractType": "Regular", "content": "This study presents the indoor automated guided vehicles (AGV) laser navigation method on under-determined environments. Typical laser navigation system working well based on at least three reflectors. 
However, for the complex environment, dependent on obstacles shelves and goods, it is hard to install reflectors in expected points result in the laser scanner probably receive reflecting signal from less than three reflectors. This under-determined positioning condition will lead to the abnormal work situation of laser navigation system. In order to figure out it, this paper presents a new method when the number is less than three reflectors to output expected location information. It combines the known roadmap with two-axis accelerometer for different observation condition when the number of available reflectors is less than three. The scheme showed that the output accuracy is better than 8cm at the worst and more robust than single laser navigation system.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "This study presents the indoor automated guided vehicles (AGV) laser navigation method on under-determined environments. Typical laser navigation system working well based on at least three reflectors. However, for the complex environment, dependent on obstacles shelves and goods, it is hard to install reflectors in expected points result in the laser scanner probably receive reflecting signal from less than three reflectors. This under-determined positioning condition will lead to the abnormal work situation of laser navigation system. In order to figure out it, this paper presents a new method when the number is less than three reflectors to output expected location information. It combines the known roadmap with two-axis accelerometer for different observation condition when the number of available reflectors is less than three. 
The scheme showed that the output accuracy is better than 8cm at the worst and more robust than single laser navigation system.", "fno": "07774873", "keywords": [ "Accelerometers", "Automatic Guided Vehicles", "Indoor Navigation", "Measurement By Laser Beam", "Optical Scanners", "Two Axis Accelerometer", "Location Information", "Under Determined Positioning Condition", "Laser Scanner", "Reflectors", "Automated Guided Vehicles", "Indoor AGV Laser Navigation Method", "Indoor Positioning Method", "Navigation", "Accelerometers", "Measurement By Laser Beam", "Laser Theory", "Robustness", "Vehicles", "Automated Guided Vehicles", "Indoor Positioning", "Laser Navigation System", "Roadmap", "Under Determined" ], "authors": [ { "affiliation": null, "fullName": "Zhaoxin Xu", "givenName": "Zhaoxin", "surname": "Xu", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Shanle Huang", "givenName": "Shanle", "surname": "Huang", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Jicheng Ding", "givenName": "Jicheng", "surname": "Ding", "__typename": "ArticleAuthorType" } ], "idPrefix": "imccc", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2016-07-01T00:00:00", "pubType": "proceedings", "pages": "703-706", "year": "2016", "issn": null, "isbn": "978-1-5090-1195-7", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "07774872", "articleId": "12OmNxFaLko", "__typename": "AdjacentArticleType" }, "next": { "fno": "07774874", "articleId": "12OmNAYXWIc", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/itc/2014/4722/0/07035329", "title": "IC laser trimming speed-up through wafer-level spatial correlation modeling", "doi": null, "abstractUrl": "/proceedings-article/itc/2014/07035329/12OmNwAt1Fq", "parentPublication": { "id": 
"proceedings/itc/2014/4722/0", "title": "2014 IEEE International Test Conference (ITC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2017/11/08007248", "title": "Simultaneous Projection and Positioning of Laser Projector Pixels", "doi": null, "abstractUrl": "/journal/tg/2017/11/08007248/13rRUxASupD", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/euros&p/2022/1614/0/161400a537", "title": "Laser Meager Listener: A Scientific Exploration of Laser-based Speech Eavesdropping in Commercial User Space", "doi": null, "abstractUrl": "/proceedings-article/euros&p/2022/161400a537/1ErpCbbN8ti", "parentPublication": { "id": "proceedings/euros&p/2022/1614/0", "title": "2022 IEEE 7th European Symposium on Security and Privacy (EuroS&P)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aie/2022/7400/0/740000a134", "title": "The Photoelectric Sensor Network for AGV High-precision Spatial Positioning", "doi": null, "abstractUrl": "/proceedings-article/aie/2022/740000a134/1GZjkcE7Bpm", "parentPublication": { "id": "proceedings/aie/2022/7400/0", "title": "2022 International Conference on Artificial Intelligence in Everything (AIE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/percom-workshops/2019/9151/0/08730820", "title": "Road Surface Condition Inspection Using a Laser Scanner Mounted on an Autonomous Driving Car", "doi": null, "abstractUrl": "/proceedings-article/percom-workshops/2019/08730820/1aDSDZbZcNa", "parentPublication": { "id": "proceedings/percom-workshops/2019/9151/0", "title": "2019 IEEE International Conference on Pervasive Computing and Communications Workshops (PerCom Workshops)", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "proceedings/icmcce/2019/4689/0/468900a470", "title": "Design of a Stroboscopic Laser Grating Stripe Projection Device", "doi": null, "abstractUrl": "/proceedings-article/icmcce/2019/468900a470/1h0FgohMNG0", "parentPublication": { "id": "proceedings/icmcce/2019/4689/0", "title": "2019 4th International Conference on Mechanical, Control and Computer Engineering (ICMCCE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/crc/2019/4620/0/462000a196", "title": "Remote Crack Measurement Using Android Camera with Laser-Positioning Technique", "doi": null, "abstractUrl": "/proceedings-article/crc/2019/462000a196/1iTuKuNHHws", "parentPublication": { "id": "proceedings/crc/2019/4620/0", "title": "2019 4th International Conference on Control, Robotics and Cybernetics (CRC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmeas/2020/9272/0/927200a237", "title": "Experimental study on laser assisted micro-milling based on the evolution of Ti6Al4V chip morphology", "doi": null, "abstractUrl": "/proceedings-article/icmeas/2020/927200a237/1rsiD7sigb6", "parentPublication": { "id": "proceedings/icmeas/2020/9272/0", "title": "2020 6th International Conference on Mechanical Engineering and Automation Science (ICMEAS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wcmeim/2020/4109/0/410900a044", "title": "Research on Laser Navigation AGV Positioning Technology Based on Dynamic Matching", "doi": null, "abstractUrl": "/proceedings-article/wcmeim/2020/410900a044/1t2mvtlwP3a", "parentPublication": { "id": "proceedings/wcmeim/2020/4109/0", "title": "2020 3rd World Conference on Mechanical Engineering and Intelligent Manufacturing (WCMEIM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccsmt/2020/8668/0/866800a009", "title": "SLAM Global 
Positioning Algorithm Based on Laser and Vision Fusion", "doi": null, "abstractUrl": "/proceedings-article/iccsmt/2020/866800a009/1u8pDO7YPzG", "parentPublication": { "id": "proceedings/iccsmt/2020/8668/0", "title": "2020 International Conference on Computer Science and Management Technology (ICCSMT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNzQhP7Z", "title": "2014 International Symposium on Optomechatronic Technologies (ISOT 2014)", "acronym": "isot", "groupId": "1002942", "volume": "0", "displayVolume": "0", "year": "2014", "__typename": "ProceedingType" }, "article": { "id": "12OmNx9nGGj", "doi": "10.1109/ISOT.2014.29", "title": "Recent Development of Using Optical Methods to Measure the Mechanical Properties of Thin Films", "normalizedTitle": "Recent Development of Using Optical Methods to Measure the Mechanical Properties of Thin Films", "abstract": "The mechanical properties of micron and nanometer scale thin films have become an important issues in modern microelectronics. Here, two recent development of using optical methods to measure the mechanical properties of thin films were introduced. First, a paddle-like cantilever beam test structure with nanometer scale metal films on top was design and developed. Film strain and stresses of different thicknesses were measured through the beam deflection obtained by using a four-step phase-shifting process with a Michelson interferometer. Second, we introduce the XRD measurements of the bulge tested thin film. We annealed thin Ag films and tracked the texture transformation in-situ using synchrotron x-ray diffraction while independently varying the stress in the film using a bulge test apparatus. The bulge height was measured as a function of pressure using a simple Fabry-Perot optical interferometer, using the bulge as the fully reflective surface, and an optically flat half-silvered mirror as a reference surface. A CCD camera was used to record interference fringe motion as the pressure was increased. The bulge height was obtained by counting the number of fringes that passed a given point. 
A laser light source with a 532 nm wavelength gave a height resolution of 266 nm.", "abstracts": [ { "abstractType": "Regular", "content": "The mechanical properties of micron and nanometer scale thin films have become an important issues in modern microelectronics. Here, two recent development of using optical methods to measure the mechanical properties of thin films were introduced. First, a paddle-like cantilever beam test structure with nanometer scale metal films on top was design and developed. Film strain and stresses of different thicknesses were measured through the beam deflection obtained by using a four-step phase-shifting process with a Michelson interferometer. Second, we introduce the XRD measurements of the bulge tested thin film. We annealed thin Ag films and tracked the texture transformation in-situ using synchrotron x-ray diffraction while independently varying the stress in the film using a bulge test apparatus. The bulge height was measured as a function of pressure using a simple Fabry-Perot optical interferometer, using the bulge as the fully reflective surface, and an optically flat half-silvered mirror as a reference surface. A CCD camera was used to record interference fringe motion as the pressure was increased. The bulge height was obtained by counting the number of fringes that passed a given point. A laser light source with a 532 nm wavelength gave a height resolution of 266 nm.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "The mechanical properties of micron and nanometer scale thin films have become an important issues in modern microelectronics. Here, two recent development of using optical methods to measure the mechanical properties of thin films were introduced. First, a paddle-like cantilever beam test structure with nanometer scale metal films on top was design and developed. 
Film strain and stresses of different thicknesses were measured through the beam deflection obtained by using a four-step phase-shifting process with a Michelson interferometer. Second, we introduce the XRD measurements of the bulge tested thin film. We annealed thin Ag films and tracked the texture transformation in-situ using synchrotron x-ray diffraction while independently varying the stress in the film using a bulge test apparatus. The bulge height was measured as a function of pressure using a simple Fabry-Perot optical interferometer, using the bulge as the fully reflective surface, and an optically flat half-silvered mirror as a reference surface. A CCD camera was used to record interference fringe motion as the pressure was increased. The bulge height was obtained by counting the number of fringes that passed a given point. A laser light source with a 532 nm wavelength gave a height resolution of 266 nm.", "fno": "07119393", "keywords": [ "Films", "Measurement By Laser Beam", "Stress", "Optical Interferometry", "Optical Variables Measurement", "Structural Beams", "Laser Beams", "The Mechanical Properties Of Thin Films Measurement", "Optical Methods" ], "authors": [ { "affiliation": null, "fullName": "Chi-Jia Tong", "givenName": "Chi-Jia", "surname": "Tong", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Ming-Tzer Lin", "givenName": "Ming-Tzer", "surname": "Lin", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Chung Lin Wu", "givenName": "Chung Lin", "surname": "Wu", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Ya-Chi Cheng", "givenName": "Ya-Chi", "surname": "Cheng", "__typename": "ArticleAuthorType" } ], "idPrefix": "isot", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2014-11-01T00:00:00", "pubType": "proceedings", "pages": "91-94", "year": "2014", "issn": null, "isbn": "978-1-4673-6752-3", "notes": null, "notesType": 
null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "07119392", "articleId": "12OmNqN6R4j", "__typename": "AdjacentArticleType" }, "next": { "fno": "07119394", "articleId": "12OmNwudQM5", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/iciev/2013/0400/0/06572588", "title": "Nano-thin CuO films doped with Au and Pd for gas sensors applications", "doi": null, "abstractUrl": "/proceedings-article/iciev/2013/06572588/12OmNAZx8RJ", "parentPublication": { "id": "proceedings/iciev/2013/0400/0", "title": "2013 2nd International Conference on Informatics, Electronics and Vision (ICIEV 2013)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/memsys/1997/3744/0/00581882", "title": "Measurements of mechanical properties of microfabricated thin films", "doi": null, "abstractUrl": "/proceedings-article/memsys/1997/00581882/12OmNqESuc6", "parentPublication": { "id": "proceedings/memsys/1997/3744/0", "title": "Proceedings IEEE The Tenth Annual International Workshop on Micro Electro Mechanical Systems. 
An Investigation of Micro Structures, Sensors, Actuators, Machines and Robots", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmens/2003/1947/0/19470327", "title": "Evaporated Nanostructured Y2O3: Eu Thin Films", "doi": null, "abstractUrl": "/proceedings-article/icmens/2003/19470327/12OmNqGA5eQ", "parentPublication": { "id": "proceedings/icmens/2003/1947/0", "title": "MEMS, NANO, and Smart Systems, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ewdts/2017/3299/0/08110054", "title": "Electrical, dielectric and structural characterization of nickel ferrite films for thin film electronic applications", "doi": null, "abstractUrl": "/proceedings-article/ewdts/2017/08110054/12OmNrMHOf5", "parentPublication": { "id": "proceedings/ewdts/2017/3299/0", "title": "2017 IEEE East-West Design & Test Symposium (EWDTS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icece/2010/4031/0/4031a201", "title": "A Novel Measurement System for Hydrogenated Nanocrystalline Silicon Thin Films' Gauge Factor", "doi": null, "abstractUrl": "/proceedings-article/icece/2010/4031a201/12OmNvDZF4P", "parentPublication": { "id": "proceedings/icece/2010/4031/0", "title": "Electrical and Control Engineering, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iseim/2005/6063/3/01496255", "title": "High tunability (Ba,Sr)TiO/sub 3/ thin films on atomic layer deposited buffer layers for Si integration", "doi": null, "abstractUrl": "/proceedings-article/iseim/2005/01496255/12OmNwCsdPL", "parentPublication": { "id": "proceedings/iseim/2005/6063/3", "title": "International Symposium on Electrical Insulating Materials", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/mtdt/1994/6245/0/00397202", "title": "Mechanical stress induced void and hillock formations in thin films", "doi": null, "abstractUrl": "/proceedings-article/mtdt/1994/00397202/12OmNwErpAJ", "parentPublication": { "id": "proceedings/mtdt/1994/6245/0", "title": "Proceedings of IEEE International Workshop on Memory Technology, Design, and Test", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icqnm/2009/3524/0/3524a073", "title": "Heat Transfer in Thin Films", "doi": null, "abstractUrl": "/proceedings-article/icqnm/2009/3524a073/12OmNwvDQtz", "parentPublication": { "id": "proceedings/icqnm/2009/3524/0", "title": "Quantum, Nano, and Micro Technologies, First International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icece/2010/4031/0/4031f722", "title": "The Effect of Processing Parameters on Resistivity of Boron-doped Hydrogenated Nanocrystalline Silicon Thin Films", "doi": null, "abstractUrl": "/proceedings-article/icece/2010/4031f722/12OmNy7h3bq", "parentPublication": { "id": "proceedings/icece/2010/4031/0", "title": "Electrical and Control Engineering, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iceee/2019/3910/0/391000a344", "title": "Influence of Film Thickness on Electrical and Optical Properties of Vanadium Dioxide Thin Films for Smart Window Coating Application", "doi": null, "abstractUrl": "/proceedings-article/iceee/2019/391000a344/1cpqGUO9lKM", "parentPublication": { "id": "proceedings/iceee/2019/3910/0", "title": "2019 6th International Conference on Electrical and Electronics Engineering (ICEEE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNzn395d", "title": "2017 IEEE 67th Electronic Components and Technology Conference (ECTC)", "acronym": "ectc", "groupId": "1000248", "volume": "0", "displayVolume": "0", "year": "2017", "__typename": "ProceedingType" }, "article": { "id": "12OmNxFsmCC", "doi": "10.1109/ECTC.2017.81", "title": "Multi Beam Full Cut Dicing of Thin Si IC Wafers", "normalizedTitle": "Multi Beam Full Cut Dicing of Thin Si IC Wafers", "abstract": "Over the last years, singulation of thin semiconductor wafers with (ultra) low-k top layer has become a challenge in the production process of integrated circuits. The traditional blade dicing process is encountering serious yield issues. These issues can be addressed by applying a laser grooving process prior to the blade dicing, which is the process of reference nowadays. However, as wafers are becoming thinner, this process flow is not providing the yield and cost required. This paper will discuss the results of a study done on the impact of the dicing quality of thin Si wafers, which led to the development of a laser separation process that employs a V-shape laser beam pattern.", "abstracts": [ { "abstractType": "Regular", "content": "Over the last years, singulation of thin semiconductor wafers with (ultra) low-k top layer has become a challenge in the production process of integrated circuits. The traditional blade dicing process is encountering serious yield issues. These issues can be addressed by applying a laser grooving process prior to the blade dicing, which is the process of reference nowadays. However, as wafers are becoming thinner, this process flow is not providing the yield and cost required. 
This paper will discuss the results of a study done on the impact of the dicing quality of thin Si wafers, which led to the development of a laser separation process that employs a V-shape laser beam pattern.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Over the last years, singulation of thin semiconductor wafers with (ultra) low-k top layer has become a challenge in the production process of integrated circuits. The traditional blade dicing process is encountering serious yield issues. These issues can be addressed by applying a laser grooving process prior to the blade dicing, which is the process of reference nowadays. However, as wafers are becoming thinner, this process flow is not providing the yield and cost required. This paper will discuss the results of a study done on the impact of the dicing quality of thin Si wafers, which led to the development of a laser separation process that employs a V-shape laser beam pattern.", "fno": "07999928", "keywords": [ "Elemental Semiconductors", "Integrated Circuit Manufacture", "Laser Beam Cutting", "Semiconductor Technology", "Silicon", "V Shape Laser Beam Pattern", "Laser Separation Process", "Laser Grooving Process", "Blade Dicing Process", "Integrated Circuits", "Ultra Low K Top Layer", "Thin Semiconductor Wafers", "Thin IC Wafers", "Multibeam Full Cut Dicing", "Si", "Laser Beam Cutting", "Silicon", "Laser Beams", "Measurement By Laser Beam", "Blades", "Laser Ablation", "Substrates", "3 D Packaging", "Dicing", "Die Strength", "Laser", "Thin Wafer", "Multiple Beams", "Diffractive Optics" ], "authors": [ { "affiliation": null, "fullName": "Jeroen van Borkulo", "givenName": "Jeroen van", "surname": "Borkulo", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Paul Verburg", "givenName": "Paul", "surname": "Verburg", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Richard van der Stam", "givenName": "Richard van der", "surname": "Stam", "__typename": 
"ArticleAuthorType" } ], "idPrefix": "ectc", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2017-05-01T00:00:00", "pubType": "proceedings", "pages": "1817-1822", "year": "2017", "issn": "2377-5726", "isbn": "978-1-5090-6315-4", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "07999927", "articleId": "12OmNwwMf59", "__typename": "AdjacentArticleType" }, "next": { "fno": "07999929", "articleId": "12OmNxRF75d", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/case/2007/1153/0/04341853", "title": "Characterization of Program Controlled CO2 Laser-Cut PDMS Channels for Lab-on-a-chip Applications", "doi": null, "abstractUrl": "/proceedings-article/case/2007/04341853/12OmNBh8gSn", "parentPublication": { "id": "proceedings/case/2007/1153/0", "title": "3rd Annual IEEE Conference on Automation Science and Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ectc/2017/6315/0/07999715", "title": "Plasma Dicing Fully Integrated Process-Flows Suitable for BEOL Advanced Packaging Fabrications", "doi": null, "abstractUrl": "/proceedings-article/ectc/2017/07999715/12OmNCd2rQE", "parentPublication": { "id": "proceedings/ectc/2017/6315/0", "title": "2017 IEEE 67th Electronic Components and Technology Conference (ECTC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/isqed/2005/2301/0/23010610", "title": "Reticle Floorplanning and Wafer Dicing for Multiple Project Wafers", "doi": null, "abstractUrl": "/proceedings-article/isqed/2005/23010610/12OmNqGA5bz", "parentPublication": { "id": "proceedings/isqed/2005/2301/0", "title": "Proceedings. 
6th International Symposium on Quality Electronic Design", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/Ismar-mashd/2014/6887/0/06935435", "title": "VAL: Visually Augmented Laser cutting to enhance and support creativity", "doi": null, "abstractUrl": "/proceedings-article/Ismar-mashd/2014/06935435/12OmNrJAdXk", "parentPublication": { "id": "proceedings/Ismar-mashd/2014/6887/0", "title": "2014 IEEE International Symposium on Mixed and Augmented Reality - Media, Art, Social Science, Humanities and Design (ISMAR-MASH'D)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ectc/2017/6315/0/07999713", "title": "Laser Multi Beam Full Cut Dicing of Wafer Level Chip-Scale Packages", "doi": null, "abstractUrl": "/proceedings-article/ectc/2017/07999713/12OmNvmG81E", "parentPublication": { "id": "proceedings/ectc/2017/6315/0", "title": "2017 IEEE 67th Electronic Components and Technology Conference (ECTC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ectc/2017/6315/0/07999716", "title": "Stealth Dicing Challenges for MEMS Wafer Applications", "doi": null, "abstractUrl": "/proceedings-article/ectc/2017/07999716/12OmNvq5jFf", "parentPublication": { "id": "proceedings/ectc/2017/6315/0", "title": "2017 IEEE 67th Electronic Components and Technology Conference (ECTC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/itc/2014/4722/0/07035329", "title": "IC laser trimming speed-up through wafer-level spatial correlation modeling", "doi": null, "abstractUrl": "/proceedings-article/itc/2014/07035329/12OmNwAt1Fq", "parentPublication": { "id": "proceedings/itc/2014/4722/0", "title": "2014 IEEE International Test Conference (ITC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ectc/2017/6315/0/07999714", "title": "Plasma 
Dicing 300mm Framed Wafers — Analysis of Improvement in Die Strength and Cost Benefits for Thin Die Singulation", "doi": null, "abstractUrl": "/proceedings-article/ectc/2017/07999714/12OmNwHhoTG", "parentPublication": { "id": "proceedings/ectc/2017/6315/0", "title": "2017 IEEE 67th Electronic Components and Technology Conference (ECTC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmtma/2011/4296/3/4296e740", "title": "The Design and Implementation of Helicopter Blades Pyramid Angle Measurement System", "doi": null, "abstractUrl": "/proceedings-article/icmtma/2011/4296e740/12OmNzkuKIS", "parentPublication": { "id": "proceedings/icmtma/2011/4296/3", "title": "2011 Third International Conference on Measuring Technology and Mechatronics Automation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNBTawn8", "title": "2014 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "acronym": "cvprw", "groupId": "1001809", "volume": "0", "displayVolume": "0", "year": "2014", "__typename": "ProceedingType" }, "article": { "id": "12OmNypIYA4", "doi": "10.1109/CVPRW.2014.72", "title": "Projection Center Calibration for a Co-located Projector Camera System", "normalizedTitle": "Projection Center Calibration for a Co-located Projector Camera System", "abstract": "A co-located projector camera system where the projector and camera are positioned in the same optical position by a plate beam splitter enables various spatial augmented reality applications for dynamic three dimensional scenes. The extremely precise alignment of the projection centers of the camera and projector is necessary for these applications. However, the conventional calibration procedure for a camera and projector cannot achieve high accuracy because an iterative verification process for the alignment is not included. This paper proposes a novel interactive alignment approach that displays a capture of the projected grid pattern on the calibration screen. Additionally, a misalignment display technique that employs projector camera feedback is proposed for fine adjustment.", "abstracts": [ { "abstractType": "Regular", "content": "A co-located projector camera system where the projector and camera are positioned in the same optical position by a plate beam splitter enables various spatial augmented reality applications for dynamic three dimensional scenes. The extremely precise alignment of the projection centers of the camera and projector is necessary for these applications. However, the conventional calibration procedure for a camera and projector cannot achieve high accuracy because an iterative verification process for the alignment is not included. 
This paper proposes a novel interactive alignment approach that displays a capture of the projected grid pattern on the calibration screen. Additionally, a misalignment display technique that employs projector camera feedback is proposed for fine adjustment.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "A co-located projector camera system where the projector and camera are positioned in the same optical position by a plate beam splitter enables various spatial augmented reality applications for dynamic three dimensional scenes. The extremely precise alignment of the projection centers of the camera and projector is necessary for these applications. However, the conventional calibration procedure for a camera and projector cannot achieve high accuracy because an iterative verification process for the alignment is not included. This paper proposes a novel interactive alignment approach that displays a capture of the projected grid pattern on the calibration screen. Additionally, a misalignment display technique that employs projector camera feedback is proposed for fine adjustment.", "fno": "4308a449", "keywords": [ "Cameras", "Calibration", "Optical Feedback", "Optical Imaging", "Accuracy", "Lighting", "Shape", "Centers Of Projection", "Projector Camera Systems", "Calibration" ], "authors": [ { "affiliation": null, "fullName": "Toshiyuki Amano", "givenName": "Toshiyuki", "surname": "Amano", "__typename": "ArticleAuthorType" } ], "idPrefix": "cvprw", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2014-06-01T00:00:00", "pubType": "proceedings", "pages": "449-454", "year": "2014", "issn": "2160-7516", "isbn": "978-1-4799-4308-1", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "4308a441", "articleId": "12OmNyqzLWD", "__typename": "AdjacentArticleType" }, "next": { "fno": "4308a455", "articleId": "12OmNwFzO2X", 
"__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/cvprw/2011/0529/0/05981726", "title": "Fully automatic multi-projector calibration with an uncalibrated camera", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2011/05981726/12OmNBSBk4F", "parentPublication": { "id": "proceedings/cvprw/2011/0529/0", "title": "CVPR 2011 WORKSHOPS", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2008/2174/0/04761601", "title": "Calibration of projector-camera systems from virtual mutual projection", "doi": null, "abstractUrl": "/proceedings-article/icpr/2008/04761601/12OmNBp52Hx", "parentPublication": { "id": "proceedings/icpr/2008/2174/0", "title": "ICPR 2008 19th International Conference on Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2009/3994/0/05204317", "title": "Geometric video projector auto-calibration", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2009/05204317/12OmNCxtyKC", "parentPublication": { "id": "proceedings/cvprw/2009/3994/0", "title": "2009 IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/svr/2014/4261/0/4261a200", "title": "Projection Mapping for a Kinect-Projector System", "doi": null, "abstractUrl": "/proceedings-article/svr/2014/4261a200/12OmNwe2Izu", "parentPublication": { "id": "proceedings/svr/2014/4261/0", "title": "2014 XVI Symposium on Virtual and Augmented Reality (SVR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dimpvt/2012/4873/0/4873a464", "title": "Simple, Accurate, and Robust Projector-Camera Calibration", "doi": null, "abstractUrl": "/proceedings-article/3dimpvt/2012/4873a464/12OmNx0RIZY", "parentPublication": { "id": 
"proceedings/3dimpvt/2012/4873/0", "title": "2012 Second International Conference on 3D Imaging, Modeling, Processing, Visualization & Transmission", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2010/4109/0/4109a320", "title": "Active Calibration of Camera-Projector Systems Based on Planar Homography", "doi": null, "abstractUrl": "/proceedings-article/icpr/2010/4109a320/12OmNzDehgc", "parentPublication": { "id": "proceedings/icpr/2010/4109/0", "title": "Pattern Recognition, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icvrv/2012/4836/0/4836a007", "title": "Real-time Continuous Geometric Calibration for Projector-Camera System under Ambient Illumination", "doi": null, "abstractUrl": "/proceedings-article/icvrv/2012/4836a007/12OmNzahc85", "parentPublication": { "id": "proceedings/icvrv/2012/4836/0", "title": "2012 International Conference on Virtual Reality and Visualization", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2018/7592/0/08699178", "title": "A Single-Shot-Per-Pose Camera-Projector Calibration System for Imperfect Planar Targets", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699178/19F1O0IjR8k", "parentPublication": { "id": "proceedings/ismar-adjunct/2018/7592/0", "title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2019/4765/0/476500a261", "title": "A Projector Calibration Method Using a Mobile Camera for Projection Mapping System", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2019/476500a261/1gysikN6QOQ", "parentPublication": { "id": "proceedings/ismar-adjunct/2019/4765/0", "title": "2019 IEEE International Symposium on Mixed and Augmented 
Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/11/09523844", "title": "Directionally Decomposing Structured Light for Projector Calibration", "doi": null, "abstractUrl": "/journal/tg/2021/11/09523844/1wpqmnzDSzm", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNzQhP7Z", "title": "2014 International Symposium on Optomechatronic Technologies (ISOT 2014)", "acronym": "isot", "groupId": "1002942", "volume": "0", "displayVolume": "0", "year": "2014", "__typename": "ProceedingType" }, "article": { "id": "12OmNzCF4UY", "doi": "10.1109/ISOT.2014.84", "title": "Acoustic Research and Control of Piezoelectric Speakers Using a Spatially Modulated TiOPc/Piezo Buzzer Actuator", "normalizedTitle": "Acoustic Research and Control of Piezoelectric Speakers Using a Spatially Modulated TiOPc/Piezo Buzzer Actuator", "abstract": "An opto-piezoelectric laminate composed of a photosensitive electrode and a PZT (Lead Zirconate Titanate) layer modulated by a light field was adopted as the research platform of this paper. The photosensitive electrode is a thin film formed by mixing TiOPc (Titanium oxide phthalocyanine) and a copolymer. During operation, the instant variation of TiOPc impedance when illuminated with light beam patterns created spatially varying electric field across the piezoelectric actuator made of PZT. The processes used to create this opto-piezoelectric composite and its application to acoustics are discussed in this paper. For example, it was known that the low-frequency performance of flat piezoelectric loudspeakers is poor. By taking advantages of the characteristic of the photoelectric conductive electrode, which demonstrated noticeable impedance change at low-mid range frequency (about 40-8000Hz), a way to enhance the low-frequency performance of the flat piezoelectric speakers are examined. Various illumination pattern tried and examined to control the acoustic beam patterns created are also presented. 
The experimental results confirmed the directivity controlling capability of acoustic pressure generated through external illuminated light patterns by using the opto-piezoelectric platform are detailed as well.", "abstracts": [ { "abstractType": "Regular", "content": "An opto-piezoelectric laminate composed of a photosensitive electrode and a PZT (Lead Zirconate Titanate) layer modulated by a light field was adopted as the research platform of this paper. The photosensitive electrode is a thin film formed by mixing TiOPc (Titanium oxide phthalocyanine) and a copolymer. During operation, the instant variation of TiOPc impedance when illuminated with light beam patterns created spatially varying electric field across the piezoelectric actuator made of PZT. The processes used to create this opto-piezoelectric composite and its application to acoustics are discussed in this paper. For example, it was known that the low-frequency performance of flat piezoelectric loudspeakers is poor. By taking advantages of the characteristic of the photoelectric conductive electrode, which demonstrated noticeable impedance change at low-mid range frequency (about 40-8000Hz), a way to enhance the low-frequency performance of the flat piezoelectric speakers are examined. Various illumination pattern tried and examined to control the acoustic beam patterns created are also presented. The experimental results confirmed the directivity controlling capability of acoustic pressure generated through external illuminated light patterns by using the opto-piezoelectric platform are detailed as well.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "An opto-piezoelectric laminate composed of a photosensitive electrode and a PZT (Lead Zirconate Titanate) layer modulated by a light field was adopted as the research platform of this paper. The photosensitive electrode is a thin film formed by mixing TiOPc (Titanium oxide phthalocyanine) and a copolymer. 
During operation, the instant variation of TiOPc impedance when illuminated with light beam patterns created spatially varying electric field across the piezoelectric actuator made of PZT. The processes used to create this opto-piezoelectric composite and its application to acoustics are discussed in this paper. For example, it was known that the low-frequency performance of flat piezoelectric loudspeakers is poor. By taking advantages of the characteristic of the photoelectric conductive electrode, which demonstrated noticeable impedance change at low-mid range frequency (about 40-8000Hz), a way to enhance the low-frequency performance of the flat piezoelectric speakers are examined. Various illumination pattern tried and examined to control the acoustic beam patterns created are also presented. The experimental results confirmed the directivity controlling capability of acoustic pressure generated through external illuminated light patterns by using the opto-piezoelectric platform are detailed as well.", "fno": "07119447", "keywords": [ "Lighting", "Acoustic Beams", "Laser Beams", "Frequency Measurement", "Electric Fields", "Measurement By Laser Beam", "Electrodes", "Sound Field", "Piezoelectric Material", "Photoconductive Material", "Electrical Impedance", "Flat Loudspeaker", "Frequency Response", "Directivity", "Beam Pattern" ], "authors": [ { "affiliation": null, "fullName": "Pei Wen Wang", "givenName": "Pei Wen", "surname": "Wang", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Te Chieh Chang", "givenName": "Te Chieh", "surname": "Chang", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Chih Kung Lee", "givenName": "Chih Kung", "surname": "Lee", "__typename": "ArticleAuthorType" } ], "idPrefix": "isot", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2014-11-01T00:00:00", "pubType": "proceedings", "pages": "320-325", "year": "2014", "issn": null, "isbn": 
"978-1-4673-6752-3", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "07119446", "articleId": "12OmNCcKQmB", "__typename": "AdjacentArticleType" }, "next": { "fno": "07119448", "articleId": "12OmNqC2uZl", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/ssst/2005/8808/0/01460873", "title": "Active vibration control of flexible steel cantilever beam using piezoelectric actuators", "doi": null, "abstractUrl": "/proceedings-article/ssst/2005/01460873/12OmNA2cYzz", "parentPublication": { "id": "proceedings/ssst/2005/8808/0", "title": "Proceedings of the Thirty-Seventh Southeastern Symposium on System Theory (SSST05)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/isot/2014/6752/0/07119415", "title": "Actuated MOEMS Micro-Mirror Based on PMN PT Piezoelectric Material", "doi": null, "abstractUrl": "/proceedings-article/isot/2014/07119415/12OmNApu5AQ", "parentPublication": { "id": "proceedings/isot/2014/6752/0", "title": "2014 International Symposium on Optomechatronic Technologies (ISOT 2014)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icisa/2012/1402/0/06220962", "title": "Piezoelectric Transducer Based 3D Intraoral Scanner", "doi": null, "abstractUrl": "/proceedings-article/icisa/2012/06220962/12OmNB836JP", "parentPublication": { "id": "proceedings/icisa/2012/1402/0", "title": "2012 International Conference on Information Science and Applications (ICISA 2012)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmtma/2009/3583/2/3583b045", "title": "Design and Fabrication of a Novel PZT Films Based Piezoelectric Micromachined Ultrasonic Transducers", "doi": null, "abstractUrl": "/proceedings-article/icmtma/2009/3583b045/12OmNBSBkil", "parentPublication": { 
"id": "proceedings/icmtma/2009/3583/2", "title": "2009 International Conference on Measuring Technology and Mechatronics Automation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icat/2017/3337/0/08171644", "title": "Development of a soft PZT based tactile sensor array for force localization", "doi": null, "abstractUrl": "/proceedings-article/icat/2017/08171644/12OmNBeRtNx", "parentPublication": { "id": "proceedings/icat/2017/3337/0", "title": "2017 XXVI International Conference on Information, Communication and Automation Technologies (ICAT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icca/2003/7777/0/01594998", "title": "Design of a Repetitive Control System for a Piezoelectric Actuator Based on the Inverse Hysteresis Model", "doi": null, "abstractUrl": "/proceedings-article/icca/2003/01594998/12OmNBubONY", "parentPublication": { "id": "proceedings/icca/2003/7777/0", "title": "4th International Conference on Control and Automation. 
Final Program and Book of Abstracts", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iseim/2005/6063/3/01496264", "title": "A research on the piezoelectric vibration actuator for mobile phone", "doi": null, "abstractUrl": "/proceedings-article/iseim/2005/01496264/12OmNrJAeap", "parentPublication": { "id": "proceedings/iseim/2005/6063/3", "title": "International Symposium on Electrical Insulating Materials", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/memsys/1997/3744/0/00581772", "title": "Independent parallel scanning force microscopy using Pb(Zr,Ti)O/sub 3/ microcantilever array", "doi": null, "abstractUrl": "/proceedings-article/memsys/1997/00581772/12OmNwlqhKl", "parentPublication": { "id": "proceedings/memsys/1997/3744/0", "title": "Proceedings IEEE The Tenth Annual International Workshop on Micro Electro Mechanical Systems. An Investigation of Micro Structures, Sensors, Actuators, Machines and Robots", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/isdea/2013/4893/0/06456198", "title": "Modeling for Piezoelectric Stacks in Series and Parallel", "doi": null, "abstractUrl": "/proceedings-article/isdea/2013/06456198/12OmNx5GU93", "parentPublication": { "id": "proceedings/isdea/2013/4893/0", "title": "2013 Third International Conference on Intelligent System Design and Engineering Applications (ISDEA 2013)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ectc/2017/6315/0/07999928", "title": "Multi Beam Full Cut Dicing of Thin Si IC Wafers", "doi": null, "abstractUrl": "/proceedings-article/ectc/2017/07999928/12OmNxFsmCC", "parentPublication": { "id": "proceedings/ectc/2017/6315/0", "title": "2017 IEEE 67th Electronic Components and Technology Conference (ECTC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], 
"articleVideos": [] }
{ "proceeding": { "id": "1h0F9OLLALm", "title": "2019 4th International Conference on Mechanical, Control and Computer Engineering (ICMCCE)", "acronym": "icmcce", "groupId": "1824464", "volume": "0", "displayVolume": "0", "year": "2019", "__typename": "ProceedingType" }, "article": { "id": "1h0FgohMNG0", "doi": "10.1109/ICMCCE48743.2019.00112", "title": "Design of a Stroboscopic Laser Grating Stripe Projection Device", "normalizedTitle": "Design of a Stroboscopic Laser Grating Stripe Projection Device", "abstract": "A three-dimensional measuring system based on a line-structured laser and a high-speed rotating multi-faced prism has been designed and described in detail. The grating fringe can be produced using stroboscopic effect in the system. The relationship between grating stripe and modulation signal and prism rotation speed are analyzed from both theoretical and experimental aspects. At the same time, the grating stripe projection device is applied to the three-dimensional measurement of gypsum section.", "abstracts": [ { "abstractType": "Regular", "content": "A three-dimensional measuring system based on a line-structured laser and a high-speed rotating multi-faced prism has been designed and described in detail. The grating fringe can be produced using stroboscopic effect in the system. The relationship between grating stripe and modulation signal and prism rotation speed are analyzed from both theoretical and experimental aspects. At the same time, the grating stripe projection device is applied to the three-dimensional measurement of gypsum section.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "A three-dimensional measuring system based on a line-structured laser and a high-speed rotating multi-faced prism has been designed and described in detail. The grating fringe can be produced using stroboscopic effect in the system. 
The relationship between grating stripe and modulation signal and prism rotation speed are analyzed from both theoretical and experimental aspects. At the same time, the grating stripe projection device is applied to the three-dimensional measurement of gypsum section.", "fno": "468900a470", "keywords": [ "Diffraction Gratings", "Measurement By Laser Beam", "Optical Design Techniques", "Optical Prisms", "Optical Projectors", "Optical Rotation", "Stroboscopes", "Stroboscopic Laser Grating Stripe Projection Device", "Three Dimensional Measuring System", "Line Structured Laser", "High Speed Rotating Multifaced Prism", "Grating Fringe", "Stroboscopic Effect", "Modulation Signal", "Prism Rotation Speed", "Three Dimensional Measurement", "Gypsum Section", "Surface Emitting Lasers", "Gratings", "Measurement By Laser Beam", "Modulation", "Laser Modes", "Laser Feedback", "Laser", "Stroboscopic Effect", "Grating Fringe", "Three Dimensional Reconstruction" ], "authors": [ { "affiliation": "Tianjin University of Science & Technology", "fullName": "Guowei Yang", "givenName": "Guowei", "surname": "Yang", "__typename": "ArticleAuthorType" }, { "affiliation": "Tianjin University of Science & Technology", "fullName": "Peijie Shi", "givenName": "Peijie", "surname": "Shi", "__typename": "ArticleAuthorType" } ], "idPrefix": "icmcce", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2019-10-01T00:00:00", "pubType": "proceedings", "pages": "470-4704", "year": "2019", "issn": null, "isbn": "978-1-7281-4689-8", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "468900a465", "articleId": "1h0Fkc72HJe", "__typename": "AdjacentArticleType" }, "next": { "fno": "468900a475", "articleId": "1h0FigddQfC", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/dsd/2017/2146/0/2146a252", "title": "Role 
of Laser-Induced IR Drops in the Occurrence of Faults: Assessment and Simulation", "doi": null, "abstractUrl": "/proceedings-article/dsd/2017/2146a252/12OmNAObbHn", "parentPublication": { "id": "proceedings/dsd/2017/2146/0", "title": "2017 Euromicro Conference on Digital System Design (DSD)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2013/2869/0/06671770", "title": "Interactive syntactic modeling with a single-point laser range finder and camera", "doi": null, "abstractUrl": "/proceedings-article/ismar/2013/06671770/12OmNBhHtgO", "parentPublication": { "id": "proceedings/ismar/2013/2869/0", "title": "2013 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/is3c/2016/3071/0/3071a850", "title": "A Laser-Based Vision System for Tire Tread Depth Inspection", "doi": null, "abstractUrl": "/proceedings-article/is3c/2016/3071a850/12OmNC8MsAk", "parentPublication": { "id": "proceedings/is3c/2016/3071/0", "title": "2016 International Symposium on Computer, Consumer and Control (IS3C)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/itc/2014/4722/0/07035329", "title": "IC laser trimming speed-up through wafer-level spatial correlation modeling", "doi": null, "abstractUrl": "/proceedings-article/itc/2014/07035329/12OmNwAt1Fq", "parentPublication": { "id": "proceedings/itc/2014/4722/0", "title": "2014 IEEE International Test Conference (ITC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2013/2869/0/06671799", "title": "In-situ interactive modeling using a single-point laser rangefinder coupled with a new hybrid orientation tracker", "doi": null, "abstractUrl": "/proceedings-article/ismar/2013/06671799/12OmNz61dzi", "parentPublication": { "id": "proceedings/ismar/2013/2869/0", "title": 
"2013 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmtma/2017/4868/0/07832198", "title": "The Modern Measurement Technology for the Jump Error in the Specific Position and Aperture of Tiny Revolving Part", "doi": null, "abstractUrl": "/proceedings-article/icmtma/2017/07832198/12OmNzVoBUt", "parentPublication": { "id": "proceedings/icmtma/2017/4868/0", "title": "2017 9th International Conference on Measuring Technology and Mechatronics Automation (ICMTMA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2017/11/08007248", "title": "Simultaneous Projection and Positioning of Laser Projector Pixels", "doi": null, "abstractUrl": "/journal/tg/2017/11/08007248/13rRUxASupD", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmtma/2022/9978/0/997800a006", "title": "A Transmission Grating-based Polarization Demodulated Grating Interferometric Sensor", "doi": null, "abstractUrl": "/proceedings-article/icmtma/2022/997800a006/1ByeBzTTXmE", "parentPublication": { "id": "proceedings/icmtma/2022/9978/0", "title": "2022 14th International Conference on Measuring Technology and Mechatronics Automation (ICMTMA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/qe/2022/01/09964063", "title": "The “Squeeze Laser”", "doi": null, "abstractUrl": "/journal/qe/2022/01/09964063/1IAFLDGVVVm", "parentPublication": { "id": "trans/qe", "title": "IEEE Transactions on Quantum Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccsmt/2020/8668/0/866800a009", "title": "SLAM Global Positioning Algorithm Based on Laser and Vision Fusion", "doi": null, 
"abstractUrl": "/proceedings-article/iccsmt/2020/866800a009/1u8pDO7YPzG", "parentPublication": { "id": "proceedings/iccsmt/2020/8668/0", "title": "2020 International Conference on Computer Science and Management Technology (ICCSMT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1t2n7SlHRvO", "title": "2021 13th International Conference on Measuring Technology and Mechatronics Automation (ICMTMA)", "acronym": "icmtma", "groupId": "1002837", "volume": "0", "displayVolume": "0", "year": "2021", "__typename": "ProceedingType" }, "article": { "id": "1t2n9aXMNPO", "doi": "10.1109/ICMTMA52658.2021.00145", "title": "Study on spherical aberration in the laser optical system", "normalizedTitle": "Study on spherical aberration in the laser optical system", "abstract": "The effect of spherical aberration on the radius of image beam waist in the design of laser optical system is discussed in detail, and the relationship between the spherical aberration and the image beam waist radius in the laser optical system is deduced. Radius variation of image beam waist due to spherical aberration which varies with the position of object beam waist and the optical system focus is analyzed by the form of curve. The analysis results indicate that the effect of spherical aberration on the radius of image beam waist should be considered according to the position of object beam waist in the design of a laser optical system. Finally, a design example of the optical system used for atmospheric laser communication system is given to illustrate the effect of spherical aberration on the image beam waist.", "abstracts": [ { "abstractType": "Regular", "content": "The effect of spherical aberration on the radius of image beam waist in the design of laser optical system is discussed in detail, and the relationship between the spherical aberration and the image beam waist radius in the laser optical system is deduced. Radius variation of image beam waist due to spherical aberration which varies with the position of object beam waist and the optical system focus is analyzed by the form of curve. 
The analysis results indicate that the effect of spherical aberration on the radius of image beam waist should be considered according to the position of object beam waist in the design of a laser optical system. Finally, a design example of the optical system used for atmospheric laser communication system is given to illustrate the effect of spherical aberration on the image beam waist.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "The effect of spherical aberration on the radius of image beam waist in the design of laser optical system is discussed in detail, and the relationship between the spherical aberration and the image beam waist radius in the laser optical system is deduced. Radius variation of image beam waist due to spherical aberration which varies with the position of object beam waist and the optical system focus is analyzed by the form of curve. The analysis results indicate that the effect of spherical aberration on the radius of image beam waist should be considered according to the position of object beam waist in the design of a laser optical system. 
Finally, a design example of the optical system used for atmospheric laser communication system is given to illustrate the effect of spherical aberration on the image beam waist.", "fno": "389200a628", "keywords": [ "Aberrations", "Laser Beams", "Optical Design Techniques", "Optical Fibre Communication", "Optical Focusing", "Spherical Aberration", "Laser Optical System", "Image Beam Waist Radius", "Object Beam Waist", "Optical System Focus", "Atmospheric Laser Communication System", "Radius Variation", "Integrated Optics", "Mechatronics", "Optical Design", "Measurement By Laser Beam", "Optical Variables Measurement", "Optical Imaging", "Geometrical Optics", "Laser Optical System Design", "Spherical Aberration", "Wave Aberration", "Beam Waist" ], "authors": [ { "affiliation": "Army Engineering University of PLA,Ordnance N.C.O Academy,Wuhan,China,430075", "fullName": "Heng Jiang", "givenName": "Heng", "surname": "Jiang", "__typename": "ArticleAuthorType" }, { "affiliation": "Army Engineering University of PLA,Ordnance N.C.O Academy,Wuhan,China,430075", "fullName": "Chengyu Li", "givenName": "Chengyu", "surname": "Li", "__typename": "ArticleAuthorType" }, { "affiliation": "Army Engineering University of PLA,Ordnance N.C.O Academy,Wuhan,China,430075", "fullName": "Xiangjin Wang", "givenName": "Xiangjin", "surname": "Wang", "__typename": "ArticleAuthorType" } ], "idPrefix": "icmtma", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2021-01-01T00:00:00", "pubType": "proceedings", "pages": "628-631", "year": "2021", "issn": null, "isbn": "978-1-6654-3892-6", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "389200a624", "articleId": "1t2nlJGAz3q", "__typename": "AdjacentArticleType" }, "next": { "fno": "389200a632", "articleId": "1t2nmIZ5RBe", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": 
[ { "id": "proceedings/eqec/2005/8973/0/01567237", "title": "Cold atoms: a sample with adjustable nonlinearities", "doi": null, "abstractUrl": "/proceedings-article/eqec/2005/01567237/12OmNvjyxxS", "parentPublication": { "id": "proceedings/eqec/2005/8973/0", "title": "2005 European Quantum Electronics Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icicta/2010/4077/2/4077c391", "title": "Geometric Error Analysis for Spherical Mounted Retroreflector in Laser Tracker", "doi": null, "abstractUrl": "/proceedings-article/icicta/2010/4077c391/12OmNwNwzH2", "parentPublication": { "id": "proceedings/icicta/2010/4077/2", "title": "Intelligent Computation Technology and Automation, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icece/2010/4031/0/4031a016", "title": "A Closed-Loop Adaptive Optical System Based on Genetic Algorithm", "doi": null, "abstractUrl": "/proceedings-article/icece/2010/4031a016/12OmNyuPLiy", "parentPublication": { "id": "proceedings/icece/2010/4031/0", "title": "Electrical and Control Engineering, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2006/2521/4/252140821", "title": "\"Firefly capturing method\": Motion capturing by monocular camera with large spherical aberration of lens and Hough-transform-based image processing", "doi": null, "abstractUrl": "/proceedings-article/icpr/2006/252140821/12OmNyv7mfL", "parentPublication": { "id": "proceedings/icpr/2006/2521/4", "title": "Pattern Recognition, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2017/1032/0/1032d268", "title": "Revisiting Cross-Channel Information Transfer for Chromatic Aberration Correction", "doi": null, "abstractUrl": 
"/proceedings-article/iccv/2017/1032d268/12OmNywxlHl", "parentPublication": { "id": "proceedings/iccv/2017/1032/0", "title": "2017 IEEE International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icoip/2010/4252/1/4252a659", "title": "Investigation of Aberration for Optical System in Integral Stereolithography System", "doi": null, "abstractUrl": "/proceedings-article/icoip/2010/4252a659/12OmNzDNtop", "parentPublication": { "id": "proceedings/icoip/2010/4252/2", "title": "Optoelectronics and Image Processing, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/greencom-ithingscpscom/2013/5046/0/06682378", "title": "Study on the Interaction of Optical Field and Transverse Acoustic Mode in Silicon Optical Fibers", "doi": null, "abstractUrl": "/proceedings-article/greencom-ithingscpscom/2013/06682378/12OmNzwZ6tL", "parentPublication": { "id": "proceedings/greencom-ithingscpscom/2013/5046/0", "title": "2013 IEEE International Conference on Green Computing and Communications (GreenCom) and IEEE Internet of Things(iThings) and IEEE Cyber, Physical and Social Computing(CPSCom)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2021/2812/0/281200c593", "title": "Universal and Flexible Optical Aberration Correction Using Deep-Prior Based Deconvolution", "doi": null, "abstractUrl": "/proceedings-article/iccv/2021/281200c593/1BmFZCckWZO", "parentPublication": { "id": "proceedings/iccv/2021/2812/0", "title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icitbs/2020/6698/0/669800a372", "title": "Study on Mitigation Performance of Hyperboloid Spherical Seismic Isolation Bearing for Long-Span High-Speed Railway Continuous Girder Bridge", 
"doi": null, "abstractUrl": "/proceedings-article/icitbs/2020/669800a372/1kuHIFyfjnq", "parentPublication": { "id": "proceedings/icitbs/2020/6698/0", "title": "2020 International Conference on Intelligent Transportation, Big Data & Smart City (ICITBS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmtma/2021/3892/0/389200a026", "title": "Design and Implementation of Optical Axis Parallelism Detection System for Laser Ranging Direct-sight Mirror", "doi": null, "abstractUrl": "/proceedings-article/icmtma/2021/389200a026/1t2nkN0adMs", "parentPublication": { "id": "proceedings/icmtma/2021/3892/0", "title": "2021 13th International Conference on Measuring Technology and Mechatronics Automation (ICMTMA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1u8pv7UOCti", "title": "2020 International Conference on Computer Science and Management Technology (ICCSMT)", "acronym": "iccsmt", "groupId": "1840604", "volume": "0", "displayVolume": "0", "year": "2020", "__typename": "ProceedingType" }, "article": { "id": "1u8pDO7YPzG", "doi": "10.1109/ICCSMT51754.2020.00009", "title": "SLAM Global Positioning Algorithm Based on Laser and Vision Fusion", "normalizedTitle": "SLAM Global Positioning Algorithm Based on Laser and Vision Fusion", "abstract": "Positioning technology is the basis for realizing fully autonomous navigation of mobile robots. Particle filtering is used as a non-parametric Bayesian filter to solve this problem. However, due to particle degradation, random sampling error, large amount of calculation, etc., particle filter positioning always has accuracy and efficiency problems. In order to improve the efficiency and accuracy of the global initial positioning of the laser, this paper proposes a global positioning algorithm fusion laser and vision. The algorithm integrates particle filtering, laser beam model, and visual observation model to enable the robot to complete faster and more accurate global positioning with a small amount of prior and motion information. The experimental results on the MIT-STATA dataset show that compared with the particle filter algorithm, this method can improve the accuracy and efficiency of the robot's global positioning.", "abstracts": [ { "abstractType": "Regular", "content": "Positioning technology is the basis for realizing fully autonomous navigation of mobile robots. Particle filtering is used as a non-parametric Bayesian filter to solve this problem. However, due to particle degradation, random sampling error, large amount of calculation, etc., particle filter positioning always has accuracy and efficiency problems. 
In order to improve the efficiency and accuracy of the global initial positioning of the laser, this paper proposes a global positioning algorithm fusion laser and vision. The algorithm integrates particle filtering, laser beam model, and visual observation model to enable the robot to complete faster and more accurate global positioning with a small amount of prior and motion information. The experimental results on the MIT-STATA dataset show that compared with the particle filter algorithm, this method can improve the accuracy and efficiency of the robot's global positioning.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Positioning technology is the basis for realizing fully autonomous navigation of mobile robots. Particle filtering is used as a non-parametric Bayesian filter to solve this problem. However, due to particle degradation, random sampling error, large amount of calculation, etc., particle filter positioning always has accuracy and efficiency problems. In order to improve the efficiency and accuracy of the global initial positioning of the laser, this paper proposes a global positioning algorithm fusion laser and vision. The algorithm integrates particle filtering, laser beam model, and visual observation model to enable the robot to complete faster and more accurate global positioning with a small amount of prior and motion information. 
The experimental results on the MIT-STATA dataset show that compared with the particle filter algorithm, this method can improve the accuracy and efficiency of the robot's global positioning.", "fno": "866800a009", "keywords": [ "Bayes Methods", "Global Positioning System", "Mobile Robots", "Object Detection", "Particle Filtering Numerical Methods", "Robot Vision", "Sensor Fusion", "SLAM Robots", "SLAM Global Positioning Algorithm", "Vision Fusion", "Positioning Technology", "Fully Autonomous Navigation", "Mobile Robots", "Particle Filtering", "Nonparametric Bayesian Filter", "Particle Degradation", "Random Sampling Error", "Particle Filter Positioning", "Efficiency Problems", "Global Initial Positioning", "Fusion Laser", "Laser Beam Model", "Visual Observation Model", "Accurate Global Positioning", "Particle Filter Algorithm", "Laser Theory", "Visualization", "Particle Beams", "Filtering", "Measurement By Laser Beam", "Filtering Algorithms", "Laser Modes", "Particle Filtering", "Multi Sensor Fusion", "SLAM", "Global Positioning" ], "authors": [ { "affiliation": "Chongqing University of Posts and Telecommunications,School of Advanced Manufacturing Engineering,Chongqing,China,400065", "fullName": "Song Feng", "givenName": "Song", "surname": "Feng", "__typename": "ArticleAuthorType" }, { "affiliation": "Chongqing University of Posts and Telecommunications,National Information Accessibility Research Center,Chongqing,China,400065", "fullName": "Denggui Ren", "givenName": "Denggui", "surname": "Ren", "__typename": "ArticleAuthorType" }, { "affiliation": "Chongqing University of Posts and Telecommunications,National Information Accessibility Research Center,Chongqing,China,400065", "fullName": "Yi Zhang", "givenName": "Yi", "surname": "Zhang", "__typename": "ArticleAuthorType" }, { "affiliation": "Chongqing University of Posts and Telecommunications,National Information Accessibility Research Center,Chongqing,China,400065", "fullName": "Chao Huang", "givenName": "Chao", 
"surname": "Huang", "__typename": "ArticleAuthorType" } ], "idPrefix": "iccsmt", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2020-11-01T00:00:00", "pubType": "proceedings", "pages": "9-15", "year": "2020", "issn": null, "isbn": "978-1-7281-8668-9", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "866800a005", "articleId": "1u8pwbuIRDq", "__typename": "AdjacentArticleType" }, "next": { "fno": "866800a016", "articleId": "1u8pxAUk87e", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/dsd/2017/2146/0/2146a252", "title": "Role of Laser-Induced IR Drops in the Occurrence of Faults: Assessment and Simulation", "doi": null, "abstractUrl": "/proceedings-article/dsd/2017/2146a252/12OmNAObbHn", "parentPublication": { "id": "proceedings/dsd/2017/2146/0", "title": "2017 Euromicro Conference on Digital System Design (DSD)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/irc/2018/4652/0/465201a232", "title": "Environment-Dependent Depth Enhancement with Multi-modal Sensor Fusion Learning", "doi": null, "abstractUrl": "/proceedings-article/irc/2018/465201a232/12OmNB6UIdn", "parentPublication": { "id": "proceedings/irc/2018/4652/0", "title": "2018 Second IEEE International Conference on Robotic Computing (IRC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/is3c/2016/3071/0/3071a850", "title": "A Laser-Based Vision System for Tire Tread Depth Inspection", "doi": null, "abstractUrl": "/proceedings-article/is3c/2016/3071a850/12OmNC8MsAk", "parentPublication": { "id": "proceedings/is3c/2016/3071/0", "title": "2016 International Symposium on Computer, Consumer and Control (IS3C)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/imccc/2016/1195/0/07774873", "title": "A New Positioning Method for Indoor Laser Navigation on Under-Determined Condition", "doi": null, "abstractUrl": "/proceedings-article/imccc/2016/07774873/12OmNwoxSaK", "parentPublication": { "id": "proceedings/imccc/2016/1195/0", "title": "2016 Sixth International Conference on Instrumentation & Measurement, Computer, Communication and Control (IMCCC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmew/2017/0560/0/08026218", "title": "Error distribution modeling of embedded sensors on smartphones by using laser ranger", "doi": null, "abstractUrl": "/proceedings-article/icmew/2017/08026218/12OmNx5piZE", "parentPublication": { "id": "proceedings/icmew/2017/0560/0", "title": "2017 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2013/2869/0/06671799", "title": "In-situ interactive modeling using a single-point laser rangefinder coupled with a new hybrid orientation tracker", "doi": null, "abstractUrl": "/proceedings-article/ismar/2013/06671799/12OmNz61dzi", "parentPublication": { "id": "proceedings/ismar/2013/2869/0", "title": "2013 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2017/11/08007248", "title": "Simultaneous Projection and Positioning of Laser Projector Pixels", "doi": null, "abstractUrl": "/journal/tg/2017/11/08007248/13rRUxASupD", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/irc/2019/9245/0/924500a246", "title": "Particle Filter Localization Using Visual Markers Based Omnidirectional Vision and a Laser Sensor", "doi": null, "abstractUrl": 
"/proceedings-article/irc/2019/924500a246/18M7f37ayLm", "parentPublication": { "id": "proceedings/irc/2019/9245/0", "title": "2019 Third IEEE International Conference on Robotic Computing (IRC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/crc/2019/4620/0/462000a196", "title": "Remote Crack Measurement Using Android Camera with Laser-Positioning Technique", "doi": null, "abstractUrl": "/proceedings-article/crc/2019/462000a196/1iTuKuNHHws", "parentPublication": { "id": "proceedings/crc/2019/4620/0", "title": "2019 4th International Conference on Control, Robotics and Cybernetics (CRC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccnea/2021/4486/0/448600a289", "title": "SLAM Method Based on Multi-Sensor Information Fusion", "doi": null, "abstractUrl": "/proceedings-article/iccnea/2021/448600a289/1yEZmK3PgZy", "parentPublication": { "id": "proceedings/iccnea/2021/4486/0", "title": "2021 International Conference on Computer Network, Electronic and Automation (ICCNEA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNAsTgX3", "title": "2009 IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops", "acronym": "cvprw", "groupId": "1001809", "volume": "0", "displayVolume": "0", "year": "2009", "__typename": "ProceedingType" }, "article": { "id": "12OmNCxtyKC", "doi": "10.1109/CVPRW.2009.5204317", "title": "Geometric video projector auto-calibration", "normalizedTitle": "Geometric video projector auto-calibration", "abstract": "In this paper we address the problem of geometric calibration of video projectors. Like in most previous methods we also use a camera that observes the projection on a planar surface. Contrary to those previous methods, we neither require the camera to be calibrated nor the presence of a calibration grid or other metric information about the scene. We thus speak of geometric auto-calibration of projectors (GAP). The fact that camera calibration is not needed increases the usability of the method and at the same time eliminates one potential source of inaccuracy, since errors in the camera calibration would otherwise inevitably propagate through to the projector calibration. Our method enjoys a good stability and gives good results when compared against existing methods as depicted by our experiments.", "abstracts": [ { "abstractType": "Regular", "content": "In this paper we address the problem of geometric calibration of video projectors. Like in most previous methods we also use a camera that observes the projection on a planar surface. Contrary to those previous methods, we neither require the camera to be calibrated nor the presence of a calibration grid or other metric information about the scene. We thus speak of geometric auto-calibration of projectors (GAP). 
The fact that camera calibration is not needed increases the usability of the method and at the same time eliminates one potential source of inaccuracy, since errors in the camera calibration would otherwise inevitably propagate through to the projector calibration. Our method enjoys a good stability and gives good results when compared against existing methods as depicted by our experiments.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In this paper we address the problem of geometric calibration of video projectors. Like in most previous methods we also use a camera that observes the projection on a planar surface. Contrary to those previous methods, we neither require the camera to be calibrated nor the presence of a calibration grid or other metric information about the scene. We thus speak of geometric auto-calibration of projectors (GAP). The fact that camera calibration is not needed increases the usability of the method and at the same time eliminates one potential source of inaccuracy, since errors in the camera calibration would otherwise inevitably propagate through to the projector calibration. 
Our method enjoys a good stability and gives good results when compared against existing methods as depicted by our experiments.", "fno": "05204317", "keywords": [ "Dispersion Relations", "Display Instrumentation", "Optical Projectors", "Geometric Video Projector Auto Calibration", "Planar Surface Projection", "Projection Display", "Video Projection", "Calibration", "Cameras", "Photometry", "Surface Fitting", "Layout", "Usability", "Stability", "Three Dimensional Displays", "Lighting", "Inverse Problems" ], "authors": [ { "affiliation": "DIRO, Université de Montréal, Canada", "fullName": "Jamil Drareni", "givenName": "Jamil", "surname": "Drareni", "__typename": "ArticleAuthorType" }, { "affiliation": "DIRO, Université de Montréal, Canada", "fullName": "Sebastien Roy", "givenName": "Sebastien", "surname": "Roy", "__typename": "ArticleAuthorType" }, { "affiliation": "INRIA Rhône-Alpes, France", "fullName": "Peter Sturm", "givenName": "Peter", "surname": "Sturm", "__typename": "ArticleAuthorType" } ], "idPrefix": "cvprw", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2009-06-01T00:00:00", "pubType": "proceedings", "pages": "39-46", "year": "2009", "issn": "2160-7508", "isbn": "978-1-4244-3994-2", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "05204316", "articleId": "12OmNqH9hk4", "__typename": "AdjacentArticleType" }, "next": { "fno": "05204318", "articleId": "12OmNAS9zxR", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/cvpr/2007/1179/0/04270464", "title": "Geometric Modeling and Calibration of Planar Multi-Projector Displays Using Rational Bezier Patches", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2007/04270464/12OmNBQkx7b", "parentPublication": { "id": "proceedings/cvpr/2007/1179/0", "title": "2007 IEEE Conference on Computer Vision and 
Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2011/0529/0/05981726", "title": "Fully automatic multi-projector calibration with an uncalibrated camera", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2011/05981726/12OmNBSBk4F", "parentPublication": { "id": "proceedings/cvprw/2011/0529/0", "title": "CVPR 2011 WORKSHOPS", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2004/2128/1/212810014", "title": "Auto-Calibration of Multi-Projector Display Walls", "doi": null, "abstractUrl": "/proceedings-article/icpr/2004/212810014/12OmNCb3fwi", "parentPublication": { "id": "proceedings/icpr/2004/2128/1", "title": "Pattern Recognition, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2011/348/0/06011885", "title": "Novel projector calibration approaches of multi-resolution display", "doi": null, "abstractUrl": "/proceedings-article/icme/2011/06011885/12OmNCd2rEL", "parentPublication": { "id": "proceedings/icme/2011/348/0", "title": "2011 IEEE International Conference on Multimedia and Expo", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2013/5053/0/06475056", "title": "Geometric calibration for a multi-camera-projector system", "doi": null, "abstractUrl": "/proceedings-article/wacv/2013/06475056/12OmNvBrgGd", "parentPublication": { "id": "proceedings/wacv/2013/5053/0", "title": "Applications of Computer Vision, IEEE Workshop on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2010/6237/0/05444797", "title": "Auto-calibration of cylindrical multi-projector systems", "doi": null, "abstractUrl": "/proceedings-article/vr/2010/05444797/12OmNviHKkd", "parentPublication": { "id": "proceedings/vr/2010/6237/0", "title": "2010 IEEE Virtual 
Reality Conference (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2017/0457/0/0457d596", "title": "Simultaneous Geometric and Radiometric Calibration of a Projector-Camera Pair", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2017/0457d596/12OmNwpGgNQ", "parentPublication": { "id": "proceedings/cvpr/2017/0457/0", "title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dimpvt/2012/4873/0/4873a464", "title": "Simple, Accurate, and Robust Projector-Camera Calibration", "doi": null, "abstractUrl": "/proceedings-article/3dimpvt/2012/4873a464/12OmNx0RIZY", "parentPublication": { "id": "proceedings/3dimpvt/2012/4873/0", "title": "2012 Second International Conference on 3D Imaging, Modeling, Processing, Visualization & Transmission", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2006/05/v1101", "title": "Asynchronous Distributed Calibration for Scalable and Reconfigurable Multi-Projector Displays", "doi": null, "abstractUrl": "/journal/tg/2006/05/v1101/13rRUwInvJ9", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2007/06/v1368", "title": "Registration Techniques for Using Imperfect and Par tially Calibrated Devices in Planar Multi-Projector Displays", "doi": null, "abstractUrl": "/journal/tg/2007/06/v1368/13rRUwInvyp", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNy5hRda", "title": "Third International Symposium on 3D Data Processing, Visualization, and Transmission (3DPVT'06)", "acronym": "3dpvt", "groupId": "1000000", "volume": "0", "displayVolume": "0", "year": "2006", "__typename": "ProceedingType" }, "article": { "id": "12OmNwI8caf", "doi": "10.1109/3DPVT.2006.125", "title": "Self-Calibration of Multiple Laser Planes for 3D Scene Reconstruction", "normalizedTitle": "Self-Calibration of Multiple Laser Planes for 3D Scene Reconstruction", "abstract": "Self-calibration is one of the most active issues concerning vision-based 3D measurements. However, in the case of the light sectioning method, there has been little research conducted on self-calibration techniques. In this paper, we study the problem of self-calibration for an active vision system which uses line lasers and a single camera. The problem can be defined as the estimation of multiple laser planes from the curves of laser reflections observed from a sequence of images captured by a single camera. The constraints of the problem can be obtained from observed intersection points between the curves. In this condition, the problem is formulated as simultaneous polynomial equations, in which the number of equations is larger than the number of variables. Approximated solutions of the equations can be computed by using Grobner bases. By refining them using nonlinear optimization, the final result can be obtained. We developed an actual 3D measurement system using the proposed method, which consists of only a laser projector with two line lasers and a single camera. Users are just required to move the projector freely so that the projected lines sweep across the surface of the scene to get the 3D shape.", "abstracts": [ { "abstractType": "Regular", "content": "Self-calibration is one of the most active issues concerning vision-based 3D measurements. 
However, in the case of the light sectioning method, there has been little research conducted on self-calibration techniques. In this paper, we study the problem of self-calibration for an active vision system which uses line lasers and a single camera. The problem can be defined as the estimation of multiple laser planes from the curves of laser reflections observed from a sequence of images captured by a single camera. The constraints of the problem can be obtained from observed intersection points between the curves. In this condition, the problem is formulated as simultaneous polynomial equations, in which the number of equations is larger than the number of variables. Approximated solutions of the equations can be computed by using Grobner bases. By refining them using nonlinear optimization, the final result can be obtained. We developed an actual 3D measurement system using the proposed method, which consists of only a laser projector with two line lasers and a single camera. Users are just required to move the projector freely so that the projected lines sweep across the surface of the scene to get the 3D shape.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Self-calibration is one of the most active issues concerning vision-based 3D measurements. However, in the case of the light sectioning method, there has been little research conducted on self-calibration techniques. In this paper, we study the problem of self-calibration for an active vision system which uses line lasers and a single camera. The problem can be defined as the estimation of multiple laser planes from the curves of laser reflections observed from a sequence of images captured by a single camera. The constraints of the problem can be obtained from observed intersection points between the curves. In this condition, the problem is formulated as simultaneous polynomial equations, in which the number of equations is larger than the number of variables. 
Approximated solutions of the equations can be computed by using Grobner bases. By refining them using nonlinear optimization, the final result can be obtained. We developed an actual 3D measurement system using the proposed method, which consists of only a laser projector with two line lasers and a single camera. Users are just required to move the projector freely so that the projected lines sweep across the surface of the scene to get the 3D shape.", "fno": "04155728", "keywords": [ "Active Vision", "Cameras", "Image Reconstruction", "Optimisation", "Polynomials", "Multiple Laser Planes", "3 D Scene Reconstruction", "Sectioning Method", "Self Calibration Techniques", "Active Vision System", "Single Camera", "Simultaneous Polynomial Equations", "Grobner Bases", "Nonlinear Optimization", "Layout", "Cameras", "Calibration", "Nonlinear Equations", "Optical Reflection", "Polynomials", "Shape", "Parameter Estimation", "Stereo Vision", "Laser Theory" ], "authors": [ { "affiliation": "Hiroshima City University, Japan", "fullName": "Ryo Furukawa", "givenName": "Ryo", "surname": "Furukawa", "__typename": "ArticleAuthorType" }, { "affiliation": "Saitama University, Japan", "fullName": "Hiroshi Kawasaki", "givenName": "Hiroshi", "surname": "Kawasaki", "__typename": "ArticleAuthorType" } ], "idPrefix": "3dpvt", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2006-06-01T00:00:00", "pubType": "proceedings", "pages": "200-207", "year": "2006", "issn": null, "isbn": "0-7695-2825-2", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "282500192", "articleId": "12OmNwpXRSy", "__typename": "AdjacentArticleType" }, "next": { "fno": "282500208", "articleId": "12OmNBgQFS5", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icmtma/2009/3583/2/3583b479", "title": "A PSO-Based Ball-Plate 
Calibration for Laser Scanner", "doi": null, "abstractUrl": "/proceedings-article/icmtma/2009/3583b479/12OmNAPBbg3", "parentPublication": { "id": "proceedings/icmtma/2009/3583/2", "title": "2009 International Conference on Measuring Technology and Mechatronics Automation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cso/2009/3605/1/3605a781", "title": "The Calibration Algorithm Between 2D Laser Range Finder and Platform", "doi": null, "abstractUrl": "/proceedings-article/cso/2009/3605a781/12OmNB8kHX6", "parentPublication": { "id": "cso/2009/3605/1", "title": "2009 International Joint Conference on Computational Sciences and Optimization", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccvw/2013/3022/0/3022a668", "title": "Targetless Calibration of a Lidar - Perspective Camera Pair", "doi": null, "abstractUrl": "/proceedings-article/iccvw/2013/3022a668/12OmNs0TKO5", "parentPublication": { "id": "proceedings/iccvw/2013/3022/0", "title": "2013 IEEE International Conference on Computer Vision Workshops (ICCVW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icece/2010/4031/0/4031c933", "title": "Research on Error Analysis and Calibration Method of Laser Scan Range Finder", "doi": null, "abstractUrl": "/proceedings-article/icece/2010/4031c933/12OmNs59JHx", "parentPublication": { "id": "proceedings/icece/2010/4031/0", "title": "Electrical and Control Engineering, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/eqec/2005/8973/0/01567293", "title": "The Q-switching instability in passively mode-locked lasers", "doi": null, "abstractUrl": "/proceedings-article/eqec/2005/01567293/12OmNvAAtv4", "parentPublication": { "id": "proceedings/eqec/2005/8973/0", "title": "2005 European Quantum Electronics Conference", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dimpvt/2012/4873/0/4873a464", "title": "Simple, Accurate, and Robust Projector-Camera Calibration", "doi": null, "abstractUrl": "/proceedings-article/3dimpvt/2012/4873a464/12OmNx0RIZY", "parentPublication": { "id": "proceedings/3dimpvt/2012/4873/0", "title": "2012 Second International Conference on 3D Imaging, Modeling, Processing, Visualization & Transmission", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/dodugc/2004/2259/0/01420869", "title": "Unsteady gas laser simulation", "doi": null, "abstractUrl": "/proceedings-article/dodugc/2004/01420869/12OmNx57HIJ", "parentPublication": { "id": "proceedings/dodugc/2004/2259/0", "title": "Proceedings. Users Group Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmtma/2011/4296/1/4296a666", "title": "Calibration Method of Three Dimensional (3D) Laser Measurement System Based on Projective Transformation", "doi": null, "abstractUrl": "/proceedings-article/icmtma/2011/4296a666/12OmNyaoDy2", "parentPublication": { "id": "proceedings/icmtma/2011/4296/1", "title": "2011 Third International Conference on Measuring Technology and Mechatronics Automation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2012/11/ttp2012112097", "title": "A Minimal Solution for the Extrinsic Calibration of a Camera and a Laser-Rangefinder", "doi": null, "abstractUrl": "/journal/tp/2012/11/ttp2012112097/13rRUy0HYL1", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/acpr/2017/3354/0/3354a168", "title": "Estimating Relative Pose between Nonoverlapping Cameras by Four Laser Pointers Based on General Camera Model", "doi": null, 
"abstractUrl": "/proceedings-article/acpr/2017/3354a168/17D45WgziSl", "parentPublication": { "id": "proceedings/acpr/2017/3354/0", "title": "2017 4th IAPR Asian Conference on Pattern Recognition (ACPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNxH9X7L", "title": "2014 Canadian Conference on Computer and Robot Vision (CRV)", "acronym": "crv", "groupId": "1001794", "volume": "0", "displayVolume": "0", "year": "2014", "__typename": "ProceedingType" }, "article": { "id": "12OmNz6iOqk", "doi": "10.1109/CRV.2014.12", "title": "Towards Full Omnidirectional Depth Sensing Using Active Vision for Small Unmanned Aerial Vehicles", "normalizedTitle": "Towards Full Omnidirectional Depth Sensing Using Active Vision for Small Unmanned Aerial Vehicles", "abstract": "Collision avoidance for small unmanned aerial vehicles operating in a variety of environments is limited by the types of available depth sensors. Currently, there are no sensors that are lightweight, function outdoors in sunlight, and cover enough of a field of view to be useful in complex environments, although many sensors excel in one or two of these areas. We present a new depth estimation method, based on concepts from multi-view stereo and structured light methods, that uses only lightweight miniature cameras and a small laser dot matrix projector to produce measurements in the range of 1-12 meters. The field of view of the system is limited only by the number and type of cameras/projectors used, and can be fully Omni directional if desired. The sensitivity of the system to design and calibration parameters is tested in simulation, and results from a functional prototype are presented.", "abstracts": [ { "abstractType": "Regular", "content": "Collision avoidance for small unmanned aerial vehicles operating in a variety of environments is limited by the types of available depth sensors. Currently, there are no sensors that are lightweight, function outdoors in sunlight, and cover enough of a field of view to be useful in complex environments, although many sensors excel in one or two of these areas. 
We present a new depth estimation method, based on concepts from multi-view stereo and structured light methods, that uses only lightweight miniature cameras and a small laser dot matrix projector to produce measurements in the range of 1-12 meters. The field of view of the system is limited only by the number and type of cameras/projectors used, and can be fully Omni directional if desired. The sensitivity of the system to design and calibration parameters is tested in simulation, and results from a functional prototype are presented.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Collision avoidance for small unmanned aerial vehicles operating in a variety of environments is limited by the types of available depth sensors. Currently, there are no sensors that are lightweight, function outdoors in sunlight, and cover enough of a field of view to be useful in complex environments, although many sensors excel in one or two of these areas. We present a new depth estimation method, based on concepts from multi-view stereo and structured light methods, that uses only lightweight miniature cameras and a small laser dot matrix projector to produce measurements in the range of 1-12 meters. The field of view of the system is limited only by the number and type of cameras/projectors used, and can be fully Omni directional if desired. 
The sensitivity of the system to design and calibration parameters is tested in simulation, and results from a functional prototype are presented.", "fno": "4337a024", "keywords": [ "Cameras", "Calibration", "Noise", "Lasers", "Robot Sensing Systems", "Estimation", "Structured Light", "Active Vision", "Depth Sensing", "Unmanned Aerial Vehicle", "Omnidirectional" ], "authors": [ { "affiliation": null, "fullName": "Adam Harmat", "givenName": "Adam", "surname": "Harmat", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Inna Sharf", "givenName": "Inna", "surname": "Sharf", "__typename": "ArticleAuthorType" } ], "idPrefix": "crv", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2014-05-01T00:00:00", "pubType": "proceedings", "pages": "24-31", "year": "2014", "issn": null, "isbn": "978-1-4799-4337-1", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "4337a016", "articleId": "12OmNCcbEiR", "__typename": "AdjacentArticleType" }, "next": { "fno": "4337a032", "articleId": "12OmNBU1jKk", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/crv/2014/4337/0/4337a364", "title": "Trinocular Spherical Stereo Vision for Indoor Surveillance", "doi": null, "abstractUrl": "/proceedings-article/crv/2014/4337a364/12OmNAGNCaY", "parentPublication": { "id": "proceedings/crv/2014/4337/0", "title": "2014 Canadian Conference on Computer and Robot Vision (CRV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccp/2011/707/0/05753121", "title": "High frame rate for 3D Time-of-Flight cameras by dynamic sensor calibration", "doi": null, "abstractUrl": "/proceedings-article/iccp/2011/05753121/12OmNAXPxZW", "parentPublication": { "id": "proceedings/iccp/2011/707/0", "title": "IEEE International Conference on Computational 
Photography (ICCP)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/csci/2015/9795/0/9795a470", "title": "3D-sensing Distributed Embedded System for People Tracking and Counting", "doi": null, "abstractUrl": "/proceedings-article/csci/2015/9795a470/12OmNAkniWG", "parentPublication": { "id": "proceedings/csci/2015/9795/0", "title": "2015 International Conference on Computational Science and Computational Intelligence (CSCI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ipsn/2017/4890/0/07944809", "title": "Demo Abstract: Panoptes: A Cheap, Extensible, Open-Source Multi-camera Tracking System", "doi": null, "abstractUrl": "/proceedings-article/ipsn/2017/07944809/12OmNArKShm", "parentPublication": { "id": "proceedings/ipsn/2017/4890/0", "title": "2017 16th ACM/IEEE International Conference on Information Processing in Sensor Networks (IPSN)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ewdts/2013/2096/0/06673163", "title": "A WSN approach to unmanned aerial surveillance of traffic anomalies: Some challenges and potential solutions", "doi": null, "abstractUrl": "/proceedings-article/ewdts/2013/06673163/12OmNrIrPg9", "parentPublication": { "id": "proceedings/ewdts/2013/2096/0", "title": "2013 11th East-West Design and Test Symposium (EWDTS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/crv/2015/1986/0/1986a039", "title": "Simultaneous Scene Reconstruction and Auto-Calibration Using Constrained Iterative Closest Point for 3D Depth Sensor Array", "doi": null, "abstractUrl": "/proceedings-article/crv/2015/1986a039/12OmNvDqsSA", "parentPublication": { "id": "proceedings/crv/2015/1986/0", "title": "2015 12th Conference on Computer and Robot Vision (CRV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/omnivis/2002/1629/0/16290121", "title": "Omnidirectional Sensing for Human Interaction", "doi": null, "abstractUrl": "/proceedings-article/omnivis/2002/16290121/12OmNxwWowY", "parentPublication": { "id": "proceedings/omnivis/2002/1629/0", "title": "Omnidirectional Vision, Workshop on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccvw/2009/4442/0/05457546", "title": "Direct approach to the self-calibration of omnidirectional cameras", "doi": null, "abstractUrl": "/proceedings-article/iccvw/2009/05457546/12OmNynJMXH", "parentPublication": { "id": "proceedings/iccvw/2009/4442/0", "title": "2009 IEEE 12th International Conference on Computer Vision Workshops, ICCV Workshops", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ssiai/2014/4053/0/06806039", "title": "Depth mapping using a low-cost camera array", "doi": null, "abstractUrl": "/proceedings-article/ssiai/2014/06806039/12OmNzBwGAh", "parentPublication": { "id": "proceedings/ssiai/2014/4053/0", "title": "2014 IEEE Southwest Symposium on Image Analysis and Interpretation (SSIAI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tq/2023/02/09736583", "title": "PPCA - Privacy-Preserving Collision Avoidance for Autonomous Unmanned Aerial Vehicles", "doi": null, "abstractUrl": "/journal/tq/2023/02/09736583/1BN1Y4DftPW", "parentPublication": { "id": "trans/tq", "title": "IEEE Transactions on Dependable and Secure Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNASrawz", "title": "2009 IEEE Virtual Reality Conference", "acronym": "vr", "groupId": "1000791", "volume": "0", "displayVolume": "0", "year": "2009", "__typename": "ProceedingType" }, "article": { "id": "12OmNzV70vz", "doi": "10.1109/VR.2009.4810996", "title": "A Distributed Cooperative Framework for Continuous Multi-Projector Pose Estimation", "normalizedTitle": "A Distributed Cooperative Framework for Continuous Multi-Projector Pose Estimation", "abstract": "We present a novel calibration framework for multi-projector displays that achieves continuous geometric calibration by estimating and refining the poses of all projectors in an ongoing fashion during actual display use. Our framework provides scalability by operating as a distributed system of \"intelligent\" projector units: projectors augmented with rigidly-mounted cameras, and paired with dedicated computers. Each unit interacts asynchronously with its peers, leveraging their combined computational power to cooperatively estimate the poses of all of the projectors. In cases where the projection surface is static, our system is able to continuously refine all of the projector poses, even when they change simultaneously.", "abstracts": [ { "abstractType": "Regular", "content": "We present a novel calibration framework for multi-projector displays that achieves continuous geometric calibration by estimating and refining the poses of all projectors in an ongoing fashion during actual display use. Our framework provides scalability by operating as a distributed system of \"intelligent\" projector units: projectors augmented with rigidly-mounted cameras, and paired with dedicated computers. Each unit interacts asynchronously with its peers, leveraging their combined computational power to cooperatively estimate the poses of all of the projectors. 
In cases where the projection surface is static, our system is able to continuously refine all of the projector poses, even when they change simultaneously.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We present a novel calibration framework for multi-projector displays that achieves continuous geometric calibration by estimating and refining the poses of all projectors in an ongoing fashion during actual display use. Our framework provides scalability by operating as a distributed system of \"intelligent\" projector units: projectors augmented with rigidly-mounted cameras, and paired with dedicated computers. Each unit interacts asynchronously with its peers, leveraging their combined computational power to cooperatively estimate the poses of all of the projectors. In cases where the projection surface is static, our system is able to continuously refine all of the projector poses, even when they change simultaneously.", "fno": "04810996", "keywords": [ "Calibration", "Computer Graphics", "Computer Vision", "Image Sensors", "Pose Estimation", "Distributed Cooperative Framework", "Continuous Multi Projector Pose Estimation", "Multi Projector Displays", "Continuous Geometric Calibration", "Calibration", "Computer Displays", "Shape", "Computer Graphics", "Virtual Reality", "Smart Cameras", "Distributed Computing", "Robustness", "Optical Filters", "Image Motion Analysis", "Projector Displays", "Continuous Calibration", "I 3 3 Computer Graphics Picture Image Generation Display Algorithms", "I 3 7 Computer Graphics Three Dimensional Graphics And Realism Virtual Reality" ], "authors": [ { "affiliation": "University of North Carolina at Chapel Hill tmjohns@cs.unc.edu", "fullName": "Tyler Johnson", "givenName": "Tyler", "surname": "Johnson", "__typename": "ArticleAuthorType" }, { "affiliation": "University of North Carolina at Chapel Hill welch@cs.unc.edu", "fullName": "Greg Welch", "givenName": "Greg", "surname": "Welch", "__typename": 
"ArticleAuthorType" }, { "affiliation": "University of North Carolina at Chapel Hill fuchs@cs.unc.edu", "fullName": "Henry Fuchs", "givenName": "Henry", "surname": "Fuchs", "__typename": "ArticleAuthorType" }, { "affiliation": "University of North Carolina at Chapel Hill elaforc@cs.unc.edu", "fullName": "Eric la Force", "givenName": "Eric", "surname": "la Force", "__typename": "ArticleAuthorType" }, { "affiliation": "University of North Carolina at Chapel Hill herman@cs.unc.edu", "fullName": "Herman Towles", "givenName": "Herman", "surname": "Towles", "__typename": "ArticleAuthorType" } ], "idPrefix": "vr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2009-03-01T00:00:00", "pubType": "proceedings", "pages": "35-42", "year": "2009", "issn": "1087-8270", "isbn": "978-1-4244-3943-0", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "04810995", "articleId": "1t2n7unazjW", "__typename": "AdjacentArticleType" }, "next": { "fno": "04810997", "articleId": "1t2n7JaRIQw", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/ismar/2016/3641/0/3641a063", "title": "Practical and Precise Projector-Camera Calibration", "doi": null, "abstractUrl": "/proceedings-article/ismar/2016/3641a063/12OmNB7cjhR", "parentPublication": { "id": "proceedings/ismar/2016/3641/0", "title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2001/1272/2/127220504", "title": "A Self-Correcting Projector", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2001/127220504/12OmNB8Cj43", "parentPublication": { "id": "proceedings/cvpr/2001/1272/2", "title": "Proceedings of the 2001 IEEE Computer Society Conference on Computer Vision and Pattern Recognition. 
CVPR 2001", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2011/0529/0/05981726", "title": "Fully automatic multi-projector calibration with an uncalibrated camera", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2011/05981726/12OmNBSBk4F", "parentPublication": { "id": "proceedings/cvprw/2011/0529/0", "title": "CVPR 2011 WORKSHOPS", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2009/3994/0/05204317", "title": "Geometric video projector auto-calibration", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2009/05204317/12OmNCxtyKC", "parentPublication": { "id": "proceedings/cvprw/2009/3994/0", "title": "2009 IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/svr/2012/4725/0/4725a026", "title": "FastFusion: A Scalable Multi-projector System", "doi": null, "abstractUrl": "/proceedings-article/svr/2012/4725a026/12OmNwLOYQU", "parentPublication": { "id": "proceedings/svr/2012/4725/0", "title": "2012 14th Symposium on Virtual and Augmented Reality", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dimpvt/2012/4873/0/4873a464", "title": "Simple, Accurate, and Robust Projector-Camera Calibration", "doi": null, "abstractUrl": "/proceedings-article/3dimpvt/2012/4873a464/12OmNx0RIZY", "parentPublication": { "id": "proceedings/3dimpvt/2012/4873/0", "title": "2012 Second International Conference on 3D Imaging, Modeling, Processing, Visualization & Transmission", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2006/05/v1101", "title": "Asynchronous Distributed Calibration for Scalable and Reconfigurable Multi-Projector Displays", "doi": null, "abstractUrl": "/journal/tg/2006/05/v1101/13rRUwInvJ9", 
"parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2005/12/i1845", "title": "Autocalibration of a Projector-Camera System", "doi": null, "abstractUrl": "/journal/tp/2005/12/i1845/13rRUxASuiM", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/crv/2018/6481/0/648101a190", "title": "Multi-projector Resolution Enhancement Through Biased Interpolation", "doi": null, "abstractUrl": "/proceedings-article/crv/2018/648101a190/17D45XacGiu", "parentPublication": { "id": "proceedings/crv/2018/6481/0", "title": "2018 15th Conference on Computer and Robot Vision (CRV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/11/09523844", "title": "Directionally Decomposing Structured Light for Projector Calibration", "doi": null, "abstractUrl": "/journal/tg/2021/11/09523844/1wpqmnzDSzm", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNynsbCZ", "title": "2007 6th IEEE and ACM International Symposium on Mixed and Augmented Reality", "acronym": "ismar", "groupId": "1000465", "volume": "0", "displayVolume": "0", "year": "2007", "__typename": "ProceedingType" }, "article": { "id": "12OmNzXnNDt", "doi": "10.1109/ISMAR.2007.4538820", "title": "Laser Pointer Tracking in Projector-Augmented Architectural Environments", "normalizedTitle": "Laser Pointer Tracking in Projector-Augmented Architectural Environments", "abstract": "We present a system that employs a custom-built pan-tilt-zoom camera for laser pointer tracking in arbitrary real environments. Once placed in a room, it carries out a fully automatic self-registration, registrations of projectors, and sampling of surface parameters, such as geometry and reflectivity. After these steps, it can be used for tracking a laser spot on the surface as well as an LED marker in 3D space, using inter-playing fish-eye context and controllable detail cameras. The captured surface information can be used for masking out areas that are problematic for laser pointer tracking, and for guiding geometric and radiometric image correction techniques that enable a projector-based augmentation on arbitrary surfaces. We describe a distributed software framework that couples laser pointer tracking for interaction, projector-based AR as well as video see-through AR for visualizations, with the domain specific functionality of existing desktop tools for architectural planning, simulation and building surveying.", "abstracts": [ { "abstractType": "Regular", "content": "We present a system that employs a custom-built pan-tilt-zoom camera for laser pointer tracking in arbitrary real environments. Once placed in a room, it carries out a fully automatic self-registration, registrations of projectors, and sampling of surface parameters, such as geometry and reflectivity. 
After these steps, it can be used for tracking a laser spot on the surface as well as an LED marker in 3D space, using inter-playing fish-eye context and controllable detail cameras. The captured surface information can be used for masking out areas that are problematic for laser pointer tracking, and for guiding geometric and radiometric image correction techniques that enable a projector-based augmentation on arbitrary surfaces. We describe a distributed software framework that couples laser pointer tracking for interaction, projector-based AR as well as video see-through AR for visualizations, with the domain specific functionality of existing desktop tools for architectural planning, simulation and building surveying.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We present a system that employs a custom-built pan-tilt-zoom camera for laser pointer tracking in arbitrary real environments. Once placed in a room, it carries out a fully automatic self-registration, registrations of projectors, and sampling of surface parameters, such as geometry and reflectivity. After these steps, it can be used for tracking a laser spot on the surface as well as an LED marker in 3D space, using inter-playing fish-eye context and controllable detail cameras. The captured surface information can be used for masking out areas that are problematic for laser pointer tracking, and for guiding geometric and radiometric image correction techniques that enable a projector-based augmentation on arbitrary surfaces. 
We describe a distributed software framework that couples laser pointer tracking for interaction, projector-based AR as well as video see-through AR for visualizations, with the domain specific functionality of existing desktop tools for architectural planning, simulation and building surveying.", "fno": "04538820", "keywords": [ "Augmented Reality", "Computer Vision", "Data Visualisation", "Image Sensors", "Optical Projectors", "Laser Pointer Tracking", "Projector Augmented Architectural Environments", "Pan Tilt Zoom Camera", "Fully Automatic Self Registration", "Radiometric Image Correction Techniques", "Distributed Software Framework", "Architectural Planning", "Building Surveying", "Surface Emitting Lasers", "Cameras", "Sampling Methods", "Geometrical Optics", "Reflectivity", "Light Emitting Diodes", "Automatic Control", "Optical Control", "Radiometry", "Software Tools", "I 3 3 Computer Graphics Picture Image Generation Digitizing And Scanning", "I 4 8 Image Processing And Computer Vision Scene Analysis Tracking", "J 5 Computer Applications Arts And Humanities Architecture" ], "authors": [ { "affiliation": "Bauhaus-University Weimar. email: Daniel.Kurz@medien.uni-weimar.de", "fullName": "Daniel Kurz", "givenName": "Daniel", "surname": "Kurz", "__typename": "ArticleAuthorType" }, { "affiliation": "Bauhaus-University Weimar. email: Ferry.Haentsch@medien.uni-weimar.de", "fullName": "Ferry Hantsch", "givenName": "Ferry", "surname": "Hantsch", "__typename": "ArticleAuthorType" }, { "affiliation": "Bauhaus-University Weimar. email: Max.Grosse@medien.uni-weimar.de", "fullName": "Max Grosse", "givenName": "Max", "surname": "Grosse", "__typename": "ArticleAuthorType" }, { "affiliation": "Bauhaus-University Weimar. email: Alexander.Schiewe@medien.uni-weimar.de", "fullName": "Alexander Schiewe", "givenName": "Alexander", "surname": "Schiewe", "__typename": "ArticleAuthorType" }, { "affiliation": "Bauhaus-University Weimar. 
email: Oliver.Bimber@medien.uni-weimar.de", "fullName": "Oliver Bimber", "givenName": "Oliver", "surname": "Bimber", "__typename": "ArticleAuthorType" } ], "idPrefix": "ismar", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2007-11-01T00:00:00", "pubType": "proceedings", "pages": "19-26", "year": "2007", "issn": null, "isbn": "978-1-4244-1749-0", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": null, "next": { "fno": "04538822", "articleId": "12OmNAoUTsH", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/achi/2008/3086/0/3086a039", "title": "NALP: Navigating Assistant for Large Display Presentation Using Laser Pointer", "doi": null, "abstractUrl": "/proceedings-article/achi/2008/3086a039/12OmNBQ2VYi", "parentPublication": { "id": "proceedings/achi/2008/3086/0", "title": "International Conference on Advances in Computer-Human Interaction", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdew/2005/2657/0/22851261", "title": "Unified Presentation Contents Retrieval Using Laser Pointer Information", "doi": null, "abstractUrl": "/proceedings-article/icdew/2005/22851261/12OmNrFkeQw", "parentPublication": { "id": "proceedings/icdew/2005/2657/0", "title": "21st International Conference on Data Engineering Workshops (ICDEW'05)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmew/2015/7079/0/07169844", "title": "Laser-pointer human computer interaction system", "doi": null, "abstractUrl": "/proceedings-article/icmew/2015/07169844/12OmNvkGW7B", "parentPublication": { "id": "proceedings/icmew/2015/7079/0", "title": "2015 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/3dim/2003/1991/0/19910491", "title": "Interactive Shape Acquisition using Marker Attached Laser Projector", "doi": null, "abstractUrl": "/proceedings-article/3dim/2003/19910491/12OmNvq5jyo", "parentPublication": { "id": "proceedings/3dim/2003/1991/0", "title": "3D Digital Imaging and Modeling, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ism/2006/2746/0/274600927", "title": "Treatment of Laser Pointer and Speech Information in Lecture Scene Retrieval", "doi": null, "abstractUrl": "/proceedings-article/ism/2006/274600927/12OmNwE9ODx", "parentPublication": { "id": "proceedings/ism/2006/2746/0", "title": "Eighth IEEE International Symposium on Multimedia (ISM'06)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2001/1143/1/114310247", "title": "Smarter Presentations: Exploiting Homography in Camera-Projector Systems", "doi": null, "abstractUrl": "/proceedings-article/iccv/2001/114310247/12OmNwHyZXS", "parentPublication": { "id": "proceedings/iccv/2001/1143/1", "title": "Computer Vision, IEEE International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ems/2009/3886/0/3886a316", "title": "Comparison Between Single and Dual Laser Pointer Applications in Digital Scanning System", "doi": null, "abstractUrl": "/proceedings-article/ems/2009/3886a316/12OmNzXFoAF", "parentPublication": { "id": "proceedings/ems/2009/3886/0", "title": "Computer Modeling and Simulation, UKSIM European Symposium on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2004/2158/1/01315067", "title": "Making one object look like another: controlling appearance using a projector-camera system", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2004/01315067/12OmNzcPAjA", "parentPublication": { "id": 
"proceedings/cvpr/2004/2158/1", "title": "Proceedings of the 2004 IEEE Computer Society Conference on Computer Vision and Pattern Recognition, 2004. CVPR 2004.", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iswc/2003/2034/0/20340053", "title": "WACL: Supporting Telecommunications Using Wearable Active Camera with Laser Pointer", "doi": null, "abstractUrl": "/proceedings-article/iswc/2003/20340053/12OmNzdGnsc", "parentPublication": { "id": "proceedings/iswc/2003/2034/0", "title": "Seventh IEEE International Symposium on Wearable Computers, 2003. Proceedings.", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2017/11/08007248", "title": "Simultaneous Projection and Positioning of Laser Projector Pixels", "doi": null, "abstractUrl": "/journal/tg/2017/11/08007248/13rRUxASupD", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNvAiSpZ", "title": "2015 IEEE Virtual Reality (VR)", "acronym": "vr", "groupId": "1000791", "volume": "0", "displayVolume": "0", "year": "2015", "__typename": "ProceedingType" }, "article": { "id": "12OmNCdk2JE", "doi": "10.1109/VR.2015.7223330", "title": "Robust high-speed tracking against illumination changes for dynamic projection mapping", "normalizedTitle": "Robust high-speed tracking against illumination changes for dynamic projection mapping", "abstract": "Dynamic Projection Mapping, projection-based AR for a moving object without misalignment by a high-speed optical axis controller by rotational mirrors, has a trade-off between stability of highspeed tracking and high visibility for a variety of projection content. In this paper, a system that will provide robust high-speed tracking without any markers on objects against illumination changes, including projected images, is realized by introducing a retroreflective background with the optical axis controller for Dynamic Projection Mapping. Low-intensity episcopic light is projected with Projection Mapping content, and the light reflected from the background is sufficient for high-speed cameras but is nearly invisible to observers. In addition, we introduce adaptive windows and peripheral weighted erosion to maintain accurate high-speed tracking. Under low light conditions, we examined the visual performance of diffuse reflection and retroreflection from both camera and observer viewpoints. We evaluated stability relative to illumination and disturbance caused by non-target objects. 
Dynamic Projection Mapping with partially well-lit content in a low-intensity light environment is realized by our proposed system.", "abstracts": [ { "abstractType": "Regular", "content": "Dynamic Projection Mapping, projection-based AR for a moving object without misalignment by a high-speed optical axis controller by rotational mirrors, has a trade-off between stability of highspeed tracking and high visibility for a variety of projection content. In this paper, a system that will provide robust high-speed tracking without any markers on objects against illumination changes, including projected images, is realized by introducing a retroreflective background with the optical axis controller for Dynamic Projection Mapping. Low-intensity episcopic light is projected with Projection Mapping content, and the light reflected from the background is sufficient for high-speed cameras but is nearly invisible to observers. In addition, we introduce adaptive windows and peripheral weighted erosion to maintain accurate high-speed tracking. Under low light conditions, we examined the visual performance of diffuse reflection and retroreflection from both camera and observer viewpoints. We evaluated stability relative to illumination and disturbance caused by non-target objects. Dynamic Projection Mapping with partially well-lit content in a low-intensity light environment is realized by our proposed system.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Dynamic Projection Mapping, projection-based AR for a moving object without misalignment by a high-speed optical axis controller by rotational mirrors, has a trade-off between stability of highspeed tracking and high visibility for a variety of projection content. 
In this paper, a system that will provide robust high-speed tracking without any markers on objects against illumination changes, including projected images, is realized by introducing a retroreflective background with the optical axis controller for Dynamic Projection Mapping. Low-intensity episcopic light is projected with Projection Mapping content, and the light reflected from the background is sufficient for high-speed cameras but is nearly invisible to observers. In addition, we introduce adaptive windows and peripheral weighted erosion to maintain accurate high-speed tracking. Under low light conditions, we examined the visual performance of diffuse reflection and retroreflection from both camera and observer viewpoints. We evaluated stability relative to illumination and disturbance caused by non-target objects. Dynamic Projection Mapping with partially well-lit content in a low-intensity light environment is realized by our proposed system.", "fno": "07223330", "keywords": [ "Cameras", "High Speed Optical Techniques", "Optical Imaging", "Lighting", "Target Tracking", "Optical Control", "Mirrors", "I 4 8 Image Processing And Computer Vision Scene Analysis Tracking", "H 5 1 Information Interfaces And Presentation Multimedia Information Systems Artificial Augmented And Virtual Realities" ], "authors": [ { "affiliation": "The University of Tokyo", "fullName": "Tomohiro Sueishi", "givenName": "Tomohiro", "surname": "Sueishi", "__typename": "ArticleAuthorType" }, { "affiliation": "Gunma University", "fullName": "Hiromasa Oku", "givenName": "Hiromasa", "surname": "Oku", "__typename": "ArticleAuthorType" }, { "affiliation": "The University of Tokyo", "fullName": "Masatoshi Ishikawa", "givenName": "Masatoshi", "surname": "Ishikawa", "__typename": "ArticleAuthorType" } ], "idPrefix": "vr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2015-03-01T00:00:00", "pubType": "proceedings", "pages": "97-104", "year": 
"2015", "issn": null, "isbn": "978-1-4799-1727-3", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "07223329", "articleId": "12OmNvxsSSu", "__typename": "AdjacentArticleType" }, "next": { "fno": "07223331", "articleId": "12OmNx2QUJN", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icig/2013/5050/0/5050a761", "title": "Holographic Projection Using Converging Spherical Wave Illumination", "doi": null, "abstractUrl": "/proceedings-article/icig/2013/5050a761/12OmNASraPv", "parentPublication": { "id": "proceedings/icig/2013/5050/0", "title": "2013 Seventh International Conference on Image and Graphics (ICIG)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2012/4711/0/4711a699", "title": "Lumipen: Projection-Based Mixed Reality for Dynamic Objects", "doi": null, "abstractUrl": "/proceedings-article/icme/2012/4711a699/12OmNBE7MtC", "parentPublication": { "id": "proceedings/icme/2012/4711/0", "title": "2012 IEEE International Conference on Multimedia and Expo", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2013/0015/0/06607637", "title": "Acitve projection ar using high-speed optical axis control and appearance estimation algorithm", "doi": null, "abstractUrl": "/proceedings-article/icme/2013/06607637/12OmNBkP3vY", "parentPublication": { "id": "proceedings/icme/2013/0015/0", "title": "2013 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccvw/2009/4442/0/05457545", "title": "Projection through quadric mirrors made faster", "doi": null, "abstractUrl": "/proceedings-article/iccvw/2009/05457545/12OmNyNQSKT", "parentPublication": { "id": "proceedings/iccvw/2009/4442/0", "title": "2009 IEEE 12th 
International Conference on Computer Vision Workshops, ICCV Workshops", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2017/03/07516689", "title": "Dynamic Projection Mapping onto Deforming Non-Rigid Surface Using Deformable Dot Cluster Marker", "doi": null, "abstractUrl": "/journal/tg/2017/03/07516689/13rRUwdIOUR", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2011/03/ttp2011030446", "title": "Ray Projection for Recovering Projective Transformations and Illumination Changes", "doi": null, "abstractUrl": "/journal/tp/2011/03/ttp2011030446/13rRUwkfB0s", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2018/1737/0/08486514", "title": "Portable Lumipen: Dynamic SAR in Your Hand", "doi": null, "abstractUrl": "/proceedings-article/icme/2018/08486514/14jQfRlKNy4", "parentPublication": { "id": "proceedings/icme/2018/1737/0", "title": "2018 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08798245", "title": "Shadowless Projector: Suppressing Shadows in Projection Mapping with Micro Mirror Array Plate", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08798245/1cI6ar8DdyE", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2020/1331/0/09102813", "title": "Projection Mapping System To A Widely Dynamic Sphere With Circumferential Markers", "doi": null, "abstractUrl": 
"/proceedings-article/icme/2020/09102813/1kwqWza3GI8", "parentPublication": { "id": "proceedings/icme/2020/1331/0", "title": "2020 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2020/8508/0/850800a174", "title": "Real-Time Adaptive Color Correction in Dynamic Projection Mapping", "doi": null, "abstractUrl": "/proceedings-article/ismar/2020/850800a174/1pysyl9FDhu", "parentPublication": { "id": "proceedings/ismar/2020/8508/0", "title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNz5JC0v", "title": "2010 International Conference on Digital Image Computing: Techniques and Applications", "acronym": "dicta", "groupId": "1001512", "volume": "0", "displayVolume": "0", "year": "2010", "__typename": "ProceedingType" }, "article": { "id": "12OmNvHGryH", "doi": "10.1109/DICTA.2010.51", "title": "Non-rigid Face Tracking Using Short Track-Life Features", "normalizedTitle": "Non-rigid Face Tracking Using Short Track-Life Features", "abstract": "We define a “generic” non-rigid face tracker as any system that exhibits robustness to changes in illumination, expression and viewpoint during the tracking of facial land-marks in a video sequence. A popular approach to the problem is to detect/track an ensemble of local features over time whilst enforcing they conform to a global non-rigid shape prior. In general these approaches employ a strategy that assumes: (i) the feature points being tracked, ignoring occlusion, should roughly correspond across all frames, and (ii) that these feature points should correspond to the landmark points defining the non-rigid face shape model. In this paper, we challenge these two assumptions through the novel application of interest point detectors and descriptors (e.g. SIFT & SURF). We motivate this strategy by demonstrating empirically that salient features on the face for tracking on average only have a “track-life” of a few frames and rarely co-occur at the vertex points of the shape model. Due to the short track-life of these features we propose that new features should be detected at every frame rather than tracked from previous frames. By employing such a strategy we demonstrate that our proposed method has natural invariance to large discontinuous changes in motion. 
We additionally propose the employment of an online feature registration step that is able to rectify error accumulation and provides fast recovery from occlusion during tracking.", "abstracts": [ { "abstractType": "Regular", "content": "We define a “generic” non-rigid face tracker as any system that exhibits robustness to changes in illumination, expression and viewpoint during the tracking of facial land-marks in a video sequence. A popular approach to the problem is to detect/track an ensemble of local features over time whilst enforcing they conform to a global non-rigid shape prior. In general these approaches employ a strategy that assumes: (i) the feature points being tracked, ignoring occlusion, should roughly correspond across all frames, and (ii) that these feature points should correspond to the landmark points defining the non-rigid face shape model. In this paper, we challenge these two assumptions through the novel application of interest point detectors and descriptors (e.g. SIFT & SURF). We motivate this strategy by demonstrating empirically that salient features on the face for tracking on average only have a “track-life” of a few frames and rarely co-occur at the vertex points of the shape model. Due to the short track-life of these features we propose that new features should be detected at every frame rather than tracked from previous frames. By employing such a strategy we demonstrate that our proposed method has natural invariance to large discontinuous changes in motion. We additionally propose the employment of an online feature registration step that is able to rectify error accumulation and provides fast recovery from occlusion during tracking.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We define a “generic” non-rigid face tracker as any system that exhibits robustness to changes in illumination, expression and viewpoint during the tracking of facial land-marks in a video sequence. 
A popular approach to the problem is to detect/track an ensemble of local features over time whilst enforcing they conform to a global non-rigid shape prior. In general these approaches employ a strategy that assumes: (i) the feature points being tracked, ignoring occlusion, should roughly correspond across all frames, and (ii) that these feature points should correspond to the landmark points defining the non-rigid face shape model. In this paper, we challenge these two assumptions through the novel application of interest point detectors and descriptors (e.g. SIFT & SURF). We motivate this strategy by demonstrating empirically that salient features on the face for tracking on average only have a “track-life” of a few frames and rarely co-occur at the vertex points of the shape model. Due to the short track-life of these features we propose that new features should be detected at every frame rather than tracked from previous frames. By employing such a strategy we demonstrate that our proposed method has natural invariance to large discontinuous changes in motion. 
We additionally propose the employment of an online feature registration step that is able to rectify error accumulation and provides fast recovery from occlusion during tracking.", "fno": "4271a241", "keywords": [ "Non Rigid Face Tracking" ], "authors": [ { "affiliation": null, "fullName": "Simon Lucey", "givenName": "Simon", "surname": "Lucey", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Jun-Su Jang", "givenName": "Jun-Su", "surname": "Jang", "__typename": "ArticleAuthorType" } ], "idPrefix": "dicta", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2010-12-01T00:00:00", "pubType": "proceedings", "pages": "241-248", "year": "2010", "issn": null, "isbn": "978-0-7695-4271-3", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "4271a235", "articleId": "12OmNAYoKiT", "__typename": "AdjacentArticleType" }, "next": { "fno": "4271a249", "articleId": "12OmNBNM8UD", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/3dimpvt/2011/4369/0/4369a116", "title": "A Feature-Preserved Canonical Form for Non-rigid 3D Meshes", "doi": null, "abstractUrl": "/proceedings-article/3dimpvt/2011/4369a116/12OmNAkWvhH", "parentPublication": { "id": "proceedings/3dimpvt/2011/4369/0", "title": "2011 International Conference on 3D Imaging, Modeling, Processing, Visualization and Transmission", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/1995/7042/0/70420434", "title": "A geometric criterion for shape-based non-rigid correspondence", "doi": null, "abstractUrl": "/proceedings-article/iccv/1995/70420434/12OmNBCHMJS", "parentPublication": { "id": "proceedings/iccv/1995/7042/0", "title": "Computer Vision, IEEE International Conference on", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "proceedings/cvpr/1996/7258/0/72580245", "title": "Non-Rigid Matching Using Demons", "doi": null, "abstractUrl": "/proceedings-article/cvpr/1996/72580245/12OmNBSjJ1s", "parentPublication": { "id": "proceedings/cvpr/1996/7258/0", "title": "Proceedings CVPR IEEE Computer Society Conference on Computer Vision and Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/1997/7822/0/78220897", "title": "Motion of Disturbances: Detection and Tracking of multi-Body non-Rigid Motion", "doi": null, "abstractUrl": "/proceedings-article/cvpr/1997/78220897/12OmNqyUUvS", "parentPublication": { "id": "proceedings/cvpr/1997/7822/0", "title": "Proceedings of IEEE Computer Society Conference on Computer Vision and Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/csie/2009/3507/2/3507b522", "title": "Non-rigid Medical Image Registration Based on the Thin-Plate Spline Algorithm", "doi": null, "abstractUrl": "/proceedings-article/csie/2009/3507b522/12OmNrnJ6JI", "parentPublication": { "id": "proceedings/csie/2009/3507/2", "title": "Computer Science and Information Engineering, World Congress on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdar/2011/4520/0/4520a987", "title": "A Non-rigid Feature Extraction Method for Shape Recognition", "doi": null, "abstractUrl": "/proceedings-article/icdar/2011/4520a987/12OmNvA1hDg", "parentPublication": { "id": "proceedings/icdar/2011/4520/0", "title": "2011 International Conference on Document Analysis and Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/dicta/2010/4271/0/4271a058", "title": "Adaptive Non-rigid Object Tracking by Fusing Visual and Motional Descriptors", "doi": null, "abstractUrl": 
"/proceedings-article/dicta/2010/4271a058/12OmNwDj1ax", "parentPublication": { "id": "proceedings/dicta/2010/4271/0", "title": "2010 International Conference on Digital Image Computing: Techniques and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2010/4109/0/4109a197", "title": "Estimation of Non-rigid Surface Deformation Using Developable Surface Model", "doi": null, "abstractUrl": "/proceedings-article/icpr/2010/4109a197/12OmNy6qfPR", "parentPublication": { "id": "proceedings/icpr/2010/4109/0", "title": "Pattern Recognition, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dimpvt/2012/4873/0/4873a254", "title": "Pinhole-to-Projection Pyramid Subtraction for Reconstructing Non-rigid Objects from Range Images", "doi": null, "abstractUrl": "/proceedings-article/3dimpvt/2012/4873a254/12OmNyvGymV", "parentPublication": { "id": "proceedings/3dimpvt/2012/4873/0", "title": "2012 Second International Conference on 3D Imaging, Modeling, Processing, Visualization & Transmission", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2010/4109/0/4109c274", "title": "Non-rigid Image Registration for Historical Manuscript Restoration", "doi": null, "abstractUrl": "/proceedings-article/icpr/2010/4109c274/12OmNyxXlyO", "parentPublication": { "id": "proceedings/icpr/2010/4109/0", "title": "Pattern Recognition, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "17D45VtKir6", "title": "2018 24th International Conference on Pattern Recognition (ICPR)", "acronym": "icpr", "groupId": "1000545", "volume": "0", "displayVolume": "0", "year": "2018", "__typename": "ProceedingType" }, "article": { "id": "17D45WHONoz", "doi": "10.1109/ICPR.2018.8546201", "title": "Non-rigid Reconstruction with a Single Moving RGB-D Camera", "normalizedTitle": "Non-rigid Reconstruction with a Single Moving RGB-D Camera", "abstract": "We present a novel non-rigid reconstruction method using a moving RGB-D camera. Current approaches use only non-rigid part of the scene and completely ignore the rigid background. Non-rigid parts often lack sufficient geometric and photometric information for tracking large frame-to-frame motion. Our approach uses camera pose estimated from the rigid background for foreground tracking. This enables robust foreground tracking in situations where large frame-to-frame motion occurs. Moreover, we are proposing a multi-scale deformation graph which improves non-rigid tracking without compromising the quality of the reconstruction. We are also contributing a synthetic dataset which is made publically available for evaluating non-rigid reconstruction methods. The dataset provides frame-by-frame ground truth geometry of the scene, the camera trajectory, and masks for background foreground. Experimental results show that our approach is more robust in handling larger frame-to-frame motions and provides better reconstruction compared to state-of-the-art approaches.", "abstracts": [ { "abstractType": "Regular", "content": "We present a novel non-rigid reconstruction method using a moving RGB-D camera. Current approaches use only non-rigid part of the scene and completely ignore the rigid background. Non-rigid parts often lack sufficient geometric and photometric information for tracking large frame-to-frame motion. Our approach uses camera pose estimated from the rigid background for foreground tracking. 
This enables robust foreground tracking in situations where large frame-to-frame motion occurs. Moreover, we are proposing a multi-scale deformation graph which improves non-rigid tracking without compromising the quality of the reconstruction. We are also contributing a synthetic dataset which is made publically available for evaluating non-rigid reconstruction methods. The dataset provides frame-by-frame ground truth geometry of the scene, the camera trajectory, and masks for background foreground. Experimental results show that our approach is more robust in handling larger frame-to-frame motions and provides better reconstruction compared to state-of-the-art approaches.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We present a novel non-rigid reconstruction method using a moving RGB-D camera. Current approaches use only non-rigid part of the scene and completely ignore the rigid background. Non-rigid parts often lack sufficient geometric and photometric information for tracking large frame-to-frame motion. Our approach uses camera pose estimated from the rigid background for foreground tracking. This enables robust foreground tracking in situations where large frame-to-frame motion occurs. Moreover, we are proposing a multi-scale deformation graph which improves non-rigid tracking without compromising the quality of the reconstruction. We are also contributing a synthetic dataset which is made publically available for evaluating non-rigid reconstruction methods. The dataset provides frame-by-frame ground truth geometry of the scene, the camera trajectory, and masks for background foreground. 
Experimental results show that our approach is more robust in handling larger frame-to-frame motions and provides better reconstruction compared to state-of-the-art approaches.", "fno": "08546201", "keywords": [ "Cameras", "Image Colour Analysis", "Image Motion Analysis", "Image Reconstruction", "Image Sequences", "Object Detection", "Object Tracking", "Pose Estimation", "Rigid Background", "Robust Foreground Tracking", "Frame To Frame Motion", "Nonrigid Tracking", "Frame By Frame Ground Truth Geometry", "Camera Trajectory", "Background Foreground", "Single Moving RGB D", "Nonrigid Part", "Photometric Information", "Nonrigid Reconstruction Method", "Geometric Information", "Image Reconstruction", "Tracking", "Strain", "Cameras", "Image Segmentation", "Three Dimensional Displays", "Geometry" ], "authors": [ { "affiliation": "CSIRO Data61, Autonomous Systems Laboratory, Brisbane, Australia", "fullName": "Shafeeq Elanattil", "givenName": "Shafeeq", "surname": "Elanattil", "__typename": "ArticleAuthorType" }, { "affiliation": "CSIRO Data61, Autonomous Systems Laboratory, Brisbane, Australia", "fullName": "Peyman Moghadam", "givenName": "Peyman", "surname": "Moghadam", "__typename": "ArticleAuthorType" }, { "affiliation": "Queensland University of Technology, Brisbane, Australia", "fullName": "Sridha Sridharan", "givenName": "Sridha", "surname": "Sridharan", "__typename": "ArticleAuthorType" }, { "affiliation": "Queensland University of Technology, Brisbane, Australia", "fullName": "Clinton Fookes", "givenName": "Clinton", "surname": "Fookes", "__typename": "ArticleAuthorType" }, { "affiliation": "CSIRO Data61, Autonomous Systems Laboratory, Brisbane, Australia", "fullName": "Mark Cox", "givenName": "Mark", "surname": "Cox", "__typename": "ArticleAuthorType" } ], "idPrefix": "icpr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2018-08-01T00:00:00", "pubType": "proceedings", "pages": "1049-1055", "year": "2018", 
"issn": "1051-4651", "isbn": "978-1-5386-3788-3", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "08545658", "articleId": "17D45XuDNE6", "__typename": "AdjacentArticleType" }, "next": { "fno": "08545586", "articleId": "17D45WIXbQG", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/iccvw/2015/9711/0/5720a823", "title": "Reconstruction of Articulated Objects from a Moving Camera", "doi": null, "abstractUrl": "/proceedings-article/iccvw/2015/5720a823/12OmNBpVPZD", "parentPublication": { "id": "proceedings/iccvw/2015/9711/0", "title": "2015 IEEE International Conference on Computer Vision Workshop (ICCVW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2013/2869/0/06671777", "title": "Real-time RGB-D camera relocalization", "doi": null, "abstractUrl": "/proceedings-article/ismar/2013/06671777/12OmNqEAT3B", "parentPublication": { "id": "proceedings/ismar/2013/2869/0", "title": "2013 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2010/9343/0/05643609", "title": "Camera motion tracking in a dynamic scene", "doi": null, "abstractUrl": "/proceedings-article/ismar/2010/05643609/12OmNy5zsvG", "parentPublication": { "id": "proceedings/ismar/2010/9343/0", "title": "2010 IEEE International Symposium on Mixed and Augmented Reality", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2017/1032/0/1032a910", "title": "BodyFusion: Real-Time Capture of Human Motion and Surface Geometry Using a Single Depth Camera", "doi": null, "abstractUrl": "/proceedings-article/iccv/2017/1032a910/12OmNzT7Otl", "parentPublication": { "id": "proceedings/iccv/2017/1032/0", "title": "2017 IEEE International 
Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2018/8425/0/842500a042", "title": "Patch-Based Non-rigid 3D Reconstruction from a Single Depth Stream", "doi": null, "abstractUrl": "/proceedings-article/3dv/2018/842500a042/17D45WGGoME", "parentPublication": { "id": "proceedings/3dv/2018/8425/0", "title": "2018 International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2018/8425/0/842500a596", "title": "TwinFusion: High Framerate Non-rigid Fusion through Fast Correspondence Tracking", "doi": null, "abstractUrl": "/proceedings-article/3dv/2018/842500a596/17D45WXIkHD", "parentPublication": { "id": "proceedings/3dv/2018/8425/0", "title": "2018 International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2020/6553/0/09093583", "title": "NRMVS: Non-Rigid Multi-View Stereo", "doi": null, "abstractUrl": "/proceedings-article/wacv/2020/09093583/1jPbfZI3SVi", "parentPublication": { "id": "proceedings/wacv/2020/6553/0", "title": "2020 IEEE Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2020/6553/0/09093533", "title": "Robust Template-Based Non-Rigid Motion Tracking Using Local Coordinate Regularization", "doi": null, "abstractUrl": "/proceedings-article/wacv/2020/09093533/1jPbs0rPdC0", "parentPublication": { "id": "proceedings/wacv/2020/6553/0", "title": "2020 IEEE Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2020/7168/0/716800h000", "title": "DeepDeform: Learning Non-Rigid RGB-D Reconstruction With Semi-Supervised Data", "doi": null, "abstractUrl": 
"/proceedings-article/cvpr/2020/716800h000/1m3nAs3lc1a", "parentPublication": { "id": "proceedings/cvpr/2020/7168/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2021/4509/0/450900b450", "title": "Neural Deformation Graphs for Globally-consistent Non-rigid Reconstruction", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2021/450900b450/1yeJlVNk3bW", "parentPublication": { "id": "proceedings/cvpr/2021/4509/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1aPuOYN1siI", "title": "2019 International Conference on High Performance Big Data and Intelligent Systems (HPBD&IS)", "acronym": "hpbd&is", "groupId": "1831985", "volume": "0", "displayVolume": "0", "year": "2019", "__typename": "ProceedingType" }, "article": { "id": "1aPuQy42GXu", "doi": "10.1109/HPBDIS.2019.8735493", "title": "A Multi-Patch Network for Non-Rigid Object Tracking", "normalizedTitle": "A Multi-Patch Network for Non-Rigid Object Tracking", "abstract": "Non-rigid object tracking is an important yet challenging task in computer vision. In this paper, a multi-patch neural network (MPNet) model is presented to address the problem of non-rigid object tracking. The model learns a multiple patch based framework, which mainly consists of two branches of neural networks. One branch is to track the global target and the other branch partitions the target into multiple patches which are tracked separately. The global tracking and the multiple patch tracking are combined to compute the final tracking results. Compared with the existing methods, our model exploits the trajectories of various parts of a non-rigid object and therefore can accurately track the non-rigid object. Experiments on visual tracking datasets prove the strength of the proposed method.", "abstracts": [ { "abstractType": "Regular", "content": "Non-rigid object tracking is an important yet challenging task in computer vision. In this paper, a multi-patch neural network (MPNet) model is presented to address the problem of non-rigid object tracking. The model learns a multiple patch based framework, which mainly consists of two branches of neural networks. One branch is to track the global target and the other branch partitions the target into multiple patches which are tracked separately. The global tracking and the multiple patch tracking are combined to compute the final tracking results. 
Compared with the existing methods, our model exploits the trajectories of various parts of a non-rigid object and therefore can accurately track the non-rigid object. Experiments on visual tracking datasets prove the strength of the proposed method.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Non-rigid object tracking is an important yet challenging task in computer vision. In this paper, a multi-patch neural network (MPNet) model is presented to address the problem of non-rigid object tracking. The model learns a multiple patch based framework, which mainly consists of two branches of neural networks. One branch is to track the global target and the other branch partitions the target into multiple patches which are tracked separately. The global tracking and the multiple patch tracking are combined to compute the final tracking results. Compared with the existing methods, our model exploits the trajectories of various parts of a non-rigid object and therefore can accurately track the non-rigid object. 
Experiments on visual tracking datasets prove the strength of the proposed method.", "fno": "08735493", "keywords": [ "Computer Vision", "Learning Artificial Intelligence", "Neural Nets", "Object Tracking", "Target Tracking", "Multiple Patch Tracking", "Nonrigid Object Tracking", "Multipatch Neural Network Model", "Visual Object Tracking", "Computer Vision", "MP Net Model", "Global Target Tracking", "Target Tracking", "Object Tracking", "Videos", "Neural Networks", "Computational Modeling", "Visualization", "Strain", "Visual Tracking", "Non Rigid Object", "Multi Patch" ], "authors": [ { "affiliation": "Xi’an Jiaotong University, Xi’an, China", "fullName": "Yiping Sun", "givenName": "Yiping", "surname": "Sun", "__typename": "ArticleAuthorType" }, { "affiliation": "Xi’an Jiaotong University, Xi’an, China", "fullName": "Ping Wei", "givenName": "Ping", "surname": "Wei", "__typename": "ArticleAuthorType" }, { "affiliation": "Xi’an Jiaotong University, Xi’an, China", "fullName": "Chunlong Xia", "givenName": "Chunlong", "surname": "Xia", "__typename": "ArticleAuthorType" }, { "affiliation": "Xi’an Jiaotong University, Xi’an, China", "fullName": "Nanning Zheng", "givenName": "Nanning", "surname": "Zheng", "__typename": "ArticleAuthorType" } ], "idPrefix": "hpbd&is", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2019-05-01T00:00:00", "pubType": "proceedings", "pages": "228-232", "year": "2019", "issn": null, "isbn": "978-1-7281-0466-9", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "08735469", "articleId": "1aPuPYg1UEU", "__typename": "AdjacentArticleType" }, "next": { "fno": "08735486", "articleId": "1aPuSXivwOs", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/csse/2008/3336/1/3336a903", "title": "Multiple Non-rigid Objects Tracking by Modified Kernel Particle 
Filter", "doi": null, "abstractUrl": "/proceedings-article/csse/2008/3336a903/12OmNCvcLLC", "parentPublication": { "id": "proceedings/csse/2008/3336/1", "title": "Computer Science and Software Engineering, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cis/2014/7434/0/7434a260", "title": "Tracking Non-rigid Object Using Discriminative Features", "doi": null, "abstractUrl": "/proceedings-article/cis/2014/7434a260/12OmNrkBwxO", "parentPublication": { "id": "proceedings/cis/2014/7434/0", "title": "2014 Tenth International Conference on Computational Intelligence and Security (CIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2009/3992/0/05206502", "title": "Tracking of a non-rigid object via patch-based dynamic appearance modeling and adaptive Basin Hopping Monte Carlo sampling", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2009/05206502/12OmNs0TKJ2", "parentPublication": { "id": "proceedings/cvpr/2009/3992/0", "title": "2009 IEEE Conference on Computer Vision and Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/dicta/2010/4271/0/4271a058", "title": "Adaptive Non-rigid Object Tracking by Fusing Visual and Motional Descriptors", "doi": null, "abstractUrl": "/proceedings-article/dicta/2010/4271a058/12OmNwDj1ax", "parentPublication": { "id": "proceedings/dicta/2010/4271/0", "title": "2010 International Conference on Digital Image Computing: Techniques and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ccbd/2016/3555/0/3555a248", "title": "A Robust Appearance Model for Object Tracking", "doi": null, "abstractUrl": "/proceedings-article/ccbd/2016/3555a248/12OmNxuXcvS", "parentPublication": { "id": "proceedings/ccbd/2016/3555/0", "title": "2016 7th International Conference on Cloud 
Computing and Big Data (CCBD)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2011/0394/0/05995656", "title": "A novel supervised level set method for non-rigid object tracking", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2011/05995656/12OmNyVerY5", "parentPublication": { "id": "proceedings/cvpr/2011/0394/0", "title": "CVPR 2011", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/csse/2008/3336/1/3336a911", "title": "Color Segmentation and Part Model Matching for Non-rigid Objects Tracking", "doi": null, "abstractUrl": "/proceedings-article/csse/2008/3336a911/12OmNyr8YcL", "parentPublication": { "id": "proceedings/csse/2008/3336/1", "title": "Computer Science and Software Engineering, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2018/8425/0/842500a042", "title": "Patch-Based Non-rigid 3D Reconstruction from a Single Depth Stream", "doi": null, "abstractUrl": "/proceedings-article/3dv/2018/842500a042/17D45WGGoME", "parentPublication": { "id": "proceedings/3dv/2018/8425/0", "title": "2018 International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2020/6553/0/09093533", "title": "Robust Template-Based Non-Rigid Motion Tracking Using Local Coordinate Regularization", "doi": null, "abstractUrl": "/proceedings-article/wacv/2020/09093533/1jPbs0rPdC0", "parentPublication": { "id": "proceedings/wacv/2020/6553/0", "title": "2020 IEEE Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2020/7168/0/716800e909", "title": "Learning to Optimize Non-Rigid Tracking", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2020/716800e909/1m3ndxKghMs", "parentPublication": { 
"id": "proceedings/cvpr/2020/7168/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1yeHGyRsuys", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "acronym": "cvpr", "groupId": "1000147", "volume": "0", "displayVolume": "0", "year": "2021", "__typename": "ProceedingType" }, "article": { "id": "1yeJlVNk3bW", "doi": "10.1109/CVPR46437.2021.00150", "title": "Neural Deformation Graphs for Globally-consistent Non-rigid Reconstruction", "normalizedTitle": "Neural Deformation Graphs for Globally-consistent Non-rigid Reconstruction", "abstract": "We introduce Neural Deformation Graphs for globally-consistent deformation tracking and 3D reconstruction of non-rigid objects. Specifically, we implicitly model a deformation graph via a deep neural network. This neural deformation graph does not rely on any object-specific structure and, thus, can be applied to general non-rigid deformation tracking. Our method globally optimizes this neural graph on a given sequence of depth camera observations of a non-rigidly moving object. Based on explicit viewpoint consistency as well as inter-frame graph and surface consistency constraints, the underlying network is trained in a self-supervised fashion. We additionally optimize for the geometry of the object with an implicit deformable multi-MLP shape representation. Our approach does not assume sequential input data, thus enabling robust tracking of fast motions or even temporally disconnected recordings. Our experiments demonstrate that our Neural Deformation Graphs outperform state-of-the-art non-rigid reconstruction approaches both qualitatively and quantitatively, with 64% improved reconstruction and 54% improved deformation tracking performance. Code is publicly available.<sup>1</sup>", "abstracts": [ { "abstractType": "Regular", "content": "We introduce Neural Deformation Graphs for globally-consistent deformation tracking and 3D reconstruction of non-rigid objects. Specifically, we implicitly model a deformation graph via a deep neural network. 
This neural deformation graph does not rely on any object-specific structure and, thus, can be applied to general non-rigid deformation tracking. Our method globally optimizes this neural graph on a given sequence of depth camera observations of a non-rigidly moving object. Based on explicit viewpoint consistency as well as inter-frame graph and surface consistency constraints, the underlying network is trained in a self-supervised fashion. We additionally optimize for the geometry of the object with an implicit deformable multi-MLP shape representation. Our approach does not assume sequential input data, thus enabling robust tracking of fast motions or even temporally disconnected recordings. Our experiments demonstrate that our Neural Deformation Graphs outperform state-of-the-art non-rigid reconstruction approaches both qualitatively and quantitatively, with 64% improved reconstruction and 54% improved deformation tracking performance. Code is publicly available.<sup>1</sup>", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We introduce Neural Deformation Graphs for globally-consistent deformation tracking and 3D reconstruction of non-rigid objects. Specifically, we implicitly model a deformation graph via a deep neural network. This neural deformation graph does not rely on any object-specific structure and, thus, can be applied to general non-rigid deformation tracking. Our method globally optimizes this neural graph on a given sequence of depth camera observations of a non-rigidly moving object. Based on explicit viewpoint consistency as well as inter-frame graph and surface consistency constraints, the underlying network is trained in a self-supervised fashion. We additionally optimize for the geometry of the object with an implicit deformable multi-MLP shape representation. Our approach does not assume sequential input data, thus enabling robust tracking of fast motions or even temporally disconnected recordings. 
Our experiments demonstrate that our Neural Deformation Graphs outperform state-of-the-art non-rigid reconstruction approaches both qualitatively and quantitatively, with 64% improved reconstruction and 54% improved deformation tracking performance. Code is publicly available.1", "fno": "450900b450", "keywords": [ "Cameras", "Deep Learning Artificial Intelligence", "Geometry", "Graph Theory", "Image Motion Analysis", "Image Reconstruction", "Image Representation", "Image Sequences", "Object Detection", "Object Tracking", "Globally Consistent Nonrigid Reconstruction", "Globally Consistent Deformation Tracking", "Nonrigid Objects", "Deep Neural Network", "Neural Deformation Graph", "Object Specific Structure", "Nonrigid Deformation Tracking", "Inter Frame Graph", "Surface Consistency Constraints", "Multi MLP Shape Representation", "Measurement", "Geometry", "Deformable Models", "Three Dimensional Displays", "Tracking", "Shape", "Neural Networks" ], "authors": [ { "affiliation": "Technical University of Munich", "fullName": "Aljaž Božič", "givenName": "Aljaž", "surname": "Božič", "__typename": "ArticleAuthorType" }, { "affiliation": "Technical University of Munich", "fullName": "Pablo Palafox", "givenName": "Pablo", "surname": "Palafox", "__typename": "ArticleAuthorType" }, { "affiliation": "Facebook Reality Labs Research", "fullName": "Michael Zollhöfer", "givenName": "Michael", "surname": "Zollhöfer", "__typename": "ArticleAuthorType" }, { "affiliation": "Technical University of Munich", "fullName": "Justus Thies", "givenName": "Justus", "surname": "Thies", "__typename": "ArticleAuthorType" }, { "affiliation": "Technical University of Munich", "fullName": "Angela Dai", "givenName": "Angela", "surname": "Dai", "__typename": "ArticleAuthorType" }, { "affiliation": "Technical University of Munich", "fullName": "Matthias Nießner", "givenName": "Matthias", "surname": "Nießner", "__typename": "ArticleAuthorType" } ], "idPrefix": "cvpr", "isOpenAccess": false, 
"showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2021-06-01T00:00:00", "pubType": "proceedings", "pages": "1450-1459", "year": "2021", "issn": null, "isbn": "978-1-6654-4509-2", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [ { "id": "1yeJlSf3kfS", "name": "pcvpr202145090-09577883s1-mm_450900b450.zip", "size": "1.49 MB", "location": "https://www.computer.org/csdl/api/v1/extra/pcvpr202145090-09577883s1-mm_450900b450.zip", "__typename": "WebExtraType" } ], "adjacentArticles": { "previous": { "fno": "450900b440", "articleId": "1yeJSYxaxKo", "__typename": "AdjacentArticleType" }, "next": { "fno": "450900b460", "articleId": "1yeL1bUOuhq", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/iccv/2015/8391/0/8391d083", "title": "Robust Non-rigid Motion Tracking and Surface Reconstruction Using L0 Regularization", "doi": null, "abstractUrl": "/proceedings-article/iccv/2015/8391d083/12OmNB9KHwl", "parentPublication": { "id": "proceedings/iccv/2015/8391/0", "title": "2015 IEEE International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2011/0529/0/05981733", "title": "Separating rigid motion from linear local deformation models", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2011/05981733/12OmNxzuMQY", "parentPublication": { "id": "proceedings/cvprw/2011/0529/0", "title": "CVPR 2011 WORKSHOPS", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2018/05/07888591", "title": "Robust Non-Rigid Motion Tracking and Surface Reconstruction Using Z_$L_0$_Z Regularization", "doi": null, "abstractUrl": "/journal/tg/2018/05/07888591/13rRUILtJqX", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, 
"__typename": "RecommendedArticleType" }, { "id": "trans/tp/2017/07/07527684", "title": "Procrustean Normal Distribution for Non-Rigid Structure from Motion", "doi": null, "abstractUrl": "/journal/tp/2017/07/07527684/13rRUygT7og", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2018/6420/0/642000c607", "title": "Image Collection Pop-up: 3D Reconstruction and Clustering of Rigid and Non-rigid Categories", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2018/642000c607/17D45WHONnD", "parentPublication": { "id": "proceedings/cvpr/2018/6420/0", "title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2018/3788/0/08546201", "title": "Non-rigid Reconstruction with a Single Moving RGB-D Camera", "doi": null, "abstractUrl": "/proceedings-article/icpr/2018/08546201/17D45WHONoz", "parentPublication": { "id": "proceedings/icpr/2018/3788/0", "title": "2018 24th International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2021/2812/0/281200m2686", "title": "4DComplete: Non-Rigid Motion Estimation Beyond the Observable Surface", "doi": null, "abstractUrl": "/proceedings-article/iccv/2021/281200m2686/1BmLpZ0QahW", "parentPublication": { "id": "proceedings/iccv/2021/2812/0", "title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/09852717", "title": "Efficient Registration for Human Surfaces Via Isometric Regularization on Embedded Deformation", "doi": null, "abstractUrl": "/journal/tg/5555/01/09852717/1FHlThR8hLG", "parentPublication": { "id": 
"trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2020/6553/0/09093533", "title": "Robust Template-Based Non-Rigid Motion Tracking Using Local Coordinate Regularization", "doi": null, "abstractUrl": "/proceedings-article/wacv/2020/09093533/1jPbs0rPdC0", "parentPublication": { "id": "proceedings/wacv/2020/6553/0", "title": "2020 IEEE Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2021/8808/0/09412394", "title": "Sequential Non-Rigid Factorisation for Head Pose Estimation", "doi": null, "abstractUrl": "/proceedings-article/icpr/2021/09412394/1tmjQjZtze8", "parentPublication": { "id": "proceedings/icpr/2021/8808/0", "title": "2020 25th International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNAY79oS", "title": "2014 IEEE International Conference on Multimedia and Expo (ICME)", "acronym": "icme", "groupId": "1000477", "volume": "0", "displayVolume": "0", "year": "2014", "__typename": "ProceedingType" }, "article": { "id": "12OmNAoUTnl", "doi": "10.1109/ICME.2014.6890318", "title": "L0 co-intrinsic images decomposition", "normalizedTitle": "L0 co-intrinsic images decomposition", "abstract": "In this paper, we focus on co-intrinsic decomposition, a new problem that performs intrinsic decomposition on a pair of images simultaneously, which share the same foreground with arbitrarily different illuminations and backgrounds. We specifically demand the common foreground across different images to share same reflectance values. For the purpose of efficiency and feasibility, we perform the co-intrinsic decomposition at superpixel-level and propose a uniform approach to automatically derive non-local reflectance relationships via unsupervised L0 sparsity between superpixels from intra-and inter-images. We present a unicolor-light-based intrinsic model, from which we construct a non-local L0 sparse co-Retinex model that imposes feasible constraints on shading, reflectance and environment light, respectively. The co-intrinsic decomposition is finally modeled as a quadratic minimization problem that leads to a fast closed form solution. Extensive experiments show plausible results of our approach in extracting common reflectance components from multiple images. We also validate the benefits of our results in boosting the accuracy of image co-saliency detection.", "abstracts": [ { "abstractType": "Regular", "content": "In this paper, we focus on co-intrinsic decomposition, a new problem that performs intrinsic decomposition on a pair of images simultaneously, which share the same foreground with arbitrarily different illuminations and backgrounds. 
We specifically demand the common foreground across different images to share same reflectance values. For the purpose of efficiency and feasibility, we perform the co-intrinsic decomposition at superpixel-level and propose a uniform approach to automatically derive non-local reflectance relationships via unsupervised L0 sparsity between superpixels from intra-and inter-images. We present a unicolor-light-based intrinsic model, from which we construct a non-local L0 sparse co-Retinex model that imposes feasible constraints on shading, reflectance and environment light, respectively. The co-intrinsic decomposition is finally modeled as a quadratic minimization problem that leads to a fast closed form solution. Extensive experiments show plausible results of our approach in extracting common reflectance components from multiple images. We also validate the benefits of our results in boosting the accuracy of image co-saliency detection.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In this paper, we focus on co-intrinsic decomposition, a new problem that performs intrinsic decomposition on a pair of images simultaneously, which share the same foreground with arbitrarily different illuminations and backgrounds. We specifically demand the common foreground across different images to share same reflectance values. For the purpose of efficiency and feasibility, we perform the co-intrinsic decomposition at superpixel-level and propose a uniform approach to automatically derive non-local reflectance relationships via unsupervised L0 sparsity between superpixels from intra-and inter-images. We present a unicolor-light-based intrinsic model, from which we construct a non-local L0 sparse co-Retinex model that imposes feasible constraints on shading, reflectance and environment light, respectively. The co-intrinsic decomposition is finally modeled as a quadratic minimization problem that leads to a fast closed form solution. 
Extensive experiments show plausible results of our approach in extracting common reflectance components from multiple images. We also validate the benefits of our results in boosting the accuracy of image co-saliency detection.", "fno": "06890318", "keywords": [ "Lighting", "Image Color Analysis", "Image Decomposition", "Minimization", "Educational Institutions", "Correlation", "Mathematical Model", "Quadratic Minimization", "Co Intrinsic Images Decomposition", "L 0 Sparsity", "Superpixels", "Unicolor Light" ], "authors": [ { "affiliation": "School of Computer Science and Technology, Tianjin University, Tianjin, China", "fullName": "Haipeng Dai", "givenName": "Haipeng", "surname": "Dai", "__typename": "ArticleAuthorType" }, { "affiliation": "School of Computer Science and Technology, Tianjin University, Tianjin, China", "fullName": "Wei Feng", "givenName": "Wei", "surname": "Feng", "__typename": "ArticleAuthorType" }, { "affiliation": "School of Computer Software, Tianjin University, Tianjin, China", "fullName": "Liang Wan", "givenName": "Liang", "surname": "Wan", "__typename": "ArticleAuthorType" }, { "affiliation": "School of Computer Science and Technology, Tianjin University, Tianjin, China", "fullName": "Xuecheng Nie", "givenName": "Xuecheng", "surname": "Nie", "__typename": "ArticleAuthorType" } ], "idPrefix": "icme", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2014-07-01T00:00:00", "pubType": "proceedings", "pages": "1-6", "year": "2014", "issn": null, "isbn": "978-1-4799-4761-4", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "06890317", "articleId": "12OmNvSbBGO", "__typename": "AdjacentArticleType" }, "next": { "fno": "06890319", "articleId": "12OmNqN6R0e", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/iccv/2015/8391/0/8391d469", "title": 
"Learning Data-Driven Reflectance Priors for Intrinsic Image Decomposition", "doi": null, "abstractUrl": "/proceedings-article/iccv/2015/8391d469/12OmNBoNrqU", "parentPublication": { "id": "proceedings/iccv/2015/8391/0", "title": "2015 IEEE International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2011/0394/0/05995738", "title": "Intrinsic images decomposition using a local and global sparse representation of reflectance", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2011/05995738/12OmNCd2ryX", "parentPublication": { "id": "proceedings/cvpr/2011/0394/0", "title": "CVPR 2011", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iihmsp/2006/2745/0/04041690", "title": "Recovering Intrinsic Images from Weighted Edge Maps", "doi": null, "abstractUrl": "/proceedings-article/iihmsp/2006/04041690/12OmNCfjeyV", "parentPublication": { "id": "proceedings/iihmsp/2006/2745/0", "title": "2006 International Conference on Intelligent Information Hiding and Multimedia", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2014/4761/0/06890313", "title": "Intrinsic image decomposition by hierarchical L0 sparsity", "doi": null, "abstractUrl": "/proceedings-article/icme/2014/06890313/12OmNwnYFW1", "parentPublication": { "id": "proceedings/icme/2014/4761/0", "title": "2014 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2015/8391/0/8391a433", "title": "Intrinsic Decomposition of Image Sequences from Local Temporal Variations", "doi": null, "abstractUrl": "/proceedings-article/iccv/2015/8391a433/12OmNzC5Tdg", "parentPublication": { "id": "proceedings/iccv/2015/8391/0", "title": "2015 IEEE International Conference on Computer Vision (ICCV)", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2013/12/ttp2013122904", "title": "Intrinsic Image Decomposition Using a Sparse Representation of Reflectance", "doi": null, "abstractUrl": "/journal/tp/2013/12/ttp2013122904/13rRUxOdD3R", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2018/6420/0/642000g430", "title": "Multispectral Image Intrinsic Decomposition via Subspace Constraint", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2018/642000g430/17D45XeKgro", "parentPublication": { "id": "proceedings/cvpr/2018/6420/0", "title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2019/9552/0/955200a175", "title": "Towards High-Quality Intrinsic Images in the Wild", "doi": null, "abstractUrl": "/proceedings-article/icme/2019/955200a175/1cdOQdLofSw", "parentPublication": { "id": "proceedings/icme/2019/9552/0", "title": "2019 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2019/4803/0/480300c521", "title": "Non-Local Intrinsic Decomposition With Near-Infrared Priors", "doi": null, "abstractUrl": "/proceedings-article/iccv/2019/480300c521/1hVluc7QzBK", "parentPublication": { "id": "proceedings/iccv/2019/4803/0", "title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2022/12/09625763", "title": "Unsupervised Intrinsic Image Decomposition Using Internal Self-Similarity Cues", "doi": null, "abstractUrl": "/journal/tp/2022/12/09625763/1yLTnG9Uisw", "parentPublication": { "id": "trans/tp", "title": 
"IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNBDyAaZ", "title": "2015 IEEE International Conference on Computer Vision (ICCV)", "acronym": "iccv", "groupId": "1000149", "volume": "0", "displayVolume": "0", "year": "2015", "__typename": "ProceedingType" }, "article": { "id": "12OmNBoNrqU", "doi": "10.1109/ICCV.2015.396", "title": "Learning Data-Driven Reflectance Priors for Intrinsic Image Decomposition", "normalizedTitle": "Learning Data-Driven Reflectance Priors for Intrinsic Image Decomposition", "abstract": "We propose a data-driven approach for intrinsic image decomposition, which is the process of inferring the confounding factors of reflectance and shading in an image. We pose this as a two-stage learning problem. First, we train a model to predict relative reflectance ordering between image patches ('brighter', 'darker', 'same') from large-scale human annotations, producing a data-driven reflectance prior. Second, we show how to naturally integrate this learned prior into existing energy minimization frame-works for intrinsic image decomposition. We compare our method to the state-of-the-art approach of Bell et al. [7] on both decomposition and image relighting tasks, demonstrating the benefits of the simple relative reflectance prior, especially for scenes under challenging lighting conditions.", "abstracts": [ { "abstractType": "Regular", "content": "We propose a data-driven approach for intrinsic image decomposition, which is the process of inferring the confounding factors of reflectance and shading in an image. We pose this as a two-stage learning problem. First, we train a model to predict relative reflectance ordering between image patches ('brighter', 'darker', 'same') from large-scale human annotations, producing a data-driven reflectance prior. Second, we show how to naturally integrate this learned prior into existing energy minimization frame-works for intrinsic image decomposition. We compare our method to the state-of-the-art approach of Bell et al. 
[7] on both decomposition and image relighting tasks, demonstrating the benefits of the simple relative reflectance prior, especially for scenes under challenging lighting conditions.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We propose a data-driven approach for intrinsic image decomposition, which is the process of inferring the confounding factors of reflectance and shading in an image. We pose this as a two-stage learning problem. First, we train a model to predict relative reflectance ordering between image patches ('brighter', 'darker', 'same') from large-scale human annotations, producing a data-driven reflectance prior. Second, we show how to naturally integrate this learned prior into existing energy minimization frame-works for intrinsic image decomposition. We compare our method to the state-of-the-art approach of Bell et al. [7] on both decomposition and image relighting tasks, demonstrating the benefits of the simple relative reflectance prior, especially for scenes under challenging lighting conditions.", "fno": "8391d469", "keywords": [ "Image Decomposition", "Lighting", "Image Color Analysis", "Minimization", "Optimization", "Streaming Media", "Computer Vision" ], "authors": [ { "affiliation": null, "fullName": "Tinghui Zhou", "givenName": "Tinghui", "surname": "Zhou", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Philipp Krähenbühl", "givenName": "Philipp", "surname": "Krähenbühl", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Alexei A. 
Efros", "givenName": "Alexei A.", "surname": "Efros", "__typename": "ArticleAuthorType" } ], "idPrefix": "iccv", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2015-12-01T00:00:00", "pubType": "proceedings", "pages": "3469-3477", "year": "2015", "issn": "2380-7504", "isbn": "978-1-4673-8391-2", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "8391d460", "articleId": "12OmNB1wkHc", "__typename": "AdjacentArticleType" }, "next": { "fno": "8391d478", "articleId": "12OmNwE9OqB", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icme/2014/4761/0/06890318", "title": "L0 co-intrinsic images decomposition", "doi": null, "abstractUrl": "/proceedings-article/icme/2014/06890318/12OmNAoUTnl", "parentPublication": { "id": "proceedings/icme/2014/4761/0", "title": "2014 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2011/0394/0/05995738", "title": "Intrinsic images decomposition using a local and global sparse representation of reflectance", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2011/05995738/12OmNCd2ryX", "parentPublication": { "id": "proceedings/cvpr/2011/0394/0", "title": "CVPR 2011", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2014/4761/0/06890313", "title": "Intrinsic image decomposition by hierarchical L0 sparsity", "doi": null, "abstractUrl": "/proceedings-article/icme/2014/06890313/12OmNwnYFW1", "parentPublication": { "id": "proceedings/icme/2014/4761/0", "title": "2014 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2015/8391/0/8391a433", "title": 
"Intrinsic Decomposition of Image Sequences from Local Temporal Variations", "doi": null, "abstractUrl": "/proceedings-article/iccv/2015/8391a433/12OmNzC5Tdg", "parentPublication": { "id": "proceedings/iccv/2015/8391/0", "title": "2015 IEEE International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2013/12/ttp2013122904", "title": "Intrinsic Image Decomposition Using a Sparse Representation of Reflectance", "doi": null, "abstractUrl": "/journal/tp/2013/12/ttp2013122904/13rRUxOdD3R", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2019/06/08353139", "title": "Physically-Based Simulation of Cosmetics via Intrinsic Image Decomposition with Facial Priors", "doi": null, "abstractUrl": "/journal/tp/2019/06/08353139/13rRUyogGBw", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bigmm/2018/5321/0/08499478", "title": "Unrolled Optimization with Deep Priors for Intrinsic Image Decomposition", "doi": null, "abstractUrl": "/proceedings-article/bigmm/2018/08499478/17D45VTRoDD", "parentPublication": { "id": "proceedings/bigmm/2018/5321/0", "title": "2018 IEEE Fourth International Conference on Multimedia Big Data (BigMM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2018/6420/0/642000g430", "title": "Multispectral Image Intrinsic Decomposition via Subspace Constraint", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2018/642000g430/17D45XeKgro", "parentPublication": { "id": "proceedings/cvpr/2018/6420/0", "title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", 
"__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2019/4803/0/480300c521", "title": "Non-Local Intrinsic Decomposition With Near-Infrared Priors", "doi": null, "abstractUrl": "/proceedings-article/iccv/2019/480300c521/1hVluc7QzBK", "parentPublication": { "id": "proceedings/iccv/2019/4803/0", "title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2022/12/09625763", "title": "Unsupervised Intrinsic Image Decomposition Using Internal Self-Similarity Cues", "doi": null, "abstractUrl": "/journal/tp/2022/12/09625763/1yLTnG9Uisw", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNwt5sgJ", "title": "CVPR 2011", "acronym": "cvpr", "groupId": "1000147", "volume": "0", "displayVolume": "0", "year": "2011", "__typename": "ProceedingType" }, "article": { "id": "12OmNCbU3cE", "doi": "10.1109/CVPR.2011.5995507", "title": "Intrinsic images using optimization", "normalizedTitle": "Intrinsic images using optimization", "abstract": "In this paper, we present a novel intrinsic image recovery approach using optimization. Our approach is based on the assumption of in a local window in natural images. Our method adopts a premise that neighboring pixels in a local window of a single image having similar intensity values should have similar reflectance values. Thus the intrinsic image decomposition is formulated by optimizing an energy function with adding a weighting constraint to the local image properties. In order to improve the intrinsic image extraction results, we specify local constrain cues by integrating the user strokes in our energy formulation, including constant-reflectance, constant-illumination and fixed-illumination brushes. Our experimental results demonstrate that our approach achieves a better recovery of intrinsic reflectance and illumination components than by previous approaches.", "abstracts": [ { "abstractType": "Regular", "content": "In this paper, we present a novel intrinsic image recovery approach using optimization. Our approach is based on the assumption of in a local window in natural images. Our method adopts a premise that neighboring pixels in a local window of a single image having similar intensity values should have similar reflectance values. Thus the intrinsic image decomposition is formulated by optimizing an energy function with adding a weighting constraint to the local image properties. 
In order to improve the intrinsic image extraction results, we specify local constrain cues by integrating the user strokes in our energy formulation, including constant-reflectance, constant-illumination and fixed-illumination brushes. Our experimental results demonstrate that our approach achieves a better recovery of intrinsic reflectance and illumination components than by previous approaches.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In this paper, we present a novel intrinsic image recovery approach using optimization. Our approach is based on the assumption of in a local window in natural images. Our method adopts a premise that neighboring pixels in a local window of a single image having similar intensity values should have similar reflectance values. Thus the intrinsic image decomposition is formulated by optimizing an energy function with adding a weighting constraint to the local image properties. In order to improve the intrinsic image extraction results, we specify local constrain cues by integrating the user strokes in our energy formulation, including constant-reflectance, constant-illumination and fixed-illumination brushes. Our experimental results demonstrate that our approach achieves a better recovery of intrinsic reflectance and illumination components than by previous approaches.", "fno": "05995507", "keywords": [ "Illumination Component Recovery", "Intrinsic Image Recovery", "Optimization", "Natural Image Color Characteristics", "Pixel Intensity Value", "Pixel Reflectance Value", "Intrinsic Image Decomposition", "Energy Function", "Intrinsic Image Extraction", "Intrinsic Reflectance Recovery" ], "authors": [ { "affiliation": "Beijing Lab. of Intell. Inf. Technol., Beijing Inst. of Technol., Beijing, China", "fullName": "Jianbing Shen", "givenName": null, "surname": "Jianbing Shen", "__typename": "ArticleAuthorType" }, { "affiliation": "Beijing Lab. of Intell. Inf. Technol., Beijing Inst. 
of Technol., Beijing, China", "fullName": "Xiaoshan Yang", "givenName": null, "surname": "Xiaoshan Yang", "__typename": "ArticleAuthorType" }, { "affiliation": "Beijing Lab. of Intell. Inf. Technol., Beijing Inst. of Technol., Beijing, China", "fullName": "Yunde Jia", "givenName": null, "surname": "Yunde Jia", "__typename": "ArticleAuthorType" }, { "affiliation": "State Key Lab. of Transient Opt. & Photonics, Chinese Acad. of Sci., Xi'an, China", "fullName": "Xuelong Li", "givenName": null, "surname": "Xuelong Li", "__typename": "ArticleAuthorType" } ], "idPrefix": "cvpr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2011-06-01T00:00:00", "pubType": "proceedings", "pages": "3481-3487", "year": "2011", "issn": null, "isbn": "978-1-4577-0394-2", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "05995506", "articleId": "12OmNA0dMNp", "__typename": "AdjacentArticleType" }, "next": { "fno": "05995508", "articleId": "12OmNwErpzD", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icme/2014/4761/0/06890318", "title": "L0 co-intrinsic images decomposition", "doi": null, "abstractUrl": "/proceedings-article/icme/2014/06890318/12OmNAoUTnl", "parentPublication": { "id": "proceedings/icme/2014/4761/0", "title": "2014 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2011/0394/0/05995738", "title": "Intrinsic images decomposition using a local and global sparse representation of reflectance", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2011/05995738/12OmNCd2ryX", "parentPublication": { "id": "proceedings/cvpr/2011/0394/0", "title": "CVPR 2011", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/iihmsp/2006/2745/0/04041690", "title": "Recovering Intrinsic Images from Weighted Edge Maps", "doi": null, "abstractUrl": "/proceedings-article/iihmsp/2006/04041690/12OmNCfjeyV", "parentPublication": { "id": "proceedings/iihmsp/2006/2745/0", "title": "2006 International Conference on Intelligent Information Hiding and Multimedia", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2014/5118/0/5118b494", "title": "The Photometry of Intrinsic Images", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2014/5118b494/12OmNCxbXHx", "parentPublication": { "id": "proceedings/cvpr/2014/5118/0", "title": "2014 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iih-msp/2006/2745/0/27450159", "title": "Recovering Intrinsic Images from Weighted Edge Maps", "doi": null, "abstractUrl": "/proceedings-article/iih-msp/2006/27450159/12OmNqGA55h", "parentPublication": { "id": "proceedings/iih-msp/2006/2745/0", "title": "Intelligent Information Hiding and Multimedia Signal Processing, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/dicta/2010/4271/0/4271a486", "title": "Re-texturing by Intrinsic Video", "doi": null, "abstractUrl": "/proceedings-article/dicta/2010/4271a486/12OmNzYwc7W", "parentPublication": { "id": "proceedings/dicta/2010/4271/0", "title": "2010 International Conference on Digital Image Computing: Techniques and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2004/10/i1336", "title": "Illumination Normalization with Time-Dependent Intrinsic Images for Video Surveillance", "doi": null, "abstractUrl": "/journal/tp/2004/10/i1336/13rRUILtJs5", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine 
Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2013/12/ttp2013122904", "title": "Intrinsic Image Decomposition Using a Sparse Representation of Reflectance", "doi": null, "abstractUrl": "/journal/tp/2013/12/ttp2013122904/13rRUxOdD3R", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2005/09/i1459", "title": "Recovering Intrinsic Images from a Single Image", "doi": null, "abstractUrl": "/journal/tp/2005/09/i1459/13rRUxlgxXw", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2023/06/09961945", "title": "Intrinsic Image Transfer for Illumination Manipulation", "doi": null, "abstractUrl": "/journal/tp/2023/06/09961945/1IxvTvt5Qty", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNBDyAaZ", "title": "2015 IEEE International Conference on Computer Vision (ICCV)", "acronym": "iccv", "groupId": "1000149", "volume": "0", "displayVolume": "0", "year": "2015", "__typename": "ProceedingType" }, "article": { "id": "12OmNzC5Tdg", "doi": "10.1109/ICCV.2015.57", "title": "Intrinsic Decomposition of Image Sequences from Local Temporal Variations", "normalizedTitle": "Intrinsic Decomposition of Image Sequences from Local Temporal Variations", "abstract": "We present a method for intrinsic image decomposition, which aims to decompose images into reflectance and shading layers. Our input is a sequence of images with varying illumination acquired by a static camera, e.g. an indoor scene with a moving light source or an outdoor timelapse. We leverage the local color variations observed over time to infer constraints on the reflectance and solve the ill-posed image decomposition problem. In particular, we derive an adaptive local energy from the observations of each local neighborhood over time, and integrate distant pairwise constraints to enforce coherent decomposition across all surfaces with consistent shading changes. Our method is solely based on multiple observations of a Lambertian scene under varying illumination and does not require user interaction, scene geometry, or an explicit lighting model. We compare our results with several intrinsic decomposition methods on a number of synthetic and captured datasets.", "abstracts": [ { "abstractType": "Regular", "content": "We present a method for intrinsic image decomposition, which aims to decompose images into reflectance and shading layers. Our input is a sequence of images with varying illumination acquired by a static camera, e.g. an indoor scene with a moving light source or an outdoor timelapse. We leverage the local color variations observed over time to infer constraints on the reflectance and solve the ill-posed image decomposition problem. 
In particular, we derive an adaptive local energy from the observations of each local neighborhood over time, and integrate distant pairwise constraints to enforce coherent decomposition across all surfaces with consistent shading changes. Our method is solely based on multiple observations of a Lambertian scene under varying illumination and does not require user interaction, scene geometry, or an explicit lighting model. We compare our results with several intrinsic decomposition methods on a number of synthetic and captured datasets.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We present a method for intrinsic image decomposition, which aims to decompose images into reflectance and shading layers. Our input is a sequence of images with varying illumination acquired by a static camera, e.g. an indoor scene with a moving light source or an outdoor timelapse. We leverage the local color variations observed over time to infer constraints on the reflectance and solve the ill-posed image decomposition problem. In particular, we derive an adaptive local energy from the observations of each local neighborhood over time, and integrate distant pairwise constraints to enforce coherent decomposition across all surfaces with consistent shading changes. Our method is solely based on multiple observations of a Lambertian scene under varying illumination and does not require user interaction, scene geometry, or an explicit lighting model. 
We compare our results with several intrinsic decomposition methods on a number of synthetic and captured datasets.", "fno": "8391a433", "keywords": [ "Lighting", "Image Color Analysis", "Light Sources", "Geometry", "Robustness", "Image Decomposition", "Image Sequences" ], "authors": [ { "affiliation": null, "fullName": "Pierre-Yves Laffont", "givenName": "Pierre-Yves", "surname": "Laffont", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Jean-Charles Bazin", "givenName": "Jean-Charles", "surname": "Bazin", "__typename": "ArticleAuthorType" } ], "idPrefix": "iccv", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2015-12-01T00:00:00", "pubType": "proceedings", "pages": "433-441", "year": "2015", "issn": "2380-7504", "isbn": "978-1-4673-8391-2", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "8391a424", "articleId": "12OmNzt0IAe", "__typename": "AdjacentArticleType" }, "next": { "fno": "8391a442", "articleId": "12OmNxxNbR9", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icme/2014/4761/0/06890318", "title": "L0 co-intrinsic images decomposition", "doi": null, "abstractUrl": "/proceedings-article/icme/2014/06890318/12OmNAoUTnl", "parentPublication": { "id": "proceedings/icme/2014/4761/0", "title": "2014 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2009/4420/0/05459428", "title": "Ground truth dataset and baseline evaluations for intrinsic image algorithms", "doi": null, "abstractUrl": "/proceedings-article/iccv/2009/05459428/12OmNqFrGrS", "parentPublication": { "id": "proceedings/iccv/2009/4420/0", "title": "2009 IEEE 12th International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, 
"__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2015/8391/0/8391a172", "title": "A Comprehensive Multi-Illuminant Dataset for Benchmarking of the Intrinsic Image Algorithms", "doi": null, "abstractUrl": "/proceedings-article/iccv/2015/8391a172/12OmNyQpgMj", "parentPublication": { "id": "proceedings/iccv/2015/8391/0", "title": "2015 IEEE International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/02/08457312", "title": "Intrinsic Image Decomposition with Step and Drift Shading Separation", "doi": null, "abstractUrl": "/journal/tg/2020/02/08457312/13Jkr98ynrb", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2018/6420/0/642000g430", "title": "Multispectral Image Intrinsic Decomposition via Subspace Constraint", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2018/642000g430/17D45XeKgro", "parentPublication": { "id": "proceedings/cvpr/2018/6420/0", "title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icipmc/2022/6872/0/687200a051", "title": "A Double-stream Exchange Transformer Network for Intrinsic Image Decomposition", "doi": null, "abstractUrl": "/proceedings-article/icipmc/2022/687200a051/1GIurknyg0w", "parentPublication": { "id": "proceedings/icipmc/2022/6872/0", "title": "2022 International Conference on Image Processing and Media Computing (ICIPMC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2019/4803/0/480300h819", "title": "GLoSH: Global-Local Spherical Harmonics for Intrinsic Image Decomposition", "doi": null, "abstractUrl": "/proceedings-article/iccv/2019/480300h819/1hQqy771H9u", 
"parentPublication": { "id": "proceedings/iccv/2019/4803/0", "title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2019/4803/0/480300c521", "title": "Non-Local Intrinsic Decomposition With Near-Infrared Priors", "doi": null, "abstractUrl": "/proceedings-article/iccv/2019/480300c521/1hVluc7QzBK", "parentPublication": { "id": "proceedings/iccv/2019/4803/0", "title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2020/7168/0/716800d245", "title": "Unsupervised Learning for Intrinsic Image Decomposition From a Single Image", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2020/716800d245/1m3obXljyCI", "parentPublication": { "id": "proceedings/cvpr/2020/7168/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2022/12/09625763", "title": "Unsupervised Intrinsic Image Decomposition Using Internal Self-Similarity Cues", "doi": null, "abstractUrl": "/journal/tp/2022/12/09625763/1yLTnG9Uisw", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "17D45VtKirt", "title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "acronym": "cvpr", "groupId": "1000147", "volume": "0", "displayVolume": "0", "year": "2018", "__typename": "ProceedingType" }, "article": { "id": "17D45XeKgro", "doi": "10.1109/CVPR.2018.00673", "title": "Multispectral Image Intrinsic Decomposition via Subspace Constraint", "normalizedTitle": "Multispectral Image Intrinsic Decomposition via Subspace Constraint", "abstract": "Multispectral images contain many clues of surface characteristics of the objects, thus can be used in many computer vision tasks, e.g., recolorization and segmentation. However, due to the complex geometry structure of natural scenes, the spectra curves of the same surface can look very different under different illuminations and from different angles. In this paper, a new Multispectral Image Intrinsic Decomposition model (MIID) is presented to decompose the shading and reflectance from a single multispectral image. We extend the Retinex model, which is proposed for RGB image intrinsic decomposition, for multispectral domain. Based on this, a subspace constraint is introduced to both the shading and reflectance spectral space to reduce the ill-posedness of the problem and make the problem solvable. A dataset of 22 scenes is given with the ground truth of shadings and reflectance to facilitate objective evaluations. The experiments demonstrate the effectiveness of the proposed method.", "abstracts": [ { "abstractType": "Regular", "content": "Multispectral images contain many clues of surface characteristics of the objects, thus can be used in many computer vision tasks, e.g., recolorization and segmentation. However, due to the complex geometry structure of natural scenes, the spectra curves of the same surface can look very different under different illuminations and from different angles. 
In this paper, a new Multispectral Image Intrinsic Decomposition model (MIID) is presented to decompose the shading and reflectance from a single multispectral image. We extend the Retinex model, which is proposed for RGB image intrinsic decomposition, for multispectral domain. Based on this, a subspace constraint is introduced to both the shading and reflectance spectral space to reduce the ill-posedness of the problem and make the problem solvable. A dataset of 22 scenes is given with the ground truth of shadings and reflectance to facilitate objective evaluations. The experiments demonstrate the effectiveness of the proposed method.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Multispectral images contain many clues of surface characteristics of the objects, thus can be used in many computer vision tasks, e.g., recolorization and segmentation. However, due to the complex geometry structure of natural scenes, the spectra curves of the same surface can look very different under different illuminations and from different angles. In this paper, a new Multispectral Image Intrinsic Decomposition model (MIID) is presented to decompose the shading and reflectance from a single multispectral image. We extend the Retinex model, which is proposed for RGB image intrinsic decomposition, for multispectral domain. Based on this, a subspace constraint is introduced to both the shading and reflectance spectral space to reduce the ill-posedness of the problem and make the problem solvable. A dataset of 22 scenes is given with the ground truth of shadings and reflectance to facilitate objective evaluations. 
The experiments demonstrate the effectiveness of the proposed method.", "fno": "642000g430", "keywords": [ "Computer Vision", "Geometry", "Image Colour Analysis", "Image Segmentation", "Natural Scenes", "Recolorization", "Segmentation", "Complex Geometry Structure", "Natural Scenes", "Spectra Curves", "Retinex Model", "RGB Image Intrinsic Decomposition", "Multispectral Domain", "Subspace Constraint", "Reflectance Spectral Space", "Surface Characteristics", "Illumination", "Multispectral Image Intrinsic Decomposition Model", "Shading Decomposition", "Computer Vision", "MIID", "Lighting", "Image Color Analysis", "Image Segmentation", "Geometry", "Image Decomposition", "Subspace Constraints", "Color" ], "authors": [ { "affiliation": null, "fullName": "Qian Huang", "givenName": "Qian", "surname": "Huang", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Weixin Zhu", "givenName": "Weixin", "surname": "Zhu", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Yang Zhao", "givenName": "Yang", "surname": "Zhao", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Linsen Chen", "givenName": "Linsen", "surname": "Chen", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Yao Wang", "givenName": "Yao", "surname": "Wang", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Tao Yue", "givenName": "Tao", "surname": "Yue", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Xun Cao", "givenName": "Xun", "surname": "Cao", "__typename": "ArticleAuthorType" } ], "idPrefix": "cvpr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2018-06-01T00:00:00", "pubType": "proceedings", "pages": "6430-6439", "year": "2018", "issn": null, "isbn": "978-1-5386-6420-9", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "642000g421", 
"articleId": "17D45Wda7gf", "__typename": "AdjacentArticleType" }, "next": { "fno": "642000g440", "articleId": "17D45W9KVIX", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icme/2014/4761/0/06890318", "title": "L0 co-intrinsic images decomposition", "doi": null, "abstractUrl": "/proceedings-article/icme/2014/06890318/12OmNAoUTnl", "parentPublication": { "id": "proceedings/icme/2014/4761/0", "title": "2014 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2015/8391/0/8391d469", "title": "Learning Data-Driven Reflectance Priors for Intrinsic Image Decomposition", "doi": null, "abstractUrl": "/proceedings-article/iccv/2015/8391d469/12OmNBoNrqU", "parentPublication": { "id": "proceedings/iccv/2015/8391/0", "title": "2015 IEEE International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2015/8391/0/8391a433", "title": "Intrinsic Decomposition of Image Sequences from Local Temporal Variations", "doi": null, "abstractUrl": "/proceedings-article/iccv/2015/8391a433/12OmNzC5Tdg", "parentPublication": { "id": "proceedings/iccv/2015/8391/0", "title": "2015 IEEE International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/02/08457312", "title": "Intrinsic Image Decomposition with Step and Drift Shading Separation", "doi": null, "abstractUrl": "/journal/tg/2020/02/08457312/13Jkr98ynrb", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2013/12/ttp2013122904", "title": "Intrinsic Image Decomposition Using a Sparse Representation of Reflectance", 
"doi": null, "abstractUrl": "/journal/tp/2013/12/ttp2013122904/13rRUxOdD3R", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icipmc/2022/6872/0/687200a051", "title": "A Double-stream Exchange Transformer Network for Intrinsic Image Decomposition", "doi": null, "abstractUrl": "/proceedings-article/icipmc/2022/687200a051/1GIurknyg0w", "parentPublication": { "id": "proceedings/icipmc/2022/6872/0", "title": "2022 International Conference on Image Processing and Media Computing (ICIPMC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2019/4803/0/480300h819", "title": "GLoSH: Global-Local Spherical Harmonics for Intrinsic Image Decomposition", "doi": null, "abstractUrl": "/proceedings-article/iccv/2019/480300h819/1hQqy771H9u", "parentPublication": { "id": "proceedings/iccv/2019/4803/0", "title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2019/4803/0/480300c521", "title": "Non-Local Intrinsic Decomposition With Near-Infrared Priors", "doi": null, "abstractUrl": "/proceedings-article/iccv/2019/480300c521/1hVluc7QzBK", "parentPublication": { "id": "proceedings/iccv/2019/4803/0", "title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2020/7168/0/716800d245", "title": "Unsupervised Learning for Intrinsic Image Decomposition From a Single Image", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2020/716800d245/1m3obXljyCI", "parentPublication": { "id": "proceedings/cvpr/2020/7168/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2022/12/09625763", "title": "Unsupervised Intrinsic Image Decomposition Using Internal Self-Similarity Cues", "doi": null, "abstractUrl": "/journal/tp/2022/12/09625763/1yLTnG9Uisw", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1G55WEFExd6", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "acronym": "cvprw", "groupId": "1001809", "volume": "0", "displayVolume": "0", "year": "2022", "__typename": "ProceedingType" }, "article": { "id": "1G56nWipNPa", "doi": "10.1109/CVPRW56347.2022.00046", "title": "HSI-Guided Intrinsic Image Decomposition for Outdoor Scenes", "normalizedTitle": "HSI-Guided Intrinsic Image Decomposition for Outdoor Scenes", "abstract": "Intrinisic image decomposition (IID) aims to recover the reflectance and shading components from images and is the prerequisite to many downstream computer vision applications, such as image editing and image relighting. Due to the inherent difficulty in acquiring ground truth reflectance and shading, existing datasets are either synthetic indoor scenes or objects using graphics rendering (e.g., CGIntrinsics and ShapeNet etc.) or real photos with very sparse manual annotation (e.g., IIW and SAW etc.). Accompanied with the complex nature of outdoor scenes, most IID methods focus on the decomposition of indoor environment. There is still a long way to go before we can handle IID of outdoor scenes. In this paper, we take the attempt to perform intrinsic image decomposition for outdoor scenes when RGB image is not the only thing we can get from the enviroment. With the observation of prior work where nir-infrared (NIR) images are transparent to a range of colourants/dyes, we propose to extend it to more spectra by collecting hyperspectral imaging (HSI) data which are well aligned with RGB images and to perform IID with both of them. We also apply existing mainstream IID methods for comparison to examine current progress and challenges at the road towards IID outdoors. We still make some improvements and find problems when performing IID for outdoor scenes, even though we do not handle it perfectly. 
The data we collect will be made publicly available for further potential investigation.", "abstracts": [ { "abstractType": "Regular", "content": "Intrinisic image decomposition (IID) aims to recover the reflectance and shading components from images and is the prerequisite to many downstream computer vision applications, such as image editing and image relighting. Due to the inherent difficulty in acquiring ground truth reflectance and shading, existing datasets are either synthetic indoor scenes or objects using graphics rendering (e.g., CGIntrinsics and ShapeNet etc.) or real photos with very sparse manual annotation (e.g., IIW and SAW etc.). Accompanied with the complex nature of outdoor scenes, most IID methods focus on the decomposition of indoor environment. There is still a long way to go before we can handle IID of outdoor scenes. In this paper, we take the attempt to perform intrinsic image decomposition for outdoor scenes when RGB image is not the only thing we can get from the enviroment. With the observation of prior work where nir-infrared (NIR) images are transparent to a range of colourants/dyes, we propose to extend it to more spectra by collecting hyperspectral imaging (HSI) data which are well aligned with RGB images and to perform IID with both of them. We also apply existing mainstream IID methods for comparison to examine current progress and challenges at the road towards IID outdoors. We still make some improvements and find problems when performing IID for outdoor scenes, even though we do not handle it perfectly. The data we collect will be made publicly available for further potential investigation.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Intrinisic image decomposition (IID) aims to recover the reflectance and shading components from images and is the prerequisite to many downstream computer vision applications, such as image editing and image relighting. 
Due to the inherent difficulty in acquiring ground truth reflectance and shading, existing datasets are either synthetic indoor scenes or objects using graphics rendering (e.g., CGIntrinsics and ShapeNet etc.) or real photos with very sparse manual annotation (e.g., IIW and SAW etc.). Accompanied with the complex nature of outdoor scenes, most IID methods focus on the decomposition of indoor environment. There is still a long way to go before we can handle IID of outdoor scenes. In this paper, we take the attempt to perform intrinsic image decomposition for outdoor scenes when RGB image is not the only thing we can get from the enviroment. With the observation of prior work where nir-infrared (NIR) images are transparent to a range of colourants/dyes, we propose to extend it to more spectra by collecting hyperspectral imaging (HSI) data which are well aligned with RGB images and to perform IID with both of them. We also apply existing mainstream IID methods for comparison to examine current progress and challenges at the road towards IID outdoors. We still make some improvements and find problems when performing IID for outdoor scenes, even though we do not handle it perfectly. 
The data we collect will be made publicly available for further potential investigation.", "fno": "873900a312", "keywords": [ "Computer Vision", "Geophysical Image Processing", "Image Colour Analysis", "Image Matching", "Image Processing", "Image Recognition", "Image Reconstruction", "Image Representation", "Image Segmentation", "Image Sensors", "Object Recognition", "Rendering Computer Graphics", "Solid Modelling", "HSI Guided Intrinsic Image Decomposition", "Outdoor Scenes", "Intrinisic Image Decomposition", "Shading Components", "Image Editing", "Image Relighting", "Ground Truth Reflectance", "Synthetic Indoor Scenes", "IID Methods Focus", "RGB Image", "Nir Infrared Images", "Hyperspectral Imaging Data", "Existing Mainstream IID", "IID Outdoors", "Reflectivity", "Computer Vision", "Roads", "Surface Acoustic Waves", "Manuals", "Rendering Computer Graphics", "Image Decomposition" ], "authors": [ { "affiliation": "Beijing Institute of Technology", "fullName": "Fan Zhang", "givenName": "Fan", "surname": "Zhang", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Amsterdam", "fullName": "Shaodi You", "givenName": "Shaodi", "surname": "You", "__typename": "ArticleAuthorType" }, { "affiliation": "International Digital Economy Academy", "fullName": "Yu Li", "givenName": "Yu", "surname": "Li", "__typename": "ArticleAuthorType" }, { "affiliation": "Beijing Institute of Technology", "fullName": "Ying Fu", "givenName": "Ying", "surname": "Fu", "__typename": "ArticleAuthorType" } ], "idPrefix": "cvprw", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2022-06-01T00:00:00", "pubType": "proceedings", "pages": "312-321", "year": "2022", "issn": null, "isbn": "978-1-6654-8739-9", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "873900a304", "articleId": "1G56RRPFTj2", "__typename": "AdjacentArticleType" }, "next": { "fno": 
"873900a322", "articleId": "1G578cpG2E8", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icpr/2004/2128/2/212820761", "title": "A Real-Time Vehicle Detection and Tracking System in Outdoor Traffic Scenes", "doi": null, "abstractUrl": "/proceedings-article/icpr/2004/212820761/12OmNCbkQBu", "parentPublication": { "id": "proceedings/icpr/2004/2128/2", "title": "Pattern Recognition, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cisp/2008/3119/2/3119b187", "title": "View-Dependent Real-Time Rendering of Large Outdoor Scenes", "doi": null, "abstractUrl": "/proceedings-article/cisp/2008/3119b187/12OmNCf1Du3", "parentPublication": { "id": "proceedings/cisp/2008/3119/3", "title": "Image and Signal Processing, Congress on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccvw/2015/9711/0/5720a124", "title": "Semantic Mapping of Large-Scale Outdoor Scenes for Autonomous Off-Road Driving", "doi": null, "abstractUrl": "/proceedings-article/iccvw/2015/5720a124/12OmNvEyR6g", "parentPublication": { "id": "proceedings/iccvw/2015/9711/0", "title": "2015 IEEE International Conference on Computer Vision Workshop (ICCVW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2013/4989/0/4989c067", "title": "Mesh Based Semantic Modelling for Indoor and Outdoor Scenes", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2013/4989c067/12OmNwekjC2", "parentPublication": { "id": "proceedings/cvpr/2013/4989/0", "title": "2013 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2006/2521/3/252131135", "title": "Comparison of Similarity Measures for Trajectory Clustering in Outdoor Surveillance Scenes", 
"doi": null, "abstractUrl": "/proceedings-article/icpr/2006/252131135/12OmNwx3QdR", "parentPublication": { "id": "proceedings/icpr/2006/2521/3", "title": "2006 18th International Conference on Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2013/02/ttg2013020210", "title": "Rich Intrinsic Image Decomposition of Outdoor Scenes from Multiple Views", "doi": null, "abstractUrl": "/journal/tg/2013/02/ttg2013020210/13rRUILtJm9", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600t9758", "title": "PIE-Net: Photometric Invariant Edge Guided Network for Intrinsic Image Decomposition", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600t9758/1H0N3uaU7mM", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmew/2019/9214/0/921400a591", "title": "Localizing Adverts in Outdoor Scenes", "doi": null, "abstractUrl": "/proceedings-article/icmew/2019/921400a591/1cJ0DUBH74Q", "parentPublication": { "id": "proceedings/icmew/2019/9214/0", "title": "2019 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccp/2021/1952/0/09466269", "title": "DeRenderNet: Intrinsic Image Decomposition of Urban Scenes with Shape-(In)dependent Shading Rendering", "doi": null, "abstractUrl": "/proceedings-article/iccp/2021/09466269/1uST1ciE3EA", "parentPublication": { "id": "proceedings/iccp/2021/1952/0", "title": "2021 IEEE International Conference on Computational Photography (ICCP)", "__typename": "ParentPublication" }, 
"__typename": "RecommendedArticleType" }, { "id": "trans/tp/2022/12/09625763", "title": "Unsupervised Intrinsic Image Decomposition Using Internal Self-Similarity Cues", "doi": null, "abstractUrl": "/journal/tp/2022/12/09625763/1yLTnG9Uisw", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1hQqfuoOyHu", "title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)", "acronym": "iccv", "groupId": "1000149", "volume": "0", "displayVolume": "0", "year": "2019", "__typename": "ProceedingType" }, "article": { "id": "1hVluc7QzBK", "doi": "10.1109/ICCV.2019.00261", "title": "Non-Local Intrinsic Decomposition With Near-Infrared Priors", "normalizedTitle": "Non-Local Intrinsic Decomposition With Near-Infrared Priors", "abstract": "Intrinsic image decomposition is a highly under-constrained problem that has been extensively studied by computer vision researchers. Previous methods impose additional constraints by exploiting either empirical or data-driven priors. In this paper, we revisit intrinsic image decomposition with the aid of near-infrared (NIR) imagery. We show that NIR band is considerably less sensitive to textures and can be exploited to reduce ambiguity caused by reflectance variation, promoting a simple yet powerful prior for shading smoothness. With this observation, we formulate intrinsic decomposition as an energy minimisation problem. Unlike existing methods, our energy formulation decouples reflectance and shading estimation, into a convex local shading component based on NIR-RGB image pair, and a reflectance component that encourages reflectance homogeneity both locally and globally. We further show the minimisation process can be approached by a series of multi-dimensional kernel convolutions, each within linear time complexity. To validate the proposed algorithm, a NIR-RGB dataset is captured over real-world objects, where our NIR-assisted approach demonstrates clear superiority over RGB methods.", "abstracts": [ { "abstractType": "Regular", "content": "Intrinsic image decomposition is a highly under-constrained problem that has been extensively studied by computer vision researchers. Previous methods impose additional constraints by exploiting either empirical or data-driven priors. 
In this paper, we revisit intrinsic image decomposition with the aid of near-infrared (NIR) imagery. We show that NIR band is considerably less sensitive to textures and can be exploited to reduce ambiguity caused by reflectance variation, promoting a simple yet powerful prior for shading smoothness. With this observation, we formulate intrinsic decomposition as an energy minimisation problem. Unlike existing methods, our energy formulation decouples reflectance and shading estimation, into a convex local shading component based on NIR-RGB image pair, and a reflectance component that encourages reflectance homogeneity both locally and globally. We further show the minimisation process can be approached by a series of multi-dimensional kernel convolutions, each within linear time complexity. To validate the proposed algorithm, a NIR-RGB dataset is captured over real-world objects, where our NIR-assisted approach demonstrates clear superiority over RGB methods.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Intrinsic image decomposition is a highly under-constrained problem that has been extensively studied by computer vision researchers. Previous methods impose additional constraints by exploiting either empirical or data-driven priors. In this paper, we revisit intrinsic image decomposition with the aid of near-infrared (NIR) imagery. We show that NIR band is considerably less sensitive to textures and can be exploited to reduce ambiguity caused by reflectance variation, promoting a simple yet powerful prior for shading smoothness. With this observation, we formulate intrinsic decomposition as an energy minimisation problem. Unlike existing methods, our energy formulation decouples reflectance and shading estimation, into a convex local shading component based on NIR-RGB image pair, and a reflectance component that encourages reflectance homogeneity both locally and globally. 
We further show the minimisation process can be approached by a series of multi-dimensional kernel convolutions, each within linear time complexity. To validate the proposed algorithm, a NIR-RGB dataset is captured over real-world objects, where our NIR-assisted approach demonstrates clear superiority over RGB methods.", "fno": "480300c521", "keywords": [ "Computational Complexity", "Computer Vision", "Convolution", "Image Colour Analysis", "Minimisation", "Nonlocal Intrinsic Decomposition", "Near Infrared Priors", "Intrinsic Image Decomposition", "Computer Vision Researchers", "Empirical Data Driven Priors", "Near Infrared Imagery", "NIR Band", "Reflectance Variation", "Shading Smoothness", "Energy Minimisation Problem", "Shading Estimation", "Convex Local Shading Component", "NIR RGB Image Pair", "Reflectance Homogeneity", "NIR RGB Dataset", "NIR Assisted Approach", "Multidimensional Kernel Convolutions", "Image Color Analysis", "Lighting", "Cameras", "Computer Vision", "Minimization", "Shape", "Training" ], "authors": [ { "affiliation": "Australian National University", "fullName": "Ziang Cheng", "givenName": "Ziang", "surname": "Cheng", "__typename": "ArticleAuthorType" }, { "affiliation": "National Institute of Informatics", "fullName": "Yinqiang Zheng", "givenName": "Yinqiang", "surname": "Zheng", "__typename": "ArticleAuthorType" }, { "affiliation": "Data61-CSIRO", "fullName": "Shaodi You", "givenName": "Shaodi", "surname": "You", "__typename": "ArticleAuthorType" }, { "affiliation": "National Institute of Informatics", "fullName": "Imari Sato", "givenName": "Imari", "surname": "Sato", "__typename": "ArticleAuthorType" } ], "idPrefix": "iccv", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2019-10-01T00:00:00", "pubType": "proceedings", "pages": "2521-2530", "year": "2019", "issn": null, "isbn": "978-1-7281-4803-8", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], 
"adjacentArticles": { "previous": { "fno": "480300c511", "articleId": "1hQqgj92sqA", "__typename": "AdjacentArticleType" }, "next": { "fno": "480300c531", "articleId": "1hVlSTXipi0", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/cvprw/2015/6759/0/07301300", "title": "Make my day - high-fidelity color denoising with Near-Infrared", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2015/07301300/12OmNAlvI5E", "parentPublication": { "id": "proceedings/cvprw/2015/6759/0", "title": "2015 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2014/4761/0/06890318", "title": "L0 co-intrinsic images decomposition", "doi": null, "abstractUrl": "/proceedings-article/icme/2014/06890318/12OmNAoUTnl", "parentPublication": { "id": "proceedings/icme/2014/4761/0", "title": "2014 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2015/8391/0/8391d469", "title": "Learning Data-Driven Reflectance Priors for Intrinsic Image Decomposition", "doi": null, "abstractUrl": "/proceedings-article/iccv/2015/8391d469/12OmNBoNrqU", "parentPublication": { "id": "proceedings/iccv/2015/8391/0", "title": "2015 IEEE International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2017/6067/0/08019390", "title": "Intrinsic decomposition from a single RGB-D image with sparse and non-local priors", "doi": null, "abstractUrl": "/proceedings-article/icme/2017/08019390/12OmNwAt1FT", "parentPublication": { "id": "proceedings/icme/2017/6067/0", "title": "2017 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "proceedings/cvpr/2016/8851/0/8851c452", "title": "Simultaneous Estimation of Near IR BRDF and Fine-Scale Surface Geometry", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2016/8851c452/12OmNy6qfI1", "parentPublication": { "id": "proceedings/cvpr/2016/8851/0", "title": "2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2016/04/07115131", "title": "Intrinsic Scene Properties from a Single RGB-D Image", "doi": null, "abstractUrl": "/journal/tp/2016/04/07115131/13rRUNvyamf", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2014/08/06671601", "title": "Automatic and Accurate Shadow Detection Using Near-Infrared Information", "doi": null, "abstractUrl": "/journal/tp/2014/08/06671601/13rRUxly8Ug", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2018/6420/0/642000g430", "title": "Multispectral Image Intrinsic Decomposition via Subspace Constraint", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2018/642000g430/17D45XeKgro", "parentPublication": { "id": "proceedings/cvpr/2018/6420/0", "title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2022/8739/0/873900a312", "title": "HSI-Guided Intrinsic Image Decomposition for Outdoor Scenes", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2022/873900a312/1G56nWipNPa", "parentPublication": { "id": "proceedings/cvprw/2022/8739/0", "title": "2022 IEEE/CVF Conference on Computer Vision and 
Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2020/10/08738841", "title": "Photometric Depth Super-Resolution", "doi": null, "abstractUrl": "/journal/tp/2020/10/08738841/1aXLIsObtUQ", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNBTawn8", "title": "2014 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "acronym": "cvprw", "groupId": "1001809", "volume": "0", "displayVolume": "0", "year": "2014", "__typename": "ProceedingType" }, "article": { "id": "12OmNAWH9Je", "doi": "10.1109/CVPRW.2014.26", "title": "Dense View Interpolation on Mobile Devices Using Focal Stacks", "normalizedTitle": "Dense View Interpolation on Mobile Devices Using Focal Stacks", "abstract": "Light field rendering is a widely used technique to generate novel views of a scene from novel viewpoints. Interpolative methods for light field rendering require a dense description of the scene in the form of closely spaced images. In this work, we present a simple method for dense view interpolation over general static scenes, using commonly available mobile devices. We capture an approximate focal stack of the scene from adjacent camera locations and interpolate intermediate images by shifting each focal region according to appropriate disparities. We do not rely on focus distance control to capture focal stacks and describe an automatic method of estimating the focal textures and the blur and disparity parameters required for view interpolation.", "abstracts": [ { "abstractType": "Regular", "content": "Light field rendering is a widely used technique to generate novel views of a scene from novel viewpoints. Interpolative methods for light field rendering require a dense description of the scene in the form of closely spaced images. In this work, we present a simple method for dense view interpolation over general static scenes, using commonly available mobile devices. We capture an approximate focal stack of the scene from adjacent camera locations and interpolate intermediate images by shifting each focal region according to appropriate disparities. 
We do not rely on focus distance control to capture focal stacks and describe an automatic method of estimating the focal textures and the blur and disparity parameters required for view interpolation.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Light field rendering is a widely used technique to generate novel views of a scene from novel viewpoints. Interpolative methods for light field rendering require a dense description of the scene in the form of closely spaced images. In this work, we present a simple method for dense view interpolation over general static scenes, using commonly available mobile devices. We capture an approximate focal stack of the scene from adjacent camera locations and interpolate intermediate images by shifting each focal region according to appropriate disparities. We do not rely on focus distance control to capture focal stacks and describe an automatic method of estimating the focal textures and the blur and disparity parameters required for view interpolation.", "fno": "4308a138", "keywords": [ "Cameras", "Interpolation", "Equations", "Indexes", "Kernel", "Estimation", "Mobile Handsets", "Mobile Computational Photography" ], "authors": [ { "affiliation": null, "fullName": "Parikshit Sakurikar", "givenName": "Parikshit", "surname": "Sakurikar", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "P.J. 
Narayanan", "givenName": "P.J.", "surname": "Narayanan", "__typename": "ArticleAuthorType" } ], "idPrefix": "cvprw", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2014-06-01T00:00:00", "pubType": "proceedings", "pages": "138-143", "year": "2014", "issn": "2160-7516", "isbn": "978-1-4799-4308-1", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "4308a132", "articleId": "12OmNAmVH7l", "__typename": "AdjacentArticleType" }, "next": { "fno": "4308a144", "articleId": "12OmNxG1yQV", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/wacv/2018/4886/0/488601b056", "title": "SceneFlowFields: Dense Interpolation of Sparse Scene Flow Correspondences", "doi": null, "abstractUrl": "/proceedings-article/wacv/2018/488601b056/12OmNBA9oAz", "parentPublication": { "id": "proceedings/wacv/2018/4886/0", "title": "2018 IEEE Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2017/4822/0/07926682", "title": "Densification of Semi-Dense Reconstructions for Novel View Generation of Live Scenes", "doi": null, "abstractUrl": "/proceedings-article/wacv/2017/07926682/12OmNBPc8wD", "parentPublication": { "id": "proceedings/wacv/2017/4822/0", "title": "2017 IEEE Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/pg/1998/8620/0/86200061", "title": "Nonlinear View Interpolation", "doi": null, "abstractUrl": "/proceedings-article/pg/1998/86200061/12OmNqFa5oF", "parentPublication": { "id": "proceedings/pg/1998/8620/0", "title": "Computer Graphics and Applications, Pacific Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/icme/2014/4761/0/06890281", "title": "High resolution free-view interpolation of planar structure", "doi": null, "abstractUrl": "/proceedings-article/icme/2014/06890281/12OmNyL0TqG", "parentPublication": { "id": "proceedings/icme/2014/4761/0", "title": "2014 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2004/8603/3/01394702", "title": "Reconstructing dense light field from a multi-focus images array", "doi": null, "abstractUrl": "/proceedings-article/icme/2004/01394702/12OmNyRxFDN", "parentPublication": { "id": "proceedings/icme/2004/8603/3", "title": "2004 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2017/0457/0/0457g363", "title": "InterpoNet, a Brain Inspired Neural Network for Optical Flow Dense Interpolation", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2017/0457g363/12OmNyprnpB", "parentPublication": { "id": "proceedings/cvpr/2017/0457/0", "title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icisce/2017/3013/0/3013b587", "title": "Performance Evaluation of Interpolation Algorithms for Division of Focal Plane Polarization Image Sensors", "doi": null, "abstractUrl": "/proceedings-article/icisce/2017/3013b587/12OmNzb7Zkc", "parentPublication": { "id": "proceedings/icisce/2017/3013/0", "title": "2017 4th International Conference on Information Science and Control Engineering (ICISCE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2020/02/08417976", "title": "Focal Loss for Dense Object Detection", "doi": null, "abstractUrl": "/journal/tp/2020/02/08417976/1gqq4MumO8o", "parentPublication": { "id": "trans/tp", "title": "IEEE 
Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmew/2020/1485/0/09106041", "title": "A Benchmark of Light Field View Interpolation Methods", "doi": null, "abstractUrl": "/proceedings-article/icmew/2020/09106041/1kwqyUmJPIk", "parentPublication": { "id": "proceedings/icmew/2020/1485/0", "title": "2020 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2021/0477/0/047700a197", "title": "SSGP: Sparse Spatial Guided Propagation for Robust and Generic Interpolation", "doi": null, "abstractUrl": "/proceedings-article/wacv/2021/047700a197/1uqGkuuB73O", "parentPublication": { "id": "proceedings/wacv/2021/0477/0", "title": "2021 IEEE Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNAZOJTb", "title": "2008 International Symposium on Computer Science and Computational Technology (ISCSCT)", "acronym": "iscsct", "groupId": "1002560", "volume": "2", "displayVolume": "2", "year": "2008", "__typename": "ProceedingType" }, "article": { "id": "12OmNBTs7Be", "doi": "10.1109/ISCSCT.2008.346", "title": "A Color Correction Algorithm of Multi-view Video Based on Depth Segmentation", "normalizedTitle": "A Color Correction Algorithm of Multi-view Video Based on Depth Segmentation", "abstract": "In order to solve the problem of inconsistent color appearance between different views, and subsequently influences on multi-view video coding, a color correction algorithm of multi-view video based on depth segmentation is proposed in this paper. Firstly, depth information in each view is calculated. Then, background and foreground region are separated with the depth information. Finally, correction factors are achieved and color correction is used for the two regions respectively. Moreover, the boundary between the background and foreground has been treated with color gradual change. Experimental results show that the proposed algorithm can obtain consistent color appearance for different views and high coding efficiency. It is an effective method of color correction.", "abstracts": [ { "abstractType": "Regular", "content": "In order to solve the problem of inconsistent color appearance between different views, and subsequently influences on multi-view video coding, a color correction algorithm of multi-view video based on depth segmentation is proposed in this paper. Firstly, depth information in each view is calculated. Then, background and foreground region are separated with the depth information. Finally, correction factors are achieved and color correction is used for the two regions respectively. Moreover, the boundary between the background and foreground has been treated with color gradual change. 
Experimental results show that the proposed algorithm can obtain consistent color appearance for different views and high coding efficiency. It is an effective method of color correction.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In order to solve the problem of inconsistent color appearance between different views, and subsequently influences on multi-view video coding, a color correction algorithm of multi-view video based on depth segmentation is proposed in this paper. Firstly, depth information in each view is calculated. Then, background and foreground region are separated with the depth information. Finally, correction factors are achieved and color correction is used for the two regions respectively. Moreover, the boundary between the background and foreground has been treated with color gradual change. Experimental results show that the proposed algorithm can obtain consistent color appearance for different views and high coding efficiency. It is an effective method of color correction.", "fno": "3498b206", "keywords": [ "Depth Segmentation", "Color Correction", "Multi View Video Coding" ], "authors": [ { "affiliation": null, "fullName": "Yue Fei", "givenName": "Yue", "surname": "Fei", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Mei Yu", "givenName": "Mei", "surname": "Yu", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Feng Shao", "givenName": "Feng", "surname": "Shao", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Gangyi Jiang", "givenName": "Gangyi", "surname": "Jiang", "__typename": "ArticleAuthorType" } ], "idPrefix": "iscsct", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2008-12-01T00:00:00", "pubType": "proceedings", "pages": "206-209", "year": "2008", "issn": null, "isbn": "978-0-7695-3498-5", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], 
"adjacentArticles": { "previous": { "fno": "3498b202", "articleId": "12OmNCbCrWS", "__typename": "AdjacentArticleType" }, "next": { "fno": "3498b210", "articleId": "12OmNC1oT3m", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icme/2012/4711/0/4711a109", "title": "Depth-Based Disocclusion Filling for Virtual View Synthesis", "doi": null, "abstractUrl": "/proceedings-article/icme/2012/4711a109/12OmNrAv3Dp", "parentPublication": { "id": "proceedings/icme/2012/4711/0", "title": "2012 IEEE International Conference on Multimedia and Expo", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/apcip/2009/3699/2/3699b104", "title": "View Synthesis Oriented Depth Map Coding Algorithm", "doi": null, "abstractUrl": "/proceedings-article/apcip/2009/3699b104/12OmNvjyxW5", "parentPublication": { "id": "proceedings/apcip/2009/3699/1", "title": "Information Processing, Asia-Pacific Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2001/1195/0/11950323", "title": "View Morphing Using Sprites with Depth", "doi": null, "abstractUrl": "/proceedings-article/iv/2001/11950323/12OmNx8wTiw", "parentPublication": { "id": "proceedings/iv/2001/1195/0", "title": "Proceedings Fifth International Conference on Information Visualisation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ism/2011/4589/0/4589a013", "title": "High Quality Free Viewpoint Synthesis Using Multi-view Images with Depth Information", "doi": null, "abstractUrl": "/proceedings-article/ism/2011/4589a013/12OmNxWcH0w", "parentPublication": { "id": "proceedings/ism/2011/4589/0", "title": "2011 IEEE International Symposium on Multimedia", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/apwcs/2010/4003/0/4003a147", "title": "A New Virtual 
View Rendering Method Based on Depth Image", "doi": null, "abstractUrl": "/proceedings-article/apwcs/2010/4003a147/12OmNy6HQX6", "parentPublication": { "id": "proceedings/apwcs/2010/4003/0", "title": "Wearable Computing Systems, Asia-Pacific Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icicta/2010/4077/1/4077a970", "title": "Color Correction for Multi-view Video Based on Color Variation Curve", "doi": null, "abstractUrl": "/proceedings-article/icicta/2010/4077a970/12OmNyRxFEF", "parentPublication": { "id": "proceedings/icicta/2010/4077/1", "title": "Intelligent Computation Technology and Automation, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cso/2011/4335/0/4335a857", "title": "Color Correction Based on SIFT and GRNN for Multi-view Video", "doi": null, "abstractUrl": "/proceedings-article/cso/2011/4335a857/12OmNyeWdMm", "parentPublication": { "id": "proceedings/cso/2011/4335/0", "title": "2011 Fourth International Joint Conference on Computational Sciences and Optimization", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/dcc/2012/4656/0/4656a402", "title": "Improved View Synthesis with Depth Reliability Maps", "doi": null, "abstractUrl": "/proceedings-article/dcc/2012/4656a402/12OmNzQzqjy", "parentPublication": { "id": "proceedings/dcc/2012/4656/0", "title": "Data Compression Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/peits/2008/3342/0/3342a149", "title": "Multi-View Video Coding Using Color Correction", "doi": null, "abstractUrl": "/proceedings-article/peits/2008/3342a149/12OmNzwpUlz", "parentPublication": { "id": "proceedings/peits/2008/3342/0", "title": "2008 Workshop on Power Electronics and Intelligent Transportation System", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "trans/tp/2011/03/ttp2011030603", "title": "Robust Bilayer Segmentation and Motion/Depth Estimation with a Handheld Camera", "doi": null, "abstractUrl": "/journal/tp/2011/03/ttp2011030603/13rRUwfZBWm", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1lgooiYzVba", "title": "Computing, Communication, Control and Management, ISECS International Colloquium on", "acronym": "cccm", "groupId": "1002423", "volume": "1", "displayVolume": "1", "year": "2008", "__typename": "ProceedingType" }, "article": { "id": "12OmNBp52uZ", "doi": "10.1109/CCCM.2008.313", "title": "A Color Error Correction Mode for Digital Camera Based on Polynomial Curve Generation", "normalizedTitle": "A Color Error Correction Mode for Digital Camera Based on Polynomial Curve Generation", "abstract": "Digital camera is one of the main devices in computer and multimedia technology and its color management model is the key to guarantee color consistency in succedent image production and transfers. The paper presents a color conversion model for digital camera based on polynomial curve generation. Firstly, color rendering principle of digital camera is analyzed. Then digital camera data is pretreated to a unitary field to deduce final model. Thirdly, standard color target is taken for experimental sample and substitutes color blocks in color shade district for complete color space to solve the difficulties of experimental color blocks selecting; Fourthly, the model using polynomial curve generation algorithm to correct color error is deduced; Finally, the realization and experiment results show that, compared with some methods which have relatively high accuracy, the algorithm can improve color conversion accuracy and can satisfy the engineering requirement in digital camera color management .", "abstracts": [ { "abstractType": "Regular", "content": "Digital camera is one of the main devices in computer and multimedia technology and its color management model is the key to guarantee color consistency in succedent image production and transfers. The paper presents a color conversion model for digital camera based on polynomial curve generation. Firstly, color rendering principle of digital camera is analyzed. 
Then digital camera data is pretreated to a unitary field to deduce final model. Thirdly, standard color target is taken for experimental sample and substitutes color blocks in color shade district for complete color space to solve the difficulties of experimental color blocks selecting; Fourthly, the model using polynomial curve generation algorithm to correct color error is deduced; Finally, the realization and experiment results show that, compared with some methods which have relatively high accuracy, the algorithm can improve color conversion accuracy and can satisfy the engineering requirement in digital camera color management .", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Digital camera is one of the main devices in computer and multimedia technology and its color management model is the key to guarantee color consistency in succedent image production and transfers. The paper presents a color conversion model for digital camera based on polynomial curve generation. Firstly, color rendering principle of digital camera is analyzed. Then digital camera data is pretreated to a unitary field to deduce final model. 
Thirdly, standard color target is taken for experimental sample and substitutes color blocks in color shade district for complete color space to solve the difficulties of experimental color blocks selecting; Fourthly, the model using polynomial curve generation algorithm to correct color error is deduced; Finally, the realization and experiment results show that, compared with some methods which have relatively high accuracy, the algorithm can improve color conversion accuracy and can satisfy the engineering requirement in digital camera color management .", "fno": "3290a458", "keywords": [ "Color Error Correction", "Polynomial Curve Generation", "Digital Camera Color Conversion", "Color Rendering Principle", "Color Shade Districts" ], "authors": [ { "affiliation": null, "fullName": "Li Xinwu", "givenName": "Li", "surname": "Xinwu", "__typename": "ArticleAuthorType" } ], "idPrefix": "cccm", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2008-08-01T00:00:00", "pubType": "proceedings", "pages": "458-461", "year": "2008", "issn": null, "isbn": "978-0-7695-3290-5", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "3290a454", "articleId": "12OmNwlHSUM", "__typename": "AdjacentArticleType" }, "next": { "fno": "3290a462", "articleId": "12OmNBv2CjT", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/csse/2008/3336/4/3336g174", "title": "Applications of Tetrahedral Interpolation in Color Conversion Model", "doi": null, "abstractUrl": "/proceedings-article/csse/2008/3336g174/12OmNAH5dmz", "parentPublication": { "id": "csse/2008/3336/4", "title": "Computer Science and Software Engineering, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/1994/6265/1/00576263", "title": "Online color 
camera calibration", "doi": null, "abstractUrl": "/proceedings-article/icpr/1994/00576263/12OmNBLdKIw", "parentPublication": { "id": "proceedings/icpr/1994/6265/1", "title": "Proceedings of 12th International Conference on Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2010/4109/0/4109a225", "title": "Adaptive Color Curve Models for Image Matting", "doi": null, "abstractUrl": "/proceedings-article/icpr/2010/4109a225/12OmNBgz4AR", "parentPublication": { "id": "proceedings/icpr/2010/4109/0", "title": "Pattern Recognition, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmtma/2010/3962/3/3962e303", "title": "Scanner Color Chart Making and its Application in Printing Color Conversion", "doi": null, "abstractUrl": "/proceedings-article/icmtma/2010/3962e303/12OmNCdTePL", "parentPublication": { "id": "proceedings/icmtma/2010/3962/3", "title": "2010 International Conference on Measuring Technology and Mechatronics Automation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iih-msp/2008/3278/0/3278a173", "title": "Face Detection with Automatic White Balance for Digital Still Camera", "doi": null, "abstractUrl": "/proceedings-article/iih-msp/2008/3278a173/12OmNvRU0rA", "parentPublication": { "id": "proceedings/iih-msp/2008/3278/0", "title": "2008 Fourth International Conference on Intelligent Information Hiding and Multimedia Signal Processing (IIH-MSP)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ncm/2009/3769/0/3769b598", "title": "Color Correction for Object Identification from Images with Different Color Illumination", "doi": null, "abstractUrl": "/proceedings-article/ncm/2009/3769b598/12OmNxUv6gW", "parentPublication": { "id": "proceedings/ncm/2009/3769/0", "title": "Networked Computing and Advanced 
Information Management, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/isdea/2012/4608/0/4608b285", "title": "Research of Color Correction Algorithm for Multi-projector Screen Based on Projector-Camera System", "doi": null, "abstractUrl": "/proceedings-article/isdea/2012/4608b285/12OmNxwENpp", "parentPublication": { "id": "proceedings/isdea/2012/4608/0", "title": "2012 Second International Conference on Intelligent System Design and Engineering Application", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icicta/2010/4077/1/4077a970", "title": "Color Correction for Multi-view Video Based on Color Variation Curve", "doi": null, "abstractUrl": "/proceedings-article/icicta/2010/4077a970/12OmNyRxFEF", "parentPublication": { "id": "proceedings/icicta/2010/4077/1", "title": "Intelligent Computation Technology and Automation, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdar/2001/1263/0/12630210", "title": "Character Segmentation of Color Images from Digital Camera", "doi": null, "abstractUrl": "/proceedings-article/icdar/2001/12630210/12OmNz61dmM", "parentPublication": { "id": "proceedings/icdar/2001/1263/0", "title": "Proceedings of Sixth International Conference on Document Analysis and Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2012/10/ttp2012102058", "title": "Joint Depth and Color Camera Calibration with Distortion Correction", "doi": null, "abstractUrl": "/journal/tp/2012/10/ttp2012102058/13rRUxly8Ue", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNyaXPPU", "title": "2015 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)", "acronym": "icmew", "groupId": "1801805", "volume": "0", "displayVolume": "0", "year": "2015", "__typename": "ProceedingType" }, "article": { "id": "12OmNs0C9Ua", "doi": "10.1109/ICMEW.2015.7169773", "title": "A disocclusion filling method using multiple sprites with depth for virtual view synthesis", "normalizedTitle": "A disocclusion filling method using multiple sprites with depth for virtual view synthesis", "abstract": "Depth image based rendering (DIBR) is an important technique to generate virtual view images with limited 3-D data. However, disocclusion is a critical problem that the regions occluded by foreground objects become visible in virtual views, which is difficult to be visual-plausibly inferred. In this paper, we propose a novel temporally consistent filling method using multiple sprites with depth (MSD) to fill the disocclusions faithfully. MSD stores the background as well as intermediate foreground objects of past frames in multiple sprites, so that amounts of pixels in disocclusions can be recovered from MSD. Moreover, we also introduce a method to recognize the source space for exemplar based inpainting to fill the remaining disocclusions. The experimental results show the proposed method achieves objective and subjective improvement compared with the state-of-the-art methods. The synthesized sequences by the proposed method have higher spatio-temporal consistency.", "abstracts": [ { "abstractType": "Regular", "content": "Depth image based rendering (DIBR) is an important technique to generate virtual view images with limited 3-D data. However, disocclusion is a critical problem that the regions occluded by foreground objects become visible in virtual views, which is difficult to be visual-plausibly inferred. 
In this paper, we propose a novel temporally consistent filling method using multiple sprites with depth (MSD) to fill the disocclusions faithfully. MSD stores the background as well as intermediate foreground objects of past frames in multiple sprites, so that amounts of pixels in disocclusions can be recovered from MSD. Moreover, we also introduce a method to recognize the source space for exemplar based inpainting to fill the remaining disocclusions. The experimental results show the proposed method achieves objective and subjective improvement compared with the state-of-the-art methods. The synthesized sequences by the proposed method have higher spatio-temporal consistency.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Depth image based rendering (DIBR) is an important technique to generate virtual view images with limited 3-D data. However, disocclusion is a critical problem that the regions occluded by foreground objects become visible in virtual views, which is difficult to be visual-plausibly inferred. In this paper, we propose a novel temporally consistent filling method using multiple sprites with depth (MSD) to fill the disocclusions faithfully. MSD stores the background as well as intermediate foreground objects of past frames in multiple sprites, so that amounts of pixels in disocclusions can be recovered from MSD. Moreover, we also introduce a method to recognize the source space for exemplar based inpainting to fill the remaining disocclusions. The experimental results show the proposed method achieves objective and subjective improvement compared with the state-of-the-art methods. 
The synthesized sequences by the proposed method have higher spatio-temporal consistency.", "fno": "07169773", "keywords": [ "Sprites Computer", "Image Color Analysis", "Filling", "Rendering Computer Graphics", "Three Dimensional Displays", "Robustness", "Inpainting", "Disocclusion Filling", "Depth Image Based Rendering", "3 D", "Multiple Sprites With Depth" ], "authors": [ { "affiliation": "Department of Electronic Engineering, The Chinese University of Hong Kong, Hong Kong", "fullName": "Chi Ho Cheung", "givenName": null, "surname": "Chi Ho Cheung", "__typename": "ArticleAuthorType" }, { "affiliation": "Department of Electronic Engineering, The Chinese University of Hong Kong, Hong Kong", "fullName": "Lu Sheng", "givenName": null, "surname": "Lu Sheng", "__typename": "ArticleAuthorType" }, { "affiliation": "Department of Electronic Engineering, The Chinese University of Hong Kong, Hong Kong", "fullName": "King Ngi Ngan", "givenName": null, "surname": "King Ngi Ngan", "__typename": "ArticleAuthorType" } ], "idPrefix": "icmew", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2015-06-01T00:00:00", "pubType": "proceedings", "pages": "1-6", "year": "2015", "issn": null, "isbn": "978-1-4799-7079-7", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "07169772", "articleId": "12OmNxjjEhf", "__typename": "AdjacentArticleType" }, "next": { "fno": "07169774", "articleId": "12OmNvSKNBx", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icpr/2014/5209/0/5209b073", "title": "Superpixel-Based Disocclusion Filling in Depth Image Based Rendering", "doi": null, "abstractUrl": "/proceedings-article/icpr/2014/5209b073/12OmNBqdr4j", "parentPublication": { "id": "proceedings/icpr/2014/5209/0", "title": "2014 22nd International Conference on Pattern Recognition (ICPR)", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmew/2016/1552/0/07574740", "title": "Hole-filling for single-view plus-depth based rendering with temporal texture synthesis", "doi": null, "abstractUrl": "/proceedings-article/icmew/2016/07574740/12OmNqBbHwM", "parentPublication": { "id": "proceedings/icmew/2016/1552/0", "title": "2016 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2012/4711/0/4711a109", "title": "Depth-Based Disocclusion Filling for Virtual View Synthesis", "doi": null, "abstractUrl": "/proceedings-article/icme/2012/4711a109/12OmNrAv3Dp", "parentPublication": { "id": "proceedings/icme/2012/4711/0", "title": "2012 IEEE International Conference on Multimedia and Expo", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2008/2174/0/04761649", "title": "Improved novel view synthesis from depth image with large baseline", "doi": null, "abstractUrl": "/proceedings-article/icpr/2008/04761649/12OmNrAv3Sk", "parentPublication": { "id": "proceedings/icpr/2008/2174/0", "title": "ICPR 2008 19th International Conference on Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmew/2015/7079/0/07169759", "title": "Improving DIBR technique to resolve foreground color/depth edge misalignment", "doi": null, "abstractUrl": "/proceedings-article/icmew/2015/07169759/12OmNscOUe7", "parentPublication": { "id": "proceedings/icmew/2015/7079/0", "title": "2015 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2001/1195/0/11950323", "title": "View Morphing Using Sprites with Depth", "doi": null, "abstractUrl": "/proceedings-article/iv/2001/11950323/12OmNx8wTiw", 
"parentPublication": { "id": "proceedings/iv/2001/1195/0", "title": "Proceedings Fifth International Conference on Information Visualisation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2012/2216/0/06460992", "title": "Faithful Spatio-Temporal disocclusion filling using local optimization", "doi": null, "abstractUrl": "/proceedings-article/icpr/2012/06460992/12OmNxuFBpr", "parentPublication": { "id": "proceedings/icpr/2012/2216/0", "title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2020/06/08642935", "title": "A Disocclusion Inpainting Framework for Depth-Based View Synthesis", "doi": null, "abstractUrl": "/journal/tp/2020/06/08642935/17PYElAbxtK", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600c647", "title": "Deformable Sprites for Unsupervised Video Decomposition", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600c647/1H1iYumOcQE", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2005/2372/2/01467591", "title": "Interactive montages of sprites for indexing and summarizing security video", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2005/01467591/1htC5GezDUc", "parentPublication": { "id": "proceedings/cvpr/2005/2372/2", "title": "2005 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'05)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNC1GueH", "title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)", "acronym": "icpr", "groupId": "1000545", "volume": "0", "displayVolume": "0", "year": "2012", "__typename": "ProceedingType" }, "article": { "id": "12OmNxuFBpr", "doi": "", "title": "Faithful Spatio-Temporal disocclusion filling using local optimization", "normalizedTitle": "Faithful Spatio-Temporal disocclusion filling using local optimization", "abstract": "We present a novel method to fill disoccluded regions occurring in Depth Image Based Rendering (DIBR) in a faithful way. Given a video stream and a corresponding depth map, DIBR can render arbitrary new views of a scene. Areas that are not visible in the reference view need to be filled after warping. We present a novel framework for this task which can reconstruct the disoc-cluded regions by taking temporally neighboring frames into account. An efficient optimization scheme is employed to find faithful filling regions. This way, in contrast to common methods, we can fill disocclusions with their true color values, yielding high-quality view synthesis results.", "abstracts": [ { "abstractType": "Regular", "content": "We present a novel method to fill disoccluded regions occurring in Depth Image Based Rendering (DIBR) in a faithful way. Given a video stream and a corresponding depth map, DIBR can render arbitrary new views of a scene. Areas that are not visible in the reference view need to be filled after warping. We present a novel framework for this task which can reconstruct the disoc-cluded regions by taking temporally neighboring frames into account. An efficient optimization scheme is employed to find faithful filling regions. 
This way, in contrast to common methods, we can fill disocclusions with their true color values, yielding high-quality view synthesis results.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We present a novel method to fill disoccluded regions occurring in Depth Image Based Rendering (DIBR) in a faithful way. Given a video stream and a corresponding depth map, DIBR can render arbitrary new views of a scene. Areas that are not visible in the reference view need to be filled after warping. We present a novel framework for this task which can reconstruct the disoc-cluded regions by taking temporally neighboring frames into account. An efficient optimization scheme is employed to find faithful filling regions. This way, in contrast to common methods, we can fill disocclusions with their true color values, yielding high-quality view synthesis results.", "fno": "06460992", "keywords": [ "Image Colour Analysis", "Rendering Computer Graphics", "Video Signal Processing", "Faithful Spatio Temporal Disocclusion Filling", "Local Optimization", "Depth Image Based Rendering", "DIBR", "Video Stream", "Reference View", "Novel Framework", "Efficient Optimization Scheme", "Neighboring Frames", "True Color Values", "High Quality View Synthesis Results", "Image Color Analysis", "Streaming Media", "Cameras", "Color", "Image Reconstruction", "Interpolation", "Smoothing Methods" ], "authors": [ { "affiliation": "Department of Mathematics and Computer Science, University of Mu¨nster", "fullName": "Michael Schmeing", "givenName": "Michael", "surname": "Schmeing", "__typename": "ArticleAuthorType" }, { "affiliation": "Department of Mathematics and Computer Science, University of Mu¨nster", "fullName": "Xiaoyi Jiang", "givenName": "Xiaoyi", "surname": "Jiang", "__typename": "ArticleAuthorType" } ], "idPrefix": "icpr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2012-11-01T00:00:00", "pubType": "proceedings", 
"pages": "3799-3802", "year": "2012", "issn": "1051-4651", "isbn": "978-1-4673-2216-4", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "06460991", "articleId": "12OmNzxPTNt", "__typename": "AdjacentArticleType" }, "next": { "fno": "06460993", "articleId": "12OmNwFicYJ", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/3dpvt/2006/2825/0/04155795", "title": "Automatic Hole-Filling of Triangular Meshes Using Local Radial Basis Function", "doi": null, "abstractUrl": "/proceedings-article/3dpvt/2006/04155795/12OmNApu5sy", "parentPublication": { "id": "proceedings/3dpvt/2006/2825/0", "title": "Third International Symposium on 3D Data Processing, Visualization, and Transmission (3DPVT'06)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2012/4711/0/4711a339", "title": "Foreground-Object-Protected Depth Map Smoothing for DIBR", "doi": null, "abstractUrl": "/proceedings-article/icme/2012/4711a339/12OmNBUS78r", "parentPublication": { "id": "proceedings/icme/2012/4711/0", "title": "2012 IEEE International Conference on Multimedia and Expo", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2014/5209/0/5209b073", "title": "Superpixel-Based Disocclusion Filling in Depth Image Based Rendering", "doi": null, "abstractUrl": "/proceedings-article/icpr/2014/5209b073/12OmNBqdr4j", "parentPublication": { "id": "proceedings/icpr/2014/5209/0", "title": "2014 22nd International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2012/4711/0/4711a109", "title": "Depth-Based Disocclusion Filling for Virtual View Synthesis", "doi": null, "abstractUrl": "/proceedings-article/icme/2012/4711a109/12OmNrAv3Dp", "parentPublication": { "id": 
"proceedings/icme/2012/4711/0", "title": "2012 IEEE International Conference on Multimedia and Expo", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2008/2174/0/04761649", "title": "Improved novel view synthesis from depth image with large baseline", "doi": null, "abstractUrl": "/proceedings-article/icpr/2008/04761649/12OmNrAv3Sk", "parentPublication": { "id": "proceedings/icpr/2008/2174/0", "title": "ICPR 2008 19th International Conference on Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmew/2015/7079/0/07169773", "title": "A disocclusion filling method using multiple sprites with depth for virtual view synthesis", "doi": null, "abstractUrl": "/proceedings-article/icmew/2015/07169773/12OmNs0C9Ua", "parentPublication": { "id": "proceedings/icmew/2015/7079/0", "title": "2015 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2012/2216/0/06460109", "title": "Salient region detection using local and global saliency", "doi": null, "abstractUrl": "/proceedings-article/icpr/2012/06460109/12OmNs4S8Fj", "parentPublication": { "id": "proceedings/icpr/2012/2216/0", "title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2014/4761/0/06890181", "title": "Dictionary based hole filling with assistance of depth", "doi": null, "abstractUrl": "/proceedings-article/icme/2014/06890181/12OmNvDZF5x", "parentPublication": { "id": "proceedings/icme/2014/4761/0", "title": "2014 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2017/6067/0/08019307", "title": "Perceptual quality assessment 
of 3D synthesized images", "doi": null, "abstractUrl": "/proceedings-article/icme/2017/08019307/12OmNwDACCR", "parentPublication": { "id": "proceedings/icme/2017/6067/0", "title": "2017 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2021/4057/0/405700a603", "title": "Disocclusion-Reducing Geometry for Multiple RGB-D Video Streams", "doi": null, "abstractUrl": "/proceedings-article/vrw/2021/405700a603/1tnXAB55dEk", "parentPublication": { "id": "proceedings/vrw/2021/4057/0", "title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1B12DGrwoyQ", "title": "2022 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)", "acronym": "wacv", "groupId": "1000040", "volume": "0", "displayVolume": "0", "year": "2022", "__typename": "ProceedingType" }, "article": { "id": "1B13BrNvA40", "doi": "10.1109/WACV51458.2022.00315", "title": "Revealing Disocclusions in Temporal View Synthesis through Infilling Vector Prediction", "normalizedTitle": "Revealing Disocclusions in Temporal View Synthesis through Infilling Vector Prediction", "abstract": "We consider the problem of temporal view synthesis, where the goal is to predict a future video frame from the past frames using knowledge of the depth and relative camera motion. In contrast to revealing the disoccluded regions through intensity based infilling, we study the idea of an infilling vector to infill by pointing to a non-disoccluded region in the synthesized view. To exploit the structure of disocclusions created by camera motion during their infilling, we rely on two important cues, temporal correlation of infilling directions and depth. We design a learning framework to predict the infilling vector by computing a temporal prior that reflects past infilling directions and a normalized depth map as input to the network. We conduct extensive experiments on a large scale dataset we build for evaluating temporal view synthesis in addition to the SceneNet RGB-D dataset. Our experiments demonstrate that our infilling vector prediction approach achieves superior quantitative and qualitative infilling performance compared to other approaches in literature.", "abstracts": [ { "abstractType": "Regular", "content": "We consider the problem of temporal view synthesis, where the goal is to predict a future video frame from the past frames using knowledge of the depth and relative camera motion. 
In contrast to revealing the disoccluded regions through intensity based infilling, we study the idea of an infilling vector to infill by pointing to a non-disoccluded region in the synthesized view. To exploit the structure of disocclusions created by camera motion during their infilling, we rely on two important cues, temporal correlation of infilling directions and depth. We design a learning framework to predict the infilling vector by computing a temporal prior that reflects past infilling directions and a normalized depth map as input to the network. We conduct extensive experiments on a large scale dataset we build for evaluating temporal view synthesis in addition to the SceneNet RGB-D dataset. Our experiments demonstrate that our infilling vector prediction approach achieves superior quantitative and qualitative infilling performance compared to other approaches in literature.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We consider the problem of temporal view synthesis, where the goal is to predict a future video frame from the past frames using knowledge of the depth and relative camera motion. In contrast to revealing the disoccluded regions through intensity based infilling, we study the idea of an infilling vector to infill by pointing to a non-disoccluded region in the synthesized view. To exploit the structure of disocclusions created by camera motion during their infilling, we rely on two important cues, temporal correlation of infilling directions and depth. We design a learning framework to predict the infilling vector by computing a temporal prior that reflects past infilling directions and a normalized depth map as input to the network. We conduct extensive experiments on a large scale dataset we build for evaluating temporal view synthesis in addition to the SceneNet RGB-D dataset. 
Our experiments demonstrate that our infilling vector prediction approach achieves superior quantitative and qualitative infilling performance compared to other approaches in literature.", "fno": "091500d093", "keywords": [ "Cameras", "Image Colour Analysis", "Learning Artificial Intelligence", "Motion Estimation", "Rendering Computer Graphics", "Video Signal Processing", "Temporal Correlation", "Infilling Directions", "Temporal View Synthesis", "Infilling Vector Prediction Approach", "Disocclusions", "Video Frame", "Relative Camera Motion", "Intensity Based Infilling", "Scene Net RGB D Dataset", "Learning Framework", "Normalized Depth Map", "Rendering", "Computer Vision", "Correlation", "Predictive Models", "Cameras", "Rendering Computer Graphics", "Computational Photography", "Image And Video Synthesis 3 D Computer Vision", "Deep Learning", "Vision For Graphics" ], "authors": [ { "affiliation": "Indian Institute of Science,Bengaluru,India", "fullName": "Vijayalakshmi Kanchana", "givenName": "Vijayalakshmi", "surname": "Kanchana", "__typename": "ArticleAuthorType" }, { "affiliation": "Indian Institute of Science,Bengaluru,India", "fullName": "Nagabhushan Somraj", "givenName": "Nagabhushan", "surname": "Somraj", "__typename": "ArticleAuthorType" }, { "affiliation": "Indian Institute of Science,Bengaluru,India", "fullName": "Suraj Yadwad", "givenName": "Suraj", "surname": "Yadwad", "__typename": "ArticleAuthorType" }, { "affiliation": "Indian Institute of Science,Bengaluru,India", "fullName": "Rajiv Soundararajan", "givenName": "Rajiv", "surname": "Soundararajan", "__typename": "ArticleAuthorType" } ], "idPrefix": "wacv", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2022-01-01T00:00:00", "pubType": "proceedings", "pages": "3093-3102", "year": "2022", "issn": null, "isbn": "978-1-6654-0915-5", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [ { "id": "1B13BnC4db2", "name": 
"pwacv202209150-09706629s1-mm_091500d093.zip", "size": "16.4 MB", "location": "https://www.computer.org/csdl/api/v1/extra/pwacv202209150-09706629s1-mm_091500d093.zip", "__typename": "WebExtraType" } ], "adjacentArticles": { "previous": { "fno": "091500d083", "articleId": "1B13rZ8C6wU", "__typename": "AdjacentArticleType" }, "next": { "fno": "091500d103", "articleId": "1B13z3ESSis", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icmew/2016/1552/0/07574740", "title": "Hole-filling for single-view plus-depth based rendering with temporal texture synthesis", "doi": null, "abstractUrl": "/proceedings-article/icmew/2016/07574740/12OmNqBbHwM", "parentPublication": { "id": "proceedings/icmew/2016/1552/0", "title": "2016 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2012/4711/0/4711a109", "title": "Depth-Based Disocclusion Filling for Virtual View Synthesis", "doi": null, "abstractUrl": "/proceedings-article/icme/2012/4711a109/12OmNrAv3Dp", "parentPublication": { "id": "proceedings/icme/2012/4711/0", "title": "2012 IEEE International Conference on Multimedia and Expo", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2008/2174/0/04761649", "title": "Improved novel view synthesis from depth image with large baseline", "doi": null, "abstractUrl": "/proceedings-article/icpr/2008/04761649/12OmNrAv3Sk", "parentPublication": { "id": "proceedings/icpr/2008/2174/0", "title": "ICPR 2008 19th International Conference on Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2012/4711/0/4711a115", "title": "Virtual View Reconstruction Using Temporal Information", "doi": null, "abstractUrl": "/proceedings-article/icme/2012/4711a115/12OmNwBT1sw", 
"parentPublication": { "id": "proceedings/icme/2012/4711/0", "title": "2012 IEEE International Conference on Multimedia and Expo", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cad-graphics/2015/8020/0/07450419", "title": "View-Dependent Projective Atlases", "doi": null, "abstractUrl": "/proceedings-article/cad-graphics/2015/07450419/12OmNxWuihn", "parentPublication": { "id": "proceedings/cad-graphics/2015/8020/0", "title": "2015 14th International Conference on Computer-Aided Design and Computer Graphics (CAD/Graphics)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmew/2017/0560/0/08026313", "title": "Light field compression using depth image based view synthesis", "doi": null, "abstractUrl": "/proceedings-article/icmew/2017/08026313/12OmNzmclIh", "parentPublication": { "id": "proceedings/icmew/2017/0560/0", "title": "2017 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2020/06/08642935", "title": "A Disocclusion Inpainting Framework for Depth-Based View Synthesis", "doi": null, "abstractUrl": "/journal/tp/2020/06/08642935/17PYElAbxtK", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2022/5325/0/532500a817", "title": "Temporal View Synthesis of Dynamic Scenes through 3D Object Motion Estimation with Multi-Plane Images", "doi": null, "abstractUrl": "/proceedings-article/ismar/2022/532500a817/1JrQPYIt6QU", "parentPublication": { "id": "proceedings/ismar/2022/5325/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/icvrv/2017/2636/0/263600a186", "title": "Low Complexity Hybrid View Synthesis Optimization for 3D-HEVC", "doi": null, "abstractUrl": "/proceedings-article/icvrv/2017/263600a186/1ap5xtOpRvO", "parentPublication": { "id": "proceedings/icvrv/2017/2636/0", "title": "2017 International Conference on Virtual Reality and Visualization (ICVRV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2019/3131/0/313100a690", "title": "Spherical View Synthesis for Self-Supervised 360&#x00B0; Depth Estimation", "doi": null, "abstractUrl": "/proceedings-article/3dv/2019/313100a690/1ezRCwCzke4", "parentPublication": { "id": "proceedings/3dv/2019/3131/0", "title": "2019 International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1BmEezmpGrm", "title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)", "acronym": "iccv", "groupId": "1000149", "volume": "0", "displayVolume": "0", "year": "2021", "__typename": "ProceedingType" }, "article": { "id": "1BmI1xNvy12", "doi": "10.1109/ICCV48922.2021.01235", "title": "MINE: Towards Continuous Depth MPI with NeRF for Novel View Synthesis", "normalizedTitle": "MINE: Towards Continuous Depth MPI with NeRF for Novel View Synthesis", "abstract": "In this paper, we propose MINE to perform novel view synthesis and depth estimation via dense 3D reconstruction from a single image. Our approach is a continuous depth generalization of the Multiplane Images (MPI) by introducing the NEural radiance fields (NeRF). Given a single image as input, MINE predicts a 4-channel image (RGB and volume density) at arbitrary depth values to jointly reconstruct the camera frustum and fill in occluded contents. The reconstructed and inpainted frustum can then be easily rendered into novel RGB or depth views using differentiable rendering. Extensive experiments on RealEstate10K, KITTI and Flowers Light Fields show that our MINE outperforms state-of-the-art by a large margin in novel view synthesis. We also achieve competitive results in depth estimation on iBims-1 and NYU-v2 without annotated depth supervision. Our source code is available at https://github.com/vincentfung13/MINE.", "abstracts": [ { "abstractType": "Regular", "content": "In this paper, we propose MINE to perform novel view synthesis and depth estimation via dense 3D reconstruction from a single image. Our approach is a continuous depth generalization of the Multiplane Images (MPI) by introducing the NEural radiance fields (NeRF). Given a single image as input, MINE predicts a 4-channel image (RGB and volume density) at arbitrary depth values to jointly reconstruct the camera frustum and fill in occluded contents. 
The reconstructed and inpainted frustum can then be easily rendered into novel RGB or depth views using differentiable rendering. Extensive experiments on RealEstate10K, KITTI and Flowers Light Fields show that our MINE outperforms state-of-the-art by a large margin in novel view synthesis. We also achieve competitive results in depth estimation on iBims-1 and NYU-v2 without annotated depth supervision. Our source code is available at https://github.com/vincentfung13/MINE.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In this paper, we propose MINE to perform novel view synthesis and depth estimation via dense 3D reconstruction from a single image. Our approach is a continuous depth generalization of the Multiplane Images (MPI) by introducing the NEural radiance fields (NeRF). Given a single image as input, MINE predicts a 4-channel image (RGB and volume density) at arbitrary depth values to jointly reconstruct the camera frustum and fill in occluded contents. The reconstructed and inpainted frustum can then be easily rendered into novel RGB or depth views using differentiable rendering. Extensive experiments on RealEstate10K, KITTI and Flowers Light Fields show that our MINE outperforms state-of-the-art by a large margin in novel view synthesis. We also achieve competitive results in depth estimation on iBims-1 and NYU-v2 without annotated depth supervision. 
Our source code is available at https://github.com/vincentfung13/MINE.", "fno": "281200m2558", "keywords": [ "Computer Vision", "Three Dimensional Displays", "Codes", "Estimation", "Rendering Computer Graphics", "Cameras", "Light Fields", "3 D From A Single Image And Shape From X", "Image And Video Synthesis", "Stereo", "3 D From Multiview And Other Sensors" ], "authors": [ { "affiliation": "ByteDance", "fullName": "Jiaxin Li", "givenName": "Jiaxin", "surname": "Li", "__typename": "ArticleAuthorType" }, { "affiliation": "ByteDance", "fullName": "Zijian Feng", "givenName": "Zijian", "surname": "Feng", "__typename": "ArticleAuthorType" }, { "affiliation": "ByteDance", "fullName": "Qi She", "givenName": "Qi", "surname": "She", "__typename": "ArticleAuthorType" }, { "affiliation": "ByteDance", "fullName": "Henghui Ding", "givenName": "Henghui", "surname": "Ding", "__typename": "ArticleAuthorType" }, { "affiliation": "ByteDance", "fullName": "Changhu Wang", "givenName": "Changhu", "surname": "Wang", "__typename": "ArticleAuthorType" }, { "affiliation": "National University of Singapore", "fullName": "Gim Hee Lee", "givenName": "Gim Hee", "surname": "Lee", "__typename": "ArticleAuthorType" } ], "idPrefix": "iccv", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2021-10-01T00:00:00", "pubType": "proceedings", "pages": "12558-12568", "year": "2021", "issn": null, "isbn": "978-1-6654-2812-5", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "281200m2548", "articleId": "1BmJI0Lqlxe", "__typename": "AdjacentArticleType" }, "next": { "fno": "281200m2569", "articleId": "1BmFAk4MRe8", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/iccvw/2015/9711/0/5720a199", "title": "Joint Estimation of Depth, Reflectance and Illumination for Depth Refinement", "doi": null, "abstractUrl": 
"/proceedings-article/iccvw/2015/5720a199/12OmNBeRtQL", "parentPublication": { "id": "proceedings/iccvw/2015/9711/0", "title": "2015 IEEE International Conference on Computer Vision Workshop (ICCVW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sibgrapi/2013/5099/0/5099a123", "title": "Explorable Volumetric Depth Images from Raycasting", "doi": null, "abstractUrl": "/proceedings-article/sibgrapi/2013/5099a123/12OmNwBT1oL", "parentPublication": { "id": "proceedings/sibgrapi/2013/5099/0", "title": "2013 XXVI Conference on Graphics, Patterns and Images", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2012/2216/0/06460988", "title": "Glass object localization by joint inference of boundary and depth", "doi": null, "abstractUrl": "/proceedings-article/icpr/2012/06460988/12OmNwMFMiR", "parentPublication": { "id": "proceedings/icpr/2012/2216/0", "title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2021/2812/0/281200e661", "title": "Specificity-preserving RGB-D Saliency Detection", "doi": null, "abstractUrl": "/proceedings-article/iccv/2021/281200e661/1BmEfBtcGQM", "parentPublication": { "id": "proceedings/iccv/2021/2812/0", "title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600m2872", "title": "Depth-supervised NeRF: Fewer Views and Faster Training for Free", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600m2872/1H1ieODToYw", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/cvpr/2022/6946/0/694600m2943", "title": "Towards Multimodal Depth Estimation from Light Fields", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600m2943/1H1k4uRP4sM", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2005/2372/2/01467605", "title": "RGB-Z: mapping a sparse depth map to a high resolution RGB camera image", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2005/01467605/1htC5iEJs1W", "parentPublication": { "id": "proceedings/cvpr/2005/2372/2", "title": "2005 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'05)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/10/09184389", "title": "InpaintFusion: Incremental RGB-D Inpainting for 3D Scenes", "doi": null, "abstractUrl": "/journal/tg/2020/10/09184389/1mLIesC5z0Y", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2021/4509/0/450900i908", "title": "Differentiable Diffusion for Dense Depth Estimation from Multi-view Images", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2021/450900i908/1yeIXHXoqCk", "parentPublication": { "id": "proceedings/cvpr/2021/4509/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2021/4509/0/450900i530", "title": "NeX: Real-time View Synthesis with Neural Basis Expansion", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2021/450900i530/1yeJhQdQUYE", "parentPublication": { "id": "proceedings/cvpr/2021/4509/0", "title": "2021 IEEE/CVF Conference 
on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNxWuirq", "title": "2012 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "acronym": "ismar", "groupId": "1000465", "volume": "0", "displayVolume": "0", "year": "2012", "__typename": "ProceedingType" }, "article": { "id": "12OmNBEpnEt", "doi": "10.1109/ISMAR.2012.6402574", "title": "Occlusion capable optical see-through head-mounted display using freeform optics", "normalizedTitle": "Occlusion capable optical see-through head-mounted display using freeform optics", "abstract": "Most state-of-the-art optical see-through head-mounted display (OST-HMD) lacks mutual occlusion capability between computer-rendered and real objects so that the virtual view through an OST-HMD appears “ghost-like”, floating in the real world. In this paper, we demonstrated a light-weight, compact OST-HMD with mutual occlusion capability by exploring a highly innovative optical approach based on emerging freeform optical design and fabrication technologies. Our approach enabled us to achieve an occlusion-capable OST-HMD system with a very compelling form factor and high optical performance. The proposed display technology is designed for highly efficient liquid crystal on silicon (LCoS) type spatial light modulator (SLM) and bright Organic LED (OLED) microdisplay, which is capable of working in both indoor and outdoor environments. Our current design offered a 1280×1024 color resolution with a field of view (FOV) of 40 degrees and lightweight optics about 30 grams per eye.", "abstracts": [ { "abstractType": "Regular", "content": "Most state-of-the-art optical see-through head-mounted display (OST-HMD) lacks mutual occlusion capability between computer-rendered and real objects so that the virtual view through an OST-HMD appears “ghost-like”, floating in the real world. 
In this paper, we demonstrated a light-weight, compact OST-HMD with mutual occlusion capability by exploring a highly innovative optical approach based on emerging freeform optical design and fabrication technologies. Our approach enabled us to achieve an occlusion-capable OST-HMD system with a very compelling form factor and high optical performance. The proposed display technology is designed for highly efficient liquid crystal on silicon (LCoS) type spatial light modulator (SLM) and bright Organic LED (OLED) microdisplay, which is capable of working in both indoor and outdoor environments. Our current design offered a 1280×1024 color resolution with a field of view (FOV) of 40 degrees and lightweight optics about 30 grams per eye.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Most state-of-the-art optical see-through head-mounted display (OST-HMD) lacks mutual occlusion capability between computer-rendered and real objects so that the virtual view through an OST-HMD appears “ghost-like”, floating in the real world. In this paper, we demonstrated a light-weight, compact OST-HMD with mutual occlusion capability by exploring a highly innovative optical approach based on emerging freeform optical design and fabrication technologies. Our approach enabled us to achieve an occlusion-capable OST-HMD system with a very compelling form factor and high optical performance. The proposed display technology is designed for highly efficient liquid crystal on silicon (LCoS) type spatial light modulator (SLM) and bright Organic LED (OLED) microdisplay, which is capable of working in both indoor and outdoor environments. 
Our current design offered a 1280×1024 color resolution with a field of view (FOV) of 40 degrees and lightweight optics about 30 grams per eye.", "fno": "06402574", "keywords": [ "Optical Device Fabrication", "Optical Imaging", "Biomedical Optical Imaging", "Optical Design", "Microdisplays", "Adaptive Optics", "Augmented Reality", "Head Mounted Display", "Optical See Through", "Mutual Occlusion" ], "authors": [ { "affiliation": "Augmented Vision Inc., Tucson, Arizona, USA", "fullName": "Chunyu Gao", "givenName": "Chunyu", "surname": "Gao", "__typename": "ArticleAuthorType" }, { "affiliation": "Augmented Vision Inc., Tucson, Arizona, USA", "fullName": "Yuxiang Lin", "givenName": "Yuxiang", "surname": "Lin", "__typename": "ArticleAuthorType" }, { "affiliation": "College of Optical Sciences, University of Arizona, Tucson, USA", "fullName": "Hong Hua", "givenName": "Hong", "surname": "Hua", "__typename": "ArticleAuthorType" } ], "idPrefix": "ismar", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2012-11-01T00:00:00", "pubType": "proceedings", "pages": "281-282", "year": "2012", "issn": null, "isbn": "978-1-4673-4660-3", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "06402573", "articleId": "12OmNvmXJ3t", "__typename": "AdjacentArticleType" }, "next": { "fno": "06402575", "articleId": "12OmNyKJigS", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/ismar/2015/7660/0/7660a043", "title": "Simultaneous Direct and Augmented View Distortion Calibration of Optical See-Through Head-Mounted Displays", "doi": null, "abstractUrl": "/proceedings-article/ismar/2015/7660a043/12OmNC1oT64", "parentPublication": { "id": "proceedings/ismar/2015/7660/0", "title": "2015 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, 
"__typename": "RecommendedArticleType" }, { "id": "trans/tg/2015/04/07064856", "title": "Light-Field Correction for Spatial Calibration of Optical See-Through Head-Mounted Displays", "doi": null, "abstractUrl": "/journal/tg/2015/04/07064856/13rRUwjGoG5", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2018/11/08456571", "title": "Restoring the Awareness in the Occluded Visual Field for Optical See-Through Head-Mounted Displays", "doi": null, "abstractUrl": "/journal/tg/2018/11/08456571/14M3DYLGFgs", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/05/08676155", "title": "Varifocal Occlusion for Optical See-Through Head-Mounted Displays using a Slide Occlusion Mask", "doi": null, "abstractUrl": "/journal/tg/2019/05/08676155/18LFfGhc49i", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2022/8402/0/840200a640", "title": "Towards Eye-Perspective Rendering for Optical See-Through Head-Mounted Displays", "doi": null, "abstractUrl": "/proceedings-article/vrw/2022/840200a640/1CJewzlI3CM", "parentPublication": { "id": "proceedings/vrw/2022/8402/0", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2022/5365/0/536500a389", "title": "Objective Measurements of Background Color Shifts Caused by Optical See-Through Head-Mounted Displays", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a389/1J7WuL68jAY", 
"parentPublication": { "id": "proceedings/ismar-adjunct/2022/5365/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2023/05/10050791", "title": "Add-on Occlusion: Turning Off-the-Shelf Optical See-through Head-mounted Displays Occlusion-capable", "doi": null, "abstractUrl": "/journal/tg/2023/05/10050791/1L039oS5wDm", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/11/08827571", "title": "Varifocal Occlusion-Capable Optical See-through Augmented Reality Display based on Focus-tunable Optics", "doi": null, "abstractUrl": "/journal/tg/2019/11/08827571/1dgvaPxmhbi", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/05/08998139", "title": "Factored Occlusion: Single Spatial Light Modulator Occlusion-capable Optical See-through Augmented Reality Display", "doi": null, "abstractUrl": "/journal/tg/2020/05/08998139/1hrXe0Hbv0I", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/12/09416829", "title": "Design of a Pupil-Matched Occlusion-Capable Optical See-Through Wearable Display", "doi": null, "abstractUrl": "/journal/tg/2022/12/09416829/1t8VUXSYL2E", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1CJcAaH6aYg", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "acronym": "vrw", "groupId": "1836626", "volume": "0", "displayVolume": "0", "year": "2022", "__typename": "ProceedingType" }, "article": { "id": "1CJeADcapNK", "doi": "10.1109/VRW55335.2022.00251", "title": "Add-on Occlusion: An External Module for Optical See-through Augmented Reality Displays to Support Mutual Occlusion", "normalizedTitle": "Add-on Occlusion: An External Module for Optical See-through Augmented Reality Displays to Support Mutual Occlusion", "abstract": "The occlusion function benefits augmented reality (AR) in many aspects. However, existing occlusion-capable optical see-through augmented reality (OC-OST-AR) displays are designed by integrating virtual displays into a dedicated occlusion-capable architecture, hereby, we miss merits from emerging OST-AR displays. In this article, we propose an external occlusion module that can be added to common OST-AR displays. Per-pixel occlusion is supported with a small form-factor by using polarization-based optical path compression. The occlusion function can be switched on/off by controlling the incident light polarization. A prototype within a volume of <tex>Z_$6\\times 6\\times 3\\text{cm}$_Z</tex> is built. A preliminary experiment proves that occlusion is realized successfully.", "abstracts": [ { "abstractType": "Regular", "content": "The occlusion function benefits augmented reality (AR) in many aspects. However, existing occlusion-capable optical see-through augmented reality (OC-OST-AR) displays are designed by integrating virtual displays into a dedicated occlusion-capable architecture, hereby, we miss merits from emerging OST-AR displays. In this article, we propose an external occlusion module that can be added to common OST-AR displays. Per-pixel occlusion is supported with a small form-factor by using polarization-based optical path compression. 
The occlusion function can be switched on/off by controlling the incident light polarization. A prototype within a volume of <tex>$6\\times 6\\times 3\\text{cm}$</tex> is built. A preliminary experiment proves that occlusion is realized successfully.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "The occlusion function benefits augmented reality (AR) in many aspects. However, existing occlusion-capable optical see-through augmented reality (OC-OST-AR) displays are designed by integrating virtual displays into a dedicated occlusion-capable architecture, hereby, we miss merits from emerging OST-AR displays. In this article, we propose an external occlusion module that can be added to common OST-AR displays. Per-pixel occlusion is supported with a small form-factor by using polarization-based optical path compression. The occlusion function can be switched on/off by controlling the incident light polarization. A prototype within a volume of - is built. A preliminary experiment proves that occlusion is realized successfully.", "fno": "840200a800", "keywords": [ "Augmented Reality", "Hidden Feature Removal", "Light Polarisation", "Mutual Occlusion", "Virtual Displays", "External Occlusion Module", "Per Pixel Occlusion", "Polarization Based Optical Path Compression", "Add On Occlusion", "Occlusion Capable Optical See Through Augmented Reality Displays", "OC OST AR Displays", "Form Factor", "Incident Light Polarization", "Optical Polarization", "Three Dimensional Displays", "Conferences", "Optical Switches", "Prototypes", "User Interfaces", "Optical Imaging", "Human Centered Computing X 2014 Human Computer Interaction HCI X 2014 Interaction Paradigms X 2014 Mixed Augmented Reality", "Hardware X 2014 Emerging Technologies X 2014 Emerging Optical And Photonic Technologies" ], "authors": [ { "affiliation": "Nara Institute of Science and Technology", "fullName": "Yan Zhang", "givenName": "Yan", "surname": "Zhang", "__typename": "ArticleAuthorType" }, { 
"affiliation": "Nara Institute of Science and Technology", "fullName": "Kiyoshi Kiyokawa", "givenName": "Kiyoshi", "surname": "Kiyokawa", "__typename": "ArticleAuthorType" }, { "affiliation": "Nara Institute of Science and Technology", "fullName": "Naoya Lsoyama", "givenName": "Naoya", "surname": "Lsoyama", "__typename": "ArticleAuthorType" }, { "affiliation": "Nara Institute of Science and Technology", "fullName": "Hideaki Uchiyama", "givenName": "Hideaki", "surname": "Uchiyama", "__typename": "ArticleAuthorType" }, { "affiliation": "Shanghai Jiao Tong University", "fullName": "Xubo Yang", "givenName": "Xubo", "surname": "Yang", "__typename": "ArticleAuthorType" } ], "idPrefix": "vrw", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2022-03-01T00:00:00", "pubType": "proceedings", "pages": "800-801", "year": "2022", "issn": null, "isbn": "978-1-6654-8402-2", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [ { "id": "1CJeAkP48OQ", "name": "pvrw202284020-09757695s1-mm_840200a800.zip", "size": "129 MB", "location": "https://www.computer.org/csdl/api/v1/extra/pvrw202284020-09757695s1-mm_840200a800.zip", "__typename": "WebExtraType" } ], "adjacentArticles": { "previous": { "fno": "840200a798", "articleId": "1CJcJw3fs2s", "__typename": "AdjacentArticleType" }, "next": { "fno": "840200a802", "articleId": "1CJfq7DQm76", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/vr/2015/1727/0/07223385", "title": "Continuous automatic calibration for optical see-through displays", "doi": null, "abstractUrl": "/proceedings-article/vr/2015/07223385/12OmNynJMQZ", "parentPublication": { "id": "proceedings/vr/2015/1727/0", "title": "2015 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2017/11/08007218", "title": "Occlusion Leak Compensation for Optical 
See-Through Displays Using a Single-Layer Transmissive Spatial Light Modulator", "doi": null, "abstractUrl": "/journal/tg/2017/11/08007218/13rRUxcbnHi", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/05/08676155", "title": "Varifocal Occlusion for Optical See-Through Head-Mounted Displays using a Slide Occlusion Mask", "doi": null, "abstractUrl": "/journal/tg/2019/05/08676155/18LFfGhc49i", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2023/05/10050791", "title": "Add-on Occlusion: Turning Off-the-Shelf Optical See-through Head-mounted Displays Occlusion-capable", "doi": null, "abstractUrl": "/journal/tg/2023/05/10050791/1L039oS5wDm", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2023/4815/0/481500a237", "title": "A Compact Photochromic Occlusion Capable See-through Display with Holographic Lenses", "doi": null, "abstractUrl": "/proceedings-article/vr/2023/481500a237/1MNgTZ7ZNLO", "parentPublication": { "id": "proceedings/vr/2023/4815/0", "title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/11/08827571", "title": "Varifocal Occlusion-Capable Optical See-through Augmented Reality Display based on Focus-tunable Optics", "doi": null, "abstractUrl": "/journal/tg/2019/11/08827571/1dgvaPxmhbi", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "trans/tg/2020/05/08998139", "title": "Factored Occlusion: Single Spatial Light Modulator Occlusion-capable Optical See-through Augmented Reality Display", "doi": null, "abstractUrl": "/journal/tg/2020/05/08998139/1hrXe0Hbv0I", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2020/8508/0/850800a301", "title": "Super Wide-view Optical See-through Head Mounted Displays with Per-pixel Occlusion Capability", "doi": null, "abstractUrl": "/proceedings-article/ismar/2020/850800a301/1pysxIK95Yc", "parentPublication": { "id": "proceedings/ismar/2020/8508/0", "title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/12/09429918", "title": "The Impact of Focus and Context Visualization Techniques on Depth Perception in Optical See-Through Head-Mounted Displays", "doi": null, "abstractUrl": "/journal/tg/2022/12/09429918/1txPs5wi56E", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2021/0158/0/015800a422", "title": "Blending Shadows: Casting Shadows in Virtual and Real using Occlusion-Capable Augmented Reality Near-Eye Displays", "doi": null, "abstractUrl": "/proceedings-article/ismar/2021/015800a422/1yeD2Kh0vxS", "parentPublication": { "id": "proceedings/ismar/2021/0158/0", "title": "2021 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNyjLoRw", "title": "2014 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "acronym": "ismar", "groupId": "1000465", "volume": "0", "displayVolume": "0", "year": "2014", "__typename": "ProceedingType" }, "article": { "id": "12OmNAGNCfe", "doi": "10.1109/ISMAR.2014.6948406", "title": "Interactive near-field illumination for photorealistic augmented reality on mobile devices", "normalizedTitle": "Interactive near-field illumination for photorealistic augmented reality on mobile devices", "abstract": "Mobile devices become more and more important today, especially for augmented reality (AR) applications in which the camera of the mobile device acts like a window into the mixed reality world. Up to now, no photorealistic augmentation is possible since the computational power of the mobile devices is still too weak. Even a streaming solution from a stationary PC would cause a latency that affects user interactions considerably. Therefore, we introduce a differential illumination method that allows for a consistent illumination of the inserted virtual objects on mobile devices, avoiding a delay. The necessary computation effort is shared between a stationary PC and the mobile devices to make use of the capacities available on both sides. The method is designed such that only a minimum amount of data has to be transferred asynchronously between the stationary PC and one or multiple mobile devices. This allows for an interactive illumination of virtual objects with a consistent appearance under both temporally and spatially varying real illumination conditions. To describe the complex near-field illumination in an indoor scenario, multiple HDR video cameras are used to capture the illumination from multiple directions. 
In this way, sources of illumination can be considered that are not directly visible to the mobile device because of occlusions and the limited field of view of built-in cameras.", "abstracts": [ { "abstractType": "Regular", "content": "Mobile devices become more and more important today, especially for augmented reality (AR) applications in which the camera of the mobile device acts like a window into the mixed reality world. Up to now, no photorealistic augmentation is possible since the computational power of the mobile devices is still too weak. Even a streaming solution from a stationary PC would cause a latency that affects user interactions considerably. Therefore, we introduce a differential illumination method that allows for a consistent illumination of the inserted virtual objects on mobile devices, avoiding a delay. The necessary computation effort is shared between a stationary PC and the mobile devices to make use of the capacities available on both sides. The method is designed such that only a minimum amount of data has to be transferred asynchronously between the stationary PC and one or multiple mobile devices. This allows for an interactive illumination of virtual objects with a consistent appearance under both temporally and spatially varying real illumination conditions. To describe the complex near-field illumination in an indoor scenario, multiple HDR video cameras are used to capture the illumination from multiple directions. In this way, sources of illumination can be considered that are not directly visible to the mobile device because of occlusions and the limited field of view of built-in cameras.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Mobile devices become more and more important today, especially for augmented reality (AR) applications in which the camera of the mobile device acts like a window into the mixed reality world. 
Up to now, no photorealistic augmentation is possible since the computational power of the mobile devices is still too weak. Even a streaming solution from a stationary PC would cause a latency that affects user interactions considerably. Therefore, we introduce a differential illumination method that allows for a consistent illumination of the inserted virtual objects on mobile devices, avoiding a delay. The necessary computation effort is shared between a stationary PC and the mobile devices to make use of the capacities available on both sides. The method is designed such that only a minimum amount of data has to be transferred asynchronously between the stationary PC and one or multiple mobile devices. This allows for an interactive illumination of virtual objects with a consistent appearance under both temporally and spatially varying real illumination conditions. To describe the complex near-field illumination in an indoor scenario, multiple HDR video cameras are used to capture the illumination from multiple directions. 
In this way, sources of illumination can be considered that are not directly visible to the mobile device because of occlusions and the limited field of view of built-in cameras.", "fno": "06948406", "keywords": [ "Lighting", "Cameras", "Mobile Handsets", "Rendering Computer Graphics", "Image Reconstruction", "Light Sources", "Streaming Media", "Augmented And Virtual Realities", "I 3 7 Computer Graphics Three Dimensional Graphics And Realism H 5 1 Information Interfaces And Representation", "Artificial" ], "authors": [ { "affiliation": "Computational Visualistics, University of Magdeburg", "fullName": "Kai Rohmer", "givenName": "Kai", "surname": "Rohmer", "__typename": "ArticleAuthorType" }, { "affiliation": "Interactive Media Lab, Technical University of Dresden", "fullName": "Wolfgang Buschel", "givenName": "Wolfgang", "surname": "Buschel", "__typename": "ArticleAuthorType" }, { "affiliation": "Interactive Media Lab, Technical University of Dresden", "fullName": "Raimund Dachselt", "givenName": "Raimund", "surname": "Dachselt", "__typename": "ArticleAuthorType" }, { "affiliation": "Computational Visualistics, University of Magdeburg", "fullName": "Thorsten Grosch", "givenName": "Thorsten", "surname": "Grosch", "__typename": "ArticleAuthorType" } ], "idPrefix": "ismar", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2014-09-01T00:00:00", "pubType": "proceedings", "pages": "29-38", "year": "2014", "issn": null, "isbn": "978-1-4799-6184-9", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "06948405", "articleId": "12OmNqFJhNl", "__typename": "AdjacentArticleType" }, "next": { "fno": "06948407", "articleId": "12OmNxG1yH8", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/sibgrapi/2015/7962/0/7962a033", "title": "A Comparison of Global Illumination Methods Using 
Perceptual Quality Metrics", "doi": null, "abstractUrl": "/proceedings-article/sibgrapi/2015/7962a033/12OmNBhHt9Z", "parentPublication": { "id": "proceedings/sibgrapi/2015/7962/0", "title": "2015 28th SIBGRAPI Conference on Graphics, Patterns and Images (SIBGRAPI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/pacificvis/2015/6879/0/07156382", "title": "Efficient volume illumination with multiple light sources through selective light updates", "doi": null, "abstractUrl": "/proceedings-article/pacificvis/2015/07156382/12OmNvDZF6A", "parentPublication": { "id": "proceedings/pacificvis/2015/6879/0", "title": "2015 IEEE Pacific Visualization Symposium (PacificVis)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icccnt/2013/3926/0/06726809", "title": "A review on illumination techniques in augmented reality", "doi": null, "abstractUrl": "/proceedings-article/icccnt/2013/06726809/12OmNwMFMfk", "parentPublication": { "id": "proceedings/icccnt/2013/3926/0", "title": "2013 Fourth International Conference on Computing, Communications and Networking Technologies (ICCCNT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icig/2013/5050/0/5050a913", "title": "Cartoon Rendering Illumination Model Based on Phong", "doi": null, "abstractUrl": "/proceedings-article/icig/2013/5050a913/12OmNwoPtun", "parentPublication": { "id": "proceedings/icig/2013/5050/0", "title": "2013 Seventh International Conference on Image and Graphics (ICIG)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2008/2174/0/04761433", "title": "Illumination Transition Image: Parameter-based Illumination Estimation and Re-rendering", "doi": null, "abstractUrl": "/proceedings-article/icpr/2008/04761433/12OmNzVXNU8", "parentPublication": { "id": "proceedings/icpr/2008/2174/0", "title": "ICPR 2008 19th 
International Conference on Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2015/12/07138641", "title": "Interactive Near-Field Illumination for Photorealistic Augmented Reality with Varying Materials on Mobile Devices", "doi": null, "abstractUrl": "/journal/tg/2015/12/07138641/13rRUNvgz4i", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cs/2016/02/mcs2016020090", "title": "Ambient Volume Illumination", "doi": null, "abstractUrl": "/magazine/cs/2016/02/mcs2016020090/13rRUxASu7D", "parentPublication": { "id": "mags/cs", "title": "Computing in Science & Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/09904431", "title": "Neural Global Illumination: Interactive Indirect Illumination Prediction under Dynamic Area Lights", "doi": null, "abstractUrl": "/journal/tg/5555/01/09904431/1H0GdxnVnws", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2019/4765/0/476500a189", "title": "Deep Consistent Illumination in Augmented Reality", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2019/476500a189/1gyslmCJMjK", "parentPublication": { "id": "proceedings/ismar-adjunct/2019/4765/0", "title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2020/7168/0/716800i077", "title": "Lighthouse: Predicting Lighting Volumes for Spatially-Coherent Illumination", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2020/716800i077/1m3omNjwpW0", "parentPublication": { 
"id": "proceedings/cvpr/2020/7168/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNxuXcvH", "title": "2016 IEEE/ACIS 15th International Conference on Computer and Information Science (ICIS)", "acronym": "icis", "groupId": "1001200", "volume": "0", "displayVolume": "0", "year": "2016", "__typename": "ProceedingType" }, "article": { "id": "12OmNrAMF1Y", "doi": "10.1109/ICIS.2016.7550918", "title": "A mosaic style rendering method based on fuzzy color modeling", "normalizedTitle": "A mosaic style rendering method based on fuzzy color modeling", "abstract": "This paper presents a non-photorealistic rendering method combining fuzzy color models with mosaic rendering to emulate the fuzziness of color usage. The method first triangulates the source image based on its local details, to emulate artists' methods of observing and analyzing the image structures. Then, it converts the color from the source image to obtain the fuzzy color for every triangle. Finally, it renders the triangles by a customizable fuzzy coloring strategy. The results show that the proposed method achieves good simulation of different artists' coloring strategies. Using the rendering application based on the proposed method, the user can quickly achieve many renderings in different coloring strategies by our mosaic rendering prototype.", "abstracts": [ { "abstractType": "Regular", "content": "This paper presents a non-photorealistic rendering method combining fuzzy color models with mosaic rendering to emulate the fuzziness of color usage. The method first triangulates the source image based on its local details, to emulate artists' methods of observing and analyzing the image structures. Then, it converts the color from the source image to obtain the fuzzy color for every triangle. Finally, it renders the triangles by a customizable fuzzy coloring strategy. The results show that the proposed method achieves good simulation of different artists' coloring strategies. 
Using the rendering application based on the proposed method, the user can quickly achieve many renderings in different coloring strategies by our mosaic rendering prototype.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "This paper presents a non-photorealistic rendering method combining fuzzy color models with mosaic rendering to emulate the fuzziness of color usage. The method first triangulates the source image based on its local details, to emulate artists' methods of observing and analyzing the image structures. Then, it converts the color from the source image to obtain the fuzzy color for every triangle. Finally, it renders the triangles by a customizable fuzzy coloring strategy. The results show that the proposed method achieves good simulation of different artists' coloring strategies. Using the rendering application based on the proposed method, the user can quickly achieve many renderings in different coloring strategies by our mosaic rendering prototype.", "fno": "07550918", "keywords": [ "Image Color Analysis", "Rendering Computer Graphics", "Painting", "Image Segmentation", "Analytical Models", "Semantics", "Three Dimensional Displays", "Triangulation", "Non Photorealistic Rendering", "Fuzzy Color", "Mosaic Rendering" ], "authors": [ { "affiliation": "Jiangnan University, School of Digital Media, Wuxi, China", "fullName": "Xu Man-Di", "givenName": "Xu", "surname": "Man-Di", "__typename": "ArticleAuthorType" }, { "affiliation": "Jiangnan University, School of Digital Media, Wuxi, China", "fullName": "Liu Yuan", "givenName": "Liu", "surname": "Yuan", "__typename": "ArticleAuthorType" }, { "affiliation": "Jiangnan University, School of Digital Media, Wuxi, China", "fullName": "Lv Rui-Min", "givenName": "Lv", "surname": "Rui-Min", "__typename": "ArticleAuthorType" } ], "idPrefix": "icis", "isOpenAccess": true, "showRecommendedArticles": true, "showBuyMe": false, "hasPdf": true, "pubDate": "2016-06-01T00:00:00", "pubType": 
"proceedings", "pages": "1-6", "year": "2016", "issn": null, "isbn": "978-1-5090-0806-3", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "07550917", "articleId": "12OmNBPtJz8", "__typename": "AdjacentArticleType" }, "next": { "fno": "07550919", "articleId": "12OmNxWLTvO", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/cvpr/2012/1226/0/114P1C06", "title": "An analysis of color demosaicing in plenoptic cameras", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2012/114P1C06/12OmNAJ4phI", "parentPublication": { "id": "proceedings/cvpr/2012/1226/0", "title": "2012 IEEE Conference on Computer Vision and Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/eecs/2017/2085/0/2085a318", "title": "Landscape Painterly Rendering Based on Observation", "doi": null, "abstractUrl": "/proceedings-article/eecs/2017/2085a318/12OmNARAncG", "parentPublication": { "id": "proceedings/eecs/2017/2085/0", "title": "2017 European Conference on Electrical Engineering and Computer Science (EECS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccvw/2011/0063/0/06130326", "title": "Color correction using rotation matrix for HDR rendering in iCAM06", "doi": null, "abstractUrl": "/proceedings-article/iccvw/2011/06130326/12OmNCcKQK0", "parentPublication": { "id": "proceedings/iccvw/2011/0063/0", "title": "2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/csie/2009/3507/6/3507f583", "title": "Color Vision Based High Dynamic Range Images Rendering", "doi": null, "abstractUrl": "/proceedings-article/csie/2009/3507f583/12OmNCctfaE", "parentPublication": { "id": null, "title": null, 
"__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2012/4771/0/4771a062", "title": "Visual Color Design", "doi": null, "abstractUrl": "/proceedings-article/iv/2012/4771a062/12OmNwCaCqJ", "parentPublication": { "id": "proceedings/iv/2012/4771/0", "title": "2012 16th International Conference on Information Visualisation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2010/7846/0/05571171", "title": "Molecular Rendering with Medieval and Renaissance Color Theory", "doi": null, "abstractUrl": "/proceedings-article/iv/2010/05571171/12OmNxj23hk", "parentPublication": { "id": "proceedings/iv/2010/7846/0", "title": "2010 14th International Conference Information Visualisation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aqtr/2010/6724/3/05520671", "title": "LUX color transform for mosaic image rendering", "doi": null, "abstractUrl": "/proceedings-article/aqtr/2010/05520671/12OmNxzMnKJ", "parentPublication": { "id": "proceedings/aqtr/2010/6724/3", "title": "2010 IEEE International Conference on Automation, Quality and Testing, Robotics (AQTR 2010)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cgiv/2009/3789/0/3789a089", "title": "Artwork-Based 3D Ink Style Modeling and Rendering", "doi": null, "abstractUrl": "/proceedings-article/cgiv/2009/3789a089/12OmNzlUKmq", "parentPublication": { "id": "proceedings/cgiv/2009/3789/0", "title": "2009 Sixth International Conference on Computer Graphics, Imaging and Visualization", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2017/03/mcg2017030070", "title": "ColorSketch: A Drawing Assistant for Generating Color Sketches from Photos", "doi": null, "abstractUrl": "/magazine/cg/2017/03/mcg2017030070/13rRUwkfB20", "parentPublication": { "id": "mags/cg", "title": "IEEE 
Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2018/06/07911336", "title": "Color Orchestra: Ordering Color Palettes for Interpolation and Prediction", "doi": null, "abstractUrl": "/journal/tg/2018/06/07911336/13rRUxASu0R", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNz61dBt", "title": "2010 14th International Conference Information Visualisation", "acronym": "iv", "groupId": "1000370", "volume": "0", "displayVolume": "0", "year": "2010", "__typename": "ProceedingType" }, "article": { "id": "12OmNwF0BUx", "doi": "10.1109/IV.2010.16", "title": "Preserving Coherent Illumination in Style Transfer Functions for Volume Rendering", "normalizedTitle": "Preserving Coherent Illumination in Style Transfer Functions for Volume Rendering", "abstract": "Volume rendering has been widely used in different fields where several rendering algorithms have been developed, such as shear-warp, ray casting or splatting. But independently of the rendering method, transfer functions are usually used for mapping values and other properties of the volume into colors. As an improvement of transfer functions, style transfer functions are being used, where sphere maps extracted from artwork are used instead of plain colors. In this paper, we propose an interactive designer that would allow the user to create styles in an easy way, and shade them with just a color or a texture. In addition, it guarantees a coherent illumination, making it possible to easily use style transfer functions to achieve realistic rendering.", "abstracts": [ { "abstractType": "Regular", "content": "Volume rendering has been widely used in different fields where several rendering algorithms have been developed, such as shear-warp, ray casting or splatting. But independently of the rendering method, transfer functions are usually used for mapping values and other properties of the volume into colors. As an improvement of transfer functions, style transfer functions are being used, where sphere maps extracted from artwork are used instead of plain colors. In this paper, we propose an interactive designer that would allow the user to create styles in an easy way, and shade them with just a color or a texture. 
In addition, it guarantees a coherent illumination, making it possible to easily use style transfer functions to achieve realistic rendering.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Volume rendering has been widely used in different fields where several rendering algorithms have been developed, such as shear-warp, ray casting or splatting. But independently of the rendering method, transfer functions are usually used for mapping values and other properties of the volume into colors. As an improvement of transfer functions, style transfer functions are being used, where sphere maps extracted from artwork are used instead of plain colors. In this paper, we propose an interactive designer that would allow the user to create styles in an easy way, and shade them with just a color or a texture. In addition, it guarantees a coherent illumination, making it possible to easily use style transfer functions to achieve realistic rendering.", "fno": "05571370", "keywords": [ "Data Visualisation", "Interactive Systems", "Rendering Computer Graphics", "Transfer Functions", "Coherent Illumination", "Style Transfer Functions", "Volume Rendering", "Sphere Maps", "Interactive Designer", "Lighting", "Transfer Functions", "Rendering Computer Graphics", "Image Color Analysis", "Visualization", "Materials", "Data Visualization", "Illumination", "Transfer Functions", "Volumetric Rendering" ], "authors": [ { "affiliation": null, "fullName": "Imanol Herrera", "givenName": "Imanol", "surname": "Herrera", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Carlos Buchart", "givenName": "Carlos", "surname": "Buchart", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Diego Borro", "givenName": "Diego", "surname": "Borro", "__typename": "ArticleAuthorType" } ], "idPrefix": "iv", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2010-07-01T00:00:00", "pubType": 
"proceedings", "pages": "43-47", "year": "2010", "issn": "1550-6037", "isbn": "978-1-4244-7846-0", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "05571369", "articleId": "12OmNxUMHny", "__typename": "AdjacentArticleType" }, "next": { "fno": "05571363", "articleId": "12OmNA1mbcE", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/pacificvis/2011/935/0/05742355", "title": "Full-resolution interactive CPU volume rendering with coherent BVH traversal", "doi": null, "abstractUrl": "/proceedings-article/pacificvis/2011/05742355/12OmNC3XhlV", "parentPublication": { "id": "proceedings/pacificvis/2011/935/0", "title": "2011 IEEE Pacific Visualization Symposium (PacificVis)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/2005/2766/0/01532808", "title": "Scale-invariant volume rendering", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2005/01532808/12OmNyoAA5X", "parentPublication": { "id": "proceedings/ieee-vis/2005/2766/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2014/6184/0/06948416", "title": "Real-time illumination estimation from faces for coherent rendering", "doi": null, "abstractUrl": "/proceedings-article/ismar/2014/06948416/12OmNyqRnma", "parentPublication": { "id": "proceedings/ismar/2014/6184/0", "title": "2014 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/visual/1994/6627/0/00346314", "title": "Volume rendering methods for computational fluid dynamics visualization", "doi": null, "abstractUrl": "/proceedings-article/visual/1994/00346314/12OmNyuy9NX", "parentPublication": { "id": 
"proceedings/visual/1994/6627/0", "title": "Proceedings Visualization '94", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/2003/2030/0/20300067", "title": "Curvature-Based Transfer Functions for Direct Volume Rendering: Methods and Applications", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2003/20300067/12OmNz61d84", "parentPublication": { "id": "proceedings/ieee-vis/2003/2030/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2002/03/v0270", "title": "Multidimensional Transfer Functions for Interactive Volume Rendering", "doi": null, "abstractUrl": "/journal/tg/2002/03/v0270/13rRUB7a1fG", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cs/2008/06/mcs2008060082", "title": "Transfer-Function Specification for Rendering Disparate Volumes", "doi": null, "abstractUrl": "/magazine/cs/2008/06/mcs2008060082/13rRUwIF64Q", "parentPublication": { "id": "mags/cs", "title": "Computing in Science & Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2008/06/ttg2008061364", "title": "Texture-based Transfer Functions for Direct Volume Rendering", "doi": null, "abstractUrl": "/journal/tg/2008/06/ttg2008061364/13rRUwfI0Q1", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2001/03/v0253", "title": "Volume Illustration: Nonphotorealistic Rendering of Volume Models", "doi": null, "abstractUrl": "/journal/tg/2001/03/v0253/13rRUxbTMyH", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2007/06/v1336", "title": "Semantic Layers for Illustrative Volume Rendering", "doi": null, "abstractUrl": "/journal/tg/2007/06/v1336/13rRUytWF9e", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNvAiSpZ", "title": "2015 IEEE Virtual Reality (VR)", "acronym": "vr", "groupId": "1000791", "volume": "0", "displayVolume": "0", "year": "2015", "__typename": "ProceedingType" }, "article": { "id": "12OmNyFU73E", "doi": "10.1109/VR.2015.7223334", "title": "Image-space illumination for augmented reality in dynamic environments", "normalizedTitle": "Image-space illumination for augmented reality in dynamic environments", "abstract": "We present an efficient approach for probeless light estimation and coherent rendering of Augmented Reality in dynamic scenes. This approach can handle dynamically changing scene geometry and dynamically changing light sources in real time with a single mobile RGB-D sensor and without relying on an invasive lightprobe. We jointly filter both in-view dynamic geometry and outside-view static geometry. The resulting reconstruction provides the input for efficient global illumination computation in image-space. We demonstrate that our approach can deliver state-of-the-art Augmented Reality rendering effects for scenes that are more scalable and more dynamic than previous work.", "abstracts": [ { "abstractType": "Regular", "content": "We present an efficient approach for probeless light estimation and coherent rendering of Augmented Reality in dynamic scenes. This approach can handle dynamically changing scene geometry and dynamically changing light sources in real time with a single mobile RGB-D sensor and without relying on an invasive lightprobe. We jointly filter both in-view dynamic geometry and outside-view static geometry. The resulting reconstruction provides the input for efficient global illumination computation in image-space. 
We demonstrate that our approach can deliver state-of-the-art Augmented Reality rendering effects for scenes that are more scalable and more dynamic than previous work.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We present an efficient approach for probeless light estimation and coherent rendering of Augmented Reality in dynamic scenes. This approach can handle dynamically changing scene geometry and dynamically changing light sources in real time with a single mobile RGB-D sensor and without relying on an invasive lightprobe. We jointly filter both in-view dynamic geometry and outside-view static geometry. The resulting reconstruction provides the input for efficient global illumination computation in image-space. We demonstrate that our approach can deliver state-of-the-art Augmented Reality rendering effects for scenes that are more scalable and more dynamic than previous work.", "fno": "07223334", "keywords": [ "Geometry", "Lighting", "Estimation", "Rendering Computer Graphics", "Cameras", "Image Reconstruction", "Image Color Analysis", "Radiance Transfer", "Augmented Reality", "Photometric Registration" ], "authors": [ { "affiliation": "Graz University of Technology", "fullName": "Lukas Gruber", "givenName": "Lukas", "surname": "Gruber", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Colorado at Colorado Springs", "fullName": "Jonathan Ventura", "givenName": "Jonathan", "surname": "Ventura", "__typename": "ArticleAuthorType" }, { "affiliation": "Graz University of Technology", "fullName": "Dieter Schmalstieg", "givenName": "Dieter", "surname": "Schmalstieg", "__typename": "ArticleAuthorType" } ], "idPrefix": "vr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2015-03-01T00:00:00", "pubType": "proceedings", "pages": "127-134", "year": "2015", "issn": null, "isbn": "978-1-4799-1727-3", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": 
[], "adjacentArticles": { "previous": { "fno": "07223333", "articleId": "12OmNzV70CZ", "__typename": "AdjacentArticleType" }, "next": { "fno": "07223335", "articleId": "12OmNs0C9XZ", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/ismar/2013/2869/0/06671792", "title": "Acceleration methods for radiance transfer in photorealistic augmented reality", "doi": null, "abstractUrl": "/proceedings-article/ismar/2013/06671792/12OmNwGIcB5", "parentPublication": { "id": "proceedings/ismar/2013/2869/0", "title": "2013 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icccnt/2013/3926/0/06726809", "title": "A review on illumination techniques in augmented reality", "doi": null, "abstractUrl": "/proceedings-article/icccnt/2013/06726809/12OmNwMFMfk", "parentPublication": { "id": "proceedings/icccnt/2013/3926/0", "title": "2013 Fourth International Conference on Computing, Communications and Networking Technologies (ICCCNT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2016/3641/0/3641a027", "title": "Instant Mixed Reality Lighting from Casual Scanning", "doi": null, "abstractUrl": "/proceedings-article/ismar/2016/3641a027/12OmNx5GTYC", "parentPublication": { "id": "proceedings/ismar/2016/3641/0", "title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2014/2871/0/06802055", "title": "Global illumination for Augmented Reality on mobile phones", "doi": null, "abstractUrl": "/proceedings-article/vr/2014/06802055/12OmNyRg4FC", "parentPublication": { "id": "proceedings/vr/2014/2871/0", "title": "2014 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "proceedings/vr/2014/2871/0/06802044", "title": "Efficient and robust radiance transfer for probeless photorealistic augmented reality", "doi": null, "abstractUrl": "/proceedings-article/vr/2014/06802044/12OmNz4SOCN", "parentPublication": { "id": "proceedings/vr/2014/2871/0", "title": "2014 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2010/6237/0/05444836", "title": "Photorealistic rendering for Augmented Reality: A global illumination and BRDF solution", "doi": null, "abstractUrl": "/proceedings-article/vr/2010/05444836/12OmNz6iOaA", "parentPublication": { "id": "proceedings/vr/2010/6237/0", "title": "2010 IEEE Virtual Reality Conference (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2017/11/08007317", "title": "Natural Environment Illumination: Coherent Interactive Augmented Reality for Mobile and Non-Mobile Devices", "doi": null, "abstractUrl": "/journal/tg/2017/11/08007317/13rRUILc8fg", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2015/12/07138641", "title": "Interactive Near-Field Illumination for Photorealistic Augmented Reality with Varying Materials on Mobile Devices", "doi": null, "abstractUrl": "/journal/tg/2015/12/07138641/13rRUNvgz4i", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2023/04/09678000", "title": "Real-Time Lighting Estimation for Augmented Reality via Differentiable Screen-Space Rendering", "doi": null, "abstractUrl": "/journal/tg/2023/04/09678000/1A4SuYWCI7K", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & 
Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vs-games/2019/4540/0/08864523", "title": "Interactive Cloud-based Global Illumination for Shared Virtual Environments", "doi": null, "abstractUrl": "/proceedings-article/vs-games/2019/08864523/1e5ZtHuwxdm", "parentPublication": { "id": "proceedings/vs-games/2019/4540/0", "title": "2019 11th International Conference on Virtual Worlds and Games for Serious Applications (VS-Games)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNwMXnv0", "title": "2014 IEEE Virtual Reality (VR)", "acronym": "vr", "groupId": "1000791", "volume": "0", "displayVolume": "0", "year": "2014", "__typename": "ProceedingType" }, "article": { "id": "12OmNyRg4FC", "doi": "10.1109/VR.2014.6802055", "title": "Global illumination for Augmented Reality on mobile phones", "normalizedTitle": "Global illumination for Augmented Reality on mobile phones", "abstract": "The goal of our work is to create highly realistic graphics for Augmented Reality on mobile phones. One of the greatest challenges for this is to provide realistic lighting of the virtual objects that matches the real world lighting. This becomes even more difficult with the limited capabilities of mobile phone GPUs. Our approach differs in the following important aspects compared to previous attempts: (1) most have relied on rasterizer approaches, while our approach is based on raytracing; (2) we perform distributed rendering in order to address the limited mobile GPU capabilities; (3) we use image-based lighting from a pre-captured panorama to incorporate real world lighting. We utilize two markers: one for object tracking and one for registering the panorama. Our initial results are encouraging, as the visual quality resembles real objects and also the reference renderings which were created offline. However, we still need to validate our approach in human subject studies, especially with regards to the trade-off between latency of remote rendering and visual quality.", "abstracts": [ { "abstractType": "Regular", "content": "The goal of our work is to create highly realistic graphics for Augmented Reality on mobile phones. One of the greatest challenges for this is to provide realistic lighting of the virtual objects that matches the real world lighting. This becomes even more difficult with the limited capabilities of mobile phone GPUs. 
Our approach differs in the following important aspects compared to previous attempts: (1) most have relied on rasterizer approaches, while our approach is based on raytracing; (2) we perform distributed rendering in order to address the limited mobile GPU capabilities; (3) we use image-based lighting from a pre-captured panorama to incorporate real world lighting. We utilize two markers: one for object tracking and one for registering the panorama. Our initial results are encouraging, as the visual quality resembles real objects and also the reference renderings which were created offline. However, we still need to validate our approach in human subject studies, especially with regards to the trade-off between latency of remote rendering and visual quality.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "The goal of our work is to create highly realistic graphics for Augmented Reality on mobile phones. One of the greatest challenges for this is to provide realistic lighting of the virtual objects that matches the real world lighting. This becomes even more difficult with the limited capabilities of mobile phone GPUs. Our approach differs in the following important aspects compared to previous attempts: (1) most have relied on rasterizer approaches, while our approach is based on raytracing; (2) we perform distributed rendering in order to address the limited mobile GPU capabilities; (3) we use image-based lighting from a pre-captured panorama to incorporate real world lighting. We utilize two markers: one for object tracking and one for registering the panorama. Our initial results are encouraging, as the visual quality resembles real objects and also the reference renderings which were created offline. 
However, we still need to validate our approach in human subject studies, especially with regards to the trade-off between latency of remote rendering and visual quality.", "fno": "06802055", "keywords": [ "Rendering Computer Graphics", "Lighting", "Mobile Handsets", "Visualization", "Augmented Reality", "Prototypes", "Image Color Analysis", "I 3 2 Computer Graphics Graphics Systems Distributed Networked Graphics", "H 5 1 Information Interfaces And Presentation Multimedia Information Systems Artificial Augmented And Virtual Realities", "H 1 2 Information Systems Models And Principles Human Factors" ], "authors": [ { "affiliation": "Magic Vision Lab, University of South Australia", "fullName": "Michael Csongei", "givenName": "Michael", "surname": "Csongei", "__typename": "ArticleAuthorType" }, { "affiliation": "Magic Vision Lab, University of South Australia", "fullName": "Liem Hoang", "givenName": "Liem", "surname": "Hoang", "__typename": "ArticleAuthorType" }, { "affiliation": "Magic Vision Lab, University of South Australia", "fullName": "Christian Sandor", "givenName": "Christian", "surname": "Sandor", "__typename": "ArticleAuthorType" }, { "affiliation": "Samsung Advanced Institute of Technology, Samsung", "fullName": "Yong Beom Lee", "givenName": "Yong Beom", "surname": "Lee", "__typename": "ArticleAuthorType" } ], "idPrefix": "vr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2014-03-01T00:00:00", "pubType": "proceedings", "pages": "69-70", "year": "2014", "issn": null, "isbn": "978-1-4799-2871-2", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "06802054", "articleId": "12OmNBpmDG4", "__typename": "AdjacentArticleType" }, "next": { "fno": "06802056", "articleId": "12OmNBqdrca", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": 
"proceedings/ismar/2014/6184/0/06948453", "title": "[Poster] Indirect augmented reality considering real-world illumination change", "doi": null, "abstractUrl": "/proceedings-article/ismar/2014/06948453/12OmNCcbEaP", "parentPublication": { "id": "proceedings/ismar/2014/6184/0", "title": "2014 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2013/2869/0/06671792", "title": "Acceleration methods for radiance transfer in photorealistic augmented reality", "doi": null, "abstractUrl": "/proceedings-article/ismar/2013/06671792/12OmNwGIcB5", "parentPublication": { "id": "proceedings/ismar/2013/2869/0", "title": "2013 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ds-rt/2011/1643/0/06051803", "title": "Performance Characterization on Mobile Phones for Collaborative Augmented Reality (CAR) Applications", "doi": null, "abstractUrl": "/proceedings-article/ds-rt/2011/06051803/12OmNwdtw7n", "parentPublication": { "id": "proceedings/ds-rt/2011/1643/0", "title": "2011 IEEE/ACM 15th International Symposium on Distributed Simulation and Real Time Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2009/3943/0/04811036", "title": "A Global Illumination and BRDF Solution Applied to Photorealistic Augmented Reality", "doi": null, "abstractUrl": "/proceedings-article/vr/2009/04811036/12OmNxXUhOr", "parentPublication": { "id": "proceedings/vr/2009/3943/0", "title": "2009 IEEE Virtual Reality Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2005/8929/0/01492774", "title": "Stylized augmented reality for improved immersion", "doi": null, "abstractUrl": "/proceedings-article/vr/2005/01492774/12OmNylbotS", 
"parentPublication": { "id": "proceedings/vr/2005/8929/0", "title": "IEEE Virtual Reality 2005", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2010/6237/0/05444836", "title": "Photorealistic rendering for Augmented Reality: A global illumination and BRDF solution", "doi": null, "abstractUrl": "/proceedings-article/vr/2010/05444836/12OmNz6iOaA", "parentPublication": { "id": "proceedings/vr/2010/6237/0", "title": "2010 IEEE Virtual Reality Conference (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2009/04/mcg2009040006", "title": "Making Augmented Reality Practical on Mobile Phones, Part 2", "doi": null, "abstractUrl": "/magazine/cg/2009/04/mcg2009040006/13rRUxjQyjI", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cw/2019/2297/0/229700a352", "title": "Augmented Reality Hologram", "doi": null, "abstractUrl": "/proceedings-article/cw/2019/229700a352/1fHkn9dGYow", "parentPublication": { "id": "proceedings/cw/2019/2297/0", "title": "2019 International Conference on Cyberworlds (CW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2020/6532/0/09090568", "title": "Real-time Illumination Estimation for Mixed Reality on Mobile Devices", "doi": null, "abstractUrl": "/proceedings-article/vrw/2020/09090568/1jIxuGbpWa4", "parentPublication": { "id": "proceedings/vrw/2020/6532/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2009/3943/0/04811000", "title": "Virtual Heliodon: Spatially Augmented Reality for Architectural Daylighting Design", "doi": null, "abstractUrl": 
"/proceedings-article/vr/2009/04811000/1t0I5sIRprW", "parentPublication": { "id": "proceedings/vr/2009/3943/0", "title": "2009 IEEE Virtual Reality Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNBC8AAD", "title": "2010 IEEE Virtual Reality Conference (VR)", "acronym": "vr", "groupId": "1000791", "volume": "0", "displayVolume": "0", "year": "2010", "__typename": "ProceedingType" }, "article": { "id": "12OmNz6iOaA", "doi": "10.1109/VR.2010.5444836", "title": "Photorealistic rendering for Augmented Reality: A global illumination and BRDF solution", "normalizedTitle": "Photorealistic rendering for Augmented Reality: A global illumination and BRDF solution", "abstract": "This paper presents a solution for the photorealistic rendering of synthetic objects into dynamic real scenes, in Augmented Reality applications. In order to achieve this goal, an Image Based Lighting approach is used, where environment maps with different levels of glossiness are generated for each virtual object in the scene at every frame. Due to this, illumination effects, such as color bleeding and specular reflections, can be simulated for virtual objects in a consistent way. A unifying sampling method for the spherical harmonics transformation pass is also used. It is independent of map format and does not need to apply different weights for each sample. The developed technique is combined with an extended version of Lafortune Spatial BRDF, featuring Fresnel effect and an innovative tangent rotation parameterization. The solution is evaluated in various Augmented Reality case studies, where other features like shadowing and lens effects are also exploited.", "abstracts": [ { "abstractType": "Regular", "content": "This paper presents a solution for the photorealistic rendering of synthetic objects into dynamic real scenes, in Augmented Reality applications. In order to achieve this goal, an Image Based Lighting approach is used, where environment maps with different levels of glossiness are generated for each virtual object in the scene at every frame. 
Due to this, illumination effects, such as color bleeding and specular reflections, can be simulated for virtual objects in a consistent way. A unifying sampling method for the spherical harmonics transformation pass is also used. It is independent of map format and does not need to apply different weights for each sample. The developed technique is combined with an extended version of Lafortune Spatial BRDF, featuring Fresnel effect and an innovative tangent rotation parameterization. The solution is evaluated in various Augmented Reality case studies, where other features like shadowing and lens effects are also exploited.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "This paper presents a solution for the photorealistic rendering of synthetic objects into dynamic real scenes, in Augmented Reality applications. In order to achieve this goal, an Image Based Lighting approach is used, where environment maps with different levels of glossiness are generated for each virtual object in the scene at every frame. Due to this, illumination effects, such as color bleeding and specular reflections, can be simulated for virtual objects in a consistent way. A unifying sampling method for the spherical harmonics transformation pass is also used. It is independent of map format and does not need to apply different weights for each sample. The developed technique is combined with an extended version of Lafortune Spatial BRDF, featuring Fresnel effect and an innovative tangent rotation parameterization. 
The solution is evaluated in various Augmented Reality case studies, where other features like shadowing and lens effects are also exploited.", "fno": "05444836", "keywords": [ "Bidirectional Reflectance Distribution Function", "Photorealistic Rendering", "Augmented Reality", "Global Illumination", "BRDF Solution", "Image Based Lighting Approach", "Color Bleeding", "Specular Reflection", "Sampling Method", "Spherical Harmonics Transformation Pass", "Fresnel Effect", "Tangent Rotation Parameterization" ], "authors": [ { "affiliation": "Virtual Reality & Multimedia Res. Group, Fed. Univ. of Pernambuco, Recife, Brazil", "fullName": "Saulo Pessoa", "givenName": "Saulo", "surname": "Pessoa", "__typename": "ArticleAuthorType" }, { "affiliation": "Virtual Reality & Multimedia Res. Group, Fed. Univ. of Pernambuco, Recife, Brazil", "fullName": "Guilherme Moura", "givenName": "Guilherme", "surname": "Moura", "__typename": "ArticleAuthorType" }, { "affiliation": "Virtual Reality & Multimedia Res. Group, Fed. Univ. of Pernambuco, Recife, Brazil", "fullName": "Joao Lima", "givenName": "Joao", "surname": "Lima", "__typename": "ArticleAuthorType" }, { "affiliation": "Virtual Reality & Multimedia Res. Group, Fed. Univ. of Pernambuco, Recife, Brazil", "fullName": "Veronica Teichrieb", "givenName": "Veronica", "surname": "Teichrieb", "__typename": "ArticleAuthorType" }, { "affiliation": "Virtual Reality & Multimedia Res. Group, Fed. Univ. 
of Pernambuco, Recife, Brazil", "fullName": "Judith Kelner", "givenName": "Judith", "surname": "Kelner", "__typename": "ArticleAuthorType" } ], "idPrefix": "vr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2010-03-01T00:00:00", "pubType": "proceedings", "pages": "3-10", "year": "2010", "issn": null, "isbn": "978-1-4244-6237-7", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "05444839", "articleId": "12OmNzlUKP2", "__typename": "AdjacentArticleType" }, "next": { "fno": "05444820", "articleId": "12OmNykCccb", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/ismar/2014/6184/0/06948406", "title": "Interactive near-field illumination for photorealistic augmented reality on mobile devices", "doi": null, "abstractUrl": "/proceedings-article/ismar/2014/06948406/12OmNAGNCfe", "parentPublication": { "id": "proceedings/ismar/2014/6184/0", "title": "2014 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/gamepec/2015/7207/0/07331844", "title": "Illumination rendering in Game and Augmented Reality", "doi": null, "abstractUrl": "/proceedings-article/gamepec/2015/07331844/12OmNBEGYIm", "parentPublication": { "id": "proceedings/gamepec/2015/7207/0", "title": "2015 Game Physics and Mechanics International Conference (GAMEPEC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2003/2006/0/20060208", "title": "Photorealistic rendering for augmented reality using environment illumination", "doi": null, "abstractUrl": "/proceedings-article/ismar/2003/20060208/12OmNwEJ0Jx", "parentPublication": { "id": "proceedings/ismar/2003/2006/0", "title": "The Second IEEE and ACM International Symposium on Mixed and 
Augmented Reality, 2003. Proceedings.", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2013/2869/0/06671792", "title": "Acceleration methods for radiance transfer in photorealistic augmented reality", "doi": null, "abstractUrl": "/proceedings-article/ismar/2013/06671792/12OmNwGIcB5", "parentPublication": { "id": "proceedings/ismar/2013/2869/0", "title": "2013 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/pacific-graphics/2010/4205/0/4205a054", "title": "Thread-Based BRDF Rendering on GPU", "doi": null, "abstractUrl": "/proceedings-article/pacific-graphics/2010/4205a054/12OmNweBUCO", "parentPublication": { "id": "proceedings/pacific-graphics/2010/4205/0", "title": "Pacific Conference on Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2009/3943/0/04811036", "title": "A Global Illumination and BRDF Solution Applied to Photorealistic Augmented Reality", "doi": null, "abstractUrl": "/proceedings-article/vr/2009/04811036/12OmNxXUhOr", "parentPublication": { "id": "proceedings/vr/2009/3943/0", "title": "2009 IEEE Virtual Reality Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/pg/2001/1227/0/12270322", "title": "Non-Photorealistic Rendering Using Watercolor Inspired Textures and Illumination", "doi": null, "abstractUrl": "/proceedings-article/pg/2001/12270322/12OmNy4r41q", "parentPublication": { "id": "proceedings/pg/2001/1227/0", "title": "Computer Graphics and Applications, Pacific Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/nswctc/2010/4011/1/4011a332", "title": "The Analysis of Global Illumination Rendering Based on BRDF", "doi": null, "abstractUrl": 
"/proceedings-article/nswctc/2010/4011a332/12OmNyvGynS", "parentPublication": { "id": "proceedings/nswctc/2010/4011/1", "title": "Networks Security, Wireless Communications and Trusted Computing, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2003/1988/0/19880350", "title": "Current Issues of Photorealistic Rendering for Virtual and Augmented Reality in Minimally Invasive Surgery", "doi": null, "abstractUrl": "/proceedings-article/iv/2003/19880350/12OmNzvQI5c", "parentPublication": { "id": "proceedings/iv/2003/1988/0", "title": "Proceedings on Seventh International Conference on Information Visualization, 2003. IV 2003.", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/svr/2013/5001/0/06655770", "title": "Integrated Pipeline for Natural Interaction with Photorealistic Rendering", "doi": null, "abstractUrl": "/proceedings-article/svr/2013/06655770/12OmNzvhvMX", "parentPublication": { "id": "proceedings/svr/2013/5001/0", "title": "2013 XV Symposium on Virtual and Augmented Reality (SVR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "19F1LC52tjO", "title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "acronym": "ismar-adjunct", "groupId": "1810084", "volume": "0", "displayVolume": "0", "year": "2018", "__typename": "ProceedingType" }, "article": { "id": "19F1QemV928", "doi": "10.1109/ISMAR-Adjunct.2018.00126", "title": "Reproducing Material Appearance of Real Objects Using Mobile Augmented Reality", "normalizedTitle": "Reproducing Material Appearance of Real Objects Using Mobile Augmented Reality", "abstract": "In this paper, we propose a system that can reproduce the material appearance of real objects using mobile augmented reality (AR). Our proposed system allows a user to manipulate a virtual object, whose model is generated from the shape and reflectance of a real object, using the user's own hand. The shape of the real object is reconstructed by integrating depth images of the object, which are captured using an RGB-D camera from different directions. The reflectance of the object is obtained by estimating the parameters of a reflectance model from the reconstructed shape and color images, assuming that a single light source is attached to the camera. We measured the shape and reflectance of some real objects and presented the material appearance of the objects using mobile AR. It was confirmed that users were able to obtain the perception of materials from changes in gloss and burnish of the objects by rotating the objects using their own hand.", "abstracts": [ { "abstractType": "Regular", "content": "In this paper, we propose a system that can reproduce the material appearance of real objects using mobile augmented reality (AR). Our proposed system allows a user to manipulate a virtual object, whose model is generated from the shape and reflectance of a real object, using the user's own hand. 
The shape of the real object is reconstructed by integrating depth images of the object, which are captured using an RGB-D camera from different directions. The reflectance of the object is obtained by estimating the parameters of a reflectance model from the reconstructed shape and color images, assuming that a single light source is attached to the camera. We measured the shape and reflectance of some real objects and presented the material appearance of the objects using mobile AR. It was confirmed that users were able to obtain the perception of materials from changes in gloss and burnish of the objects by rotating the objects using their own hand.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In this paper, we propose a system that can reproduce the material appearance of real objects using mobile augmented reality (AR). Our proposed system allows a user to manipulate a virtual object, whose model is generated from the shape and reflectance of a real object, using the user's own hand. The shape of the real object is reconstructed by integrating depth images of the object, which are captured using an RGB-D camera from different directions. The reflectance of the object is obtained by estimating the parameters of a reflectance model from the reconstructed shape and color images, assuming that a single light source is attached to the camera. We measured the shape and reflectance of some real objects and presented the material appearance of the objects using mobile AR. 
It was confirmed that users were able to obtain the perception of materials from changes in gloss and burnish of the objects by rotating the objects using their own hand.", "fno": "08699239", "keywords": [ "Augmented Reality", "Cameras", "Image Colour Analysis", "Image Reconstruction", "Mobile Computing", "Reproducing Material Appearance", "Mobile Augmented Reality", "Virtual Object", "Integrating Depth Images", "Reflectance Model", "Reconstructed Shape", "Color Images", "RGB D Camera", "AR", "Shape", "Cameras", "Augmented Reality", "Image Reconstruction", "Light Sources", "Shape Measurement", "Color", "Reflectance Property", "Reflectance Measurement", "Object Manipulation", "Mobile Display", "RGB D Camera", "Human Centered Computing X 2014 Human Computer Interaction HCI X 2014 Interaction Paradigms X 2014 Mixed Augmented Reality" ], "authors": [ { "affiliation": "Saitama University", "fullName": "Seiji Tsunezaki", "givenName": "Seiji", "surname": "Tsunezaki", "__typename": "ArticleAuthorType" }, { "affiliation": "Saitama University", "fullName": "Ryota Nomura", "givenName": "Ryota", "surname": "Nomura", "__typename": "ArticleAuthorType" }, { "affiliation": "Saitama University", "fullName": "Takashi Komuro", "givenName": "Takashi", "surname": "Komuro", "__typename": "ArticleAuthorType" }, { "affiliation": "Tokyo Metropolitan College of Industrial Technology", "fullName": "Shoji Yamamoto", "givenName": "Shoji", "surname": "Yamamoto", "__typename": "ArticleAuthorType" }, { "affiliation": "Chiba University", "fullName": "Norimichi Tsumura", "givenName": "Norimichi", "surname": "Tsumura", "__typename": "ArticleAuthorType" } ], "idPrefix": "ismar-adjunct", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2018-10-01T00:00:00", "pubType": "proceedings", "pages": "421-422", "year": "2018", "issn": null, "isbn": "978-1-5386-7592-2", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], 
"adjacentArticles": { "previous": { "fno": "08699293", "articleId": "19F1LW7sJEc", "__typename": "AdjacentArticleType" }, "next": { "fno": "08699323", "articleId": "19F1PIUziiQ", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/vr/2005/8929/0/01492784", "title": "Dynamic Texturing of Real Objects in an Augmented Reality System", "doi": null, "abstractUrl": "/proceedings-article/vr/2005/01492784/12OmNAnuTkI", "parentPublication": { "id": "proceedings/vr/2005/8929/0", "title": "IEEE Virtual Reality 2005", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2008/2174/0/04761511", "title": "Shape from self-calibration and Fast Marching Method", "doi": null, "abstractUrl": "/proceedings-article/icpr/2008/04761511/12OmNBTs7rO", "parentPublication": { "id": "proceedings/icpr/2008/2174/0", "title": "ICPR 2008 19th International Conference on Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wqv/1993/3692/0/00262951", "title": "Learning and recognition of 3D objects from appearance", "doi": null, "abstractUrl": "/proceedings-article/wqv/1993/00262951/12OmNBTs7ym", "parentPublication": { "id": "proceedings/wqv/1993/3692/0", "title": "Proceedings IEEE Workshop on Qualitative Vision", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2005/8929/0/01492781", "title": "Dynamic texturing of real objects in an augmented reality system", "doi": null, "abstractUrl": "/proceedings-article/vr/2005/01492781/12OmNwkhTh2", "parentPublication": { "id": "proceedings/vr/2005/8929/0", "title": "IEEE Virtual Reality 2005", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/1992/2855/0/00223149", "title": "Extracting the shape and roughness of specular lobe objects using four light 
photometric stereo", "doi": null, "abstractUrl": "/proceedings-article/cvpr/1992/00223149/12OmNwoxSc1", "parentPublication": { "id": "proceedings/cvpr/1992/2855/0", "title": "Proceedings 1992 IEEE Computer Society Conference on Computer Vision and Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2012/2216/0/06460071", "title": "Shading derivation from an unspecified object for augmented reality", "doi": null, "abstractUrl": "/proceedings-article/icpr/2012/06460071/12OmNzAohXY", "parentPublication": { "id": "proceedings/icpr/2012/2216/0", "title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/robot/1992/2720/0/00220132", "title": "Inspecting specular lobe objects using four light sources", "doi": null, "abstractUrl": "/proceedings-article/robot/1992/00220132/12OmNzd7bWl", "parentPublication": { "id": "proceedings/robot/1992/2720/0", "title": "Proceedings 1992 IEEE International Conference on Robotics and Automation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2018/7592/0/08699176", "title": "Reproducing Material Appearance of Real Objects Using Mobile Augmented Reality", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699176/19F1ToU9wNG", "parentPublication": { "id": "proceedings/ismar-adjunct/2018/7592/0", "title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2022/8402/0/840200a632", "title": "Material Reflectance Property Estimation of Complex Objects Using an Attention Network", "doi": null, "abstractUrl": "/proceedings-article/vrw/2022/840200a632/1CJcD7RtQVq", "parentPublication": { "id": "proceedings/vrw/2022/8402/0", 
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2021/1298/0/129800a384", "title": "An Empirical Study of Size Discrimination in Augmented Reality", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2021/129800a384/1yeQWO0csfe", "parentPublication": { "id": "proceedings/ismar-adjunct/2021/1298/0", "title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "19F1LC52tjO", "title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "acronym": "ismar-adjunct", "groupId": "1810084", "volume": "0", "displayVolume": "0", "year": "2018", "__typename": "ProceedingType" }, "article": { "id": "19F1ToU9wNG", "doi": "10.1109/ISMAR-Adjunct.2018.00065", "title": "Reproducing Material Appearance of Real Objects Using Mobile Augmented Reality", "normalizedTitle": "Reproducing Material Appearance of Real Objects Using Mobile Augmented Reality", "abstract": "In this paper, we propose a system that can reproduce the material appearance of real objects using mobile augmented reality (AR). Our proposed system allows a user to manipulate a virtual object, whose model is generated from the shape and reflectance of a real object, using the user's own hand. The shape of the real object is reconstructed by integrating depth images of the object, which are captured using an RGB-D camera from different directions. The reflectance of the object is obtained by estimating the parameters of a reflectance model from the reconstructed shape and color images, assuming that a single light source is attached to the camera. We measured the shape and reflectance of some real objects and presented the material appearance of the objects using mobile AR. It was confirmed that users were able to obtain the perception of materials from changes in gloss and burnish of the objects by rotating the objects using their own hand.", "abstracts": [ { "abstractType": "Regular", "content": "In this paper, we propose a system that can reproduce the material appearance of real objects using mobile augmented reality (AR). Our proposed system allows a user to manipulate a virtual object, whose model is generated from the shape and reflectance of a real object, using the user's own hand. 
The shape of the real object is reconstructed by integrating depth images of the object, which are captured using an RGB-D camera from different directions. The reflectance of the object is obtained by estimating the parameters of a reflectance model from the reconstructed shape and color images, assuming that a single light source is attached to the camera. We measured the shape and reflectance of some real objects and presented the material appearance of the objects using mobile AR. It was confirmed that users were able to obtain the perception of materials from changes in gloss and burnish of the objects by rotating the objects using their own hand.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In this paper, we propose a system that can reproduce the material appearance of real objects using mobile augmented reality (AR). Our proposed system allows a user to manipulate a virtual object, whose model is generated from the shape and reflectance of a real object, using the user's own hand. The shape of the real object is reconstructed by integrating depth images of the object, which are captured using an RGB-D camera from different directions. The reflectance of the object is obtained by estimating the parameters of a reflectance model from the reconstructed shape and color images, assuming that a single light source is attached to the camera. We measured the shape and reflectance of some real objects and presented the material appearance of the objects using mobile AR. 
It was confirmed that users were able to obtain the perception of materials from changes in gloss and burnish of the objects by rotating the objects using their own hand.", "fno": "08699176", "keywords": [ "Augmented Reality", "Image Colour Analysis", "Image Reconstruction", "Mobile Computing", "Object Reflectance", "RGB D Camera", "Shape Reconstruction", "Depth Images", "Color Images", "Reflectance Model", "Virtual Object", "Mobile Augmented Reality", "Reproducing Material Appearance", "Shape", "Cameras", "Shape Measurement", "Augmented Reality", "Image Reconstruction", "Light Sources", "Color", "Reflectance Property", "Reflectance Measurement", "Object Manipulation", "Mobile Display", "RGB D Camera", "Human Centered Computing", "Human Computer Interaction HCI", "Interaction Paradigms", "Mixed Augmented Reality" ], "authors": [ { "affiliation": "Saitama University", "fullName": "Seiji Tsunezaki", "givenName": "Seiji", "surname": "Tsunezaki", "__typename": "ArticleAuthorType" }, { "affiliation": "Saitama University", "fullName": "Ryota Nomura", "givenName": "Ryota", "surname": "Nomura", "__typename": "ArticleAuthorType" }, { "affiliation": "Saitama University", "fullName": "Takashi Komuro", "givenName": "Takashi", "surname": "Komuro", "__typename": "ArticleAuthorType" }, { "affiliation": "Tokyo Metropolitan College of Industrial Technology", "fullName": "Shoji Yamamoto", "givenName": "Shoji", "surname": "Yamamoto", "__typename": "ArticleAuthorType" }, { "affiliation": "Chiba University", "fullName": "Norimichi Tsumura", "givenName": "Norimichi", "surname": "Tsumura", "__typename": "ArticleAuthorType" } ], "idPrefix": "ismar-adjunct", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2018-10-01T00:00:00", "pubType": "proceedings", "pages": "196-197", "year": "2018", "issn": null, "isbn": "978-1-5386-7592-2", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { 
"previous": { "fno": "08699280", "articleId": "19F1RBpcB0s", "__typename": "AdjacentArticleType" }, "next": { "fno": "08699329", "articleId": "19F1N2RvNGU", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/vr/2005/8929/0/01492784", "title": "Dynamic Texturing of Real Objects in an Augmented Reality System", "doi": null, "abstractUrl": "/proceedings-article/vr/2005/01492784/12OmNAnuTkI", "parentPublication": { "id": "proceedings/vr/2005/8929/0", "title": "IEEE Virtual Reality 2005", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2008/2174/0/04761511", "title": "Shape from self-calibration and Fast Marching Method", "doi": null, "abstractUrl": "/proceedings-article/icpr/2008/04761511/12OmNBTs7rO", "parentPublication": { "id": "proceedings/icpr/2008/2174/0", "title": "ICPR 2008 19th International Conference on Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wqv/1993/3692/0/00262951", "title": "Learning and recognition of 3D objects from appearance", "doi": null, "abstractUrl": "/proceedings-article/wqv/1993/00262951/12OmNBTs7ym", "parentPublication": { "id": "proceedings/wqv/1993/3692/0", "title": "Proceedings IEEE Workshop on Qualitative Vision", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2005/8929/0/01492781", "title": "Dynamic texturing of real objects in an augmented reality system", "doi": null, "abstractUrl": "/proceedings-article/vr/2005/01492781/12OmNwkhTh2", "parentPublication": { "id": "proceedings/vr/2005/8929/0", "title": "IEEE Virtual Reality 2005", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/1992/2855/0/00223149", "title": "Extracting the shape and roughness of specular lobe objects using four light photometric stereo", "doi": 
null, "abstractUrl": "/proceedings-article/cvpr/1992/00223149/12OmNwoxSc1", "parentPublication": { "id": "proceedings/cvpr/1992/2855/0", "title": "Proceedings 1992 IEEE Computer Society Conference on Computer Vision and Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2012/2216/0/06460071", "title": "Shading derivation from an unspecified object for augmented reality", "doi": null, "abstractUrl": "/proceedings-article/icpr/2012/06460071/12OmNzAohXY", "parentPublication": { "id": "proceedings/icpr/2012/2216/0", "title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/robot/1992/2720/0/00220132", "title": "Inspecting specular lobe objects using four light sources", "doi": null, "abstractUrl": "/proceedings-article/robot/1992/00220132/12OmNzd7bWl", "parentPublication": { "id": "proceedings/robot/1992/2720/0", "title": "Proceedings 1992 IEEE International Conference on Robotics and Automation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2018/7592/0/08699239", "title": "Reproducing Material Appearance of Real Objects Using Mobile Augmented Reality", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699239/19F1QemV928", "parentPublication": { "id": "proceedings/ismar-adjunct/2018/7592/0", "title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2022/8402/0/840200a632", "title": "Material Reflectance Property Estimation of Complex Objects Using an Attention Network", "doi": null, "abstractUrl": "/proceedings-article/vrw/2022/840200a632/1CJcD7RtQVq", "parentPublication": { "id": "proceedings/vrw/2022/8402/0", "title": "2022 IEEE 
Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2021/1298/0/129800a384", "title": "An Empirical Study of Size Discrimination in Augmented Reality", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2021/129800a384/1yeQWO0csfe", "parentPublication": { "id": "proceedings/ismar-adjunct/2021/1298/0", "title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1yBEZe3hqyQ", "title": "2021 International Conference on Cyberworlds (CW)", "acronym": "cw", "groupId": "1000175", "volume": "0", "displayVolume": "0", "year": "2021", "__typename": "ProceedingType" }, "article": { "id": "1yBEZLFBWGA", "doi": "10.1109/CW52790.2021.00011", "title": "Illumination-aware Digital Image Compositing for Full-length Human Figures", "normalizedTitle": "Illumination-aware Digital Image Compositing for Full-length Human Figures", "abstract": "In this paper, a novel compositor to realize illumination-aware digital image compositing is proposed. Digital image compositing is a technique to re-synthesize a single image from multiple visual elements. Assembling separated visual elements leads to highly efficient content creation at risk of losing visual coherence. Visual coherence has two types: positional and illumination. Compositing while maintaining illumination coherence is considered a challenging task, because light transport information is needed to reproduce global illumination effects&#x2014;color bleeding, soft shadows, caustics, and so on. The pipeline of the proposed compositor incorporates shape reconstruction, as well as global illumination simulation and relighting; thus, the compositor can reproduce background-to-foreground color bleeding and soft shadows. It is empirically evaluated that the proposed compositor compares favorably with rendering both in terms of quality and speed. Because it is in principle impossible to render the foreground and background elements together, the proposed compositor is inevitably selected to generate images with the two types of visual coherence. This leads to efficient construction of photorealistic cyberworlds.", "abstracts": [ { "abstractType": "Regular", "content": "In this paper, a novel compositor to realize illumination-aware digital image compositing is proposed. Digital image compositing is a technique to re-synthesize a single image from multiple visual elements. 
Assembling separated visual elements leads to highly efficient content creation at risk of losing visual coherence. Visual coherence has two types: positional and illumination. Compositing while maintaining illumination coherence is considered a challenging task, because light transport information is needed to reproduce global illumination effects&#x2014;color bleeding, soft shadows, caustics, and so on. The pipeline of the proposed compositor incorporates shape reconstruction, as well as global illumination simulation and relighting; thus, the compositor can reproduce background-to-foreground color bleeding and soft shadows. It is empirically evaluated that the proposed compositor compares favorably with rendering both in terms of quality and speed. Because it is in principle impossible to render the foreground and background elements together, the proposed compositor is inevitably selected to generate images with the two types of visual coherence. This leads to efficient construction of photorealistic cyberworlds.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In this paper, a novel compositor to realize illumination-aware digital image compositing is proposed. Digital image compositing is a technique to re-synthesize a single image from multiple visual elements. Assembling separated visual elements leads to highly efficient content creation at risk of losing visual coherence. Visual coherence has two types: positional and illumination. Compositing while maintaining illumination coherence is considered a challenging task, because light transport information is needed to reproduce global illumination effects—color bleeding, soft shadows, caustics, and so on. The pipeline of the proposed compositor incorporates shape reconstruction, as well as global illumination simulation and relighting; thus, the compositor can reproduce background-to-foreground color bleeding and soft shadows. 
It is empirically evaluated that the proposed compositor compares favorably with rendering both in terms of quality and speed. Because it is in principle impossible to render the foreground and background elements together, the proposed compositor is inevitably selected to generate images with the two types of visual coherence. This leads to efficient construction of photorealistic cyberworlds.", "fno": "406500a017", "keywords": [ "Image Colour Analysis", "Image Reconstruction", "Lighting", "Rendering Computer Graphics", "Single Image", "Multiple Visual Elements", "Separated Visual Elements", "Visual Coherence", "Positional Illumination", "Illumination Coherence", "Global Illumination Effects Color", "Soft Shadows", "Compositor", "Global Illumination Simulation", "Relighting", "Illumination Aware Digital Image Compositing", "Visualization", "Image Color Analysis", "Digital Images", "Lighting", "Coherence", "Reflection", "Skin", "Image Processing", "Digital Image Compositing", "Visual Coherence", "Perception" ], "authors": [ { "affiliation": "Keio University,Center for Information and Computer Science,Yokohama,Japan", "fullName": "Masaru Ohkawara", "givenName": "Masaru", "surname": "Ohkawara", "__typename": "ArticleAuthorType" }, { "affiliation": "Keio University,Department of Information and Computer Science,Yokohama,Japan", "fullName": "Issei Fujishiro", "givenName": "Issei", "surname": "Fujishiro", "__typename": "ArticleAuthorType" } ], "idPrefix": "cw", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2021-09-01T00:00:00", "pubType": "proceedings", "pages": "17-24", "year": "2021", "issn": null, "isbn": "978-1-6654-4065-3", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "406500a009", "articleId": "1yBF45K2HQc", "__typename": "AdjacentArticleType" }, "next": { "fno": "406500a025", "articleId": "1yBF37laMN2", "__typename": 
"AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/cgiv/2014/5720/0/5720a083", "title": "Illumination Invariant Measuring of Skin Pigmentation", "doi": null, "abstractUrl": "/proceedings-article/cgiv/2014/5720a083/12OmNCd2rHp", "parentPublication": { "id": "proceedings/cgiv/2014/5720/0", "title": "2014 11th International Conference on Computer Graphics, Imaging and Visualization (CGIV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cbms/2016/9036/0/9036a292", "title": "An LED-Based Tunable Illumination for Diverse Medical Applications", "doi": null, "abstractUrl": "/proceedings-article/cbms/2016/9036a292/12OmNwDSdHu", "parentPublication": { "id": "proceedings/cbms/2016/9036/0", "title": "2016 IEEE 29th International Symposium on Computer-Based Medical Systems (CBMS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2006/03/v0353", "title": "Accurate Direct Illumination Using Iterative Adaptive Sampling", "doi": null, "abstractUrl": "/journal/tg/2006/03/v0353/13rRUIJuxpo", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/09904431", "title": "Neural Global Illumination: Interactive Indirect Illumination Prediction under Dynamic Area Lights", "doi": null, "abstractUrl": "/journal/tg/5555/01/09904431/1H0GdxnVnws", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2023/4815/0/481500a044", "title": "Delta Path Tracing for Real-Time Global Illumination in Mixed Reality", "doi": null, "abstractUrl": "/proceedings-article/vr/2023/481500a044/1MNgDFU0UVO", "parentPublication": { "id": 
"proceedings/vr/2023/4815/0", "title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cw/2019/2297/0/229700a382", "title": "Realizing Pseudo Color Bleeding with a Deep Composite Image", "doi": null, "abstractUrl": "/proceedings-article/cw/2019/229700a382/1fHkmgEapVu", "parentPublication": { "id": "proceedings/cw/2019/2297/0", "title": "2019 International Conference on Cyberworlds (CW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2020/01/08951772", "title": "Origins of Global Illumination", "doi": null, "abstractUrl": "/magazine/cg/2020/01/08951772/1goL8Hzhdcs", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2019/4765/0/476500a189", "title": "Deep Consistent Illumination in Augmented Reality", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2019/476500a189/1gyslmCJMjK", "parentPublication": { "id": "proceedings/ismar-adjunct/2019/4765/0", "title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/04/09194085", "title": "Lightweight Bilateral Convolutional Neural Networks for Interactive Single-Bounce Diffuse Indirect Illumination", "doi": null, "abstractUrl": "/journal/tg/2022/04/09194085/1n0Ehetbdo4", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2020/8508/0/850800a001", "title": "Foveated Instant Radiosity", "doi": null, "abstractUrl": "/proceedings-article/ismar/2020/850800a001/1pysxhw4Bqw", "parentPublication": { 
"id": "proceedings/ismar/2020/8508/0", "title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNxWuirq", "title": "2012 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "acronym": "ismar", "groupId": "1000465", "volume": "0", "displayVolume": "0", "year": "2012", "__typename": "ProceedingType" }, "article": { "id": "12OmNASILPn", "doi": "10.1109/ISMAR.2012.6402544", "title": "Real-time surface light-field capture for augmentation of planar specular surfaces", "normalizedTitle": "Real-time surface light-field capture for augmentation of planar specular surfaces", "abstract": "A single hand-held camera provides an easily accessible but potentially extremely powerful setup for augmented reality. Capabilities which previously required expensive and complicated infrastructure have gradually become possible from a live monocular video feed, such as accurate camera tracking and, most recently, dense 3D scene reconstruction. A new frontier is to work towards recovering the reflectance properties of general surfaces and the lighting configuration in a scene without the need for probes, omni-directional cameras or specialised light-field cameras. Specular lighting phenomena cause effects in a video stream which can lead current tracking and reconstruction algorithms to fail. However, the potential exists to measure and use these effects to estimate deeper physical details about an environment, enabling advanced scene understanding and more convincing AR. In this paper we present an algorithm for real-time surface light-field capture from a single hand-held camera, which is able to capture dense illumination information for general specular surfaces. Our system incorporates a guidance mechanism to help the user interactively during capture. We then split the light-field into its diffuse and specular components, and show that the specular component can be used for estimation of an environment map. 
This enables the convincing placement of an augmentation on a specular surface such as a shiny book, with realistic synthesized shadow, reflection and occlusion of specularities as the viewpoint changes. Our method currently works for planar scenes, but the surface light-field representation makes it ideal for future combination with dense 3D reconstruction methods.", "abstracts": [ { "abstractType": "Regular", "content": "A single hand-held camera provides an easily accessible but potentially extremely powerful setup for augmented reality. Capabilities which previously required expensive and complicated infrastructure have gradually become possible from a live monocular video feed, such as accurate camera tracking and, most recently, dense 3D scene reconstruction. A new frontier is to work towards recovering the reflectance properties of general surfaces and the lighting configuration in a scene without the need for probes, omni-directional cameras or specialised light-field cameras. Specular lighting phenomena cause effects in a video stream which can lead current tracking and reconstruction algorithms to fail. However, the potential exists to measure and use these effects to estimate deeper physical details about an environment, enabling advanced scene understanding and more convincing AR. In this paper we present an algorithm for real-time surface light-field capture from a single hand-held camera, which is able to capture dense illumination information for general specular surfaces. Our system incorporates a guidance mechanism to help the user interactively during capture. We then split the light-field into its diffuse and specular components, and show that the specular component can be used for estimation of an environment map. This enables the convincing placement of an augmentation on a specular surface such as a shiny book, with realistic synthesized shadow, reflection and occlusion of specularities as the viewpoint changes. 
Our method currently works for planar scenes, but the surface light-field representation makes it ideal for future combination with dense 3D reconstruction methods.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "A single hand-held camera provides an easily accessible but potentially extremely powerful setup for augmented reality. Capabilities which previously required expensive and complicated infrastructure have gradually become possible from a live monocular video feed, such as accurate camera tracking and, most recently, dense 3D scene reconstruction. A new frontier is to work towards recovering the reflectance properties of general surfaces and the lighting configuration in a scene without the need for probes, omni-directional cameras or specialised light-field cameras. Specular lighting phenomena cause effects in a video stream which can lead current tracking and reconstruction algorithms to fail. However, the potential exists to measure and use these effects to estimate deeper physical details about an environment, enabling advanced scene understanding and more convincing AR. In this paper we present an algorithm for real-time surface light-field capture from a single hand-held camera, which is able to capture dense illumination information for general specular surfaces. Our system incorporates a guidance mechanism to help the user interactively during capture. We then split the light-field into its diffuse and specular components, and show that the specular component can be used for estimation of an environment map. This enables the convincing placement of an augmentation on a specular surface such as a shiny book, with realistic synthesized shadow, reflection and occlusion of specularities as the viewpoint changes. 
Our method currently works for planar scenes, but the surface light-field representation makes it ideal for future combination with dense 3D reconstruction methods.", "fno": "06402544", "keywords": [ "Cameras", "Lighting", "Real Time Systems", "Surface Treatment", "Image Color Analysis", "Surface Texture", "Light Sources", "AR", "Real Time", "Light Fields", "Illumination Estimation", "GPU", "SLAM" ], "authors": [ { "affiliation": "Imperial College London, UK", "fullName": "Jan Jachnik", "givenName": "Jan", "surname": "Jachnik", "__typename": "ArticleAuthorType" }, { "affiliation": "Imperial College London, UK", "fullName": "Richard A. Newcombe", "givenName": "Richard A.", "surname": "Newcombe", "__typename": "ArticleAuthorType" }, { "affiliation": "Imperial College London, UK", "fullName": "Andrew J. Davison", "givenName": "Andrew J.", "surname": "Davison", "__typename": "ArticleAuthorType" } ], "idPrefix": "ismar", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2012-11-01T00:00:00", "pubType": "proceedings", "pages": "91-97", "year": "2012", "issn": null, "isbn": "978-1-4673-4660-3", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "06402542", "articleId": "12OmNqNXEoZ", "__typename": "AdjacentArticleType" }, "next": { "fno": "06402546", "articleId": "12OmNwJPN1r", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/cvprw/2017/0733/0/0733b735", "title": "Surface Normal Reconstruction from Specular Information in Light Field Data", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2017/0733b735/12OmNAP1YZr", "parentPublication": { "id": "proceedings/cvprw/2017/0733/0", "title": "2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/crv/2012/4683/0/4683a361", "title": "Specular-Reduced Imaging for Inspection of Machined Surfaces", "doi": null, "abstractUrl": "/proceedings-article/crv/2012/4683a361/12OmNAle70a", "parentPublication": { "id": "proceedings/crv/2012/4683/0", "title": "2012 Ninth Conference on Computer and Robot Vision", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dim/1997/7943/0/79430335", "title": "Industrial Painting Inspection using Specular Sharpness", "doi": null, "abstractUrl": "/proceedings-article/3dim/1997/79430335/12OmNBTawk1", "parentPublication": { "id": "proceedings/3dim/1997/7943/0", "title": "3D Digital Imaging and Modeling, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/1993/3880/0/00341164", "title": "Roughness and shape of specular lobe surfaces using photometric sampling method", "doi": null, "abstractUrl": "/proceedings-article/cvpr/1993/00341164/12OmNCdBDR7", "parentPublication": { "id": "proceedings/cvpr/1993/3880/0", "title": "Proceedings of IEEE Conference on Computer Vision and Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/1990/2062/1/00118070", "title": "Shape from shading via the fusion of specular and Lambertian image components", "doi": null, "abstractUrl": "/proceedings-article/icpr/1990/00118070/12OmNzUxO6U", "parentPublication": { "id": "proceedings/icpr/1990/2062/1", "title": "Proceedings 10th International Conference on Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/robot/1992/2720/0/00220132", "title": "Inspecting specular lobe objects using four light sources", "doi": null, "abstractUrl": "/proceedings-article/robot/1992/00220132/12OmNzd7bWl", "parentPublication": { "id": "proceedings/robot/1992/2720/0", "title": "Proceedings 1992 IEEE 
International Conference on Robotics and Automation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2018/05/07869421", "title": "A Geometric Model for Specularity Prediction on Planar Surfaces with Multiple Light Sources", "doi": null, "abstractUrl": "/journal/tg/2018/05/07869421/13rRUwdIOUT", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2016/06/07254196", "title": "Depth Estimation and Specular Removal for Glossy Surfaces Using Point and Line Consistency with Light-Field Cameras", "doi": null, "abstractUrl": "/journal/tp/2016/06/07254196/13rRUwdrdM1", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/1988/01/i0044", "title": "Structured Highlight Inspection of Specular Surfaces", "doi": null, "abstractUrl": "/journal/tp/1988/01/i0044/13rRUygT7yR", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2023/06/09689957", "title": "Efficient Specular Glints Rendering With Differentiable Regularization", "doi": null, "abstractUrl": "/journal/tg/2023/06/09689957/1AlCfIlPhfy", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNyFCvPo", "title": "2013 IEEE International Conference on Computer Vision (ICCV)", "acronym": "iccv", "groupId": "1000149", "volume": "0", "displayVolume": "0", "year": "2013", "__typename": "ProceedingType" }, "article": { "id": "12OmNAoUTgY", "doi": "10.1109/ICCV.2013.311", "title": "Multi-view Normal Field Integration for 3D Reconstruction of Mirroring Objects", "normalizedTitle": "Multi-view Normal Field Integration for 3D Reconstruction of Mirroring Objects", "abstract": "In this paper, we present a novel, robust multi-view normal field integration technique for reconstructing the full 3D shape of mirroring objects. We employ a turntable-based setup with several cameras and displays. These are used to display illumination patterns which are reflected by the object surface. The pattern information observed in the cameras enables the calculation of individual volumetric normal fields for each combination of camera, display and turntable angle. As the pattern information might be blurred depending on the surface curvature or due to non-perfect mirroring surface characteristics, we locally adapt the decoding to the finest still resolvable pattern resolution. In complex real-world scenarios, the normal fields contain regions without observations due to occlusions and outliers due to interreflections and noise. Therefore, a robust reconstruction using only normal information is challenging. Via a non-parametric clustering of normal hypotheses derived for each point in the scene, we obtain both the most likely local surface normal and a local surface consistency estimate. This information is utilized in an iterative min-cut based variational approach to reconstruct the surface geometry.", "abstracts": [ { "abstractType": "Regular", "content": "In this paper, we present a novel, robust multi-view normal field integration technique for reconstructing the full 3D shape of mirroring objects. 
We employ a turntable-based setup with several cameras and displays. These are used to display illumination patterns which are reflected by the object surface. The pattern information observed in the cameras enables the calculation of individual volumetric normal fields for each combination of camera, display and turntable angle. As the pattern information might be blurred depending on the surface curvature or due to non-perfect mirroring surface characteristics, we locally adapt the decoding to the finest still resolvable pattern resolution. In complex real-world scenarios, the normal fields contain regions without observations due to occlusions and outliers due to interreflections and noise. Therefore, a robust reconstruction using only normal information is challenging. Via a non-parametric clustering of normal hypotheses derived for each point in the scene, we obtain both the most likely local surface normal and a local surface consistency estimate. This information is utilized in an iterative min-cut based variational approach to reconstruct the surface geometry.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In this paper, we present a novel, robust multi-view normal field integration technique for reconstructing the full 3D shape of mirroring objects. We employ a turntable-based setup with several cameras and displays. These are used to display illumination patterns which are reflected by the object surface. The pattern information observed in the cameras enables the calculation of individual volumetric normal fields for each combination of camera, display and turntable angle. As the pattern information might be blurred depending on the surface curvature or due to non-perfect mirroring surface characteristics, we locally adapt the decoding to the finest still resolvable pattern resolution. 
In complex real-world scenarios, the normal fields contain regions without observations due to occlusions and outliers due to interreflections and noise. Therefore, a robust reconstruction using only normal information is challenging. Via a non-parametric clustering of normal hypotheses derived for each point in the scene, we obtain both the most likely local surface normal and a local surface consistency estimate. This information is utilized in an iterative min-cut based variational approach to reconstruct the surface geometry.", "fno": "2840c504", "keywords": [ "Surface Reconstruction", "Cameras", "Three Dimensional Displays", "Light Sources", "Lighting", "Geometry", "Robustness", "Mirroring Objects", "3 D Reconstruction", "Multi View Normal Field Integration" ], "authors": [ { "affiliation": null, "fullName": "Michael Weinmann", "givenName": "Michael", "surname": "Weinmann", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Aljosa Osep", "givenName": "Aljosa", "surname": "Osep", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Roland Ruiters", "givenName": "Roland", "surname": "Ruiters", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Reinhard Klein", "givenName": "Reinhard", "surname": "Klein", "__typename": "ArticleAuthorType" } ], "idPrefix": "iccv", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2013-12-01T00:00:00", "pubType": "proceedings", "pages": "2504-2511", "year": "2013", "issn": "1550-5499", "isbn": "978-1-4799-2840-8", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "2840c496", "articleId": "12OmNC4O4Aa", "__typename": "AdjacentArticleType" }, "next": { "fno": "2840c512", "articleId": "12OmNwAKCLO", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": 
"proceedings/cvprw/2017/0733/0/0733b735", "title": "Surface Normal Reconstruction from Specular Information in Light Field Data", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2017/0733b735/12OmNAP1YZr", "parentPublication": { "id": "proceedings/cvprw/2017/0733/0", "title": "2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/pbg/2005/20/0/01500323", "title": "A practical structured light acquisition system for point-based geometry and texture", "doi": null, "abstractUrl": "/proceedings-article/pbg/2005/01500323/12OmNCdTeQ0", "parentPublication": { "id": "proceedings/pbg/2005/20/0", "title": "Point-Based Graphics 2005", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccvw/2015/9711/0/5720a175", "title": "Surface Recovery: Fusion of Image and Point Cloud", "doi": null, "abstractUrl": "/proceedings-article/iccvw/2015/5720a175/12OmNxE2mUe", "parentPublication": { "id": "proceedings/iccvw/2015/9711/0", "title": "2015 IEEE International Conference on Computer Vision Workshop (ICCVW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2016/8851/0/8851e369", "title": "3D Reconstruction of Transparent Objects with Position-Normal Consistency", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2016/8851e369/12OmNyKrH6Z", "parentPublication": { "id": "proceedings/cvpr/2016/8851/0", "title": "2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2017/6067/0/08019481", "title": "Near-surface lighting estimation and reconstruction", "doi": null, "abstractUrl": "/proceedings-article/icme/2017/08019481/12OmNzWx052", "parentPublication": { "id": "proceedings/icme/2017/6067/0", "title": "2017 IEEE 
International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2019/12/08456615", "title": "Height-from-Polarisation with Unknown Lighting or Albedo", "doi": null, "abstractUrl": "/journal/tp/2019/12/08456615/13rRUwh80CL", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2015/10/07006767", "title": "From Intensity Profile to Surface Normal: Photometric Stereo for Unknown Light Sources and Isotropic Reflectances", "doi": null, "abstractUrl": "/journal/tp/2015/10/07006767/13rRUxbTMAi", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2013/12/ttp2013122941", "title": "On Differential Photometric Reconstruction for Unknown, Isotropic BRDFs", "doi": null, "abstractUrl": "/journal/tp/2013/12/ttp2013122941/13rRUygT7oc", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2019/4803/0/480300b052", "title": "A Differential Volumetric Approach to Multi-View Photometric Stereo", "doi": null, "abstractUrl": "/proceedings-article/iccv/2019/480300b052/1hVlAZv5zfG", "parentPublication": { "id": "proceedings/iccv/2019/4803/0", "title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2020/07/09064908", "title": "Shape and Reflectance Reconstruction Using Concentric Multi-Spectral Light Field", "doi": null, "abstractUrl": "/journal/tp/2020/07/09064908/1iZGtGUiMhO", "parentPublication": { "id": 
"trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNzXFowZ", "title": "2013 IEEE Workshop on Robot Vision (WORV 2013)", "acronym": "worv", "groupId": "1802687", "volume": "0", "displayVolume": "0", "year": "2013", "__typename": "ProceedingType" }, "article": { "id": "12OmNBVrjoU", "doi": "10.1109/WORV.2013.6521920", "title": "Near surface light source estimation from a single view image", "normalizedTitle": "Near surface light source estimation from a single view image", "abstract": "Several techniques have been developed for estimating light source position in indoor or outdoor environment. However, those techniques assume that the light source can be approximated by a point, which cannot be applied safely to, for example, some case of Photometric Stereo reconstruction, when the light source is placed quite close to a small-size target, and hence the size of light source cannot be ignored. In this paper, we present a novel approach for estimating light source from single image of a scene that is illuminated by near surface light source. We propose to employ a shiny sphere and a Lambertion plate as light probe to locate light source position, where albedo variance of the Lambertian plate is used as the basis of the object function. We also illustrate the convexity of this object function and propose an efficient way to search the optimal value, i.e. source position. We test our calibration results on real images by means of Photometric Stereo reconstruction and image rendering, and both testing results show the accuracy of our estimation framework.", "abstracts": [ { "abstractType": "Regular", "content": "Several techniques have been developed for estimating light source position in indoor or outdoor environment. 
However, those techniques assume that the light source can be approximated by a point, which cannot be applied safely to, for example, some case of Photometric Stereo reconstruction, when the light source is placed quite close to a small-size target, and hence the size of light source cannot be ignored. In this paper, we present a novel approach for estimating light source from single image of a scene that is illuminated by near surface light source. We propose to employ a shiny sphere and a Lambertion plate as light probe to locate light source position, where albedo variance of the Lambertian plate is used as the basis of the object function. We also illustrate the convexity of this object function and propose an efficient way to search the optimal value, i.e. source position. We test our calibration results on real images by means of Photometric Stereo reconstruction and image rendering, and both testing results show the accuracy of our estimation framework.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Several techniques have been developed for estimating light source position in indoor or outdoor environment. However, those techniques assume that the light source can be approximated by a point, which cannot be applied safely to, for example, some case of Photometric Stereo reconstruction, when the light source is placed quite close to a small-size target, and hence the size of light source cannot be ignored. In this paper, we present a novel approach for estimating light source from single image of a scene that is illuminated by near surface light source. We propose to employ a shiny sphere and a Lambertion plate as light probe to locate light source position, where albedo variance of the Lambertian plate is used as the basis of the object function. We also illustrate the convexity of this object function and propose an efficient way to search the optimal value, i.e. source position. 
We test our calibration results on real images by means of Photometric Stereo reconstruction and image rendering, and both testing results show the accuracy of our estimation framework.", "fno": "06521920", "keywords": [ "Light Sources", "Estimation", "Lighting", "Shape", "Image Reconstruction", "Surface Reconstruction", "Rendering Computer Graphics" ], "authors": [ { "affiliation": "Mech. & Autom. Eng. Dept., Chinese Univ. of Hong Kong, Hong Kong, China", "fullName": "Wu Yuan Xie", "givenName": null, "surname": "Wu Yuan Xie", "__typename": "ArticleAuthorType" }, { "affiliation": "Mech. & Autom. Eng. Dept., Chinese Univ. of Hong Kong, Hong Kong, China", "fullName": "Chi-kit Ronald Chung", "givenName": "Chi-kit Ronald", "surname": "Chung", "__typename": "ArticleAuthorType" } ], "idPrefix": "worv", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2013-01-01T00:00:00", "pubType": "proceedings", "pages": "96-101", "year": "2013", "issn": null, "isbn": "978-1-4673-5646-6", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "06521919", "articleId": "12OmNvjyxNZ", "__typename": "AdjacentArticleType" }, "next": { "fno": "06521921", "articleId": "12OmNzd7bAn", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/cvprw/2017/0733/0/0733b735", "title": "Surface Normal Reconstruction from Specular Information in Light Field Data", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2017/0733b735/12OmNAP1YZr", "parentPublication": { "id": "proceedings/cvprw/2017/0733/0", "title": "2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2017/0457/0/0457e521", "title": "Semi-Calibrated Near Field Photometric Stereo", "doi": null, "abstractUrl": 
"/proceedings-article/cvpr/2017/0457e521/12OmNB0X8uV", "parentPublication": { "id": "proceedings/cvpr/2017/0457/0", "title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/1990/2062/1/00118064", "title": "Automatic sensor and light source positioning for machine vision", "doi": null, "abstractUrl": "/proceedings-article/icpr/1990/00118064/12OmNB8TUfH", "parentPublication": { "id": "proceedings/icpr/1990/2062/1", "title": "Proceedings 10th International Conference on Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/1990/2062/1/00118069", "title": "Reconstructing shape from shading images under point light source illumination", "doi": null, "abstractUrl": "/proceedings-article/icpr/1990/00118069/12OmNBp52vd", "parentPublication": { "id": "proceedings/icpr/1990/2062/1", "title": "Proceedings 10th International Conference on Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/imstw/2015/6732/0/07177862", "title": "Considerations for light sources: For semiconductor light sensor test", "doi": null, "abstractUrl": "/proceedings-article/imstw/2015/07177862/12OmNClQ0qG", "parentPublication": { "id": "proceedings/imstw/2015/6732/0", "title": "2015 20th International Mixed-Signal Testing Workshop (IMSTW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccp/2018/2526/0/08368465", "title": "Near-light photometric stereo using circularly placed point light sources", "doi": null, "abstractUrl": "/proceedings-article/iccp/2018/08368465/12OmNqBbHSi", "parentPublication": { "id": "proceedings/iccp/2018/2526/0", "title": "2018 IEEE International Conference on Computational Photography (ICCP)", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "proceedings/robot/1991/2163/0/00131737", "title": "Automatic planning of light source and camera placement for an active photometric stereo system", "doi": null, "abstractUrl": "/proceedings-article/robot/1991/00131737/12OmNqyUUHw", "parentPublication": { "id": "proceedings/robot/1991/2163/0", "title": "Proceedings. 1991 IEEE International Conference on Robotics and Automation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2004/2158/1/01315037", "title": "Color alignment in texture mapping of images under point light source and general lighting condition", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2004/01315037/12OmNywfKwz", "parentPublication": { "id": "proceedings/cvpr/2004/2158/1", "title": "Proceedings of the 2004 IEEE Computer Society Conference on Computer Vision and Pattern Recognition, 2004. CVPR 2004.", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2017/6067/0/08019481", "title": "Near-surface lighting estimation and reconstruction", "doi": null, "abstractUrl": "/proceedings-article/icme/2017/08019481/12OmNzWx052", "parentPublication": { "id": "proceedings/icme/2017/6067/0", "title": "2017 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cse-euc/2017/3220/1/08005893", "title": "Three Dimensional Surface Reconstruction Based on the Reference Object and PMS", "doi": null, "abstractUrl": "/proceedings-article/cse-euc/2017/08005893/17D45VTRoBX", "parentPublication": { "id": "proceedings/cse-euc/2017/3220/1", "title": "2017 IEEE International Conference on Computational Science and Engineering (CSE) and IEEE International Conference on Embedded and Ubiquitous Computing (EUC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNvRU0cK", "title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "acronym": "ismar", "groupId": "1000465", "volume": "0", "displayVolume": "0", "year": "2016", "__typename": "ProceedingType" }, "article": { "id": "12OmNCd2rxc", "doi": "10.1109/ISMAR.2016.13", "title": "An Empirical Model for Specularity Prediction with Application to Dynamic Retexturing", "normalizedTitle": "An Empirical Model for Specularity Prediction with Application to Dynamic Retexturing", "abstract": "Specularities, which are often visible in images, may be problematic in computer vision since they depend on parameters which are difficult to estimate in practice. We present an empirical model called JOLIMAS: JOint LIght-MAterial Specularity, which allows specularity prediction. JOLIMAS is reconstructed from images of specular reflections observed on a planar surface and implicitly includes light and material properties which are intrinsic to specularities. This work was motivated by the observation that specularities have a conic shape on planar surfaces. A theoretical study on the well known illumination models of Phong and Blinn-Phong was conducted to support the accuracy of this hypothesis. A conic shape is obtained by projecting a quadric on a planar surface. We showed empirically the existence of a fixed quadric whose perspective projection fits the conic shaped specularity in the associated image. JOLIMAS predicts the complex phenomenon of specularity using a simple geometric approach with static parameters on the object material and on the light source shape. It is adapted to indoor light sources such as light bulbs or fluorescent lamps. The performance of the prediction was convincing on synthetic and real sequences. Additionally, we used the specularity prediction for dynamic retexturing and obtained convincing rendering results. 
Further results are presented as supplementary material.", "abstracts": [ { "abstractType": "Regular", "content": "Specularities, which are often visible in images, may be problematic in computer vision since they depend on parameters which are difficult to estimate in practice. We present an empirical model called JOLIMAS: JOint LIght-MAterial Specularity, which allows specularity prediction. JOLIMAS is reconstructed from images of specular reflections observed on a planar surface and implicitly includes light and material properties which are intrinsic to specularities. This work was motivated by the observation that specularities have a conic shape on planar surfaces. A theoretical study on the well known illumination models of Phong and Blinn-Phong was conducted to support the accuracy of this hypothesis. A conic shape is obtained by projecting a quadric on a planar surface. We showed empirically the existence of a fixed quadric whose perspective projection fits the conic shaped specularity in the associated image. JOLIMAS predicts the complex phenomenon of specularity using a simple geometric approach with static parameters on the object material and on the light source shape. It is adapted to indoor light sources such as light bulbs or fluorescent lamps. The performance of the prediction was convincing on synthetic and real sequences. Additionally, we used the specularity prediction for dynamic retexturing and obtained convincing rendering results. Further results are presented as supplementary material.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Specularities, which are often visible in images, may be problematic in computer vision since they depend on parameters which are difficult to estimate in practice. We present an empirical model called JOLIMAS: JOint LIght-MAterial Specularity, which allows specularity prediction. 
JOLIMAS is reconstructed from images of specular reflections observed on a planar surface and implicitly includes light and material properties which are intrinsic to specularities. This work was motivated by the observation that specularities have a conic shape on planar surfaces. A theoretical study on the well known illumination models of Phong and Blinn-Phong was conducted to support the accuracy of this hypothesis. A conic shape is obtained by projecting a quadric on a planar surface. We showed empirically the existence of a fixed quadric whose perspective projection fits the conic shaped specularity in the associated image. JOLIMAS predicts the complex phenomenon of specularity using a simple geometric approach with static parameters on the object material and on the light source shape. It is adapted to indoor light sources such as light bulbs or fluorescent lamps. The performance of the prediction was convincing on synthetic and real sequences. Additionally, we used the specularity prediction for dynamic retexturing and obtained convincing rendering results. 
Further results are presented as supplementary material.", "fno": "3641a044", "keywords": [ "Light Sources", "Computational Modeling", "Shape", "Predictive Models", "Image Reconstruction", "Surface Reconstruction", "Cameras" ], "authors": [ { "affiliation": null, "fullName": "Alexandre Morgand", "givenName": "Alexandre", "surname": "Morgand", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Mohamed Tamaazousti", "givenName": "Mohamed", "surname": "Tamaazousti", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Adrien Bartoli", "givenName": "Adrien", "surname": "Bartoli", "__typename": "ArticleAuthorType" } ], "idPrefix": "ismar", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2016-09-01T00:00:00", "pubType": "proceedings", "pages": "44-53", "year": "2016", "issn": null, "isbn": "978-1-5090-3641-7", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "3641a037", "articleId": "12OmNrJAdMm", "__typename": "AdjacentArticleType" }, "next": { "fno": "3641a054", "articleId": "12OmNrFTr6j", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/crv/2013/4983/0/4983a356", "title": "Real-Time Specularity Detection Using Unnormalized Wiener Entropy", "doi": null, "abstractUrl": "/proceedings-article/crv/2013/4983a356/12OmNvDqsPj", "parentPublication": { "id": "proceedings/crv/2013/4983/0", "title": "2013 International Conference on Computer and Robot Vision", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2013/2869/0/06671772", "title": "Delta Light Propagation Volumes for mixed reality", "doi": null, "abstractUrl": "/proceedings-article/ismar/2013/06671772/12OmNwkhTdN", "parentPublication": { "id": "proceedings/ismar/2013/2869/0", "title": "2013 IEEE International Symposium 
on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2014/7000/1/7000a251", "title": "Colour Helmholtz Stereopsis for Reconstruction of Complex Dynamic Scenes", "doi": null, "abstractUrl": "/proceedings-article/3dv/2014/7000a251/12OmNx3q73E", "parentPublication": { "id": "proceedings/3dv/2014/7000/2", "title": "2014 2nd International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismarw/2016/3740/0/07836506", "title": "Reflectance and Illumination Estimation for Realistic Augmentations of Real Scenes", "doi": null, "abstractUrl": "/proceedings-article/ismarw/2016/07836506/12OmNx5GTXK", "parentPublication": { "id": "proceedings/ismarw/2016/3740/0", "title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2008/2174/0/04761299", "title": "Specularity removal and relighting of 3D object model for virtual exhibition", "doi": null, "abstractUrl": "/proceedings-article/icpr/2008/04761299/12OmNyuPKTC", "parentPublication": { "id": "proceedings/icpr/2008/2174/0", "title": "ICPR 2008 19th International Conference on Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2016/07/07274730", "title": "The Information Available to a Moving Observer on Shape with Unknown, Isotropic BRDFs", "doi": null, "abstractUrl": "/journal/tp/2016/07/07274730/13rRUB6Sq1G", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2018/05/07869421", "title": "A Geometric Model for Specularity Prediction on Planar Surfaces with Multiple Light Sources", "doi": null, 
"abstractUrl": "/journal/tg/2018/05/07869421/13rRUwdIOUT", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2017/11/08007318", "title": "A Multiple-View Geometric Model of Specularities on Non-Planar Shapes with Application to Dynamic Retexturing", "doi": null, "abstractUrl": "/journal/tg/2017/11/08007318/13rRUxOve9O", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/02/09018202", "title": "Detecting Specular Reflections and Cast Shadows to Estimate Reflectance and Illumination of Dynamic Indoor Scenes", "doi": null, "abstractUrl": "/journal/tg/2022/02/09018202/1hN4BrDSVHi", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2020/07/09064908", "title": "Shape and Reflectance Reconstruction Using Concentric Multi-Spectral Light Field", "doi": null, "abstractUrl": "/journal/tp/2020/07/09064908/1iZGtGUiMhO", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNAYoKmw", "title": "2013 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "acronym": "ismar", "groupId": "1000465", "volume": "0", "displayVolume": "0", "year": "2013", "__typename": "ProceedingType" }, "article": { "id": "12OmNwkhTdN", "doi": "10.1109/ISMAR.2013.6671772", "title": "Delta Light Propagation Volumes for mixed reality", "normalizedTitle": "Delta Light Propagation Volumes for mixed reality", "abstract": "Indirect illumination is an important visual cue which has traditionally been neglected in mixed reality applications. We present Delta Light Propagation Volumes, a novel volumetric relighting method for real-time mixed reality applications which allows to simulate the effect of first bounce indirect illumination of synthetic objects onto a real geometry and vice versa. Inspired by Radiance Transfer Fields, we modify Light Propagation Volumes in such a way as to propagate the change in illumination caused by the introduction of a synthetic object into a real scene. This method combines real and virtual light in one representation, provides improved temporal coherence for indirect light compared to previous solutions and implicitly includes smooth shadows.", "abstracts": [ { "abstractType": "Regular", "content": "Indirect illumination is an important visual cue which has traditionally been neglected in mixed reality applications. We present Delta Light Propagation Volumes, a novel volumetric relighting method for real-time mixed reality applications which allows to simulate the effect of first bounce indirect illumination of synthetic objects onto a real geometry and vice versa. Inspired by Radiance Transfer Fields, we modify Light Propagation Volumes in such a way as to propagate the change in illumination caused by the introduction of a synthetic object into a real scene. 
This method combines real and virtual light in one representation, provides improved temporal coherence for indirect light compared to previous solutions and implicitly includes smooth shadows.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Indirect illumination is an important visual cue which has traditionally been neglected in mixed reality applications. We present Delta Light Propagation Volumes, a novel volumetric relighting method for real-time mixed reality applications which allows to simulate the effect of first bounce indirect illumination of synthetic objects onto a real geometry and vice versa. Inspired by Radiance Transfer Fields, we modify Light Propagation Volumes in such a way as to propagate the change in illumination caused by the introduction of a synthetic object into a real scene. This method combines real and virtual light in one representation, provides improved temporal coherence for indirect light compared to previous solutions and implicitly includes smooth shadows.", "fno": "06671772", "keywords": [ "Lighting", "Light Sources", "Image Reconstruction", "Geometry", "Surface Reconstruction", "Virtual Reality", "Cameras", "Real Time Global Illumination", "Mixed Reality" ], "authors": [ { "affiliation": "Fraunhofer IGD, Tech. Univ. 
Darmstadt, Darmstadt, Germany", "fullName": "Tobias Alexander Franke", "givenName": "Tobias Alexander", "surname": "Franke", "__typename": "ArticleAuthorType" } ], "idPrefix": "ismar", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2013-10-01T00:00:00", "pubType": "proceedings", "pages": "125-132", "year": "2013", "issn": null, "isbn": "978-1-4799-2869-9", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "06671771", "articleId": "12OmNySXEVs", "__typename": "AdjacentArticleType" }, "next": { "fno": "06671773", "articleId": "12OmNy7h36V", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/ismar/2010/9343/0/05643556", "title": "Differential Instant Radiosity for mixed reality", "doi": null, "abstractUrl": "/proceedings-article/ismar/2010/05643556/12OmNAkWvti", "parentPublication": { "id": "proceedings/ismar/2010/9343/0", "title": "2010 IEEE International Symposium on Mixed and Augmented Reality", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/worv/2013/5646/0/06521920", "title": "Near surface light source estimation from a single view image", "doi": null, "abstractUrl": "/proceedings-article/worv/2013/06521920/12OmNBVrjoU", "parentPublication": { "id": "proceedings/worv/2013/5646/0", "title": "2013 IEEE Workshop on Robot Vision (WORV 2013)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2015/1727/0/07223335", "title": "Light field projection for lighting reproduction", "doi": null, "abstractUrl": "/proceedings-article/vr/2015/07223335/12OmNs0C9XZ", "parentPublication": { "id": "proceedings/vr/2015/1727/0", "title": "2015 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/pacificvis/2015/6879/0/07156382", "title": "Efficient volume illumination with multiple light sources through selective light updates", "doi": null, "abstractUrl": "/proceedings-article/pacificvis/2015/07156382/12OmNvDZF6A", "parentPublication": { "id": "proceedings/pacificvis/2015/6879/0", "title": "2015 IEEE Pacific Visualization Symposium (PacificVis)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cw/2017/2089/0/2089a056", "title": "Voxel-Based Interactive Rendering of Translucent Materials under Area Lights Using Sparse Samples", "doi": null, "abstractUrl": "/proceedings-article/cw/2017/2089a056/12OmNvDqsQf", "parentPublication": { "id": "proceedings/cw/2017/2089/0", "title": "2017 International Conference on Cyberworlds (CW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2014/6184/0/06948407", "title": "Delta Voxel Cone Tracing", "doi": null, "abstractUrl": "/proceedings-article/ismar/2014/06948407/12OmNxG1yH8", "parentPublication": { "id": "proceedings/ismar/2014/6184/0", "title": "2014 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2021/2812/0/281200c420", "title": "Virtual light transport matrices for non-line-of-sight imaging", "doi": null, "abstractUrl": "/proceedings-article/iccv/2021/281200c420/1BmLtCLCybe", "parentPublication": { "id": "proceedings/iccv/2021/2812/0", "title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2019/2506/0/250600a481", "title": "Single Image Multi-Spectral Photometric Stereo Using a Split U-Shaped CNN", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2019/250600a481/1iTveLKPju8", "parentPublication": { "id": "proceedings/cvprw/2019/2506/0", 
"title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2020/07/09064908", "title": "Shape and Reflectance Reconstruction Using Concentric Multi-Spectral Light Field", "doi": null, "abstractUrl": "/journal/tp/2020/07/09064908/1iZGtGUiMhO", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sibgrapi/2020/9274/0/927400a046", "title": "Screen-space VPL propagation for real-time indirect lighting", "doi": null, "abstractUrl": "/proceedings-article/sibgrapi/2020/927400a046/1p2VyEubYTS", "parentPublication": { "id": "proceedings/sibgrapi/2020/9274/0", "title": "2020 33rd SIBGRAPI Conference on Graphics, Patterns and Images (SIBGRAPI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNxE2mTD", "title": "2015 IEEE International Conference on Computer Vision Workshop (ICCVW)", "acronym": "iccvw", "groupId": "1800041", "volume": "0", "displayVolume": "0", "year": "2015", "__typename": "ProceedingType" }, "article": { "id": "12OmNxE2mUe", "doi": "10.1109/ICCVW.2015.32", "title": "Surface Recovery: Fusion of Image and Point Cloud", "normalizedTitle": "Surface Recovery: Fusion of Image and Point Cloud", "abstract": "The point cloud of the laser scanner is a rich source of information for high level tasks in computer vision such as traffic understanding. However, cost-effective laser scanners provide noisy and low resolution point cloud and they are prone to systematic errors. In this paper, we propose two surface recovery approaches based on geometry and brightness of the surface. The proposed approaches are tested in the realistic outdoor scenarios and the results show that both approaches have superior performance over the-state-of-art methods.", "abstracts": [ { "abstractType": "Regular", "content": "The point cloud of the laser scanner is a rich source of information for high level tasks in computer vision such as traffic understanding. However, cost-effective laser scanners provide noisy and low resolution point cloud and they are prone to systematic errors. In this paper, we propose two surface recovery approaches based on geometry and brightness of the surface. The proposed approaches are tested in the realistic outdoor scenarios and the results show that both approaches have superior performance over the-state-of-art methods.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "The point cloud of the laser scanner is a rich source of information for high level tasks in computer vision such as traffic understanding. However, cost-effective laser scanners provide noisy and low resolution point cloud and they are prone to systematic errors. 
In this paper, we propose two surface recovery approaches based on geometry and brightness of the surface. The proposed approaches are tested in the realistic outdoor scenarios and the results show that both approaches have superior performance over the-state-of-art methods.", "fno": "5720a175", "keywords": [ "Surface Reconstruction", "Three Dimensional Displays", "Image Reconstruction", "Geometry", "Lighting", "Light Sources", "Surface Emitting Lasers" ], "authors": [ { "affiliation": null, "fullName": "Siavash Hosseinyalamdary", "givenName": "Siavash", "surname": "Hosseinyalamdary", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Alper Yilmaz", "givenName": "Alper", "surname": "Yilmaz", "__typename": "ArticleAuthorType" } ], "idPrefix": "iccvw", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2015-12-01T00:00:00", "pubType": "proceedings", "pages": "175-183", "year": "2015", "issn": null, "isbn": "978-1-4673-9711-7", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "5720a166", "articleId": "12OmNBajTL0", "__typename": "AdjacentArticleType" }, "next": { "fno": "5720a184", "articleId": "12OmNyQ7FUj", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/pbg/2005/20/0/01500322", "title": "Surface reconstruction with enriched reproducing kernel particle approximation", "doi": null, "abstractUrl": "/proceedings-article/pbg/2005/01500322/12OmNrkT7zM", "parentPublication": { "id": "proceedings/pbg/2005/20/0", "title": "Point-Based Graphics 2005", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/crv/2015/1986/0/1986a313", "title": "Out-of-Core Surface Reconstruction from Large Point Sets for Infrastructure Inspection", "doi": null, "abstractUrl": 
"/proceedings-article/crv/2015/1986a313/12OmNwp74Lc", "parentPublication": { "id": "proceedings/crv/2015/1986/0", "title": "2015 12th Conference on Computer and Robot Vision (CRV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cis/2016/4840/0/4840a466", "title": "The Detecting Method of Building Deformation Based on Terrestrial Laser Point Cloud", "doi": null, "abstractUrl": "/proceedings-article/cis/2016/4840a466/12OmNxRF72t", "parentPublication": { "id": "proceedings/cis/2016/4840/0", "title": "2016 12th International Conference on Computational Intelligence and Security (CIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/1993/3880/0/00340963", "title": "Fractal surface reconstruction for modeling natural terrain", "doi": null, "abstractUrl": "/proceedings-article/cvpr/1993/00340963/12OmNy3RRJb", "parentPublication": { "id": "proceedings/cvpr/1993/3880/0", "title": "Proceedings of IEEE Conference on Computer Vision and Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2012/2216/0/06460394", "title": "Robust segmentation for multiple planar surface extraction in laser scanning 3D point cloud data", "doi": null, "abstractUrl": "/proceedings-article/icpr/2012/06460394/12OmNzlUKNd", "parentPublication": { "id": "proceedings/icpr/2012/2216/0", "title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/gcrait/2022/8192/0/819200a829", "title": "Feature Recognition and Forming Measurement of Weld Surface Based on Laser Vision", "doi": null, "abstractUrl": "/proceedings-article/gcrait/2022/819200a829/1HcnqBAJTEY", "parentPublication": { "id": "proceedings/gcrait/2022/8192/0", "title": "2022 Global Conference on Robotics, Artificial Intelligence and Information 
Technology (GCRAIT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aiam/2020/9986/0/998600a312", "title": "Study on rapid preparation of self-cleaning superhydrophobic 304 stainless steel surface using nanosecond laser", "doi": null, "abstractUrl": "/proceedings-article/aiam/2020/998600a312/1tweNcltgaI", "parentPublication": { "id": "proceedings/aiam/2020/9986/0", "title": "2020 2nd International Conference on Artificial Intelligence and Advanced Manufacture (AIAM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmcce/2020/2314/0/231400c102", "title": "Weld Surface Imperfection Detection by 3D Reconstruction of Laser Displacement Sensing", "doi": null, "abstractUrl": "/proceedings-article/icmcce/2020/231400c102/1tzyHQxsPjG", "parentPublication": { "id": "proceedings/icmcce/2020/2314/0", "title": "2020 5th International Conference on Mechanical, Control and Computer Engineering (ICMCCE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aemcse/2021/1596/0/159600a336", "title": "Research on 360&#x00B0; 3D point cloud reconstruction technology based on turntable and line structured light stereo vision", "doi": null, "abstractUrl": "/proceedings-article/aemcse/2021/159600a336/1wcdmqMmW4g", "parentPublication": { "id": "proceedings/aemcse/2021/1596/0", "title": "2021 4th International Conference on Advanced Electronic Materials, Computers and Software Engineering (AEMCSE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmeas/2021/9768/0/976800a172", "title": "Point cloud data fitting-based reverse engineering technique for digital model updating of assembly structure", "doi": null, "abstractUrl": "/proceedings-article/icmeas/2021/976800a172/1zuuVnLCToQ", "parentPublication": { "id": "proceedings/icmeas/2021/9768/0", "title": "2021 7th International 
Conference on Mechanical Engineering and Automation Science (ICMEAS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNwtn3tc", "title": "2010 Fifth International Conference on Frontier of Computer Science and Technology (FCST 2010)", "acronym": "fcst", "groupId": "1001309", "volume": "0", "displayVolume": "0", "year": "2010", "__typename": "ProceedingType" }, "article": { "id": "12OmNyNQSQn", "doi": "10.1109/FCST.2010.14", "title": "User-Controlled Geometric Feature Preserving Simplification", "normalizedTitle": "User-Controlled Geometric Feature Preserving Simplification", "abstract": "Many effective automatic surface simplification algorithms have been developed in order to produce approximations from a complex polygonal model. However, previous algorithms often have difficulty identifying and preserving important geometric features such as high-curvature regions when the model is simplified to a very low level of detail. In this paper, we define a new feature constraint quadric error metric and propose a user-controlled geometric feature preserving simplification that aims to improve this weakness. We apply quadric fitting method to estimate the parameters and the local geometry of the surface; then we present adaptive weighting of the quadric which is associated with each vertex, relying on the discrete differential-geometry properties of the vertex. Before the simplification process is executed, our algorithm requires users to specify several global parameters to control the quality of approximations and overall processing time, and then our algorithm can adaptively control the relative importance of different surface regions to implicitly reorder the edge collapses sequence. We also propose additional feature constraint quadrics to control the selection of new vertices' optimal positions for preserving geometric features and properties of the surface. 
This paper shows, with very little effort, our algorithm can preserve various geometric features of the original model and produce approximations with smaller differential-geometry errors than previous algorithms.", "abstracts": [ { "abstractType": "Regular", "content": "Many effective automatic surface simplification algorithms have been developed in order to produce approximations from a complex polygonal model. However, previous algorithms often have difficulty identifying and preserving important geometric features such as high-curvature regions when the model is simplified to a very low level of detail. In this paper, we define a new feature constraint quadric error metric and propose a user-controlled geometric feature preserving simplification that aims to improve this weakness. We apply quadric fitting method to estimate the parameters and the local geometry of the surface; then we present adaptive weighting of the quadric which is associated with each vertex, relying on the discrete differential-geometry properties of the vertex. Before the simplification process is executed, our algorithm requires users to specify several global parameters to control the quality of approximations and overall processing time, and then our algorithm can adaptively control the relative importance of different surface regions to implicitly reorder the edge collapses sequence. We also propose additional feature constraint quadrics to control the selection of new vertices' optimal positions for preserving geometric features and properties of the surface. This paper shows, with very little effort, our algorithm can preserve various geometric features of the original model and produce approximations with smaller differential-geometry errors than previous algorithms.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Many effective automatic surface simplification algorithms have been developed in order to produce approximations from a complex polygonal model. 
However, previous algorithms often have difficulty identifying and preserving important geometric features such as high-curvature regions when the model is simplified to a very low level of detail. In this paper, we define a new feature constraint quadric error metric and propose a user-controlled geometric feature preserving simplification that aims to improve this weakness. We apply quadric fitting method to estimate the parameters and the local geometry of the surface; then we present adaptive weighting of the quadric which is associated with each vertex, relying on the discrete differential-geometry properties of the vertex. Before the simplification process is executed, our algorithm requires users to specify several global parameters to control the quality of approximations and overall processing time, and then our algorithm can adaptively control the relative importance of different surface regions to implicitly reorder the edge collapses sequence. We also propose additional feature constraint quadrics to control the selection of new vertices' optimal positions for preserving geometric features and properties of the surface. 
This paper shows, with very little effort, our algorithm can preserve various geometric features of the original model and produce approximations with smaller differential-geometry errors than previous algorithms.", "fno": "05575931", "keywords": [ "Computational Geometry", "Computer Graphics", "Curve Fitting", "Feature Extraction", "Parameter Estimation", "User Controlled Geometric Feature Preserving Simplification", "Automatic Surface Simplification Algorithms", "Approximations", "Complex Polygonal Model", "Feature Constraint Quadric Error Metric", "Quadric Fitting Method", "Parameter Estimation", "Surface Geometry", "Discrete Differential Geometry Properties", "Vertex", "Processing Time", "Surface Regions", "Edge Collapses Sequence", "Measurement", "Approximation Methods", "Approximation Algorithms", "Geometry", "Mathematical Model", "Equations", "Surface Treatment", "Polygonal Surface Simplification", "Level Of Detail", "Differential Geometry", "Quadric Fitting", "Quadric Error Metrics" ], "authors": [ { "affiliation": null, "fullName": "Zhi Wang", "givenName": "Zhi", "surname": "Wang", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Hui-Ying Li", "givenName": "Hui-Ying", "surname": "Li", "__typename": "ArticleAuthorType" } ], "idPrefix": "fcst", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2010-08-01T00:00:00", "pubType": "proceedings", "pages": "335-340", "year": "2010", "issn": "2159-6301", "isbn": "978-1-4244-7779-1", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "05575930", "articleId": "12OmNwDACyO", "__typename": "AdjacentArticleType" }, "next": { "fno": "05575933", "articleId": "12OmNvq5jHI", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/iscid/2008/3311/2/3311b075", "title": "Research of Edge-Collapse-Based 3-D Model 
Simplification", "doi": null, "abstractUrl": "/proceedings-article/iscid/2008/3311b075/12OmNA14A9f", "parentPublication": { "id": "proceedings/iscid/2008/3311/2", "title": "2008 International Symposium on Computational Intelligence and Design", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/2002/7498/0/7498pauly", "title": "Efficient Simplification of Point-Sampled Surfaces", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2002/7498pauly/12OmNCy2L1K", "parentPublication": { "id": "proceedings/ieee-vis/2002/7498/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icig/2007/2929/0/29290960", "title": "Mesh Simplification Algorithm Based on Quadrangle Collapse", "doi": null, "abstractUrl": "/proceedings-article/icig/2007/29290960/12OmNqJq4gh", "parentPublication": { "id": "proceedings/icig/2007/2929/0", "title": "Image and Graphics, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/pg/2002/1784/0/17840477", "title": "Subdivision Surface Simplification", "doi": null, "abstractUrl": "/proceedings-article/pg/2002/17840477/12OmNviZldv", "parentPublication": { "id": "proceedings/pg/2002/1784/0", "title": "Computer Graphics and Applications, Pacific Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icacte/2008/3489/0/3489a528", "title": "A New Mesh Simplification Algorithm Based on Quadric Error Metrics", "doi": null, "abstractUrl": "/proceedings-article/icacte/2008/3489a528/12OmNxcvh5S", "parentPublication": { "id": "proceedings/icacte/2008/3489/0", "title": "2008 International Conference on Advanced Computer Theory and Engineering (ICACTE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/pg/2000/0868/0/08680202", "title": "Subdivision Surface Fitting Using QEM-Based Mesh Simplification and Reconstruction of Approximated B-Spline Surfaces", "doi": null, "abstractUrl": "/proceedings-article/pg/2000/08680202/12OmNy49sFt", "parentPublication": { "id": "proceedings/pg/2000/0868/0", "title": "Computer Graphics and Applications, Pacific Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/pg/2002/1784/0/17840276", "title": "Discrete Differential Error Metric for Surface Simplification", "doi": null, "abstractUrl": "/proceedings-article/pg/2002/17840276/12OmNyTOss3", "parentPublication": { "id": "proceedings/pg/2002/1784/0", "title": "Computer Graphics and Applications, Pacific Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icig/2007/2929/0/29290954", "title": "Surface Simplification Using multi-edge mesh collapse", "doi": null, "abstractUrl": "/proceedings-article/icig/2007/29290954/12OmNykCceh", "parentPublication": { "id": "proceedings/icig/2007/2929/0", "title": "Image and Graphics, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2007/01/04015405", "title": "Streaming Simplification of Tetrahedral Meshes", "doi": null, "abstractUrl": "/journal/tg/2007/01/04015405/13rRUyY28Yk", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2004/05/v0587", "title": "Simplification of Three-Dimensional Density Maps", "doi": null, "abstractUrl": "/journal/tg/2004/05/v0587/13rRUyYjKa3", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNwLOYSy", "title": "1988 Second International Conference on Computer Vision", "acronym": "ccv", "groupId": "1000149", "volume": "0", "displayVolume": "0", "year": "1988", "__typename": "ProceedingType" }, "article": { "id": "12OmNylsZA6", "doi": "10.1109/CCV.1988.590016", "title": "Geometry From Specularities", "normalizedTitle": "Geometry From Specularities", "abstract": null, "abstracts": [], "normalizedAbstract": null, "fno": "00590016", "keywords": [ "Optical Reflection", "Shape", "Image Edge Detection", "Machine Vision", "Photometry", "Computational Geometry", "Robots", "Computer Science", "Light Sources", "Stereo Vision" ], "authors": [ { "affiliation": "University of Oxford", "fullName": "A. Blake", "givenName": "A.", "surname": "Blake", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "G. Brelstaff", "givenName": "G.", "surname": "Brelstaff", "__typename": "ArticleAuthorType" } ], "idPrefix": "ccv", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "1988-01-01T00:00:00", "pubType": "proceedings", "pages": "394,395,396,397,398,399,400,401,402,403", "year": "1988", "issn": null, "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "00590015", "articleId": "12OmNB8Cj41", "__typename": "AdjacentArticleType" }, "next": { "fno": "00590017", "articleId": "12OmNB0X8qf", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/pmcvg/1999/0271/0/02710039", "title": "Generation of Diffuse and Specular Appearance from Photometric Images", "doi": null, "abstractUrl": "/proceedings-article/pmcvg/1999/02710039/12OmNA0vnOn", "parentPublication": { "id": "proceedings/pmcvg/1999/0271/0", "title": "Photometric Modeling for Computer Vision and Graphics, IEEE Workshop on", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "proceedings/icpr/2008/2174/0/04761511", "title": "Shape from self-calibration and Fast Marching Method", "doi": null, "abstractUrl": "/proceedings-article/icpr/2008/04761511/12OmNBTs7rO", "parentPublication": { "id": "proceedings/icpr/2008/2174/0", "title": "ICPR 2008 19th International Conference on Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cit/2008/2357/0/04594691", "title": "Analysis of planar mirror catadioptric stereo systems based on epipolar geometry", "doi": null, "abstractUrl": "/proceedings-article/cit/2008/04594691/12OmNCwCLsz", "parentPublication": { "id": "proceedings/cit/2008/2357/0", "title": "2008 8th IEEE International Conference on Computer and Information Technology", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/1989/1952/0/00037826", "title": "A theory of photometric stereo for a general class of reflectance maps", "doi": null, "abstractUrl": "/proceedings-article/cvpr/1989/00037826/12OmNqJHFLT", "parentPublication": { "id": "proceedings/cvpr/1989/1952/0", "title": "1989 IEEE Computer Society Conference on Computer Vision and Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/1990/2057/0/00139546", "title": "Simultaneous estimation of shape and reflectance maps from photometric stereo", "doi": null, "abstractUrl": "/proceedings-article/iccv/1990/00139546/12OmNrYlmHl", "parentPublication": { "id": "proceedings/iccv/1990/2057/0", "title": "Proceedings Third International Conference on Computer Vision", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/1992/2855/0/00223149", "title": "Extracting the shape and roughness of specular lobe objects using four light photometric stereo", "doi": null, "abstractUrl": 
"/proceedings-article/cvpr/1992/00223149/12OmNwoxSc1", "parentPublication": { "id": "proceedings/cvpr/1992/2855/0", "title": "Proceedings 1992 IEEE Computer Society Conference on Computer Vision and Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2008/2174/0/04761181", "title": "π-SIFT: A photometric and Scale Invariant Feature Transform", "doi": null, "abstractUrl": "/proceedings-article/icpr/2008/04761181/12OmNy3AgoI", "parentPublication": { "id": "proceedings/icpr/2008/2174/0", "title": "ICPR 2008 19th International Conference on Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/1992/2855/0/00223231", "title": "Active photometric stereo", "doi": null, "abstractUrl": "/proceedings-article/cvpr/1992/00223231/12OmNyLiuyj", "parentPublication": { "id": "proceedings/cvpr/1992/2855/0", "title": "Proceedings 1992 IEEE Computer Society Conference on Computer Vision and Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/pbmcv/1995/7021/0/00514674", "title": "Am Illumination Planner for Convex and Concave Lambertian Polyhedral Objects", "doi": null, "abstractUrl": "/proceedings-article/pbmcv/1995/00514674/12OmNyTwRbr", "parentPublication": { "id": "proceedings/pbmcv/1995/7021/0", "title": "Proceedings of the Workshop on Physics-Based Modeling in Computer Vision", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2009/4420/0/05459381", "title": "Attached shadow coding: Estimating surface normals from shadows under unknown reflectance and lighting conditions", "doi": null, "abstractUrl": "/proceedings-article/iccv/2009/05459381/12OmNylsZWO", "parentPublication": { "id": "proceedings/iccv/2009/4420/0", "title": "2009 IEEE 12th International Conference on Computer Vision (ICCV)", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1hQqfuoOyHu", "title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)", "acronym": "iccv", "groupId": "1000149", "volume": "0", "displayVolume": "0", "year": "2019", "__typename": "ProceedingType" }, "article": { "id": "1hVlAZv5zfG", "doi": "10.1109/ICCV.2019.00114", "title": "A Differential Volumetric Approach to Multi-View Photometric Stereo", "normalizedTitle": "A Differential Volumetric Approach to Multi-View Photometric Stereo", "abstract": "Highly accurate 3D volumetric reconstruction is still an open research topic where the main difficulty is usually related to merging some rough estimations with high frequency details. One of the most promising methods is the fusion between multi-view stereo and photometric stereo images. Beside the intrinsic difficulties that multi-view stereo and photometric stereo in order to work reliably, supplementary problems arise when considered together. In this work, we present a volumetric approach to the multi-view photometric stereo problem. The key point of our method is the signed distance field parameterisation and its relation to the surface normal. This is exploited in order to obtain a linear partial differential equation which is solved in a variational framework, that combines multiple images from multiple points of view in a single system. In addition, the volumetric approach is naturally implemented on an octree, which allows for fast ray-tracing that reliably alleviates occlusions and cast shadows. Our approach is evaluated on synthetic and real data-sets and achieves state-of-the-art results.", "abstracts": [ { "abstractType": "Regular", "content": "Highly accurate 3D volumetric reconstruction is still an open research topic where the main difficulty is usually related to merging some rough estimations with high frequency details. One of the most promising methods is the fusion between multi-view stereo and photometric stereo images. 
Beside the intrinsic difficulties that multi-view stereo and photometric stereo in order to work reliably, supplementary problems arise when considered together. In this work, we present a volumetric approach to the multi-view photometric stereo problem. The key point of our method is the signed distance field parameterisation and its relation to the surface normal. This is exploited in order to obtain a linear partial differential equation which is solved in a variational framework, that combines multiple images from multiple points of view in a single system. In addition, the volumetric approach is naturally implemented on an octree, which allows for fast ray-tracing that reliably alleviates occlusions and cast shadows. Our approach is evaluated on synthetic and real data-sets and achieves state-of-the-art results.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Highly accurate 3D volumetric reconstruction is still an open research topic where the main difficulty is usually related to merging some rough estimations with high frequency details. One of the most promising methods is the fusion between multi-view stereo and photometric stereo images. Beside the intrinsic difficulties that multi-view stereo and photometric stereo in order to work reliably, supplementary problems arise when considered together. In this work, we present a volumetric approach to the multi-view photometric stereo problem. The key point of our method is the signed distance field parameterisation and its relation to the surface normal. This is exploited in order to obtain a linear partial differential equation which is solved in a variational framework, that combines multiple images from multiple points of view in a single system. In addition, the volumetric approach is naturally implemented on an octree, which allows for fast ray-tracing that reliably alleviates occlusions and cast shadows. 
Our approach is evaluated on synthetic and real data-sets and achieves state-of-the-art results.", "fno": "480300b052", "keywords": [ "Image Reconstruction", "Octrees", "Partial Differential Equations", "Ray Tracing", "Stereo Image Processing", "Photometric Stereo Images", "Multiview Photometric Stereo Problem", "Linear Partial Differential Equation", "Differential Volumetric Approach", "3 D Volumetric Reconstruction", "Ray Tracing", "Octrees", "Three Dimensional Displays", "Mathematical Model", "Image Reconstruction", "Light Sources", "Lighting", "Geometry", "Surface Reconstruction" ], "authors": [ { "affiliation": "cambridge university", "fullName": "Fotios Logothetis", "givenName": "Fotios", "surname": "Logothetis", "__typename": "ArticleAuthorType" }, { "affiliation": "Toshiba Research Europe Ltd", "fullName": "Roberto Mecca", "givenName": "Roberto", "surname": "Mecca", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Cambridge", "fullName": "Roberto Cipolla", "givenName": "Roberto", "surname": "Cipolla", "__typename": "ArticleAuthorType" } ], "idPrefix": "iccv", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2019-10-01T00:00:00", "pubType": "proceedings", "pages": "1052-1061", "year": "2019", "issn": null, "isbn": "978-1-7281-4803-8", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "480300b042", "articleId": "1hQqmZxGLQc", "__typename": "AdjacentArticleType" }, "next": { "fno": "480300b062", "articleId": "1hVlRpT15wA", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/wacv/2016/0641/0/07477643", "title": "Unifying diffuse and specular reflections for the photometric stereo problem", "doi": null, "abstractUrl": "/proceedings-article/wacv/2016/07477643/12OmNAsBFHt", "parentPublication": { "id": "proceedings/wacv/2016/0641/0", "title": 
"2016 IEEE Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2017/0457/0/0457e521", "title": "Semi-Calibrated Near Field Photometric Stereo", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2017/0457e521/12OmNB0X8uV", "parentPublication": { "id": "proceedings/cvpr/2017/0457/0", "title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2015/8332/0/8332a028", "title": "x-Hour Outdoor Photometric Stereo", "doi": null, "abstractUrl": "/proceedings-article/3dv/2015/8332a028/12OmNCesr5K", "parentPublication": { "id": "proceedings/3dv/2015/8332/0", "title": "2015 International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccp/2013/6463/0/06528306", "title": "Outdoor photometric stereo", "doi": null, "abstractUrl": "/proceedings-article/iccp/2013/06528306/12OmNrNh0Dh", "parentPublication": { "id": "proceedings/iccp/2013/6463/0", "title": "2013 IEEE International Conference on Computational Photography (ICCP)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2014/7000/1/7000a259", "title": "Multi-view Photometric Stereo by Example", "doi": null, "abstractUrl": "/proceedings-article/3dv/2014/7000a259/12OmNvqW6TF", "parentPublication": { "id": "proceedings/3dv/2014/7000/2", "title": "2014 2nd International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2015/8391/0/8391d478", "title": "Photometric Stereo with Small Angular Variations", "doi": null, "abstractUrl": "/proceedings-article/iccv/2015/8391d478/12OmNwE9OqB", "parentPublication": { "id": "proceedings/iccv/2015/8391/0", "title": "2015 IEEE 
International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2014/7000/1/7000a115", "title": "Close-Range Photometric Stereo with Point Light Sources", "doi": null, "abstractUrl": "/proceedings-article/3dv/2014/7000a115/12OmNx3ZjoX", "parentPublication": { "id": "proceedings/3dv/2014/7000/2", "title": "2014 2nd International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2015/6683/0/6683a302", "title": "Photometric Stereo in the Wild", "doi": null, "abstractUrl": "/proceedings-article/wacv/2015/6683a302/12OmNznkK53", "parentPublication": { "id": "proceedings/wacv/2015/6683/0", "title": "2015 IEEE Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2013/12/ttp2013122941", "title": "On Differential Photometric Reconstruction for Unknown, Isotropic BRDFs", "doi": null, "abstractUrl": "/journal/tp/2013/12/ttp2013122941/13rRUygT7oc", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2019/2506/0/250600a481", "title": "Single Image Multi-Spectral Photometric Stereo Using a Split U-Shaped CNN", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2019/250600a481/1iTveLKPju8", "parentPublication": { "id": "proceedings/cvprw/2019/2506/0", "title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNy2agR3", "title": "International Conference on Collaborative Computing: Networking, Applications and Worksharing", "acronym": "colcom", "groupId": "1001767", "volume": "0", "displayVolume": "0", "year": "2007", "__typename": "ProceedingType" }, "article": { "id": "12OmNASILUZ", "doi": "10.1109/COLCOM.2007.4553882", "title": "Keynote speaker", "normalizedTitle": "Keynote speaker", "abstract": "Social computing has emerged as a broad area of research in HCI and CSCW, encompassing systems that mediate social information across collectivities such as teams, communities, organizations, cohorts, populations, and markets. Such systems are likely to support and make visible social attributes such as identity, reputation, trust, accountability, presence, social roles, expertise, knowledge, and ownership. Social computing is transforming organizations and societies by creating a pervasive technical infrastructure that includes people, organizations, their relationships and activities as fundamental system components, enabling identity, behavior, social relationships, and experience to be used as resources. In this talk, I argue for a broad definition of social computing, selectively review emerging applications, and discuss current research within and beyond IBM that is driving and is driven by the emerging vision of social computing.", "abstracts": [ { "abstractType": "Regular", "content": "Social computing has emerged as a broad area of research in HCI and CSCW, encompassing systems that mediate social information across collectivities such as teams, communities, organizations, cohorts, populations, and markets. Such systems are likely to support and make visible social attributes such as identity, reputation, trust, accountability, presence, social roles, expertise, knowledge, and ownership. 
Social computing is transforming organizations and societies by creating a pervasive technical infrastructure that includes people, organizations, their relationships and activities as fundamental system components, enabling identity, behavior, social relationships, and experience to be used as resources. In this talk, I argue for a broad definition of social computing, selectively review emerging applications, and discuss current research within and beyond IBM that is driving and is driven by the emerging vision of social computing.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Social computing has emerged as a broad area of research in HCI and CSCW, encompassing systems that mediate social information across collectivities such as teams, communities, organizations, cohorts, populations, and markets. Such systems are likely to support and make visible social attributes such as identity, reputation, trust, accountability, presence, social roles, expertise, knowledge, and ownership. Social computing is transforming organizations and societies by creating a pervasive technical infrastructure that includes people, organizations, their relationships and activities as fundamental system components, enabling identity, behavior, social relationships, and experience to be used as resources. In this talk, I argue for a broad definition of social computing, selectively review emerging applications, and discuss current research within and beyond IBM that is driving and is driven by the emerging vision of social computing.", "fno": "04553882", "keywords": [], "authors": [ { "affiliation": "IBM T.J. 
Watson Research Center, USA", "fullName": "Wendy Kellogg", "givenName": "Wendy", "surname": "Kellogg", "__typename": "ArticleAuthorType" } ], "idPrefix": "colcom", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2007-11-01T00:00:00", "pubType": "proceedings", "pages": "505", "year": "2007", "issn": null, "isbn": "978-1-4244-1318-8", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "04553881", "articleId": "12OmNzuZUqn", "__typename": "AdjacentArticleType" }, "next": { "fno": "04553883", "articleId": "12OmNxETapj", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/mdm/2010/4048/0/05489768", "title": "Keynote Speaker Abstracts", "doi": null, "abstractUrl": "/proceedings-article/mdm/2010/05489768/12OmNApu5k3", "parentPublication": { "id": "proceedings/mdm/2010/4048/0", "title": "2010 Eleventh International Conference on Mobile Data Management", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/mse/2011/0548/0/05937072", "title": "Keynote Speaker", "doi": null, "abstractUrl": "/proceedings-article/mse/2011/05937072/12OmNBQkx3o", "parentPublication": { "id": "proceedings/mse/2011/0548/0", "title": "2011 IEEE International Conference on Microelectronic Systems Education (MSE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/malware/2015/0317/0/07413676", "title": "Keynote Speaker", "doi": null, "abstractUrl": "/proceedings-article/malware/2015/07413676/12OmNCeK2j4", "parentPublication": { "id": "proceedings/malware/2015/0317/0", "title": "2015 10th International Conference on Malicious and Unwanted Software (MALWARE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iscst/2005/2387/0/01553274", "title": "Research and 
emerging trends in social computing", "doi": null, "abstractUrl": "/proceedings-article/iscst/2005/01553274/12OmNwGZNFu", "parentPublication": { "id": "proceedings/iscst/2005/2387/0", "title": "2005 International Symposium on Collaborative Technologies and Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iit/2015/8509/0/07381494", "title": "Keynote speaker II: Smart cities and social media", "doi": null, "abstractUrl": "/proceedings-article/iit/2015/07381494/12OmNx6g6jp", "parentPublication": { "id": "proceedings/iit/2015/8509/0", "title": "2015 11th International Conference on Innovations in Information Technology (IIT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/malware/2016/4542/0/07888720", "title": "Keynote speaker", "doi": null, "abstractUrl": "/proceedings-article/malware/2016/07888720/12OmNyXMQkZ", "parentPublication": { "id": "proceedings/malware/2016/4542/0", "title": "2016 11th International Conference on Malicious and Unwanted Software (MALWARE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/colcom/2007/1318/0/04553881", "title": "Keynote speaker", "doi": null, "abstractUrl": "/proceedings-article/colcom/2007/04553881/12OmNzuZUqn", "parentPublication": { "id": "proceedings/colcom/2007/1318/0", "title": "International Conference on Collaborative Computing: Networking, Applications and Worksharing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icime/2018/7616/0/761600a118", "title": "Research on Emotional Mobilization and Legitimation Strategy of Welfare Crowdfunding for Poverty Alleviation", "doi": null, "abstractUrl": "/proceedings-article/icime/2018/761600a118/17D45WODaqd", "parentPublication": { "id": "proceedings/icime/2018/7616/0", "title": "2018 International Joint Conference on Information, Media and Engineering (ICIME)", 
"__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/respect/2019/0821/0/08985961", "title": "Vlog Commentary YouTube Influencers as Effective Advisors in College and Career Readiness for Minorities in Computing: An Exploratory Study", "doi": null, "abstractUrl": "/proceedings-article/respect/2019/08985961/1hrJFuZ3CMg", "parentPublication": { "id": "proceedings/respect/2019/0821/0", "title": "2019 Research on Equity and Sustained Participation in Engineering, Computing, and Technology (RESPECT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icse-companion/2020/7122/0/712200a264", "title": "Factors influencing Software Engineering Career Choice of Andean Indigenous", "doi": null, "abstractUrl": "/proceedings-article/icse-companion/2020/712200a264/1pcSMp7InMk", "parentPublication": { "id": "proceedings/icse-companion/2020/7122/0", "title": "2020 IEEE/ACM 42nd International Conference on Software Engineering: Companion Proceedings (ICSE-Companion)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNz2TCuO", "title": "Virtual Reality Conference, IEEE", "acronym": "vr", "groupId": "1000791", "volume": "0", "displayVolume": "0", "year": "2012", "__typename": "ProceedingType" }, "article": { "id": "12OmNB9t6vQ", "doi": "10.1109/VR.2012.6180863", "title": "Keynote presentation: Taking the \"virtual\" out of virtual reality", "normalizedTitle": "Keynote presentation: Taking the \"virtual\" out of virtual reality", "abstract": "Today's graphics programs cannot only produce stunning photo-realistic images or convincingly real scene displays for interactive exploration, they can also produce physical output -- thanks to the emergence of several different layered manufacturing technologies. For many design activities creating tangible models through some rapid-prototyping prcess is a new and crucial feedback loop for debugging the functionality or customer-appeal of a new product. Dr. Séquin has two decades of experience with creating mathematical visualization models and designs ranging from university buildings to abstract geometrical sculptures. Turning these virtual creations into physical realities, however, raises a whole new set of issues that are often overlooked in the initial virtual design phase.", "abstracts": [ { "abstractType": "Regular", "content": "Today's graphics programs cannot only produce stunning photo-realistic images or convincingly real scene displays for interactive exploration, they can also produce physical output -- thanks to the emergence of several different layered manufacturing technologies. For many design activities creating tangible models through some rapid-prototyping prcess is a new and crucial feedback loop for debugging the functionality or customer-appeal of a new product. Dr. Séquin has two decades of experience with creating mathematical visualization models and designs ranging from university buildings to abstract geometrical sculptures. 
Turning these virtual creations into physical realities, however, raises a whole new set of issues that are often overlooked in the initial virtual design phase.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Today's graphics programs cannot only produce stunning photo-realistic images or convincingly real scene displays for interactive exploration, they can also produce physical output -- thanks to the emergence of several different layered manufacturing technologies. For many design activities creating tangible models through some rapid-prototyping prcess is a new and crucial feedback loop for debugging the functionality or customer-appeal of a new product. Dr. Séquin has two decades of experience with creating mathematical visualization models and designs ranging from university buildings to abstract geometrical sculptures. Turning these virtual creations into physical realities, however, raises a whole new set of issues that are often overlooked in the initial virtual design phase.", "fno": "06180863", "keywords": [], "authors": [ { "affiliation": "University of California, Berkeley", "fullName": "Carlo H. Sequin", "givenName": "Carlo H.", "surname": "Sequin", "__typename": "ArticleAuthorType" } ], "idPrefix": "vr", "isOpenAccess": true, "showRecommendedArticles": false, "showBuyMe": false, "hasPdf": true, "pubDate": "2012-03-01T00:00:00", "pubType": "proceedings", "pages": "xviii", "year": "2012", "issn": null, "isbn": "978-1-4673-1247-9", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "06180862", "articleId": "12OmNAsTgQi", "__typename": "AdjacentArticleType" }, "next": { "fno": "06180864", "articleId": "12OmNCf1DlJ", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [], "articleVideos": [] }
{ "proceeding": { "id": "12OmNA0dMMP", "title": "2011 IEEE International Conference on Microelectronic Systems Education (MSE)", "acronym": "mse", "groupId": "1000441", "volume": "0", "displayVolume": "0", "year": "2011", "__typename": "ProceedingType" }, "article": { "id": "12OmNBQkx3o", "doi": "10.1109/MSE.2011.5937072", "title": "Keynote Speaker", "normalizedTitle": "Keynote Speaker", "abstract": "Mobile devices such as laptops, netbooks, tablets, smart phones and game consoles have become our de facto interface to the vast amount of information delivery and processing capabilities of the cloud. The move to mobility has been enabled by the dual forces of ubiquitous wireless connectivity combined with the increasing energy efficiency offered by Moore's law. Yet, a major component of the mobile remains largely untapped: the capability to interact with the world immediately around us. A third layer of information acquisition and processing devices -- commonly called the sensory swarm -- is emerging, enabled by even more pervasive wireless networking and the introduction of novel ultra-low power technologies. This gives rise to the true emergence of concepts such as cyber-physical and bio-cyber systems, immersive computing, and augmented reality. The functionality of the swarm arises from connections of devices, leading to a convergence between Moore's and Metcalfe's laws, in which scaling refers not any longer to the number of transistors per chip, but rather to the number of interconnected devices. Enabling this fascinating paradigm -- which represents true wireless ubiquity -- still requires major breakthroughs on a number of fronts. Equally important is the question of how to educate and train students with the broad background necessary to cope with the complexity of these emerging systems. 
This presentation will present some of perspectives on how this may be accomplished.", "abstracts": [ { "abstractType": "Regular", "content": "Mobile devices such as laptops, netbooks, tablets, smart phones and game consoles have become our de facto interface to the vast amount of information delivery and processing capabilities of the cloud. The move to mobility has been enabled by the dual forces of ubiquitous wireless connectivity combined with the increasing energy efficiency offered by Moore's law. Yet, a major component of the mobile remains largely untapped: the capability to interact with the world immediately around us. A third layer of information acquisition and processing devices -- commonly called the sensory swarm -- is emerging, enabled by even more pervasive wireless networking and the introduction of novel ultra-low power technologies. This gives rise to the true emergence of concepts such as cyber-physical and bio-cyber systems, immersive computing, and augmented reality. The functionality of the swarm arises from connections of devices, leading to a convergence between Moore's and Metcalfe's laws, in which scaling refers not any longer to the number of transistors per chip, but rather to the number of interconnected devices. Enabling this fascinating paradigm -- which represents true wireless ubiquity -- still requires major breakthroughs on a number of fronts. Equally important is the question of how to educate and train students with the broad background necessary to cope with the complexity of these emerging systems. This presentation will present some of perspectives on how this may be accomplished.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Mobile devices such as laptops, netbooks, tablets, smart phones and game consoles have become our de facto interface to the vast amount of information delivery and processing capabilities of the cloud. 
The move to mobility has been enabled by the dual forces of ubiquitous wireless connectivity combined with the increasing energy efficiency offered by Moore's law. Yet, a major component of the mobile remains largely untapped: the capability to interact with the world immediately around us. A third layer of information acquisition and processing devices -- commonly called the sensory swarm -- is emerging, enabled by even more pervasive wireless networking and the introduction of novel ultra-low power technologies. This gives rise to the true emergence of concepts such as cyber-physical and bio-cyber systems, immersive computing, and augmented reality. The functionality of the swarm arises from connections of devices, leading to a convergence between Moore's and Metcalfe's laws, in which scaling refers not any longer to the number of transistors per chip, but rather to the number of interconnected devices. Enabling this fascinating paradigm -- which represents true wireless ubiquity -- still requires major breakthroughs on a number of fronts. Equally important is the question of how to educate and train students with the broad background necessary to cope with the complexity of these emerging systems. 
This presentation will present some of perspectives on how this may be accomplished.", "fno": "05937072", "keywords": [], "authors": [ { "affiliation": "University of California at Berkeley", "fullName": "Jan Rabaey", "givenName": "Jan", "surname": "Rabaey", "__typename": "ArticleAuthorType" } ], "idPrefix": "mse", "isOpenAccess": true, "showRecommendedArticles": false, "showBuyMe": false, "hasPdf": true, "pubDate": "2011-06-01T00:00:00", "pubType": "proceedings", "pages": "1-4", "year": "2011", "issn": null, "isbn": "978-1-4577-0548-9", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "05937071", "articleId": "12OmNs0TKHb", "__typename": "AdjacentArticleType" }, "next": { "fno": "05937073", "articleId": "12OmNC3FGnG", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [], "articleVideos": [] }
{ "proceeding": { "id": "12OmNAkWvHe", "title": "Proceedings Sixth IEEE International Symposium on High Assurance Systems Engineering. Special Topic: Impact of Networking", "acronym": "hase", "groupId": "1000319", "volume": "0", "displayVolume": "0", "year": "2001", "__typename": "ProceedingType" }, "article": { "id": "12OmNCd2rF1", "doi": "10.1109/HASE.2001.10003", "title": "Keynote Speaker", "normalizedTitle": "Keynote Speaker", "abstract": null, "abstracts": [ { "abstractType": "Regular", "content": "", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": null, "fno": "12750004", "keywords": [], "authors": [ { "affiliation": null, "fullName": "Arnold W. Pittler", "givenName": "Arnold W.", "surname": "Pittler", "__typename": "ArticleAuthorType" } ], "idPrefix": "hase", "isOpenAccess": true, "showRecommendedArticles": false, "showBuyMe": false, "hasPdf": true, "pubDate": "2001-10-01T00:00:00", "pubType": "proceedings", "pages": "4", "year": "2001", "issn": "1530-2059", "isbn": "0-7695-1275-5", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "12750003", "articleId": "12OmNxXUhO6", "__typename": "AdjacentArticleType" }, "next": { "fno": "12750005", "articleId": "12OmNARiM4C", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [], "articleVideos": [] }
{ "proceeding": { "id": "12OmNAQJzKb", "title": "2015 IEEE Pacific Visualization Symposium (PacificVis)", "acronym": "pacificvis", "groupId": "1001657", "volume": "0", "displayVolume": "0", "year": "2015", "__typename": "ProceedingType" }, "article": { "id": "12OmNvlPkAl", "doi": "10.1109/PACIFICVIS.2015.7156348", "title": "Keynote speaker", "normalizedTitle": "Keynote speaker", "abstract": "Provides an abstract of the keynote presentation and a brief professional biography of the presenter. The complete presentation was not made available for publication as part of the conference proceedings.", "abstracts": [ { "abstractType": "Regular", "content": "Provides an abstract of the keynote presentation and a brief professional biography of the presenter. The complete presentation was not made available for publication as part of the conference proceedings.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Provides an abstract of the keynote presentation and a brief professional biography of the presenter. 
The complete presentation was not made available for publication as part of the conference proceedings.", "fno": "07156348", "keywords": [], "authors": [ { "affiliation": "University of Chinese Academy of Sciences, China", "fullName": "Fei-Yue Wang", "givenName": null, "surname": "Fei-Yue Wang", "__typename": "ArticleAuthorType" } ], "idPrefix": "pacificvis", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2015-04-01T00:00:00", "pubType": "proceedings", "pages": "xiii-xiii", "year": "2015", "issn": null, "isbn": "978-1-4673-6879-7", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "07156347", "articleId": "12OmNAqkSF1", "__typename": "AdjacentArticleType" }, "next": { "fno": "07156349", "articleId": "12OmNznkJUv", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/dac/1982/020/0/01585468", "title": "Conference Keynote Speaker", "doi": null, "abstractUrl": "/proceedings-article/dac/1982/01585468/12OmNBA9oyO", "parentPublication": { "id": "proceedings/dac/1982/020/0", "title": "19th Design Automation Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/dac/1983/0026/0/01585612", "title": "Conference Keynote Speaker", "doi": null, "abstractUrl": "/proceedings-article/dac/1983/01585612/12OmNxUdv5H", "parentPublication": { "id": "proceedings/dac/1983/0026/0", "title": "Design Automation Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/dac/1984/0542/0/01585757", "title": "Conference Keynote Speaker", "doi": null, "abstractUrl": "/proceedings-article/dac/1984/01585757/12OmNzIl3C5", "parentPublication": { "id": "proceedings/dac/1984/0542/0", "title": "21st Design Automation Conference Proceedings", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "proceedings/icws/2006/2669/0/04031993", "title": "Keynote Speaker 1: Web Services at Amazon.com", "doi": null, "abstractUrl": "/proceedings-article/icws/2006/04031993/12OmNzdoMoU", "parentPublication": { "id": "proceedings/icws/2006/2669/0", "title": "2006 IEEE International Conference on Web Services (ICWS'06)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2022/9617/0/961700z025", "title": "IEEE VR 2022 Keynote Speaker: Aliesha Staples", "doi": null, "abstractUrl": "/proceedings-article/vr/2022/961700z025/1CJbTLn1Uuk", "parentPublication": { "id": "proceedings/vr/2022/9617/0", "title": "2022 IEEE on Conference Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/gi/2019/2268/0/226800z013", "title": "GI 2019 Invited Keynote Speaker", "doi": null, "abstractUrl": "/proceedings-article/gi/2019/226800z013/1DICzLycFjy", "parentPublication": { "id": "proceedings/gi/2019/2268/0", "title": "2019 IEEE/ACM International Workshop on Genetic Improvement (GI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ipdpsw/2019/3510/0/555500a070", "title": "RAW 2019 Keynote Speaker", "doi": null, "abstractUrl": "/proceedings-article/ipdpsw/2019/555500a070/1c2bBpRFZi8", "parentPublication": { "id": "proceedings/ipdpsw/2019/3510/0", "title": "2019 IEEE International Parallel and Distributed Processing Symposium Workshops (IPDPSW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ipdpsw/2019/3510/0/555500a876", "title": "PAISE 2019 Keynote Speaker", "doi": null, "abstractUrl": "/proceedings-article/ipdpsw/2019/555500a876/1c2bDp6I1IQ", "parentPublication": { "id": "proceedings/ipdpsw/2019/3510/0", "title": "2019 IEEE International Parallel and Distributed Processing Symposium Workshops (IPDPSW)", 
"__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ipdpsw/2019/3510/0/555500a165", "title": "HiCOMB 2019 Keynote Speaker", "doi": null, "abstractUrl": "/proceedings-article/ipdpsw/2019/555500a165/1c2bEYQGlEc", "parentPublication": { "id": "proceedings/ipdpsw/2019/3510/0", "title": "2019 IEEE International Parallel and Distributed Processing Symposium Workshops (IPDPSW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ipdpsw/2019/3510/0/555500a395", "title": "HPBDC 2019 Keynote Speaker", "doi": null, "abstractUrl": "/proceedings-article/ipdpsw/2019/555500a395/1c2bF2GycEg", "parentPublication": { "id": "proceedings/ipdpsw/2019/3510/0", "title": "2019 IEEE International Parallel and Distributed Processing Symposium Workshops (IPDPSW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNBOllgg", "title": "Testing: Academic and Industrial Conference Practice and Research Techniques - MUTATION", "acronym": "taicpart-mutation", "groupId": "1001353", "volume": "0", "displayVolume": "0", "year": "2007", "__typename": "ProceedingType" }, "article": { "id": "12OmNyOHFZr", "doi": "10.1109/TAIC.PART.2007.50", "title": "Keynote Speaker", "normalizedTitle": "Keynote Speaker", "abstract": null, "abstracts": [ { "abstractType": "Regular", "content": "", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": null, "fno": "2984xxii", "keywords": [], "authors": [], "idPrefix": "taicpart-mutation", "isOpenAccess": true, "showRecommendedArticles": false, "showBuyMe": false, "hasPdf": true, "pubDate": "2007-10-01T00:00:00", "pubType": "proceedings", "pages": "xxii", "year": "2007", "issn": null, "isbn": "0-7695-2984-4", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "2984xxi", "articleId": "12OmNwDACAw", "__typename": "AdjacentArticleType" }, "next": { "fno": "29840141", "articleId": "12OmNwvVrIa", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [], "articleVideos": [] }
{ "proceeding": { "id": "12OmNy2agR3", "title": "International Conference on Collaborative Computing: Networking, Applications and Worksharing", "acronym": "colcom", "groupId": "1001767", "volume": "0", "displayVolume": "0", "year": "2007", "__typename": "ProceedingType" }, "article": { "id": "12OmNzuZUqn", "doi": "10.1109/COLCOM.2007.4553881", "title": "Keynote speaker", "normalizedTitle": "Keynote speaker", "abstract": "Software to support collaboration is an extraordinary challenge to research, develop, and use because it is positioned between an immovable object and an irresistible force. The immovable object is human nature, which includes social behaviors that evolved over the millions of years our ancestors lived in groups. Advances in semi-conductor technology — the irresistible force — have followed an unparalleled exponential curve that makes forecasting difficult or impossible. I will describe several perspectives on this dilemma that have come to seem particularly useful or important to me in the two decades since I began working in this area as a system and application developer.", "abstracts": [ { "abstractType": "Regular", "content": "Software to support collaboration is an extraordinary challenge to research, develop, and use because it is positioned between an immovable object and an irresistible force. The immovable object is human nature, which includes social behaviors that evolved over the millions of years our ancestors lived in groups. Advances in semi-conductor technology — the irresistible force — have followed an unparalleled exponential curve that makes forecasting difficult or impossible. 
I will describe several perspectives on this dilemma that have come to seem particularly useful or important to me in the two decades since I began working in this area as a system and application developer.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Software to support collaboration is an extraordinary challenge to research, develop, and use because it is positioned between an immovable object and an irresistible force. The immovable object is human nature, which includes social behaviors that evolved over the millions of years our ancestors lived in groups. Advances in semi-conductor technology — the irresistible force — have followed an unparalleled exponential curve that makes forecasting difficult or impossible. I will describe several perspectives on this dilemma that have come to seem particularly useful or important to me in the two decades since I began working in this area as a system and application developer.", "fno": "04553881", "keywords": [], "authors": [ { "affiliation": "Microsoft Research Redmond, Washington, USA", "fullName": "Jonathan Grudin", "givenName": "Jonathan", "surname": "Grudin", "__typename": "ArticleAuthorType" } ], "idPrefix": "colcom", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2007-11-01T00:00:00", "pubType": "proceedings", "pages": "504", "year": "2007", "issn": null, "isbn": "978-1-4244-1318-8", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "04553880", "articleId": "12OmNAXxXgm", "__typename": "AdjacentArticleType" }, "next": { "fno": "04553882", "articleId": "12OmNASILUZ", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/colcom/2007/1318/0/04553882", "title": "Keynote speaker", "doi": null, "abstractUrl": "/proceedings-article/colcom/2007/04553882/12OmNASILUZ", "parentPublication": { "id": 
"proceedings/colcom/2007/1318/0", "title": "International Conference on Collaborative Computing: Networking, Applications and Worksharing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/pacificvis/2015/6879/0/07156347", "title": "Keynote speaker", "doi": null, "abstractUrl": "/proceedings-article/pacificvis/2015/07156347/12OmNAqkSF1", "parentPublication": { "id": "proceedings/pacificvis/2015/6879/0", "title": "2015 IEEE Pacific Visualization Symposium (PacificVis)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/mse/2011/0548/0/05937072", "title": "Keynote Speaker", "doi": null, "abstractUrl": "/proceedings-article/mse/2011/05937072/12OmNBQkx3o", "parentPublication": { "id": "proceedings/mse/2011/0548/0", "title": "2011 IEEE International Conference on Microelectronic Systems Education (MSE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/hase/2001/1275/0/12750004", "title": "Keynote Speaker", "doi": null, "abstractUrl": "/proceedings-article/hase/2001/12750004/12OmNCd2rF1", "parentPublication": { "id": "proceedings/hase/2001/1275/0", "title": "Proceedings Sixth IEEE International Symposium on High Assurance Systems Engineering. 
Special Topic: Impact of Networking", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/malware/2015/0317/0/07413676", "title": "Keynote Speaker", "doi": null, "abstractUrl": "/proceedings-article/malware/2015/07413676/12OmNCeK2j4", "parentPublication": { "id": "proceedings/malware/2015/0317/0", "title": "2015 10th International Conference on Malicious and Unwanted Software (MALWARE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/pacificvis/2015/6879/0/07156348", "title": "Keynote speaker", "doi": null, "abstractUrl": "/proceedings-article/pacificvis/2015/07156348/12OmNvlPkAl", "parentPublication": { "id": "proceedings/pacificvis/2015/6879/0", "title": "2015 IEEE Pacific Visualization Symposium (PacificVis)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/biomedvis/1995/7198/0/7198ix", "title": "Keynote Speaker", "doi": null, "abstractUrl": "/proceedings-article/biomedvis/1995/7198ix/12OmNx7XH81", "parentPublication": { "id": "proceedings/biomedvis/1995/7198/0", "title": "Biomedical Visualization", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/taicpart-mutation/2007/2984/0/2984xxii", "title": "Keynote Speaker", "doi": null, "abstractUrl": "/proceedings-article/taicpart-mutation/2007/2984xxii/12OmNyOHFZr", "parentPublication": { "id": "proceedings/taicpart-mutation/2007/2984/0", "title": "Testing: Academic and Industrial Conference Practice and Research Techniques - MUTATION", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ictai/2022/9744/0/974400z036", "title": "Keynote Speaker", "doi": null, "abstractUrl": "/proceedings-article/ictai/2022/974400z036/1MrFQvu4EFi", "parentPublication": { "id": "proceedings/ictai/2022/9744/0", "title": "2022 IEEE 34th International Conference on Tools with Artificial 
Intelligence (ICTAI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1MNgk3BHlS0", "title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)", "acronym": "vr", "groupId": "1000791", "volume": "0", "displayVolume": "0", "year": "2023", "__typename": "ProceedingType" }, "article": { "id": "1MNgtJP55y8", "doi": "10.1109/VR55154.2023.00011", "title": "Keynote Speaker: Digital Humans in Virtual Environment", "normalizedTitle": "Keynote Speaker: Digital Humans in Virtual Environment", "abstract": "In this talk, I will discuss recent technologies for creating digital humans to populate the virtual environment and their applications in video communication and collaboration. I will cover both photo-realistic humans (&#x201C;digital doubles&#x201D;) and 3D digital avatars and will share my thoughts about the coming content generation revolution brought by large GPT models (generative pretrained).", "abstracts": [ { "abstractType": "Regular", "content": "In this talk, I will discuss recent technologies for creating digital humans to populate the virtual environment and their applications in video communication and collaboration. I will cover both photo-realistic humans (&#x201C;digital doubles&#x201D;) and 3D digital avatars and will share my thoughts about the coming content generation revolution brought by large GPT models (generative pretrained).", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In this talk, I will discuss recent technologies for creating digital humans to populate the virtual environment and their applications in video communication and collaboration. 
I will cover both photo-realistic humans (“digital doubles”) and 3D digital avatars and will share my thoughts about the coming content generation revolution brought by large GPT models (generative pretrained).", "fno": "481500z025", "keywords": [], "authors": [ { "affiliation": null, "fullName": "Baining Guo", "givenName": "Baining", "surname": "Guo", "__typename": "ArticleAuthorType" } ], "idPrefix": "vr", "isOpenAccess": true, "showRecommendedArticles": true, "showBuyMe": false, "hasPdf": true, "pubDate": "2023-03-01T00:00:00", "pubType": "proceedings", "pages": "xv-xv", "year": "2023", "issn": null, "isbn": "979-8-3503-4815-6", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "481500z022", "articleId": "1MNgFjnBUZO", "__typename": "AdjacentArticleType" }, "next": { "fno": "481500z026", "articleId": "1MNgGmUozJe", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/pacificvis/2015/6879/0/07156347", "title": "Keynote speaker", "doi": null, "abstractUrl": "/proceedings-article/pacificvis/2015/07156347/12OmNAqkSF1", "parentPublication": { "id": "proceedings/pacificvis/2015/6879/0", "title": "2015 IEEE Pacific Visualization Symposium (PacificVis)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/malware/2015/0317/0/07413676", "title": "Keynote Speaker", "doi": null, "abstractUrl": "/proceedings-article/malware/2015/07413676/12OmNCeK2j4", "parentPublication": { "id": "proceedings/malware/2015/0317/0", "title": "2015 10th International Conference on Malicious and Unwanted Software (MALWARE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2004/2158/2/215820406", "title": "Tracking Multiple Humans in Crowded Environment", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2004/215820406/12OmNwe2IsI", 
"parentPublication": { "id": "proceedings/cvpr/2004/2158/2", "title": "Proceedings of the 2004 IEEE Computer Society Conference on Computer Vision and Pattern Recognition, 2004. CVPR 2004.", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/malware/2016/4542/0/07888720", "title": "Keynote speaker", "doi": null, "abstractUrl": "/proceedings-article/malware/2016/07888720/12OmNyXMQkZ", "parentPublication": { "id": "proceedings/malware/2016/4542/0", "title": "2016 11th International Conference on Malicious and Unwanted Software (MALWARE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2004/2158/2/01315192", "title": "Tracking multiple humans in crowded environment", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2004/01315192/12OmNzBOifB", "parentPublication": { "id": "proceedings/cvpr/2004/2158/2", "title": "Proceedings of the 2004 IEEE Computer Society Conference on Computer Vision and Pattern Recognition, 2004. 
CVPR 2004.", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/pacificvis/2016/1451/0/07465242", "title": "Keynote speaker: Visualization analysis and design", "doi": null, "abstractUrl": "/proceedings-article/pacificvis/2016/07465242/12OmNzkMlOj", "parentPublication": { "id": "proceedings/pacificvis/2016/1451/0", "title": "2016 IEEE Pacific Visualization Symposium (PacificVis)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/mse/2015/9913/0/07159999", "title": "Keynote speaker moving digital system design courses into the modern era, again", "doi": null, "abstractUrl": "/proceedings-article/mse/2015/07159999/12OmNzlD96i", "parentPublication": { "id": "proceedings/mse/2015/9913/0", "title": "2015 IEEE International Conference on Microelectronics Systems Education (MSE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2022/8402/0/840200a233", "title": "Exploring Empathy with Digital Humans", "doi": null, "abstractUrl": "/proceedings-article/vrw/2022/840200a233/1CJefJti16E", "parentPublication": { "id": "proceedings/vrw/2022/8402/0", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2020/04/09117208", "title": "Do Virtual Humans Dream of Digital Sheep?", "doi": null, "abstractUrl": "/magazine/cg/2020/04/09117208/1kGgp8IKFEs", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/percom-workshops/2021/0424/0/09431045", "title": "Keynote: RoboTrust: Trustworthy interaction between humans and anthropomorphic service robots", "doi": null, "abstractUrl": "/proceedings-article/percom-workshops/2021/09431045/1tROVnaOrxC", 
"parentPublication": { "id": "proceedings/percom-workshops/2021/0424/0", "title": "2021 IEEE International Conference on Pervasive Computing and Communications Workshops and other Affiliated Events (PerCom Workshops)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1MrFLC1Kdb2", "title": "2022 IEEE 34th International Conference on Tools with Artificial Intelligence (ICTAI)", "acronym": "ictai", "groupId": "10097829", "volume": "0", "displayVolume": "0", "year": "2022", "__typename": "ProceedingType" }, "article": { "id": "1MrFQvu4EFi", "doi": "10.1109/ICTAI56018.2022.00008", "title": "Keynote Speaker", "normalizedTitle": "Keynote Speaker", "abstract": null, "abstracts": [], "normalizedAbstract": null, "fno": "974400z036", "keywords": [], "authors": [], "idPrefix": "ictai", "isOpenAccess": true, "showRecommendedArticles": false, "showBuyMe": false, "hasPdf": true, "pubDate": "2022-10-01T00:00:00", "pubType": "proceedings", "pages": "xxxvi-xl", "year": "2022", "issn": null, "isbn": "979-8-3503-9744-4", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "974400z033", "articleId": "1MrFUyxZBBe", "__typename": "AdjacentArticleType" }, "next": { "fno": "974400a001", "articleId": "1MrG1xNgtws", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [], "articleVideos": [] }
{ "proceeding": { "id": "12OmNyNQSGO", "title": "2007 IEEE Conference on Computer Vision and Pattern Recognition", "acronym": "cvpr", "groupId": "1000147", "volume": "0", "displayVolume": "0", "year": "2007", "__typename": "ProceedingType" }, "article": { "id": "12OmNAXxWVj", "doi": "10.1109/CVPR.2007.382943", "title": "Capstone Talk", "normalizedTitle": "Capstone Talk", "abstract": null, "abstracts": [ { "abstractType": "Regular", "content": "", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": null, "fno": "04269968", "keywords": [], "authors": [ { "affiliation": "University of Kentucky", "fullName": "Christopher Jaynes", "givenName": "Christopher", "surname": "Jaynes", "__typename": "ArticleAuthorType" } ], "idPrefix": "cvpr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2007-06-01T00:00:00", "pubType": "proceedings", "pages": "12", "year": "2007", "issn": null, "isbn": "1-4244-1179-3", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "04269967", "articleId": "12OmNxveNLu", "__typename": "AdjacentArticleType" }, "next": { "fno": "04269970", "articleId": "12OmNvAiStK", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/vast/2015/9783/0/07347623", "title": "VIS capstone address", "doi": null, "abstractUrl": "/proceedings-article/vast/2015/07347623/12OmNBSSVi0", "parentPublication": { "id": "proceedings/vast/2015/9783/0", "title": "2015 IEEE Conference on Visual Analytics Science and Technology (VAST)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-infovis/1998/9093/0/9093xii", "title": "Capstone Address", "doi": null, "abstractUrl": "/proceedings-article/ieee-infovis/1998/9093xii/12OmNBhZ4rc", "parentPublication": { "id": "proceedings/ieee-infovis/1998/9093/0", "title": "Information 
Visualization, IEEE Symposium on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2007/1179/0/04269990", "title": "Capstone Talk: Ultra-resolution Display and the Next Revolution in Computing", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2007/04269990/12OmNvjQ93j", "parentPublication": { "id": "proceedings/cvpr/2007/1179/0", "title": "2007 IEEE Conference on Computer Vision and Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vast/2016/5661/0/07883505", "title": "VIS capstone address", "doi": null, "abstractUrl": "/proceedings-article/vast/2016/07883505/12OmNxFJXuy", "parentPublication": { "id": "proceedings/vast/2016/5661/0", "title": "2016 IEEE Conference on Visual Analytics Science and Technology (VAST)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2007/1179/0/04269967", "title": "Keynote Talk", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2007/04269967/12OmNxveNLu", "parentPublication": { "id": "proceedings/cvpr/2007/1179/0", "title": "2007 IEEE Conference on Computer Vision and Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iswc/2008/2637/0/04911601", "title": "Walk the Walk, Talk the Talk", "doi": null, "abstractUrl": "/proceedings-article/iswc/2008/04911601/12OmNyY4riw", "parentPublication": { "id": "proceedings/iswc/2008/2637/0", "title": "2008 12th IEEE International Symposium on Wearable Computers", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2012/04/ttg2012040xii", "title": "Capstone Speaker", "doi": null, "abstractUrl": "/journal/tg/2012/04/ttg2012040xii/13rRUIM2VBE", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, 
"__typename": "RecommendedArticleType" }, { "id": "trans/tg/2010/06/ttg20100600xxv", "title": "VisWeek Capstone Address", "doi": null, "abstractUrl": "/journal/tg/2010/06/ttg20100600xxv/13rRUx0gefi", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2011/12/ttg201112xxiv", "title": "VisWeek Capstone Address", "doi": null, "abstractUrl": "/journal/tg/2011/12/ttg201112xxiv/13rRUynHuj6", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNvpNIpw", "title": "2015 IEEE Conference on Visual Analytics Science and Technology (VAST)", "acronym": "vast", "groupId": "1001630", "volume": "0", "displayVolume": "0", "year": "2015", "__typename": "ProceedingType" }, "article": { "id": "12OmNBSSVi0", "doi": "10.1109/VAST.2015.7347623", "title": "VIS capstone address", "normalizedTitle": "VIS capstone address", "abstract": "How do computer architectures and physical architectures inform each other? This talk will explore the interconnection of data and visualization through an architectural and computational lens over the last 50 years, including the work of Steven Coons, Christopher Alexander, Richard Saul Wurman and others.", "abstracts": [ { "abstractType": "Regular", "content": "How do computer architectures and physical architectures inform each other? This talk will explore the interconnection of data and visualization through an architectural and computational lens over the last 50 years, including the work of Steven Coons, Christopher Alexander, Richard Saul Wurman and others.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "How do computer architectures and physical architectures inform each other? 
This talk will explore the interconnection of data and visualization through an architectural and computational lens over the last 50 years, including the work of Steven Coons, Christopher Alexander, Richard Saul Wurman and others.", "fno": "07347623", "keywords": [], "authors": [ { "affiliation": "Carnegie Mellon, USA", "fullName": "Molly Wright Steenson", "givenName": "Molly Wright", "surname": "Steenson", "__typename": "ArticleAuthorType" } ], "idPrefix": "vast", "isOpenAccess": true, "showRecommendedArticles": true, "showBuyMe": false, "hasPdf": true, "pubDate": "2015-10-01T00:00:00", "pubType": "proceedings", "pages": "1-1", "year": "2015", "issn": null, "isbn": "978-1-4673-9783-4", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "07347622", "articleId": "12OmNzaQofT", "__typename": "AdjacentArticleType" }, "next": { "fno": "07347624", "articleId": "12OmNyen1xA", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/ieee-infovis/1998/9093/0/9093xii", "title": "Capstone Address", "doi": null, "abstractUrl": "/proceedings-article/ieee-infovis/1998/9093xii/12OmNBhZ4rc", "parentPublication": { "id": "proceedings/ieee-infovis/1998/9093/0", "title": "Information Visualization, IEEE Symposium on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/scivis/2015/9785/0/07429483", "title": "VIS capstone address: Architectures physical and digital", "doi": null, "abstractUrl": "/proceedings-article/scivis/2015/07429483/12OmNx6Piuq", "parentPublication": { "id": "proceedings/scivis/2015/9785/0", "title": "2015 IEEE Scientific Visualization Conference (SciVis)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vast/2016/5661/0/07883505", "title": "VIS capstone address", "doi": null, "abstractUrl": 
"/proceedings-article/vast/2016/07883505/12OmNxFJXuy", "parentPublication": { "id": "proceedings/vast/2016/5661/0", "title": "2016 IEEE Conference on Visual Analytics Science and Technology (VAST)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/hicss/2013/4892/0/4892d757", "title": "Thriving Systems Theory: An Emergent Information Systems Design Theory", "doi": null, "abstractUrl": "/proceedings-article/hicss/2013/4892d757/12OmNym2bU5", "parentPublication": { "id": "proceedings/hicss/2013/4892/0", "title": "2013 46th Hawaii International Conference on System Sciences", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vast/2015/9783/0/07347622", "title": "VIS keynote address", "doi": null, "abstractUrl": "/proceedings-article/vast/2015/07347622/12OmNzaQofT", "parentPublication": { "id": "proceedings/vast/2015/9783/0", "title": "2015 IEEE Conference on Visual Analytics Science and Technology (VAST)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vast/2016/5661/0/07883504", "title": "VIS keynote address", "doi": null, "abstractUrl": "/proceedings-article/vast/2016/07883504/12OmNzlUKKq", "parentPublication": { "id": "proceedings/vast/2016/5661/0", "title": "2016 IEEE Conference on Visual Analytics Science and Technology (VAST)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/so/1997/01/s1053", "title": "Using Patterns to Improve Our Architectural Vision", "doi": null, "abstractUrl": "/magazine/so/1997/01/s1053/13rRUILtJjA", "parentPublication": { "id": "mags/so", "title": "IEEE Software", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/sp/2011/01/msp2011010091", "title": "Directions in Incident Detection and Response", "doi": null, "abstractUrl": "/magazine/sp/2011/01/msp2011010091/13rRUxBa5vt", "parentPublication": { "id": 
"mags/sp", "title": "IEEE Security & Privacy", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vast/2017/3163/0/08585625", "title": "VIS Capstone Address Data Humanism: The Revolution will be Visualized", "doi": null, "abstractUrl": "/proceedings-article/vast/2017/08585625/17D45WXIkHy", "parentPublication": { "id": "proceedings/vast/2017/3163/0", "title": "2017 IEEE Conference on Visual Analytics Science and Technology (VAST)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vast/2019/2284/0/08986953", "title": "VIS Capstone Address: Visualizing Temporality and Chronologies for the Humanities", "doi": null, "abstractUrl": "/proceedings-article/vast/2019/08986953/1ifhkTytY9a", "parentPublication": { "id": "proceedings/vast/2019/2284/0", "title": "2019 IEEE Conference on Visual Analytics Science and Technology (VAST)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNBEGYG2", "title": "Information Visualization, IEEE Symposium on", "acronym": "ieee-infovis", "groupId": "1000371", "volume": "0", "displayVolume": "0", "year": "1998", "__typename": "ProceedingType" }, "article": { "id": "12OmNBhZ4rc", "doi": "10.1109/INFOVIS.1998.10000", "title": "Capstone Address", "normalizedTitle": "Capstone Address", "abstract": null, "abstracts": [ { "abstractType": "Regular", "content": "", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": null, "fno": "9093xii", "keywords": [], "authors": [], "idPrefix": "ieee-infovis", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "1998-10-01T00:00:00", "pubType": "proceedings", "pages": "xii", "year": "1998", "issn": "1522-404X", "isbn": "0-8186-9093-3", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "9093xi", "articleId": "12OmNzcxZ26", "__typename": "AdjacentArticleType" }, "next": { "fno": "9093xiii", "articleId": "12OmNAXxX9S", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/vast/2015/9783/0/07347623", "title": "VIS capstone address", "doi": null, "abstractUrl": "/proceedings-article/vast/2015/07347623/12OmNBSSVi0", "parentPublication": { "id": "proceedings/vast/2015/9783/0", "title": "2015 IEEE Conference on Visual Analytics Science and Technology (VAST)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/scivis/2015/9785/0/07429483", "title": "VIS capstone address: Architectures physical and digital", "doi": null, "abstractUrl": "/proceedings-article/scivis/2015/07429483/12OmNx6Piuq", "parentPublication": { "id": "proceedings/scivis/2015/9785/0", "title": "2015 IEEE Scientific Visualization Conference (SciVis)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/vast/2016/5661/0/07883505", "title": "VIS capstone address", "doi": null, "abstractUrl": "/proceedings-article/vast/2016/07883505/12OmNxFJXuy", "parentPublication": { "id": "proceedings/vast/2016/5661/0", "title": "2016 IEEE Conference on Visual Analytics Science and Technology (VAST)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/1997/8262/0/82620016", "title": "Capstone Address: Dissolving Descartes: Perception and the Construction of Reality", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/1997/82620016/12OmNzCWFZW", "parentPublication": { "id": "proceedings/ieee-vis/1997/8262/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2010/06/ttg20100600xxv", "title": "VisWeek Capstone Address", "doi": null, "abstractUrl": "/journal/tg/2010/06/ttg20100600xxv/13rRUx0gefi", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2011/12/ttg201112xxiv", "title": "VisWeek Capstone Address", "doi": null, "abstractUrl": "/journal/tg/2011/12/ttg201112xxiv/13rRUynHuj6", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vast/2017/3163/0/08585625", "title": "VIS Capstone Address Data Humanism: The Revolution will be Visualized", "doi": null, "abstractUrl": "/proceedings-article/vast/2017/08585625/17D45WXIkHy", "parentPublication": { "id": "proceedings/vast/2017/3163/0", "title": "2017 IEEE Conference on Visual Analytics Science and Technology (VAST)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/scivis/2018/6882/0/08823613", "title": "SciVis 2018 
Capstone Address", "doi": null, "abstractUrl": "/proceedings-article/scivis/2018/08823613/1d5kwNLsKhW", "parentPublication": { "id": "proceedings/scivis/2018/6882/0", "title": "2018 IEEE Scientific Visualization Conference (SciVis)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vast/2019/2284/0/08986953", "title": "VIS Capstone Address: Visualizing Temporality and Chronologies for the Humanities", "doi": null, "abstractUrl": "/proceedings-article/vast/2019/08986953/1ifhkTytY9a", "parentPublication": { "id": "proceedings/vast/2019/2284/0", "title": "2019 IEEE Conference on Visual Analytics Science and Technology (VAST)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNrMHOdd", "title": "2016 IEEE Conference on Visual Analytics Science and Technology (VAST)", "acronym": "vast", "groupId": "1001630", "volume": "0", "displayVolume": "0", "year": "2016", "__typename": "ProceedingType" }, "article": { "id": "12OmNxFJXuy", "doi": "10.1109/VAST.2016.7883505", "title": "VIS capstone address", "normalizedTitle": "VIS capstone address", "abstract": "Useful as each of them can be, a large body of tips and tricks is impossible to remember, at least in a practical, usable way, unless it is structured into a balanced, meaningful hierarchy. This talk proposes and illustrates three simple yet solid ideas that lead to more effective communication and that underpin every other guideline: easy to remember, readily applicable, and always relevant—in short, valuable for the rest of your life.", "abstracts": [ { "abstractType": "Regular", "content": "Useful as each of them can be, a large body of tips and tricks is impossible to remember, at least in a practical, usable way, unless it is structured into a balanced, meaningful hierarchy. This talk proposes and illustrates three simple yet solid ideas that lead to more effective communication and that underpin every other guideline: easy to remember, readily applicable, and always relevant—in short, valuable for the rest of your life.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Useful as each of them can be, a large body of tips and tricks is impossible to remember, at least in a practical, usable way, unless it is structured into a balanced, meaningful hierarchy. 
This talk proposes and illustrates three simple yet solid ideas that lead to more effective communication and that underpin every other guideline: easy to remember, readily applicable, and always relevant—in short, valuable for the rest of your life.", "fno": "07883505", "keywords": [], "authors": [ { "affiliation": "Louvain, Belgium", "fullName": "Jean-luc Doumont", "givenName": "Jean-luc", "surname": "Doumont", "__typename": "ArticleAuthorType" } ], "idPrefix": "vast", "isOpenAccess": true, "showRecommendedArticles": false, "showBuyMe": false, "hasPdf": true, "pubDate": "2016-10-01T00:00:00", "pubType": "proceedings", "pages": "xiii-xiii", "year": "2016", "issn": null, "isbn": "978-1-5090-5661-3", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "07883504", "articleId": "12OmNzlUKKq", "__typename": "AdjacentArticleType" }, "next": { "fno": "07883506", "articleId": "12OmNzJbQTg", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [], "articleVideos": [] }
{ "proceeding": { "id": "12OmNAYXWAF", "title": "2016 IEEE Virtual Reality (VR)", "acronym": "vr", "groupId": "1000791", "volume": "0", "displayVolume": "0", "year": "2016", "__typename": "ProceedingType" }, "article": { "id": "12OmNzV70mm", "doi": "10.1109/VR.2016.7504680", "title": "Capstone speaker: Agents? Seriously", "normalizedTitle": "Capstone speaker: Agents? Seriously", "abstract": "Virtual agents, aka embodied conversational agents or virtual humans, are anthropomorphic animated characters that engage people in simulated face-to-face conversation. Applications of virtual agents have moved beyond entertainment and demos to real-world, serious interventions in health, training, education, and other domains. In this talk, I will focus on several conversational agents that have been developed as virtual health counselors and evaluated in large-scale randomized clinical trials. These interventions span cancer self-care, antipsychotic medication adherence, breastfeeding promotion, depression counseling, meditation and yoga guidance for treating chronic pain, preconception care promotion, exercise promotion for geriatrics patients, and more. I will discuss the technical challenges in building conversational agents for the real world, capable of interacting with thousands of patients for months or years, and the evidence base of their efficacy. I will also present a vision for how such agents can be effectively deployed in VR, and a range of promising serious applications that could be deployed using this technology.", "abstracts": [ { "abstractType": "Regular", "content": "Virtual agents, aka embodied conversational agents or virtual humans, are anthropomorphic animated characters that engage people in simulated face-to-face conversation. Applications of virtual agents have moved beyond entertainment and demos to real-world, serious interventions in health, training, education, and other domains. 
In this talk, I will focus on several conversational agents that have been developed as virtual health counselors and evaluated in large-scale randomized clinical trials. These interventions span cancer self-care, antipsychotic medication adherence, breastfeeding promotion, depression counseling, meditation and yoga guidance for treating chronic pain, preconception care promotion, exercise promotion for geriatrics patients, and more. I will discuss the technical challenges in building conversational agents for the real world, capable of interacting with thousands of patients for months or years, and the evidence base of their efficacy. I will also present a vision for how such agents can be effectively deployed in VR, and a range of promising serious applications that could be deployed using this technology.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Virtual agents, aka embodied conversational agents or virtual humans, are anthropomorphic animated characters that engage people in simulated face-to-face conversation. Applications of virtual agents have moved beyond entertainment and demos to real-world, serious interventions in health, training, education, and other domains. In this talk, I will focus on several conversational agents that have been developed as virtual health counselors and evaluated in large-scale randomized clinical trials. These interventions span cancer self-care, antipsychotic medication adherence, breastfeeding promotion, depression counseling, meditation and yoga guidance for treating chronic pain, preconception care promotion, exercise promotion for geriatrics patients, and more. I will discuss the technical challenges in building conversational agents for the real world, capable of interacting with thousands of patients for months or years, and the evidence base of their efficacy. 
I will also present a vision for how such agents can be effectively deployed in VR, and a range of promising serious applications that could be deployed using this technology.", "fno": "07504680", "keywords": [], "authors": [ { "affiliation": "College Of Computer And Information Science, Northeastern University", "fullName": "Timothy Bickmore", "givenName": "Timothy", "surname": "Bickmore", "__typename": "ArticleAuthorType" } ], "idPrefix": "vr", "isOpenAccess": true, "showRecommendedArticles": false, "showBuyMe": false, "hasPdf": true, "pubDate": "2016-03-01T00:00:00", "pubType": "proceedings", "pages": "xviii-xviii", "year": "2016", "issn": "2375-5334", "isbn": "978-1-5090-0836-0", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "07504679", "articleId": "12OmNC1GugS", "__typename": "AdjacentArticleType" }, "next": { "fno": "07504681", "articleId": "12OmNBkP3CY", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [], "articleVideos": [] }
{ "proceeding": { "id": "1d5kwA5AULe", "title": "2018 IEEE Scientific Visualization Conference (SciVis)", "acronym": "scivis", "groupId": "1811924", "volume": "0", "displayVolume": "0", "year": "2018", "__typename": "ProceedingType" }, "article": { "id": "1d5kwNLsKhW", "doi": "10.1109/SciVis.2018.8823613", "title": "SciVis 2018 Capstone Address", "normalizedTitle": "SciVis 2018 Capstone Address", "abstract": null, "abstracts": [], "normalizedAbstract": null, "fno": "08823613", "keywords": [], "authors": [], "idPrefix": "scivis", "isOpenAccess": true, "showRecommendedArticles": true, "showBuyMe": false, "hasPdf": true, "pubDate": "2018-10-01T00:00:00", "pubType": "proceedings", "pages": "i-i", "year": "2018", "issn": null, "isbn": "978-1-5386-6882-5", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "08823806", "articleId": "1d5ky31c78A", "__typename": "AdjacentArticleType" }, "next": { "fno": "08823624", "articleId": "1d5kypMe6Ji", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/vast/2015/9783/0/07347623", "title": "VIS capstone address", "doi": null, "abstractUrl": "/proceedings-article/vast/2015/07347623/12OmNBSSVi0", "parentPublication": { "id": "proceedings/vast/2015/9783/0", "title": "2015 IEEE Conference on Visual Analytics Science and Technology (VAST)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-infovis/1998/9093/0/9093xii", "title": "Capstone Address", "doi": null, "abstractUrl": "/proceedings-article/ieee-infovis/1998/9093xii/12OmNBhZ4rc", "parentPublication": { "id": "proceedings/ieee-infovis/1998/9093/0", "title": "Information Visualization, IEEE Symposium on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vast/2016/5661/0/07883505", "title": "VIS capstone address", "doi": null, 
"abstractUrl": "/proceedings-article/vast/2016/07883505/12OmNxFJXuy", "parentPublication": { "id": "proceedings/vast/2016/5661/0", "title": "2016 IEEE Conference on Visual Analytics Science and Technology (VAST)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2010/06/ttg20100600xxv", "title": "VisWeek Capstone Address", "doi": null, "abstractUrl": "/journal/tg/2010/06/ttg20100600xxv/13rRUx0gefi", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2011/12/ttg201112xxiv", "title": "VisWeek Capstone Address", "doi": null, "abstractUrl": "/journal/tg/2011/12/ttg201112xxiv/13rRUynHuj6", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/scivis/2018/6882/0/08823810", "title": "SciVis 2018 Preface", "doi": null, "abstractUrl": "/proceedings-article/scivis/2018/08823810/1d5kxpd5V9C", "parentPublication": { "id": "proceedings/scivis/2018/6882/0", "title": "2018 IEEE Scientific Visualization Conference (SciVis)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/scivis/2018/6882/0/08823797", "title": "SciVis 2018 Short Paper Reviewers", "doi": null, "abstractUrl": "/proceedings-article/scivis/2018/08823797/1d5kyLKIH4Y", "parentPublication": { "id": "proceedings/scivis/2018/6882/0", "title": "2018 IEEE Scientific Visualization Conference (SciVis)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/scivis/2018/6882/0/08823593", "title": "SciVis 2018 Committee", "doi": null, "abstractUrl": "/proceedings-article/scivis/2018/08823593/1d5kytrANUY", "parentPublication": { "id": "proceedings/scivis/2018/6882/0", "title": "2018 
IEEE Scientific Visualization Conference (SciVis)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/scivis/2018/6882/0/08823787", "title": "SciVis 2018 Committees", "doi": null, "abstractUrl": "/proceedings-article/scivis/2018/08823787/1d5kz31HeQo", "parentPublication": { "id": "proceedings/scivis/2018/6882/0", "title": "2018 IEEE Scientific Visualization Conference (SciVis)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNz2TCuO", "title": "Virtual Reality Conference, IEEE", "acronym": "vr", "groupId": "1000791", "volume": "0", "displayVolume": "0", "year": "2012", "__typename": "ProceedingType" }, "article": { "id": "12OmNCf1DlJ", "doi": "10.1109/VR.2012.6180864", "title": "Banquet presentation: What's next?: The third wave in computer graphics and interactive techniques", "normalizedTitle": "Banquet presentation: What's next?: The third wave in computer graphics and interactive techniques", "abstract": "As a person involved in computer graphics since 1969, I have participated in the evolution of the field in a scientific laboratory, an automotive company, and an aerospace company. This set of experiences has provided me a perspective quite different from that of the arts and entertainment industries that drive the majority of current graphics advances. I've been lucky to have had the opportunity to work at all levels of graphics and user interface development, from computer animation to bit-level device drivers to user interface management systems to developing an integrated, full-scale CAD/CAM system. In addition, I've worked across all aspects of product development in aerospace and automotive. This has given me broad insight into product development-based interactive graphics. While my experience is predominantly in product development and manufacturing, I see clear parallels between the value of graphics in aerospace and automotive and value in other industries (e.g., animation, art, games, medicine, oil and gas, mapping). To develop the concept of the third wave, I'll examine some of the significant advances industry has made to computer graphics and directions I see needed because of issues with the current state-of-the-art. 
I believe that computer graphics, including virtual reality, is at an innovation plateau and ready for the next wave of innovation.", "abstracts": [ { "abstractType": "Regular", "content": "As a person involved in computer graphics since 1969, I have participated in the evolution of the field in a scientific laboratory, an automotive company, and an aerospace company. This set of experiences has provided me a perspective quite different from that of the arts and entertainment industries that drive the majority of current graphics advances. I've been lucky to have had the opportunity to work at all levels of graphics and user interface development, from computer animation to bit-level device drivers to user interface management systems to developing an integrated, full-scale CAD/CAM system. In addition, I've worked across all aspects of product development in aerospace and automotive. This has given me broad insight into product development-based interactive graphics. While my experience is predominantly in product development and manufacturing, I see clear parallels between the value of graphics in aerospace and automotive and value in other industries (e.g., animation, art, games, medicine, oil and gas, mapping). To develop the concept of the third wave, I'll examine some of the significant advances industry has made to computer graphics and directions I see needed because of issues with the current state-of-the-art. I believe that computer graphics, including virtual reality, is at an innovation plateau and ready for the next wave of innovation.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "As a person involved in computer graphics since 1969, I have participated in the evolution of the field in a scientific laboratory, an automotive company, and an aerospace company. This set of experiences has provided me a perspective quite different from that of the arts and entertainment industries that drive the majority of current graphics advances. 
I've been lucky to have had the opportunity to work at all levels of graphics and user interface development, from computer animation to bit-level device drivers to user interface management systems to developing an integrated, full-scale CAD/CAM system. In addition, I've worked across all aspects of product development in aerospace and automotive. This has given me broad insight into product development-based interactive graphics. While my experience is predominantly in product development and manufacturing, I see clear parallels between the value of graphics in aerospace and automotive and value in other industries (e.g., animation, art, games, medicine, oil and gas, mapping). To develop the concept of the third wave, I'll examine some of the significant advances industry has made to computer graphics and directions I see needed because of issues with the current state-of-the-art. I believe that computer graphics, including virtual reality, is at an innovation plateau and ready for the next wave of innovation.", "fno": "06180864", "keywords": [], "authors": [ { "affiliation": "Boeing", "fullName": "David J. Kasik", "givenName": "David J.", "surname": "Kasik", "__typename": "ArticleAuthorType" } ], "idPrefix": "vr", "isOpenAccess": true, "showRecommendedArticles": false, "showBuyMe": false, "hasPdf": true, "pubDate": "2012-03-01T00:00:00", "pubType": "proceedings", "pages": "xix", "year": "2012", "issn": null, "isbn": "978-1-4673-1247-9", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "06180863", "articleId": "12OmNB9t6vQ", "__typename": "AdjacentArticleType" }, "next": { "fno": "06180865", "articleId": "12OmNyY4rxe", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [], "articleVideos": [] }
{ "proceeding": { "id": "12OmNBTawvA", "title": "2016 UKSim-AMSS 18th International Conference on Computer Modelling and Simulation (UKSim)", "acronym": "uksim", "groupId": "1001885", "volume": "0", "displayVolume": "0", "year": "2016", "__typename": "ProceedingType" }, "article": { "id": "12OmNyUWR0x", "doi": "10.1109/UKSim.2016.59", "title": "Keynote Speaker I: Computing for Big Science: Gravitational Wave Detection", "normalizedTitle": "Keynote Speaker I: Computing for Big Science: Gravitational Wave Detection", "abstract": "Summary form only given. Computing has been playing an important role in the recent LIGO Gravitational Waves (GWs) detection. A graphics processing unit (GPU)-accelerated algorithm developed by Tsinghua University to search for GWs will be introduced. The aim is to facilitate fast detection of GWs with a minimum delay to allow prompt electromagnetic follow-up observations. To maximize the GPU acceleration, an efficient batched parallel computing model significantly reduces the number of synchronizations and optimizes the usage of the memory and hardware resource. The code is tested on the CUDA `Fermi' architecture in a GTX 480 graphics card and its performance is compared with a single core of Intel Core i7 920 (2.67 GHz). A 58-fold speedup is achieved while giving results in close agreement with the CPU implementation. This result indicates that it is possible to conduct a full search for GWs from compact binary coalescence in real time with only one desktop computer equipped with a Fermi GPU card for the initial LIGO detectors which in the past required more than 100 CPUs.", "abstracts": [ { "abstractType": "Regular", "content": "Summary form only given. Computing has been playing an important role in the recent LIGO Gravitational Waves (GWs) detection. A graphics processing unit (GPU)-accelerated algorithm developed by Tsinghua University to search for GWs will be introduced. 
The aim is to facilitate fast detection of GWs with a minimum delay to allow prompt electromagnetic follow-up observations. To maximize the GPU acceleration, an efficient batched parallel computing model significantly reduces the number of synchronizations and optimizes the usage of the memory and hardware resource. The code is tested on the CUDA `Fermi' architecture in a GTX 480 graphics card and its performance is compared with a single core of Intel Core i7 920 (2.67 GHz). A 58-fold speedup is achieved while giving results in close agreement with the CPU implementation. This result indicates that it is possible to conduct a full search for GWs from compact binary coalescence in real time with only one desktop computer equipped with a Fermi GPU card for the initial LIGO detectors which in the past required more than 100 CPUs.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Summary form only given. Computing has been playing an important role in the recent LIGO Gravitational Waves (GWs) detection. A graphics processing unit (GPU)-accelerated algorithm developed by Tsinghua University to search for GWs will be introduced. The aim is to facilitate fast detection of GWs with a minimum delay to allow prompt electromagnetic follow-up observations. To maximize the GPU acceleration, an efficient batched parallel computing model significantly reduces the number of synchronizations and optimizes the usage of the memory and hardware resource. The code is tested on the CUDA `Fermi' architecture in a GTX 480 graphics card and its performance is compared with a single core of Intel Core i7 920 (2.67 GHz). A 58-fold speedup is achieved while giving results in close agreement with the CPU implementation. 
This result indicates that it is possible to conduct a full search for GWs from compact binary coalescence in real time with only one desktop computer equipped with a Fermi GPU card for the initial LIGO detectors which in the past required more than 100 CPUs.", "fno": "07796676", "keywords": [ "Astronomy Computing", "Batch Processing Computers", "Graphics Processing Units", "Gravitational Wave Detectors", "Parallel Algorithms", "Parallel Architectures", "Performance Evaluation", "Resource Allocation", "Storage Management", "Big Science", "LIGO Gravitational Wave Detection", "LIGO GW Detection", "Graphics Processing Unit", "GPU Accelerated Algorithm", "Tsinghua University", "Electromagnetic Follow Up Observations", "Batched Parallel Computing Model", "Synchronizations", "Memory Usage Optimization", "Hardware Resource Optimization", "CUDA Fermi Architecture", "GTX 480 Graphics Card", "CPU Implementation", "Binary Coalescence", "Desktop Computer", "Fermi GPU Card", "Graphics Processing Units", "Computers", "Government", "Electronic Mail", "Algorithm Design And Analysis", "Delays", "Electromagnetics" ], "authors": [ { "affiliation": null, "fullName": "Frank Wang", "givenName": "Frank", "surname": "Wang", "__typename": "ArticleAuthorType" } ], "idPrefix": "uksim", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2016-04-01T00:00:00", "pubType": "proceedings", "pages": "3-3", "year": "2016", "issn": null, "isbn": "978-1-5090-0888-9", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "07796675", "articleId": "12OmNxFsmAy", "__typename": "AdjacentArticleType" }, "next": { "fno": "07796677", "articleId": "12OmNBzAckb", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icvrv/2012/4836/0/4836a050", "title": "GPU Based Compression and Rendering of Massive Aircraft CAD Models", 
"doi": null, "abstractUrl": "/proceedings-article/icvrv/2012/4836a050/12OmNBaBuS8", "parentPublication": { "id": "proceedings/icvrv/2012/4836/0", "title": "2012 International Conference on Virtual Reality and Visualization", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/dsn-w/2015/8044/0/8044a141", "title": "The Use of a Graphic Processing Unit (GPU) in a Real Time Visual Odometry Application", "doi": null, "abstractUrl": "/proceedings-article/dsn-w/2015/8044a141/12OmNy5R3EC", "parentPublication": { "id": "proceedings/dsn-w/2015/8044/0", "title": "2015 IEEE International Conference on Dependable Systems and Networks Workshops (DSN-W)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bibm/2016/1611/0/07822647", "title": "Constructing a GPU cluster platform based on multiple NVIDIA Jetson TK1", "doi": null, "abstractUrl": "/proceedings-article/bibm/2016/07822647/12OmNyRPgqx", "parentPublication": { "id": "proceedings/bibm/2016/1611/0", "title": "2016 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vts/2018/3774/0/08368665", "title": "Multi-faceted microarchitecture level reliability characterization for NVIDIA and AMD GPUs", "doi": null, "abstractUrl": "/proceedings-article/vts/2018/08368665/12OmNz5JC7L", "parentPublication": { "id": "proceedings/vts/2018/3774/0", "title": "2018 IEEE 36th VLSI Test Symposium (VTS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpads/2012/4903/0/4903a896", "title": "Accelerating Volkov's Hybrid Implementation of Cholesky Factorization on a Fermi GPU", "doi": null, "abstractUrl": "/proceedings-article/icpads/2012/4903a896/12OmNzkuKKY", "parentPublication": { "id": "proceedings/icpads/2012/4903/0", "title": "Parallel and Distributed Systems, International 
Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/td/2017/01/07445236", "title": "Dissecting GPU Memory Hierarchy Through Microbenchmarking", "doi": null, "abstractUrl": "/journal/td/2017/01/07445236/13rRUytWF95", "parentPublication": { "id": "trans/td", "title": "IEEE Transactions on Parallel & Distributed Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sc/2022/5444/0/544400b078", "title": "A GPU-Accelerated AMR Solver for Gravitational Wave Propagation", "doi": null, "abstractUrl": "/proceedings-article/sc/2022/544400b078/1I0bTbwZ73O", "parentPublication": { "id": "proceedings/sc/2022/5444/0/", "title": "SC22: International Conference for High Performance Computing, Networking, Storage and Analysis", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2022/9062/0/09956104", "title": "Convolutional Transformer for Fast and Accurate Gravitational Wave Detection", "doi": null, "abstractUrl": "/proceedings-article/icpr/2022/09956104/1IHpxo2DR28", "parentPublication": { "id": "proceedings/icpr/2022/9062/0", "title": "2022 26th International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sc/2022/5444/0/544400b078", "title": "A GPU-Accelerated AMR Solver for Gravitational Wave Propagation", "doi": null, "abstractUrl": "/proceedings-article/sc/2022/544400b078/1L07oR45AXK", "parentPublication": { "id": "proceedings/sc/2022/5444/0/", "title": "SC22: International Conference for High Performance Computing, Networking, Storage and Analysis", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ipdpsw/2020/7445/0/09150383", "title": "AsHES 2020 Keynote Speaker (5:30 pm CDT)", "doi": null, "abstractUrl": "/proceedings-article/ipdpsw/2020/09150383/1lPGFOQuy4M", 
"parentPublication": { "id": "proceedings/ipdpsw/2020/7445/0", "title": "2020 IEEE International Parallel and Distributed Processing Symposium Workshops (IPDPSW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNqJ8ta1", "title": "Fault-Tolerant Computing, International Symposium on", "acronym": "ftcs", "groupId": "1000287", "volume": "0", "displayVolume": "0", "year": "1999", "__typename": "ProceedingType" }, "article": { "id": "12OmNz2kqef", "doi": "10.1109/FTCS.1999.10003", "title": "Banquet Speaker: Harrison Hagan Schmitt", "normalizedTitle": "Banquet Speaker: Harrison Hagan Schmitt", "abstract": null, "abstracts": [ { "abstractType": "Regular", "content": "", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": null, "fno": "0213xvii", "keywords": [], "authors": [], "idPrefix": "ftcs", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "1999-06-01T00:00:00", "pubType": "proceedings", "pages": "xvii", "year": "1999", "issn": "0731-3071", "isbn": "0-7695-0213-X", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "0213xvi", "articleId": "12OmNy2rRXP", "__typename": "AdjacentArticleType" }, "next": { "fno": "0213xix", "articleId": "12OmNzl3WPm", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/dsd/2016/2817/0/2817a372", "title": "Does Cascading Schmitt-Trigger Stages Improve the Metastable Behavior?", "doi": null, "abstractUrl": "/proceedings-article/dsd/2016/2817a372/12OmNButq0z", "parentPublication": { "id": "proceedings/dsd/2016/2817/0", "title": "2016 Euromicro Conference on Digital System Design (DSD)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/dft/2006/2706/0/27060327", "title": "Soft Error Masking Circuit and Latch Using Schmitt Trigger Circuit", "doi": null, "abstractUrl": "/proceedings-article/dft/2006/27060327/12OmNClQ0pl", "parentPublication": { "id": "proceedings/dft/2006/2706/0", "title": "2006 21st IEEE International Symposium On Defect and Fault Tolerance in VLSI 
Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/async/2016/9007/0/9007a057", "title": "The Metastable Behavior of a Schmitt-Trigger", "doi": null, "abstractUrl": "/proceedings-article/async/2016/9007a057/12OmNrHB1Tc", "parentPublication": { "id": "proceedings/async/2016/9007/0", "title": "2016 22nd IEEE International Symposium on Asynchronous Circuits and Systems (ASYNC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/mmbia/1996/7367/0/73670171", "title": "Speaker:", "doi": null, "abstractUrl": "/proceedings-article/mmbia/1996/73670171/12OmNwoPtsz", "parentPublication": { "id": "proceedings/mmbia/1996/7367/0", "title": "Mathematical Methods in Biomedical Image Analysis, IEEE Workshop on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cis/2012/4896/0/4896a200", "title": "Neuron-MOS Based Schmitt Trigger with Controllable Hysteresis", "doi": null, "abstractUrl": "/proceedings-article/cis/2012/4896a200/12OmNyjccy5", "parentPublication": { "id": "proceedings/cis/2012/4896/0", "title": "2012 Eighth International Conference on Computational Intelligence and Security", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vlsid/2010/3928/0/3928z037", "title": "Banquet Talk", "doi": null, "abstractUrl": "/proceedings-article/vlsid/2010/3928z037/12OmNzFdt6m", "parentPublication": { "id": "proceedings/vlsid/2010/3928/0", "title": "VLSI Design, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/mwscas/2009/4479/0/05236141", "title": "Current regenerative Schmitt triggers with tunable hysteresis", "doi": null, "abstractUrl": "/proceedings-article/mwscas/2009/05236141/12OmNzdoMWc", "parentPublication": { "id": "proceedings/mwscas/2009/4479/0", "title": "Circuits and Systems, Midwest 
Symposium on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/si/2012/07/05875918", "title": "Independently-controlled-gate FinFET Schmitt trigger sub-threshold SRAMs", "doi": null, "abstractUrl": "/journal/si/2012/07/05875918/13rRUNvya75", "parentPublication": { "id": "trans/si", "title": "IEEE Transactions on Very Large Scale Integration (VLSI) Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2012/04/ttg201204xiii", "title": "Banquet Speaker: What's Next?: The Third Wave in Computer Graphics and Interactive Techniques", "doi": null, "abstractUrl": "/journal/tg/2012/04/ttg201204xiii/13rRUy2YLYt", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/async/2019/4747/0/474700a124", "title": "Efficient Metastability Characterization for Schmitt-Triggers", "doi": null, "abstractUrl": "/proceedings-article/async/2019/474700a124/1dIKP9LGgDe", "parentPublication": { "id": "proceedings/async/2019/4747/0", "title": "2019 25th IEEE International Symposium on Asynchronous Circuits and Systems (ASYNC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNrFTr6L", "title": "VLSI Design, International Conference on", "acronym": "vlsid", "groupId": "1000799", "volume": "0", "displayVolume": "0", "year": "2010", "__typename": "ProceedingType" }, "article": { "id": "12OmNzFdt6m", "doi": "10.1109/VLSI.Design.2010.99", "title": "Banquet Talk", "normalizedTitle": "Banquet Talk", "abstract": null, "abstracts": [ { "abstractType": "Regular", "content": "", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": null, "fno": "3928z037", "keywords": [], "authors": [], "idPrefix": "vlsid", "isOpenAccess": true, "showRecommendedArticles": false, "showBuyMe": false, "hasPdf": true, "pubDate": "2010-01-01T00:00:00", "pubType": "proceedings", "pages": "xxxvii-xxxviii", "year": "2010", "issn": "1063-9667", "isbn": "978-0-7695-3928-7", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "3928z035", "articleId": "12OmNx3HI5V", "__typename": "AdjacentArticleType" }, "next": { "fno": "3928z039", "articleId": "12OmNrFkeWo", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [], "articleVideos": [] }
{ "proceeding": { "id": "12OmNrYlmQ2", "title": "Communication Software and Networks, International Conference on", "acronym": "iccsn", "groupId": "1002780", "volume": "0", "displayVolume": "0", "year": "2009", "__typename": "ProceedingType" }, "article": { "id": "12OmNAoDi2Q", "doi": "10.1109/ICCSN.2009.122", "title": "Research on the Stability of Load Balancing Algorithm for Scalable Parallel Computing", "normalizedTitle": "Research on the Stability of Load Balancing Algorithm for Scalable Parallel Computing", "abstract": "In parallel cluster computing, an unscalable or unstable load balancing algorithm can intensely affect the performance of computing. To aim at this case, this paper puts forward a linear dynamic load balancing model and analyzes the stability of this linear model on the condition of existing time delay. Base on analyzing results, this paper uses a load balancing gain to control this model with the increasing system scale. In the end, a more useful nonlinear model is proposed and the simulation results are given to compare with analyzing results and other load balancing methods.", "abstracts": [ { "abstractType": "Regular", "content": "In parallel cluster computing, an unscalable or unstable load balancing algorithm can intensely affect the performance of computing. To aim at this case, this paper puts forward a linear dynamic load balancing model and analyzes the stability of this linear model on the condition of existing time delay. Base on analyzing results, this paper uses a load balancing gain to control this model with the increasing system scale. In the end, a more useful nonlinear model is proposed and the simulation results are given to compare with analyzing results and other load balancing methods.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In parallel cluster computing, an unscalable or unstable load balancing algorithm can intensely affect the performance of computing. 
To aim at this case, this paper puts forward a linear dynamic load balancing model and analyzes the stability of this linear model on the condition of existing time delay. Base on analyzing results, this paper uses a load balancing gain to control this model with the increasing system scale. In the end, a more useful nonlinear model is proposed and the simulation results are given to compare with analyzing results and other load balancing methods.", "fno": "3522a309", "keywords": [ "Scalability", "Stability", "Load Balancing Gain", "Time Delay", "Parallel Cluster Computing" ], "authors": [ { "affiliation": null, "fullName": "Qingyang Meng", "givenName": "Qingyang", "surname": "Meng", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Jianzhong Qiao", "givenName": "Jianzhong", "surname": "Qiao", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Jun Liu", "givenName": "Jun", "surname": "Liu", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Shukuan Lin", "givenName": "Shukuan", "surname": "Lin", "__typename": "ArticleAuthorType" } ], "idPrefix": "iccsn", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2009-02-01T00:00:00", "pubType": "proceedings", "pages": "309-312", "year": "2009", "issn": null, "isbn": "978-0-7695-3522-7", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "3522a304", "articleId": "12OmNxGja7X", "__typename": "AdjacentArticleType" }, "next": { "fno": "3522a313", "articleId": "12OmNxw5B3l", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/cso/2009/3605/1/3605a068", "title": "Adaptive Control of Stable Load Balancing Algorithm for Parallel Cluster Computing", "doi": null, "abstractUrl": "/proceedings-article/cso/2009/3605a068/12OmNC3FG4o", "parentPublication": { "id": 
"cso/2009/3605/1", "title": "2009 International Joint Conference on Computational Sciences and Optimization", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/apscc/2008/3473/0/3473a707", "title": "An Load Balancing Strategy of FTP Server-Cluster Based on JXTA", "doi": null, "abstractUrl": "/proceedings-article/apscc/2008/3473a707/12OmNrIaeks", "parentPublication": { "id": "proceedings/apscc/2008/3473/0", "title": "2008 IEEE Asia-Pacific Services Computing Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/nswctc/2009/3610/1/3610a253", "title": "The Effect of Execution-Time/Delay Ratio on Stability of Load Balancing Control", "doi": null, "abstractUrl": "/proceedings-article/nswctc/2009/3610a253/12OmNs0TKMc", "parentPublication": { "id": "proceedings/nswctc/2009/3610/1", "title": "Networks Security, Wireless Communications and Trusted Computing, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icgec/2010/4281/0/4281a321", "title": "Immune Genetic Algorithm-based Load Balancing in Web Cluster", "doi": null, "abstractUrl": "/proceedings-article/icgec/2010/4281a321/12OmNvnOwy3", "parentPublication": { "id": "proceedings/icgec/2010/4281/0", "title": "Genetic and Evolutionary Computing, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/waina/2013/4952/0/4952a649", "title": "TeraScaler ELB-an Algorithm of Prediction-Based Elastic Load Balancing Resource Management in Cloud Computing", "doi": null, "abstractUrl": "/proceedings-article/waina/2013/4952a649/12OmNwpoFLx", "parentPublication": { "id": "proceedings/waina/2013/4952/0", "title": "2013 27th International Conference on Advanced Information Networking and Applications Workshops", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "proceedings/icnc/2007/2875/4/28750809", "title": "A Load Balancing Strategy in Web Cluster System", "doi": null, "abstractUrl": "/proceedings-article/icnc/2007/28750809/12OmNx3q6U9", "parentPublication": { "id": "proceedings/icnc/2007/2875/4", "title": "Third International Conference on Natural Computation (ICNC 2007)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ssst/1996/7352/0/73520412", "title": "Stability Analysis of a Load Balancing Algorithm", "doi": null, "abstractUrl": "/proceedings-article/ssst/1996/73520412/12OmNyFCvRk", "parentPublication": { "id": "proceedings/ssst/1996/7352/0", "title": "Southeastern Symposium on System Theory", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/dbta/2009/3604/0/3604a348", "title": "Load Balancing System Applied Research", "doi": null, "abstractUrl": "/proceedings-article/dbta/2009/3604a348/12OmNyRg4yv", "parentPublication": { "id": "proceedings/dbta/2009/3604/0", "title": "2009 First International Workshop on Database Technology and Applications, DBTA", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccsn/2010/3961/0/3961a449", "title": "A New Load Balancing Algorithm in Parallel Computing", "doi": null, "abstractUrl": "/proceedings-article/iccsn/2010/3961a449/12OmNyY4ruN", "parentPublication": { "id": "proceedings/iccsn/2010/3961/0", "title": "Communication Software and Networks, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iscsct/2008/3498/1/3498a404", "title": "A Dynamic Load Balancing Method Based on Stability Analysis", "doi": null, "abstractUrl": "/proceedings-article/iscsct/2008/3498a404/12OmNz5s0Mc", "parentPublication": { "id": null, "title": null, "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], 
"articleVideos": [] }
{ "proceeding": { "id": "12OmNzahckE", "title": "Dependable Computing, Latin-American Symposium on", "acronym": "ladc", "groupId": "1002875", "volume": "0", "displayVolume": "0", "year": "2011", "__typename": "ProceedingType" }, "article": { "id": "12OmNAtK4r6", "doi": "10.1109/LADC.2011.25", "title": "Load Balancing for Internet Distributed Services Using Limited Redirection Rates", "normalizedTitle": "Load Balancing for Internet Distributed Services Using Limited Redirection Rates", "abstract": "The Internet has become the universal support for computer applications. This increases the need for solutions that provide dependability and QoS for web applications. The replication of web servers on geographically distributed data centers allows the service provider to tolerate disastrous failures and to improve the response times perceived by clients. A key issue for good performance of worldwide distributed web services is the efficiency of the load balancing mechanism used to distribute client requests among the replicated servers. Load balancing can reduce the need for over-provision of resources, and help tolerate abrupt load peaks and/or partial failures through load conditioning. In this paper, we propose a new load balancing solution that reduces service response times by redirecting requests to the closest remote servers without overloading them. We also describe a middle ware that implements this protocol and present the results of a set of simulations that show its usefulness.", "abstracts": [ { "abstractType": "Regular", "content": "The Internet has become the universal support for computer applications. This increases the need for solutions that provide dependability and QoS for web applications. The replication of web servers on geographically distributed data centers allows the service provider to tolerate disastrous failures and to improve the response times perceived by clients. 
A key issue for good performance of worldwide distributed web services is the efficiency of the load balancing mechanism used to distribute client requests among the replicated servers. Load balancing can reduce the need for over-provision of resources, and help tolerate abrupt load peaks and/or partial failures through load conditioning. In this paper, we propose a new load balancing solution that reduces service response times by redirecting requests to the closest remote servers without overloading them. We also describe a middle ware that implements this protocol and present the results of a set of simulations that show its usefulness.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "The Internet has become the universal support for computer applications. This increases the need for solutions that provide dependability and QoS for web applications. The replication of web servers on geographically distributed data centers allows the service provider to tolerate disastrous failures and to improve the response times perceived by clients. A key issue for good performance of worldwide distributed web services is the efficiency of the load balancing mechanism used to distribute client requests among the replicated servers. Load balancing can reduce the need for over-provision of resources, and help tolerate abrupt load peaks and/or partial failures through load conditioning. In this paper, we propose a new load balancing solution that reduces service response times by redirecting requests to the closest remote servers without overloading them. 
We also describe a middle ware that implements this protocol and present the results of a set of simulations that show its usefulness.", "fno": "4320a156", "keywords": [ "Web Services", "Load Balancing" ], "authors": [ { "affiliation": null, "fullName": "Alan Massaru Nakai", "givenName": "Alan Massaru", "surname": "Nakai", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Edmundo Madeira", "givenName": "Edmundo", "surname": "Madeira", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Luiz E. Buzato", "givenName": "Luiz E.", "surname": "Buzato", "__typename": "ArticleAuthorType" } ], "idPrefix": "ladc", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2011-04-01T00:00:00", "pubType": "proceedings", "pages": "156-165", "year": "2011", "issn": null, "isbn": "978-0-7695-4320-8", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "4320a146", "articleId": "12OmNzGlRBy", "__typename": "AdjacentArticleType" }, "next": { "fno": "4320a166", "articleId": "12OmNxYbT4b", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icdcs/1998/8292/0/82920295", "title": "Dynamic Load Balancing in Geographically Distributed Heterogeneous Web Servers", "doi": null, "abstractUrl": "/proceedings-article/icdcs/1998/82920295/12OmNB9t6kq", "parentPublication": { "id": "proceedings/icdcs/1998/8292/0", "title": "Proceedings. 18th International Conference on Distributed Computing Systems (Cat. 
No.98CB36183)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdcs/2017/1792/0/1792a583", "title": "Distributed Load Balancing in Key-Value Networked Caches", "doi": null, "abstractUrl": "/proceedings-article/icdcs/2017/1792a583/12OmNBkP3Fm", "parentPublication": { "id": "proceedings/icdcs/2017/1792/0", "title": "2017 IEEE 37th International Conference on Distributed Computing Systems (ICDCS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdcs/1999/0222/0/02220528", "title": "Redirection Algorithms for Load Sharing in Distributed Web-server Systems", "doi": null, "abstractUrl": "/proceedings-article/icdcs/1999/02220528/12OmNqJ8tbt", "parentPublication": { "id": "proceedings/icdcs/1999/0222/0", "title": "19th IEEE International Conference on Distributed Computing Systems (ICDCS'99)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/gcc/2008/3449/0/3449a147", "title": "Dynamic Load Balancing in Web Cache Cluster", "doi": null, "abstractUrl": "/proceedings-article/gcc/2008/3449a147/12OmNvIfDRe", "parentPublication": { "id": "proceedings/gcc/2008/3449/0", "title": "2008 Seventh International Conference on Grid and Cooperative Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cluster/2001/1116/0/11160274", "title": "Approximation Algorithms for Data Distribution with Load Balancing of Web Servers", "doi": null, "abstractUrl": "/proceedings-article/cluster/2001/11160274/12OmNvTTcfB", "parentPublication": { "id": "proceedings/cluster/2001/1116/0", "title": "Third IEEE International Conference on Cluster Computing (CLUSTER'01)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icnc/2007/2875/4/28750809", "title": "A Load Balancing Strategy in Web Cluster System", "doi": null, "abstractUrl": 
"/proceedings-article/icnc/2007/28750809/12OmNx3q6U9", "parentPublication": { "id": "proceedings/icnc/2007/2875/4", "title": "Third International Conference on Natural Computation (ICNC 2007)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aina/2004/2051/2/205120321", "title": "Balancing Workload based on Content Types for Scalable Web Server Clusters", "doi": null, "abstractUrl": "/proceedings-article/aina/2004/205120321/12OmNx5GU8k", "parentPublication": { "id": "proceedings/aina/2004/2051/2", "title": "18th International Conference on Advanced Information Networking and Applications, 2004. AINA 2004.", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iita/2009/3859/1/3859a449", "title": "A Weighted Metric Based Adaptive Algorithm for Web Server Load Balancing", "doi": null, "abstractUrl": "/proceedings-article/iita/2009/3859a449/12OmNxy4MX1", "parentPublication": { "id": "proceedings/iita/2009/3859/1", "title": "2009 Third International Symposium on Intelligent Information Technology Application", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/dbta/2009/3604/0/3604a348", "title": "Load Balancing System Applied Research", "doi": null, "abstractUrl": "/proceedings-article/dbta/2009/3604a348/12OmNyRg4yv", "parentPublication": { "id": "proceedings/dbta/2009/3604/0", "title": "2009 First International Workshop on Database Technology and Applications, DBTA", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/seus-wccia/2006/2560/0/25600251", "title": "DNS-Based Load Balancing in Distributed Web-server Systems", "doi": null, "abstractUrl": "/proceedings-article/seus-wccia/2006/25600251/12OmNynsbBx", "parentPublication": { "id": "proceedings/seus-wccia/2006/2560/0", "title": "Software Technologies for Future Embedded and Ubiquitous Systems, and International Workshop 
on Collaborative Computing, Integration, and Assurance, The IEEE Workshop on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNwt5shw", "title": "Proceedings. Eighth IEEE International Enterprise Distributed Object Computing Conference, 2004. EDOC 2004.", "acronym": "edoc", "groupId": "1000274", "volume": "0", "displayVolume": "0", "year": "2004", "__typename": "ProceedingType" }, "article": { "id": "12OmNB1eJBR", "doi": "10.1109/EDOC.2004.10020", "title": "Evaluating the Performance of Middleware Load Balancing Strategies", "normalizedTitle": "Evaluating the Performance of Middleware Load Balancing Strategies", "abstract": "This paper presents three contributions to research on middleware load balancing. First, it describes the design of Cygnus, which is an extensible open-source middleware framework developed to support adaptive and non-adaptive load balancing strategies. Key features of Cygnus are its ability to make load balancing decisions based on application-defined load metrics, dynamically (re)configure load balancing strategies at run-time, and transparently add load balancing support to client and server applications. Second, it describes the design of LBPerf, an open-source middleware load balancing benchmarking toolkit developed to evaluate load balancing strategies at the middleware level. Third, it presents the results of experiments that systematically evaluate the performance of adaptive load balancing strategies implemented using the Cygnus middleware framework using workloads generated by LBPerf. The workloads used in our experiments are based on models of CPU-bound requests that are representative of a broad range of distributed applications. Our experiments with LBPerf illustrate the need for evaluating different adaptive and non-adaptive load balancing strategies under different workload conditions. 
In addition to assisting in choosing a suitable load balancing strategy for a particular class of distributed applications, our empirical results help configure run-time parameters properly and analyze their behavior in the presence of different workloads. Our results also indicate that integrating Cygnus into distributed applications can improve their scalability, while incurring minimal run-time overhead. As a result, developers can concentrate on their core application behavior, rather than wrestling with complex middleware mechanisms needed to enhance the scalability of their distributed applications.", "abstracts": [ { "abstractType": "Regular", "content": "This paper presents three contributions to research on middleware load balancing. First, it describes the design of Cygnus, which is an extensible open-source middleware framework developed to support adaptive and non-adaptive load balancing strategies. Key features of Cygnus are its ability to make load balancing decisions based on application-defined load metrics, dynamically (re)configure load balancing strategies at run-time, and transparently add load balancing support to client and server applications. Second, it describes the design of LBPerf, an open-source middleware load balancing benchmarking toolkit developed to evaluate load balancing strategies at the middleware level. Third, it presents the results of experiments that systematically evaluate the performance of adaptive load balancing strategies implemented using the Cygnus middleware framework using workloads generated by LBPerf. The workloads used in our experiments are based on models of CPU-bound requests that are representative of a broad range of distributed applications. Our experiments with LBPerf illustrate the need for evaluating different adaptive and non-adaptive load balancing strategies under different workload conditions. 
In addition to assisting in choosing a suitable load balancing strategy for a particular class of distributed applications, our empirical results help configure run-time parameters properly and analyze their behavior in the presence of different workloads. Our results also indicate that integrating Cygnus into distributed applications can improve their scalability, while incurring minimal run-time overhead. As a result, developers can concentrate on their core application behavior, rather than wrestling with complex middleware mechanisms needed to enhance the scalability of their distributed applications.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "This paper presents three contributions to research on middleware load balancing. First, it describes the design of Cygnus, which is an extensible open-source middleware framework developed to support adaptive and non-adaptive load balancing strategies. Key features of Cygnus are its ability to make load balancing decisions based on application-defined load metrics, dynamically (re)configure load balancing strategies at run-time, and transparently add load balancing support to client and server applications. Second, it describes the design of LBPerf, an open-source middleware load balancing benchmarking toolkit developed to evaluate load balancing strategies at the middleware level. Third, it presents the results of experiments that systematically evaluate the performance of adaptive load balancing strategies implemented using the Cygnus middleware framework using workloads generated by LBPerf. The workloads used in our experiments are based on models of CPU-bound requests that are representative of a broad range of distributed applications. Our experiments with LBPerf illustrate the need for evaluating different adaptive and non-adaptive load balancing strategies under different workload conditions. 
In addition to assisting in choosing a suitable load balancing strategy for a particular class of distributed applications, our empirical results help configure run-time parameters properly and analyze their behavior in the presence of different workloads. Our results also indicate that integrating Cygnus into distributed applications can improve their scalability, while incurring minimal run-time overhead. As a result, developers can concentrate on their core application behavior, rather than wrestling with complex middleware mechanisms needed to enhance the scalability of their distributed applications.", "fno": "22140135", "keywords": [ "Middleware Load Balancing", "Adaptive Load Balancing Strategies" ], "authors": [ { "affiliation": "Vanderbilt University, Nashville, TN, USA", "fullName": "Jaiganesh Balasubramanian", "givenName": "Jaiganesh", "surname": "Balasubramanian", "__typename": "ArticleAuthorType" }, { "affiliation": "Vanderbilt University, Nashville, TN, USA", "fullName": "Douglas C. 
Schmidt", "givenName": "Douglas C.", "surname": "Schmidt", "__typename": "ArticleAuthorType" }, { "affiliation": "Vanderbilt University, Nashville, TN, USA", "fullName": "Lawrence Dowdy", "givenName": "Lawrence", "surname": "Dowdy", "__typename": "ArticleAuthorType" }, { "affiliation": "Vanderbilt University, Nashville, TN, USA", "fullName": "Ossama Othman", "givenName": "Ossama", "surname": "Othman", "__typename": "ArticleAuthorType" } ], "idPrefix": "edoc", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2004-09-01T00:00:00", "pubType": "proceedings", "pages": "135-146", "year": "2004", "issn": "1541-7719", "isbn": "0-7695-2214-9", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "01342522", "articleId": "12OmNz6iOn1", "__typename": "AdjacentArticleType" }, "next": { "fno": "01342523", "articleId": "12OmNxFJXT6", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/hpdc/1996/7582/0/75820282", "title": "Customized dynamic load balancing for a network of workstations", "doi": null, "abstractUrl": "/proceedings-article/hpdc/1996/75820282/12OmNAhOUNc", "parentPublication": { "id": "proceedings/hpdc/1996/7582/0", "title": "Proceedings of 5th IEEE International Symposium on High Performance Distributed Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/edoc/2004/2214/0/01342511", "title": "Evaluating the performance of middleware load balancing strategies", "doi": null, "abstractUrl": "/proceedings-article/edoc/2004/01342511/12OmNCbU2UI", "parentPublication": { "id": "proceedings/edoc/2004/2214/0", "title": "Proceedings. Eighth IEEE International Enterprise Distributed Object Computing Conference, 2004. 
EDOC 2004.", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/delta/2008/3110/0/3110a435", "title": "Integrating Dynamic Load Balancing Strategies into the Car-Network", "doi": null, "abstractUrl": "/proceedings-article/delta/2008/3110a435/12OmNqFrGrC", "parentPublication": { "id": "proceedings/delta/2008/3110/0", "title": "Electronic Design, Test and Applications, IEEE International Workshop on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/gcc/2008/3449/0/3449a147", "title": "Dynamic Load Balancing in Web Cache Cluster", "doi": null, "abstractUrl": "/proceedings-article/gcc/2008/3449a147/12OmNvIfDRe", "parentPublication": { "id": "proceedings/gcc/2008/3449/0", "title": "2008 Seventh International Conference on Grid and Cooperative Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/hipc/1996/7557/0/75570263", "title": "Load balancing strategies for symbolic vision computations", "doi": null, "abstractUrl": "/proceedings-article/hipc/1996/75570263/12OmNwE9OCq", "parentPublication": { "id": "proceedings/hipc/1996/7557/0", "title": "Proceedings of 3rd International Conference on High Performance Computing (HiPC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/itng/2008/3099/0/3099b263", "title": "Classification of Dynamic Load Balancing Strategies in a Network of Workstations", "doi": null, "abstractUrl": "/proceedings-article/itng/2008/3099b263/12OmNyVes4x", "parentPublication": { "id": "proceedings/itng/2008/3099/0", "title": "2008 5th International Conference on Information Technology: New Generation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sccc/1997/8052/0/80520246", "title": "Load balancing and communication optimization for parallel adaptive finite element methods", "doi": null, 
"abstractUrl": "/proceedings-article/sccc/1997/80520246/12OmNym2c3S", "parentPublication": { "id": "proceedings/sccc/1997/8052/0", "title": "Proceedings 17th International Conference of the Chilean Computer Science Society", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/pd/1996/03/p3025", "title": "How Network Topology Affects Dynamic Load Balancing", "doi": null, "abstractUrl": "/magazine/pd/1996/03/p3025/13rRUNvya6L", "parentPublication": { "id": "mags/pd", "title": "IEEE Concurrency (out of print)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/td/1993/09/l0979", "title": "Strategies for Dynamic Load Balancing on Highly Parallel Computers", "doi": null, "abstractUrl": "/journal/td/1993/09/l0979/13rRUNvya8O", "parentPublication": { "id": "trans/td", "title": "IEEE Transactions on Parallel & Distributed Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/pd/1999/03/p3058", "title": "Improved Strategies for Dynamic Load Balancing", "doi": null, "abstractUrl": "/magazine/pd/1999/03/p3058/13rRUy0HYOw", "parentPublication": { "id": "mags/pd", "title": "IEEE Concurrency (out of print)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1lgop7Lmd4Q", "title": "2009 International Joint Conference on Computational Sciences and Optimization", "acronym": "cso", "groupId": "1002829", "volume": "1", "displayVolume": "1", "year": "2009", "__typename": "ProceedingType" }, "article": { "id": "12OmNC3FG4o", "doi": "10.1109/CSO.2009.13", "title": "Adaptive Control of Stable Load Balancing Algorithm for Parallel Cluster Computing", "normalizedTitle": "Adaptive Control of Stable Load Balancing Algorithm for Parallel Cluster Computing", "abstract": "In this paper, the linear and nonlinear models are put forward first to describe load balancing system for parallel cluster computing. Through analyzing the stability of linear model, this paper finds the asymptotic stable condition of load balancing system with some assumptions, and the simulation results are given to compare the results in theory later. Based on the analyzing and simulation results, an adaptive load balancing algorithm(ALBA) which considers the delay, execution time and scalability of system is presented to enhance the performance of parallel computing. At last, the simulation for ALBA is given to prove the practicability of the load balancing algorithm.", "abstracts": [ { "abstractType": "Regular", "content": "In this paper, the linear and nonlinear models are put forward first to describe load balancing system for parallel cluster computing. Through analyzing the stability of linear model, this paper finds the asymptotic stable condition of load balancing system with some assumptions, and the simulation results are given to compare the results in theory later. Based on the analyzing and simulation results, an adaptive load balancing algorithm(ALBA) which considers the delay, execution time and scalability of system is presented to enhance the performance of parallel computing. 
At last, the simulation for ALBA is given to prove the practicability of the load balancing algorithm.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In this paper, the linear and nonlinear models are put forward first to describe load balancing system for parallel cluster computing. Through analyzing the stability of linear model, this paper finds the asymptotic stable condition of load balancing system with some assumptions, and the simulation results are given to compare the results in theory later. Based on the analyzing and simulation results, an adaptive load balancing algorithm(ALBA) which considers the delay, execution time and scalability of system is presented to enhance the performance of parallel computing. At last, the simulation for ALBA is given to prove the practicability of the load balancing algorithm.", "fno": "3605a068", "keywords": [ "Adaptive Control", "Load Balancing System", "Stability", "Delay", "Execution Time", "Scalability" ], "authors": [ { "affiliation": null, "fullName": "Qingyang Meng", "givenName": "Qingyang", "surname": "Meng", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Jianzhong Qiao", "givenName": "Jianzhong", "surname": "Qiao", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Jun Liu", "givenName": "Jun", "surname": "Liu", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Sukuan Lin", "givenName": "Sukuan", "surname": "Lin", "__typename": "ArticleAuthorType" } ], "idPrefix": "cso", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2009-04-01T00:00:00", "pubType": "proceedings", "pages": "68-72", "year": "2009", "issn": null, "isbn": "978-0-7695-3605-7", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "3605a063", "articleId": "12OmNzwpUbF", "__typename": "AdjacentArticleType" }, "next": { "fno": "3605a073", 
"articleId": "12OmNxGja22", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/iccsn/2009/3522/0/3522a309", "title": "Research on the Stability of Load Balancing Algorithm for Scalable Parallel Computing", "doi": null, "abstractUrl": "/proceedings-article/iccsn/2009/3522a309/12OmNAoDi2Q", "parentPublication": { "id": "proceedings/iccsn/2009/3522/0", "title": "Communication Software and Networks, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ipdps/2001/0990/1/099010076", "title": "Dynamic Load-Balancing Using Prediction in a Parallel Object-oriented System", "doi": null, "abstractUrl": "/proceedings-article/ipdps/2001/099010076/12OmNqFJhHC", "parentPublication": { "id": "proceedings/ipdps/2001/0990/1", "title": "Parallel and Distributed Processing Symposium, International", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/nswctc/2009/3610/1/3610a253", "title": "The Effect of Execution-Time/Delay Ratio on Stability of Load Balancing Control", "doi": null, "abstractUrl": "/proceedings-article/nswctc/2009/3610a253/12OmNs0TKMc", "parentPublication": { "id": "proceedings/nswctc/2009/3610/1", "title": "Networks Security, Wireless Communications and Trusted Computing, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/gcc/2008/3449/0/3449a147", "title": "Dynamic Load Balancing in Web Cache Cluster", "doi": null, "abstractUrl": "/proceedings-article/gcc/2008/3449a147/12OmNvIfDRe", "parentPublication": { "id": "proceedings/gcc/2008/3449/0", "title": "2008 Seventh International Conference on Grid and Cooperative Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ipdps/2006/0054/0/01639292", "title": "A new analytical method for 
parallel, diffusion-type load balancing", "doi": null, "abstractUrl": "/proceedings-article/ipdps/2006/01639292/12OmNvRU0lu", "parentPublication": { "id": "proceedings/ipdps/2006/0054/0", "title": "Proceedings 20th IEEE International Parallel & Distributed Processing Symposium", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icppw/2010/4157/0/4157a436", "title": "Hierarchical Load Balancing for Charm++ Applications on Large Supercomputers", "doi": null, "abstractUrl": "/proceedings-article/icppw/2010/4157a436/12OmNvT2oYo", "parentPublication": { "id": "proceedings/icppw/2010/4157/0", "title": "2010 39th International Conference on Parallel Processing Workshops", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icnc/2007/2875/4/28750809", "title": "A Load Balancing Strategy in Web Cluster System", "doi": null, "abstractUrl": "/proceedings-article/icnc/2007/28750809/12OmNx3q6U9", "parentPublication": { "id": "proceedings/icnc/2007/2875/4", "title": "Third International Conference on Natural Computation (ICNC 2007)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccsn/2010/3961/0/3961a449", "title": "A New Load Balancing Algorithm in Parallel Computing", "doi": null, "abstractUrl": "/proceedings-article/iccsn/2010/3961a449/12OmNyY4ruN", "parentPublication": { "id": "proceedings/iccsn/2010/3961/0", "title": "Communication Software and Networks, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iscsct/2008/3498/1/3498a404", "title": "A Dynamic Load Balancing Method Based on Stability Analysis", "doi": null, "abstractUrl": "/proceedings-article/iscsct/2008/3498a404/12OmNz5s0Mc", "parentPublication": { "id": null, "title": null, "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/dese/2011/4593/0/4593a581", "title": "Load Balancing Mathematical Model", "doi": null, "abstractUrl": "/proceedings-article/dese/2011/4593a581/12OmNzG4guL", "parentPublication": { "id": "proceedings/dese/2011/4593/0", "title": "2011 Developments in E-systems Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNzUPpvS", "title": "Networks Security, Wireless Communications and Trusted Computing, International Conference on", "acronym": "nswctc", "groupId": "1002716", "volume": "1", "displayVolume": "1", "year": "2009", "__typename": "ProceedingType" }, "article": { "id": "12OmNs0TKMc", "doi": "10.1109/NSWCTC.2009.90", "title": "The Effect of Execution-Time/Delay Ratio on Stability of Load Balancing Control", "normalizedTitle": "The Effect of Execution-Time/Delay Ratio on Stability of Load Balancing Control", "abstract": "Many factors can affect the stability of load balancing algorithm in parallel cluster computing, among them, a very important one is the ratio of execution-time to delay. This paper analyzes the effect of execution-time/delay on stability of load balancing algorithm in both theory and experiment view, and then this paper uses a load balancing gain to control this kind of effect for enhancing the performance of parallel cluster computing. The simulation results for different execution-time/ delay value are given to compare with the theory results in the end of paper.", "abstracts": [ { "abstractType": "Regular", "content": "Many factors can affect the stability of load balancing algorithm in parallel cluster computing, among them, a very important one is the ratio of execution-time to delay. This paper analyzes the effect of execution-time/delay on stability of load balancing algorithm in both theory and experiment view, and then this paper uses a load balancing gain to control this kind of effect for enhancing the performance of parallel cluster computing. The simulation results for different execution-time/ delay value are given to compare with the theory results in the end of paper.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Many factors can affect the stability of load balancing algorithm in parallel cluster computing, among them, a very important one is the ratio of execution-time to delay. 
This paper analyzes the effect of execution-time/delay on stability of load balancing algorithm in both theory and experiment view, and then this paper uses a load balancing gain to control this kind of effect for enhancing the performance of parallel cluster computing. The simulation results for different execution-time/ delay value are given to compare with the theory results in the end of paper.", "fno": "3610a253", "keywords": [ "Execution Time Delay Ratio", "Stability", "Load Balancing Control", "Parallel Cluster Computing" ], "authors": [ { "affiliation": null, "fullName": "Qingyang Meng", "givenName": "Qingyang", "surname": "Meng", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Su Ge", "givenName": "Su", "surname": "Ge", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Jianzhong Qiao", "givenName": "Jianzhong", "surname": "Qiao", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Shukuan Lin", "givenName": "Shukuan", "surname": "Lin", "__typename": "ArticleAuthorType" } ], "idPrefix": "nswctc", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2009-04-01T00:00:00", "pubType": "proceedings", "pages": "253-257", "year": "2009", "issn": null, "isbn": "978-0-7695-3610-1", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "3610a249", "articleId": "12OmNvjyxIH", "__typename": "AdjacentArticleType" }, "next": { "fno": "3610a258", "articleId": "12OmNx0A7Pp", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/iccsn/2009/3522/0/3522a309", "title": "Research on the Stability of Load Balancing Algorithm for Scalable Parallel Computing", "doi": null, "abstractUrl": "/proceedings-article/iccsn/2009/3522a309/12OmNAoDi2Q", "parentPublication": { "id": "proceedings/iccsn/2009/3522/0", "title": 
"Communication Software and Networks, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cso/2009/3605/1/3605a068", "title": "Adaptive Control of Stable Load Balancing Algorithm for Parallel Cluster Computing", "doi": null, "abstractUrl": "/proceedings-article/cso/2009/3605a068/12OmNC3FG4o", "parentPublication": { "id": "cso/2009/3605/1", "title": "2009 International Joint Conference on Computational Sciences and Optimization", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/gcc/2008/3449/0/3449a147", "title": "Dynamic Load Balancing in Web Cache Cluster", "doi": null, "abstractUrl": "/proceedings-article/gcc/2008/3449a147/12OmNvIfDRe", "parentPublication": { "id": "proceedings/gcc/2008/3449/0", "title": "2008 Seventh International Conference on Grid and Cooperative Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ipps/1993/3442/0/0262887", "title": "A load balancing strategy for prioritized execution of tasks", "doi": null, "abstractUrl": "/proceedings-article/ipps/1993/0262887/12OmNwDACFl", "parentPublication": { "id": "proceedings/ipps/1993/3442/0", "title": "Parallel Processing Symposium, International", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ssst/1996/7352/0/73520412", "title": "Stability Analysis of a Load Balancing Algorithm", "doi": null, "abstractUrl": "/proceedings-article/ssst/1996/73520412/12OmNyFCvRk", "parentPublication": { "id": "proceedings/ssst/1996/7352/0", "title": "Southeastern Symposium on System Theory", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/dbta/2009/3604/0/3604a348", "title": "Load Balancing System Applied Research", "doi": null, "abstractUrl": "/proceedings-article/dbta/2009/3604a348/12OmNyRg4yv", "parentPublication": { "id": 
"proceedings/dbta/2009/3604/0", "title": "2009 First International Workshop on Database Technology and Applications, DBTA", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ispdc/2008/3472/0/3472a107", "title": "Designing Load Balancing Algorithms Capable of Dealing with Workload Variability", "doi": null, "abstractUrl": "/proceedings-article/ispdc/2008/3472a107/12OmNySosFv", "parentPublication": { "id": "proceedings/ispdc/2008/3472/0", "title": "2008 International Symposium on Parallel and Distributed Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iscsct/2008/3498/1/3498a404", "title": "A Dynamic Load Balancing Method Based on Stability Analysis", "doi": null, "abstractUrl": "/proceedings-article/iscsct/2008/3498a404/12OmNz5s0Mc", "parentPublication": { "id": null, "title": null, "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/spdp/1990/2087/0/00143573", "title": "Dynamic load balancing for parallel program execution on a message-passing multicomputer", "doi": null, "abstractUrl": "/proceedings-article/spdp/1990/00143573/12OmNzC5T87", "parentPublication": { "id": "proceedings/spdp/1990/2087/0", "title": "Parallel and Distributed Processing, IEEE Symposium on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/pd/1999/03/p3058", "title": "Improved Strategies for Dynamic Load Balancing", "doi": null, "abstractUrl": "/magazine/pd/1999/03/p3058/13rRUy0HYOw", "parentPublication": { "id": "mags/pd", "title": "IEEE Concurrency (out of print)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNy3iFty", "title": "Proceedings of IEEE 36th Annual Foundations of Computer Science", "acronym": "focs", "groupId": "1000292", "volume": "0", "displayVolume": "0", "year": "1995", "__typename": "ProceedingType" }, "article": { "id": "12OmNwGZNPL", "doi": "10.1109/SFCS.1995.492494", "title": "Load balancing in the L/sub p/ norm", "normalizedTitle": "Load balancing in the L/sub p/ norm", "abstract": "In the load balancing problem, there is a set of servers, and jobs arrive sequentially. Each job can be run on some subset of the servers, and must be assigned to one of them in an online fashion. Traditionally, the assignment of jobs to servers is measured by the L/sub /spl infin// norm; in other words, an assignment of jobs to servers is quantified by the maximum load assigned to any server. In this measure the performance of the greedy load balancing algorithm may be a logarithmic factor higher than the offline optimal. In many applications, the L/sub /spl infin// norm is not a suitable way to measure how well the jobs are balanced. If each job sees a delay that is proportional to the number of jobs on its server, then the average delay among all jobs is proportional to the sum of the squares of the numbers of jobs assigned to the servers. Minimizing the average delay is equivalent to minimizing the Euclidean (or L/sub 2/) norm. For any fixed p, 1/spl les/p</spl infin/, we show that the greedy algorithm performs within a constant factor of the offline optimal with respect to the L/sub p/ norm. The constant grows linearly with p, which is best possible, but does not depend on the number of servers and jobs.", "abstracts": [ { "abstractType": "Regular", "content": "In the load balancing problem, there is a set of servers, and jobs arrive sequentially. Each job can be run on some subset of the servers, and must be assigned to one of them in an online fashion. 
Traditionally, the assignment of jobs to servers is measured by the L/sub /spl infin// norm; in other words, an assignment of jobs to servers is quantified by the maximum load assigned to any server. In this measure the performance of the greedy load balancing algorithm may be a logarithmic factor higher than the offline optimal. In many applications, the L/sub /spl infin// norm is not a suitable way to measure how well the jobs are balanced. If each job sees a delay that is proportional to the number of jobs on its server, then the average delay among all jobs is proportional to the sum of the squares of the numbers of jobs assigned to the servers. Minimizing the average delay is equivalent to minimizing the Euclidean (or L/sub 2/) norm. For any fixed p, 1/spl les/p</spl infin/, we show that the greedy algorithm performs within a constant factor of the offline optimal with respect to the L/sub p/ norm. The constant grows linearly with p, which is best possible, but does not depend on the number of servers and jobs.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In the load balancing problem, there is a set of servers, and jobs arrive sequentially. Each job can be run on some subset of the servers, and must be assigned to one of them in an online fashion. Traditionally, the assignment of jobs to servers is measured by the L/sub /spl infin// norm; in other words, an assignment of jobs to servers is quantified by the maximum load assigned to any server. In this measure the performance of the greedy load balancing algorithm may be a logarithmic factor higher than the offline optimal. In many applications, the L/sub /spl infin// norm is not a suitable way to measure how well the jobs are balanced. If each job sees a delay that is proportional to the number of jobs on its server, then the average delay among all jobs is proportional to the sum of the squares of the numbers of jobs assigned to the servers. 
Minimizing the average delay is equivalent to minimizing the Euclidean (or L/sub 2/) norm. For any fixed p, 1/spl les/p>/spl infin/, we show that the greedy algorithm performs within a constant factor of the offline optimal with respect to the L/sub p/ norm. The constant grows linearly with p, which is best possible, but does not depend on the number of servers and jobs.", "fno": "71830383", "keywords": [ "Resource Allocation Online Operation Queueing Theory Delays Deterministic Algorithms Competitive Algorithms Load Balancing L Sub P Norm Online Operation Job Assignment L Sub Spl Infin Norm Maximum Load Greedy Load Balancing Algorithm Offline Optimal Average Job Delay Sum Of The Squares Euclidean Norm Competitive Algorithms Deterministic Algorithm" ], "authors": [ { "affiliation": "Dept. of Comput. Sci., Johns Hopkins Univ., Baltimore, MD, USA", "fullName": "B. Awerbuch", "givenName": "B.", "surname": "Awerbuch", "__typename": "ArticleAuthorType" }, { "affiliation": "Dept. of Comput. Sci., Johns Hopkins Univ., Baltimore, MD, USA", "fullName": "Y. Azar", "givenName": "Y.", "surname": "Azar", "__typename": "ArticleAuthorType" }, { "affiliation": "Dept. of Comput. Sci., Johns Hopkins Univ., Baltimore, MD, USA", "fullName": "E.F. Grove", "givenName": "E.F.", "surname": "Grove", "__typename": "ArticleAuthorType" }, { "affiliation": "Dept. of Comput. Sci., Johns Hopkins Univ., Baltimore, MD, USA", "fullName": "Ming-Yang Kao", "givenName": "Ming-Yang", "surname": "Kao", "__typename": "ArticleAuthorType" }, { "affiliation": "Dept. of Comput. Sci., Johns Hopkins Univ., Baltimore, MD, USA", "fullName": "P. Krishnan", "givenName": "P.", "surname": "Krishnan", "__typename": "ArticleAuthorType" }, { "affiliation": "Dept. of Comput. Sci., Johns Hopkins Univ., Baltimore, MD, USA", "fullName": "J.S. 
Vitter", "givenName": "J.S.", "surname": "Vitter", "__typename": "ArticleAuthorType" } ], "idPrefix": "focs", "isOpenAccess": false, "showRecommendedArticles": false, "showBuyMe": true, "hasPdf": true, "pubDate": "1995-10-01T00:00:00", "pubType": "proceedings", "pages": "383", "year": "1995", "issn": "0272-5428", "isbn": "0-8186-7183-1", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "71830374", "articleId": "12OmNy5R3CK", "__typename": "AdjacentArticleType" }, "next": { "fno": "71830392", "articleId": "12OmNCfAPwP", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [], "articleVideos": [] }
{ "proceeding": { "id": "12OmNvDI3Md", "title": "High Performance Computing and Communication & IEEE International Conference on Embedded Software and Systems, IEEE International Conference on", "acronym": "hpcc-icess", "groupId": "1002461", "volume": "0", "displayVolume": "0", "year": "2011", "__typename": "ProceedingType" }, "article": { "id": "12OmNxdm4sw", "doi": "10.1109/HPCC.2011.39", "title": "Delay-Tolerant Dynamic Load Balancing", "normalizedTitle": "Delay-Tolerant Dynamic Load Balancing", "abstract": "Several approaches for load balancing in distributed systems were introduced, however, most of them require prior knowledge of the environments operation conditions and/or constant monitoring of these conditions at run time. That allows the applications to adjust the load and redistribute the tasks when necessary. These techniques were designed with the assumption that there is no high communication delay in discovering dynamic load behaviors for the rescheduling purposes. This paper proposes a new delay-tolerant dynamic load balancing technique that can be used effectively for reducing the execution time of some distributed tasks while minimizing the control overhead. Such tasks include downloading large files from replicated FTP servers and executing parallel applications on multiple independent distributed servers. This technique we call DDOps (Dual Direction Operations) allows the parallel/distributed application to make use of available resources efficiently while not requiring any significant control overhead. In our approach, load balancing is automatically inherent from the technique. Since the tasks are handled from opposite directions, processing will continue until the workers meet at some point which indicates all tasks are done. Thus DDOps is most suitable for non-dedicated heterogeneous distributed environments where resources vary in specifications, locations, and operating conditions. 
The experimental results in file download and parallel computations all show how efficient DDOps is and how well it balances the load among the different tasks.", "abstracts": [ { "abstractType": "Regular", "content": "Several approaches for load balancing in distributed systems were introduced, however, most of them require prior knowledge of the environments operation conditions and/or constant monitoring of these conditions at run time. That allows the applications to adjust the load and redistribute the tasks when necessary. These techniques were designed with the assumption that there is no high communication delay in discovering dynamic load behaviors for the rescheduling purposes. This paper proposes a new delay-tolerant dynamic load balancing technique that can be used effectively for reducing the execution time of some distributed tasks while minimizing the control overhead. Such tasks include downloading large files from replicated FTP servers and executing parallel applications on multiple independent distributed servers. This technique we call DDOps (Dual Direction Operations) allows the parallel/distributed application to make use of available resources efficiently while not requiring any significant control overhead. In our approach, load balancing is automatically inherent from the technique. Since the tasks are handled from opposite directions, processing will continue until the workers meet at some point which indicates all tasks are done. Thus DDOps is most suitable for non-dedicated heterogeneous distributed environments where resources vary in specifications, locations, and operating conditions. 
The experimental results in file download and parallel computations all show how efficient DDOps is and how well it balances the load among the different tasks.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Several approaches for load balancing in distributed systems were introduced, however, most of them require prior knowledge of the environments operation conditions and/or constant monitoring of these conditions at run time. That allows the applications to adjust the load and redistribute the tasks when necessary. These techniques were designed with the assumption that there is no high communication delay in discovering dynamic load behaviors for the rescheduling purposes. This paper proposes a new delay-tolerant dynamic load balancing technique that can be used effectively for reducing the execution time of some distributed tasks while minimizing the control overhead. Such tasks include downloading large files from replicated FTP servers and executing parallel applications on multiple independent distributed servers. This technique we call DDOps (Dual Direction Operations) allows the parallel/distributed application to make use of available resources efficiently while not requiring any significant control overhead. In our approach, load balancing is automatically inherent from the technique. Since the tasks are handled from opposite directions, processing will continue until the workers meet at some point which indicates all tasks are done. Thus DDOps is most suitable for non-dedicated heterogeneous distributed environments where resources vary in specifications, locations, and operating conditions. 
The experimental results in file download and parallel computations all show how efficient DDOps is and how well it balances the load among the different tasks.", "fno": "4538a237", "keywords": [ "Cloud Computing", "Grid Computing", "Heterogeneous Systems", "Load Balancing", "Delay Tolerant Systems" ], "authors": [], "idPrefix": "hpcc-icess", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2011-09-01T00:00:00", "pubType": "proceedings", "pages": "237-245", "year": "2011", "issn": null, "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "4538a229", "articleId": "12OmNyVes4z", "__typename": "AdjacentArticleType" }, "next": { "fno": "4538a246", "articleId": "12OmNAk5HPt", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/iccee/2009/3925/1/3925a516", "title": "Two Hierarchical Dynamic Load Balancing Algorithms in Distributed Systems", "doi": null, "abstractUrl": "/proceedings-article/iccee/2009/3925a516/12OmNARRYzr", "parentPublication": { "id": "iccee/2009/3925/1", "title": "Computer and Electrical Engineering, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/edoc/2004/2214/0/22140135", "title": "Evaluating the Performance of Middleware Load Balancing Strategies", "doi": null, "abstractUrl": "/proceedings-article/edoc/2004/22140135/12OmNB1eJBR", "parentPublication": { "id": "proceedings/edoc/2004/2214/0", "title": "Proceedings. Eighth IEEE International Enterprise Distributed Object Computing Conference, 2004. 
EDOC 2004.", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/computationworld/2009/3862/0/3862a545", "title": "Decentralized Load Balancing for Heterogeneous Grids", "doi": null, "abstractUrl": "/proceedings-article/computationworld/2009/3862a545/12OmNB6UIaM", "parentPublication": { "id": "proceedings/computationworld/2009/3862/0", "title": "Future Computing, Service Computation, Cognitive, Adaptive, Content, Patterns, Computation World", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/delta/2008/3110/0/3110a435", "title": "Integrating Dynamic Load Balancing Strategies into the Car-Network", "doi": null, "abstractUrl": "/proceedings-article/delta/2008/3110a435/12OmNqFrGrC", "parentPublication": { "id": "proceedings/delta/2008/3110/0", "title": "Electronic Design, Test and Applications, IEEE International Workshop on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/pcc/2006/0198/0/01629422", "title": "An efficient load balancing algorithm for heterogeneous grid systems considering desirability of grid sites", "doi": null, "abstractUrl": "/proceedings-article/pcc/2006/01629422/12OmNvonIHA", "parentPublication": { "id": "proceedings/pcc/2006/0198/0", "title": "2006 IEEE International Performance Computing and Communications Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ipdps/2003/1926/0/19260049b", "title": "Agent-Based Grid Load Balancing Using Performance-Driven Task Scheduling", "doi": null, "abstractUrl": "/proceedings-article/ipdps/2003/19260049b/12OmNwBjP5D", "parentPublication": { "id": "proceedings/ipdps/2003/1926/0", "title": "Parallel and Distributed Processing Symposium, International", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/paciia/2008/3490/2/3490c813", "title": "Load 
Balancing Oriented Economic Grid Resource Scheduling", "doi": null, "abstractUrl": "/proceedings-article/paciia/2008/3490c813/12OmNwcCITX", "parentPublication": { "id": "paciia/2008/3490/2", "title": "Pacific-Asia Workshop on Computational Intelligence and Industrial Application, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/nca/2001/1432/0/14320364", "title": "Static Load Balancing for CFD Simulations on a Network of Workstations", "doi": null, "abstractUrl": "/proceedings-article/nca/2001/14320364/12OmNyLiuvb", "parentPublication": { "id": "proceedings/nca/2001/1432/0", "title": "Proceedings IEEE International Symposium on Network Computing and Applications. NCA 2001", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/td/2002/08/l0787", "title": "On Load Balancing for Distributed Multiagent Computing", "doi": null, "abstractUrl": "/journal/td/2002/08/l0787/13rRUIJuxp4", "parentPublication": { "id": "trans/td", "title": "IEEE Transactions on Parallel & Distributed Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/td/2001/09/l0899", "title": "Observations on Using Genetic Algorithms for Dynamic Load-Balancing", "doi": null, "abstractUrl": "/journal/td/2001/09/l0899/13rRUxlgxSV", "parentPublication": { "id": "trans/td", "title": "IEEE Transactions on Parallel & Distributed Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNwkhTj9", "title": "2009 First International Workshop on Database Technology and Applications, DBTA", "acronym": "dbta", "groupId": "1002840", "volume": "0", "displayVolume": "0", "year": "2009", "__typename": "ProceedingType" }, "article": { "id": "12OmNyRg4yv", "doi": "10.1109/DBTA.2009.101", "title": "Load Balancing System Applied Research", "normalizedTitle": "Load Balancing System Applied Research", "abstract": "Load Balancing for high-Scheduler is scalable, highly available network services demand, borrowed many mature technology, the development of a high-performance server, cluster System is a key component of, this paper analyses the development of a network of four needs, details of the three load balancing technology, and accordingly is a load balancing scheduler design, then details of the resulting load balancing for a scheduling system structure of the groups set, finally, load balancing scheduling for specific applications.", "abstracts": [ { "abstractType": "Regular", "content": "Load Balancing for high-Scheduler is scalable, highly available network services demand, borrowed many mature technology, the development of a high-performance server, cluster System is a key component of, this paper analyses the development of a network of four needs, details of the three load balancing technology, and accordingly is a load balancing scheduler design, then details of the resulting load balancing for a scheduling system structure of the groups set, finally, load balancing scheduling for specific applications.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Load Balancing for high-Scheduler is scalable, highly available network services demand, borrowed many mature technology, the development of a high-performance server, cluster System is a key component of, this paper analyses the development of a network of four needs, details of the three load balancing technology, and accordingly is a load balancing 
scheduler design, then details of the resulting load balancing for a scheduling system structure of the groups set, finally, load balancing scheduling for specific applications.", "fno": "3604a348", "keywords": [ "Load Balancing", "Cluster", "Scheduling" ], "authors": [ { "affiliation": null, "fullName": "Zengqingjiang Yaojuan", "givenName": "Zengqingjiang", "surname": "Yaojuan", "__typename": "ArticleAuthorType" } ], "idPrefix": "dbta", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2009-04-01T00:00:00", "pubType": "proceedings", "pages": "348-350", "year": "2009", "issn": null, "isbn": "978-0-7695-3604-0", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "3604a345", "articleId": "12OmNCdTeKK", "__typename": "AdjacentArticleType" }, "next": { "fno": "3604a351", "articleId": "12OmNvlxJvE", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icpads/2009/3900/0/3900a671", "title": "Double-layer Scheduling Strategy of Load Balancing in Scientific Workflow", "doi": null, "abstractUrl": "/proceedings-article/icpads/2009/3900a671/12OmNCd2rpQ", "parentPublication": { "id": "proceedings/icpads/2009/3900/0", "title": "Parallel and Distributed Systems, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/gcc/2008/3449/0/3449a147", "title": "Dynamic Load Balancing in Web Cache Cluster", "doi": null, "abstractUrl": "/proceedings-article/gcc/2008/3449a147/12OmNvIfDRe", "parentPublication": { "id": "proceedings/gcc/2008/3449/0", "title": "2008 Seventh International Conference on Grid and Cooperative Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/paciia/2008/3490/2/3490c813", "title": "Load Balancing Oriented Economic Grid Resource 
Scheduling", "doi": null, "abstractUrl": "/proceedings-article/paciia/2008/3490c813/12OmNwcCITX", "parentPublication": { "id": "paciia/2008/3490/2", "title": "Pacific-Asia Workshop on Computational Intelligence and Industrial Application, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/waina/2013/4952/0/4952a649", "title": "TeraScaler ELB-an Algorithm of Prediction-Based Elastic Load Balancing Resource Management in Cloud Computing", "doi": null, "abstractUrl": "/proceedings-article/waina/2013/4952a649/12OmNwpoFLx", "parentPublication": { "id": "proceedings/waina/2013/4952/0", "title": "2013 27th International Conference on Advanced Information Networking and Applications Workshops", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icicta/2010/4077/1/4077a144", "title": "A Load-Balancing Dynamic Scheduling Algorithm under Machine Failure Conditions", "doi": null, "abstractUrl": "/proceedings-article/icicta/2010/4077a144/12OmNwswg1X", "parentPublication": { "id": "proceedings/icicta/2010/4077/1", "title": "Intelligent Computation Technology and Automation, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icnc/2007/2875/4/28750809", "title": "A Load Balancing Strategy in Web Cluster System", "doi": null, "abstractUrl": "/proceedings-article/icnc/2007/28750809/12OmNx3q6U9", "parentPublication": { "id": "proceedings/icnc/2007/2875/4", "title": "Third International Conference on Natural Computation (ICNC 2007)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icns/2010/3969/0/3969a363", "title": "Scheduling Algorithm of Load Balancing Based on Dynamic Policies", "doi": null, "abstractUrl": "/proceedings-article/icns/2010/3969a363/12OmNzSyCiU", "parentPublication": { "id": "proceedings/icns/2010/3969/0", "title": "2010 Sixth 
International Conference on Networking and Services", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cmcsn/2012/4738/0/4738a380", "title": "ERP Load Balancing Scheme Based on Process Scheduling", "doi": null, "abstractUrl": "/proceedings-article/cmcsn/2012/4738a380/12OmNzaQoAg", "parentPublication": { "id": "proceedings/cmcsn/2012/4738/0", "title": "Computing, Measurement, Control and Sensor Network, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/td/1997/02/l0173", "title": "On Runtime Parallel Scheduling for Processor Load Balancing", "doi": null, "abstractUrl": "/journal/td/1997/02/l0173/13rRUxBrGgl", "parentPublication": { "id": "trans/td", "title": "IEEE Transactions on Parallel & Distributed Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/pd/1999/03/p3058", "title": "Improved Strategies for Dynamic Load Balancing", "doi": null, "abstractUrl": "/magazine/pd/1999/03/p3058/13rRUy0HYOw", "parentPublication": { "id": "mags/pd", "title": "IEEE Concurrency (out of print)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": null, "article": { "id": "12OmNz5s0Mc", "doi": "10.1109/ISCSCT.2008.219", "title": "A Dynamic Load Balancing Method Based on Stability Analysis", "normalizedTitle": "A Dynamic Load Balancing Method Based on Stability Analysis", "abstract": "The key issue for parallel cluster computing is the stability of load balancing method, the unstable behavior such as oscillatory action which caused by delay can intensely influence the performance of load balancing algorithm. In such a case, a linear dynamic load balancing model is proposed in this paper. By analyzing the stability of this linear model, we can not only obtain the relation between delay and load balancing gain, but also find the affect between stability and scalability of system. Based on the linear model, a more actual nonlinear model is introduced in the end. Simulation results are presented and compared with analyzing results and other load balancing methods.", "abstracts": [ { "abstractType": "Regular", "content": "The key issue for parallel cluster computing is the stability of load balancing method, the unstable behavior such as oscillatory action which caused by delay can intensely influence the performance of load balancing algorithm. In such a case, a linear dynamic load balancing model is proposed in this paper. By analyzing the stability of this linear model, we can not only obtain the relation between delay and load balancing gain, but also find the affect between stability and scalability of system. Based on the linear model, a more actual nonlinear model is introduced in the end. Simulation results are presented and compared with analyzing results and other load balancing methods.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "The key issue for parallel cluster computing is the stability of load balancing method, the unstable behavior such as oscillatory action which caused by delay can intensely influence the performance of load balancing algorithm. 
In such a case, a linear dynamic load balancing model is proposed in this paper. By analyzing the stability of this linear model, we can not only obtain the relation between delay and load balancing gain, but also find the affect between stability and scalability of system. Based on the linear model, a more actual nonlinear model is introduced in the end. Simulation results are presented and compared with analyzing results and other load balancing methods.", "fno": "3498a404", "keywords": [ "Load Balancing", "Stability Analysis", "Load Balancing Gain", "Delay", "Scalability" ], "authors": [ { "affiliation": null, "fullName": "Qingyang Meng", "givenName": "Qingyang", "surname": "Meng", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Jianzhong Qiao", "givenName": "Jianzhong", "surname": "Qiao", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Jun Liu", "givenName": "Jun", "surname": "Liu", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Shukuan Lin", "givenName": "Shukuan", "surname": "Lin", "__typename": "ArticleAuthorType" } ], "idPrefix": "iscsct", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2008-12-01T00:00:00", "pubType": "proceedings", "pages": "404-408", "year": "2008", "issn": null, "isbn": "978-0-7695-3498-5", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "3498a394", "articleId": "12OmNzvz6HV", "__typename": "AdjacentArticleType" }, "next": { "fno": "3498a409", "articleId": "12OmNqBbI02", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/iccsn/2009/3522/0/3522a309", "title": "Research on the Stability of Load Balancing Algorithm for Scalable Parallel Computing", "doi": null, "abstractUrl": "/proceedings-article/iccsn/2009/3522a309/12OmNAoDi2Q", "parentPublication": { "id": 
"proceedings/iccsn/2009/3522/0", "title": "Communication Software and Networks, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cso/2009/3605/1/3605a068", "title": "Adaptive Control of Stable Load Balancing Algorithm for Parallel Cluster Computing", "doi": null, "abstractUrl": "/proceedings-article/cso/2009/3605a068/12OmNC3FG4o", "parentPublication": { "id": "cso/2009/3605/1", "title": "2009 International Joint Conference on Computational Sciences and Optimization", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/nswctc/2009/3610/1/3610a253", "title": "The Effect of Execution-Time/Delay Ratio on Stability of Load Balancing Control", "doi": null, "abstractUrl": "/proceedings-article/nswctc/2009/3610a253/12OmNs0TKMc", "parentPublication": { "id": "proceedings/nswctc/2009/3610/1", "title": "Networks Security, Wireless Communications and Trusted Computing, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/gcc/2008/3449/0/3449a147", "title": "Dynamic Load Balancing in Web Cache Cluster", "doi": null, "abstractUrl": "/proceedings-article/gcc/2008/3449a147/12OmNvIfDRe", "parentPublication": { "id": "proceedings/gcc/2008/3449/0", "title": "2008 Seventh International Conference on Grid and Cooperative Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/imccc/2011/4519/0/4519a200", "title": "Study and Design of Dynamic Load Balancing in Hybrid Honeynet", "doi": null, "abstractUrl": "/proceedings-article/imccc/2011/4519a200/12OmNvSKNYE", "parentPublication": { "id": "proceedings/imccc/2011/4519/0", "title": "Instrumentation, Measurement, Computer, Communication and Control, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/icppw/2010/4157/0/4157a436", "title": "Hierarchical Load Balancing for Charm++ Applications on Large Supercomputers", "doi": null, "abstractUrl": "/proceedings-article/icppw/2010/4157a436/12OmNvT2oYo", "parentPublication": { "id": "proceedings/icppw/2010/4157/0", "title": "2010 39th International Conference on Parallel Processing Workshops", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ssst/1996/7352/0/73520412", "title": "Stability Analysis of a Load Balancing Algorithm", "doi": null, "abstractUrl": "/proceedings-article/ssst/1996/73520412/12OmNyFCvRk", "parentPublication": { "id": "proceedings/ssst/1996/7352/0", "title": "Southeastern Symposium on System Theory", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/dese/2011/4593/0/4593a581", "title": "Load Balancing Mathematical Model", "doi": null, "abstractUrl": "/proceedings-article/dese/2011/4593a581/12OmNzG4guL", "parentPublication": { "id": "proceedings/dese/2011/4593/0", "title": "2011 Developments in E-systems Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpads/1998/8603/0/86030435", "title": "On the Stability of a Distributed Dynamic Load Balancing Algorithm", "doi": null, "abstractUrl": "/proceedings-article/icpads/1998/86030435/12OmNzy7uSK", "parentPublication": { "id": "proceedings/icpads/1998/8603/0", "title": "Parallel and Distributed Systems, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/pd/1996/03/p3025", "title": "How Network Topology Affects Dynamic Load Balancing", "doi": null, "abstractUrl": "/magazine/pd/1996/03/p3025/13rRUNvya6L", "parentPublication": { "id": "mags/pd", "title": "IEEE Concurrency (out of print)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNyuPL0g", "title": "Proceedings of Virtual Reality", "acronym": "vr", "groupId": "1000791", "volume": "0", "displayVolume": "0", "year": "1999", "__typename": "ProceedingType" }, "article": { "id": "12OmNwDACcO", "doi": "10.1109/VR.1999.756964", "title": "Walking About Virtual Environments on an Infinite Floor", "normalizedTitle": "Walking About Virtual Environments on an Infinite Floor", "abstract": "This paper presents a new configuration of locomotion interface for walking about virtual space. Traveling on foot is the most intuitive way for locomotion. Infinite surface driven by actuators is an ideal device for creation of sense of walking. We selected a torus-shaped surface to realize the locomotion interface. The locomotion interface employs twelve sets of treadmills. These treadmills are connected side by side and driven to perpendicular direction. Infinite surface is generated by the motion of the treadmills. The walker can go to any direction while his/her position is fixed in the real world. Effectiveness of the device is tested by motion analysis and study on sense of distance.", "abstracts": [ { "abstractType": "Regular", "content": "This paper presents a new configuration of locomotion interface for walking about virtual space. Traveling on foot is the most intuitive way for locomotion. Infinite surface driven by actuators is an ideal device for creation of sense of walking. We selected a torus-shaped surface to realize the locomotion interface. The locomotion interface employs twelve sets of treadmills. These treadmills are connected side by side and driven to perpendicular direction. Infinite surface is generated by the motion of the treadmills. The walker can go to any direction while his/her position is fixed in the real world. 
Effectiveness of the device is tested by motion analysis and study on sense of distance.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "This paper presents a new configuration of locomotion interface for walking about virtual space. Traveling on foot is the most intuitive way for locomotion. Infinite surface driven by actuators is an ideal device for creation of sense of walking. We selected a torus-shaped surface to realize the locomotion interface. The locomotion interface employs twelve sets of treadmills. These treadmills are connected side by side and driven to perpendicular direction. Infinite surface is generated by the motion of the treadmills. The walker can go to any direction while his/her position is fixed in the real world. Effectiveness of the device is tested by motion analysis and study on sense of distance.", "fno": "00930286", "keywords": [ "Locomotion", "Walking", "Navigation", "Haptic Feedback" ], "authors": [ { "affiliation": "University of Tsukuba,", "fullName": "Hiroo Iwata", "givenName": "Hiroo", "surname": "Iwata", "__typename": "ArticleAuthorType" } ], "idPrefix": "vr", "isOpenAccess": false, "showRecommendedArticles": false, "showBuyMe": true, "hasPdf": true, "pubDate": "1999-03-01T00:00:00", "pubType": "proceedings", "pages": "286", "year": "1999", "issn": "1087-8270", "isbn": "0-7695-0093-5", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "00930282", "articleId": "12OmNs0kywB", "__typename": "AdjacentArticleType" }, "next": { "fno": "00930296", "articleId": "12OmNC17hXa", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [], "articleVideos": [] }
{ "proceeding": { "id": "12OmNxE2mWh", "title": "2013 IEEE Virtual Reality (VR)", "acronym": "vr", "groupId": "1000791", "volume": "0", "displayVolume": "0", "year": "2013", "__typename": "ProceedingType" }, "article": { "id": "12OmNxFJXN3", "doi": "10.1109/VR.2013.6549395", "title": "Flexible and general redirected walking for head-mounted displays", "normalizedTitle": "Flexible and general redirected walking for head-mounted displays", "abstract": "Traditional head-mounted display (HMD) locomotion interfaces map real world movements into an equivalent virtual world movement. Therefore, people cannot naturally explore a virtual world that is larger than the real world space. Joysticks, treadmills, redirected walking, and other techniques have been proposed by others to relax this restriction. In this work, we propose a new “general redirected walking” interface which works with arbitrary shaped and sized real worlds and virtual worlds by injecting dynamic translation or rotation gains into virtual world and steering a user to walk along a best direction in the real world identified by three heuristics in real time. We tested our algorithm with 10 virtual world models and 5 real-world (i.e., tracking system) models using software designed to simulate a user walking on a random path. We have also developed a prototype implementation for our HMD system.", "abstracts": [ { "abstractType": "Regular", "content": "Traditional head-mounted display (HMD) locomotion interfaces map real world movements into an equivalent virtual world movement. Therefore, people cannot naturally explore a virtual world that is larger than the real world space. Joysticks, treadmills, redirected walking, and other techniques have been proposed by others to relax this restriction. 
In this work, we propose a new “general redirected walking” interface which works with arbitrary shaped and sized real worlds and virtual worlds by injecting dynamic translation or rotation gains into virtual world and steering a user to walk along a best direction in the real world identified by three heuristics in real time. We tested our algorithm with 10 virtual world models and 5 real-world (i.e., tracking system) models using software designed to simulate a user walking on a random path. We have also developed a prototype implementation for our HMD system.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Traditional head-mounted display (HMD) locomotion interfaces map real world movements into an equivalent virtual world movement. Therefore, people cannot naturally explore a virtual world that is larger than the real world space. Joysticks, treadmills, redirected walking, and other techniques have been proposed by others to relax this restriction. In this work, we propose a new “general redirected walking” interface which works with arbitrary shaped and sized real worlds and virtual worlds by injecting dynamic translation or rotation gains into virtual world and steering a user to walk along a best direction in the real world identified by three heuristics in real time. We tested our algorithm with 10 virtual world models and 5 real-world (i.e., tracking system) models using software designed to simulate a user walking on a random path. We have also developed a prototype implementation for our HMD system.", "fno": "06549395", "keywords": [ "Legged Locomotion", "Software", "Virtual Environments", "Software Algorithms", "Visualization", "Navigation", "I 3 7 Computer Graphics Three Dimensional Graphics And Realism Virtual Reality" ], "authors": [ { "affiliation": "Michigan Technol. 
Univ., Houghton, MI, USA", "fullName": "Ruimin Zhang", "givenName": null, "surname": "Ruimin Zhang", "__typename": "ArticleAuthorType" }, { "affiliation": "Michigan Technol. Univ., Houghton, MI, USA", "fullName": "Scott A. Kuhl", "givenName": "Scott A.", "surname": "Kuhl", "__typename": "ArticleAuthorType" } ], "idPrefix": "vr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2013-03-01T00:00:00", "pubType": "proceedings", "pages": "127-128", "year": "2013", "issn": "1087-8270", "isbn": "978-1-4673-4795-2", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "06549394", "articleId": "12OmNx4gUpe", "__typename": "AdjacentArticleType" }, "next": { "fno": "06549396", "articleId": "12OmNqNG3ev", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/3dui/2016/0842/0/07460032", "title": "Automated path prediction for redirected walking using navigation meshes", "doi": null, "abstractUrl": "/proceedings-article/3dui/2016/07460032/12OmNBKEymO", "parentPublication": { "id": "proceedings/3dui/2016/0842/0", "title": "2016 IEEE Symposium on 3D User Interfaces (3DUI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2014/2871/0/06802053", "title": "An enhanced steering algorithm for redirected walking in virtual environments", "doi": null, "abstractUrl": "/proceedings-article/vr/2014/06802053/12OmNCbU2Wt", "parentPublication": { "id": "proceedings/vr/2014/2871/0", "title": "2014 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dui/2013/6097/0/06550194", "title": "Flexible spaces: Dynamic layout generation for infinite walking in virtual environments", "doi": null, "abstractUrl": "/proceedings-article/3dui/2013/06550194/12OmNyFU75b", 
"parentPublication": { "id": "proceedings/3dui/2013/6097/0", "title": "2013 IEEE Symposium on 3D User Interfaces (3DUI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2018/3365/0/08448288", "title": "Experiencing an Invisible World War I Battlefield Through Narrative-Driven Redirected Walking in Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08448288/13bd1fZBGdu", "parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2018/3365/0/08446579", "title": "Leveraging Configuration Spaces and Navigation Functions for Redirected Walking", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08446579/13bd1fdV4lq", "parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2014/04/ttg201404579", "title": "Performance of Redirected Walking Algorithms in a Constrained Virtual World", "doi": null, "abstractUrl": "/journal/tg/2014/04/ttg201404579/13rRUwjoNx4", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2015/04/07036075", "title": "Cognitive Resource Demands of Redirected Walking", "doi": null, "abstractUrl": "/journal/tg/2015/04/07036075/13rRUxcKzVm", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/09961901", "title": "Transferable Virtual-Physical Environmental Alignment with Redirected Walking", "doi": null, 
"abstractUrl": "/journal/tg/5555/01/09961901/1IxvZ4KZbri", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08798319", "title": "Simulation and Evaluation of Three-User Redirected Walking Algorithm in Shared Physical Spaces", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08798319/1cJ1aPwr8l2", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ec/2022/02/09364750", "title": "Multi-Technique Redirected Walking Method", "doi": null, "abstractUrl": "/journal/ec/2022/02/09364750/1rxdpzgvsxG", "parentPublication": { "id": "trans/ec", "title": "IEEE Transactions on Emerging Topics in Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNzw8jgZ", "title": "2011 IEEE Virtual Reality (VR)", "acronym": "vr", "groupId": "1000791", "volume": "0", "displayVolume": "0", "year": "2011", "__typename": "ProceedingType" }, "article": { "id": "12OmNyNQSC1", "doi": "10.1109/VR.2011.5759456", "title": "Shadow walking: An unencumbered locomotion technique for systems with under-floor projection", "normalizedTitle": "Shadow walking: An unencumbered locomotion technique for systems with under-floor projection", "abstract": "When viewed from below, a user's feet cast shadows onto the floor screen of an under-floor projection system, such as a six-sided CAVE. Tracking those shadows with a camera provides enough information for calculating a user's ground-plane location, foot orientation, and footstep events. We present Shadow Walking, an unencumbered locomotion technique that uses shadow tracking to sense a user's walking direction and step speed. Shadow Walking affords virtual locomotion by detecting if a user is walking in place. In addition, Shadow Walking supports a sidestep gesture, similar to the iPhone's pinch gesture. In this paper, we describe how we implemented Shadow Walking and present a preliminary assessment of our new locomotion technique. We have found Shadow Walking provides advantages of being unencumbered, inexpensive, and easy to implement compared to other walking-in-place approaches. It also has potential for extended gestures and multi-user locomotion.", "abstracts": [ { "abstractType": "Regular", "content": "When viewed from below, a user's feet cast shadows onto the floor screen of an under-floor projection system, such as a six-sided CAVE. Tracking those shadows with a camera provides enough information for calculating a user's ground-plane location, foot orientation, and footstep events. We present Shadow Walking, an unencumbered locomotion technique that uses shadow tracking to sense a user's walking direction and step speed. 
Shadow Walking affords virtual locomotion by detecting if a user is walking in place. In addition, Shadow Walking supports a sidestep gesture, similar to the iPhone's pinch gesture. In this paper, we describe how we implemented Shadow Walking and present a preliminary assessment of our new locomotion technique. We have found Shadow Walking provides advantages of being unencumbered, inexpensive, and easy to implement compared to other walking-in-place approaches. It also has potential for extended gestures and multi-user locomotion.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "When viewed from below, a user's feet cast shadows onto the floor screen of an under-floor projection system, such as a six-sided CAVE. Tracking those shadows with a camera provides enough information for calculating a user's ground-plane location, foot orientation, and footstep events. We present Shadow Walking, an unencumbered locomotion technique that uses shadow tracking to sense a user's walking direction and step speed. Shadow Walking affords virtual locomotion by detecting if a user is walking in place. In addition, Shadow Walking supports a sidestep gesture, similar to the iPhone's pinch gesture. In this paper, we describe how we implemented Shadow Walking and present a preliminary assessment of our new locomotion technique. We have found Shadow Walking provides advantages of being unencumbered, inexpensive, and easy to implement compared to other walking-in-place approaches. 
It also has potential for extended gestures and multi-user locomotion.", "fno": "05759456", "keywords": [ "Multi User Locomotion", "Shadow Walking", "Unencumbered Locomotion Technique", "Under Floor Projection System", "Floor Screen", "Ground Plane Location", "Foot Orientation", "Footstep Event", "Shadow Tracking", "User Walking Direction Detection", "Step Speed", "Virtual Locomotion" ], "authors": [ { "affiliation": null, "fullName": "D J Zielinski", "givenName": "D J", "surname": "Zielinski", "__typename": "ArticleAuthorType" }, { "affiliation": "Virginia Tech, Blacksburg, VA, USA", "fullName": "R P McMahan", "givenName": "R P", "surname": "McMahan", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "R B Brady", "givenName": "R B", "surname": "Brady", "__typename": "ArticleAuthorType" } ], "idPrefix": "vr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2011-03-01T00:00:00", "pubType": "proceedings", "pages": "167-170", "year": "2011", "issn": null, "isbn": "978-1-4577-0039-2", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "05759455", "articleId": "12OmNwFid4R", "__typename": "AdjacentArticleType" }, "next": { "fno": "05759457", "articleId": "12OmNxWLTGX", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/vr/2010/6237/0/05444816", "title": "Improved Redirection with Distractors: A large-scale-real-walking locomotion interface and its effect on navigation in virtual environments", "doi": null, "abstractUrl": "/proceedings-article/vr/2010/05444816/12OmNqBbHKZ", "parentPublication": { "id": "proceedings/vr/2010/6237/0", "title": "2010 IEEE Virtual Reality Conference (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cw/2012/4814/0/4814a015", "title": "Analysis of Short Term Path 
Prediction of Human Locomotion for Augmented and Virtual Reality Applications", "doi": null, "abstractUrl": "/proceedings-article/cw/2012/4814a015/12OmNvAiSwv", "parentPublication": { "id": "proceedings/cw/2012/4814/0", "title": "2012 International Conference on Cyberworlds", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/1999/0093/0/00930286", "title": "Walking About Virtual Environments on an Infinite Floor", "doi": null, "abstractUrl": "/proceedings-article/vr/1999/00930286/12OmNwDACcO", "parentPublication": { "id": "proceedings/vr/1999/0093/0", "title": "Proceedings of Virtual Reality", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2011/0039/0/05759437", "title": "An evaluation of navigational ability comparing Redirected Free Exploration with Distractors to Walking-in-Place and joystick locomotio interfaces", "doi": null, "abstractUrl": "/proceedings-article/vr/2011/05759437/12OmNx8OuyK", "parentPublication": { "id": "proceedings/vr/2011/0039/0", "title": "2011 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dui/2008/2047/0/04476598", "title": "LLCM-WIP: Low-Latency, Continuous-Motion Walking-in-Place", "doi": null, "abstractUrl": "/proceedings-article/3dui/2008/04476598/12OmNyQYtvN", "parentPublication": { "id": "proceedings/3dui/2008/2047/0", "title": "2008 IEEE Symposium on 3D User Interfaces", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dui/2016/0842/0/07460030", "title": "Eye tracking for locomotion prediction in redirected walking", "doi": null, "abstractUrl": "/proceedings-article/3dui/2016/07460030/12OmNz4SOsF", "parentPublication": { "id": "proceedings/3dui/2016/0842/0", "title": "2016 IEEE Symposium on 3D User Interfaces (3DUI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" 
}, { "id": "trans/tg/2012/04/ttg2012040538", "title": "Redirecting Walking and Driving for Natural Navigation in Immersive Virtual Environments", "doi": null, "abstractUrl": "/journal/tg/2012/04/ttg2012040538/13rRUwgQpDs", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2014/04/ttg201404569", "title": "Establishing the Range of Perceptually Natural Visual Walking Speeds for Virtual Walking-In-Place Locomotion", "doi": null, "abstractUrl": "/journal/tg/2014/04/ttg201404569/13rRUxAASTb", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2012/07/06109251", "title": "The Design and Evaluation of a Large-Scale Real-Walking Locomotion Interface", "doi": null, "abstractUrl": "/journal/tg/2012/07/06109251/13rRUygT7mV", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2021/4057/0/405700a395", "title": "Is Walking Necessary for Effective Locomotion and Interaction in VR?", "doi": null, "abstractUrl": "/proceedings-article/vrw/2021/405700a395/1tnXRY815xS", "parentPublication": { "id": "proceedings/vrw/2021/4057/0", "title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1cI6akLvAuQ", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "acronym": "vr", "groupId": "1000791", "volume": "0", "displayVolume": "0", "year": "2019", "__typename": "ProceedingType" }, "article": { "id": "1cJ0PIoIPV6", "doi": "10.1109/VR.2019.8798286", "title": "Evaluating the Effectiveness of Redirected Walking with Auditory Distractors for Navigation in Virtual Environments", "normalizedTitle": "Evaluating the Effectiveness of Redirected Walking with Auditory Distractors for Navigation in Virtual Environments", "abstract": "Many virtual locomotion interfaces allowing users to move in virtual reality have been built and evaluated, such as redirected walking (RDW), walking-in-place (WIP), and joystick input. RDW has been shown to be among the most natural and immersive as it supports real walking, and many newer methods further adapt RDW to allow for customization and greater immersion. Most of these methods have been demonstrated to work with vision, in this paper we evaluate the ability for a general distractor-based RDW framework to be used with only auditory display. We conducted two studies evaluating the differences between RDW with auditory distractors and other distractor modalities using distraction ratio, virtual and physical path information, immersion, simulator sickness, and other measurements. Our results indicate that auditory RDW has the potential to be used with complex navigational tasks, such as crossing streets and avoiding obstacles. It can be used without designing the system specifically for audio-only users. Additionally, sense of presence and simulator sickness remain reasonable across all user groups.", "abstracts": [ { "abstractType": "Regular", "content": "Many virtual locomotion interfaces allowing users to move in virtual reality have been built and evaluated, such as redirected walking (RDW), walking-in-place (WIP), and joystick input. 
RDW has been shown to be among the most natural and immersive as it supports real walking, and many newer methods further adapt RDW to allow for customization and greater immersion. Most of these methods have been demonstrated to work with vision, in this paper we evaluate the ability for a general distractor-based RDW framework to be used with only auditory display. We conducted two studies evaluating the differences between RDW with auditory distractors and other distractor modalities using distraction ratio, virtual and physical path information, immersion, simulator sickness, and other measurements. Our results indicate that auditory RDW has the potential to be used with complex navigational tasks, such as crossing streets and avoiding obstacles. It can be used without designing the system specifically for audio-only users. Additionally, sense of presence and simulator sickness remain reasonable across all user groups.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Many virtual locomotion interfaces allowing users to move in virtual reality have been built and evaluated, such as redirected walking (RDW), walking-in-place (WIP), and joystick input. RDW has been shown to be among the most natural and immersive as it supports real walking, and many newer methods further adapt RDW to allow for customization and greater immersion. Most of these methods have been demonstrated to work with vision, in this paper we evaluate the ability for a general distractor-based RDW framework to be used with only auditory display. We conducted two studies evaluating the differences between RDW with auditory distractors and other distractor modalities using distraction ratio, virtual and physical path information, immersion, simulator sickness, and other measurements. Our results indicate that auditory RDW has the potential to be used with complex navigational tasks, such as crossing streets and avoiding obstacles. 
It can be used without designing the system specifically for audio-only users. Additionally, sense of presence and simulator sickness remain reasonable across all user groups.", "fno": "08798286", "keywords": [ "Cognition", "Ergonomics", "Interactive Devices", "Virtual Reality", "Redirected Walking", "Auditory Distractors", "Virtual Environments", "Virtual Locomotion Interfaces", "Virtual Reality", "Walking In Place", "Joystick Input", "General Distractor Based RDW Framework", "Auditory Display", "Distractor Modalities", "Virtual Path Information", "Physical Path Information", "Simulator Sickness", "Auditory RDW", "Complex Navigational Tasks", "Legged Locomotion", "Distortion", "Navigation", "Task Analysis", "Visualization", "Virtual Environments", "Dogs", "Virtual Locomotion X 2014 Redirected Walking X 2014 Distractors" ], "authors": [ { "affiliation": "UNC, Chapel Hill", "fullName": "Nicholas Rewkowski", "givenName": "Nicholas", "surname": "Rewkowski", "__typename": "ArticleAuthorType" }, { "affiliation": "UNC, Chapel Hill", "fullName": "Atul Rungta", "givenName": "Atul", "surname": "Rungta", "__typename": "ArticleAuthorType" }, { "affiliation": "UNC, Chapel Hill", "fullName": "Mary Whitton", "givenName": "Mary", "surname": "Whitton", "__typename": "ArticleAuthorType" }, { "affiliation": "UMD College, Park UNC Chapel Hill", "fullName": "Ming Lin", "givenName": "Ming", "surname": "Lin", "__typename": "ArticleAuthorType" } ], "idPrefix": "vr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2019-03-01T00:00:00", "pubType": "proceedings", "pages": "395-404", "year": "2019", "issn": null, "isbn": "978-1-7281-1377-7", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "08797989", "articleId": "1cJ15zHucrC", "__typename": "AdjacentArticleType" }, "next": { "fno": "08798016", "articleId": "1cJ14UH16nK", "__typename": "AdjacentArticleType" }, 
"__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/vr/2017/6647/0/07892279", "title": "Curvature gains in redirected walking: A closer look", "doi": null, "abstractUrl": "/proceedings-article/vr/2017/07892279/12OmNBEGYJE", "parentPublication": { "id": "proceedings/vr/2017/6647/0", "title": "2017 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2017/6647/0/07892373", "title": "Application of redirected walking in room-scale VR", "doi": null, "abstractUrl": "/proceedings-article/vr/2017/07892373/12OmNxG1ySA", "parentPublication": { "id": "proceedings/vr/2017/6647/0", "title": "2017 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2016/0836/0/07504742", "title": "Simultaneous mapping and redirected walking for ad hoc free walking in virtual environments", "doi": null, "abstractUrl": "/proceedings-article/vr/2016/07504742/12OmNyUFg0I", "parentPublication": { "id": "proceedings/vr/2016/0836/0", "title": "2016 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2018/3365/0/08446479", "title": "Adopting the Roll Manipulation for Redirected Walking", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08446479/13bd1eSlys4", "parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2018/3365/0/08446225", "title": "Effect of Environment Size on Curvature Redirected Walking Thresholds", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08446225/13bd1sx4Zt8", "parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2022/9617/0/961700a524", "title": "The Chaotic Behavior of Redirection &#x2013; Revisiting Simulations in Redirected Walking", "doi": null, "abstractUrl": "/proceedings-article/vr/2022/961700a524/1CJc4FECUko", "parentPublication": { "id": "proceedings/vr/2022/9617/0", "title": "2022 IEEE on Conference Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/10049511", "title": "Redirected Walking On Omnidirectional Treadmill", "doi": null, "abstractUrl": "/journal/tg/5555/01/10049511/1KYoAYFd0m4", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2023/4815/0/481500a053", "title": "Redirected Walking Based on Historical User Walking Data", "doi": null, "abstractUrl": "/proceedings-article/vr/2023/481500a053/1MNgUnNG7Ju", "parentPublication": { "id": "proceedings/vr/2023/4815/0", "title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/11/08794563", "title": "Estimation of Rotation Gain Thresholds Considering FOV, Gender, and Distractors", "doi": null, "abstractUrl": "/journal/tg/2019/11/08794563/1dNHkjixhDi", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2020/8508/0/850800a639", "title": "Visual-Auditory Redirection: Multimodal Integration of Incongruent Visual and Auditory Cues for Redirected Walking", "doi": null, "abstractUrl": "/proceedings-article/ismar/2020/850800a639/1pysvxeFG4E", "parentPublication": { "id": 
"proceedings/ismar/2020/8508/0", "title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1cI6akLvAuQ", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "acronym": "vr", "groupId": "1000791", "volume": "0", "displayVolume": "0", "year": "2019", "__typename": "ProceedingType" }, "article": { "id": "1cJ0Uje3t8Q", "doi": "10.1109/VR.2019.8798255", "title": "HapticSphere: Physical Support To Enable Precision Touch Interaction in Mobile Mixed-Reality", "normalizedTitle": "HapticSphere: Physical Support To Enable Precision Touch Interaction in Mobile Mixed-Reality", "abstract": "This work presents HapticSphere, a wearable spherical surface enabled by bridging a finger and the head-mounted display (HMD) with a passive string. Users perceive a physical support on a finger attached to a string, when extending their arm and reaching out to the string's maximum extension. This physical support assists users in precise touch interaction in the context of stationary and walking virtual or mixed-reality experiences. We propose three methods of attachment of the haptic string (directly on the head or on the body), and illustrate a novel single-step calibration algorithm that supports these configurations by estimating a grand haptic sphere, once a head-coordinated touch interaction is established. Two user studies were conducted to validate our approach and to compare the touch performance with physical support in sitting and walking conditions in the context of mobile mixed-reality scenarios. The results show that, in the walking condition, touch interaction with physical support significantly outperformed the visual-only condition.", "abstracts": [ { "abstractType": "Regular", "content": "This work presents HapticSphere, a wearable spherical surface enabled by bridging a finger and the head-mounted display (HMD) with a passive string. Users perceive a physical support on a finger attached to a string, when extending their arm and reaching out to the string's maximum extension. 
This physical support assists users in precise touch interaction in the context of stationary and walking virtual or mixed-reality experiences. We propose three methods of attachment of the haptic string (directly on the head or on the body), and illustrate a novel single-step calibration algorithm that supports these configurations by estimating a grand haptic sphere, once a head-coordinated touch interaction is established. Two user studies were conducted to validate our approach and to compare the touch performance with physical support in sitting and walking conditions in the context of mobile mixed-reality scenarios. The results show that, in the walking condition, touch interaction with physical support significantly outperformed the visual-only condition.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "This work presents HapticSphere, a wearable spherical surface enabled by bridging a finger and the head-mounted display (HMD) with a passive string. Users perceive a physical support on a finger attached to a string, when extending their arm and reaching out to the string's maximum extension. This physical support assists users in precise touch interaction in the context of stationary and walking virtual or mixed-reality experiences. We propose three methods of attachment of the haptic string (directly on the head or on the body), and illustrate a novel single-step calibration algorithm that supports these configurations by estimating a grand haptic sphere, once a head-coordinated touch interaction is established. Two user studies were conducted to validate our approach and to compare the touch performance with physical support in sitting and walking conditions in the context of mobile mixed-reality scenarios. 
The results show that, in the walking condition, touch interaction with physical support significantly outperformed the visual-only condition.", "fno": "08798255", "keywords": [ "Calibration", "Haptic Interfaces", "Helmet Mounted Displays", "Human Computer Interaction", "Mobile Computing", "Touch Sensitive Screens", "Virtual Reality", "Precision Touch Interaction", "Wearable Spherical Surface", "Head Mounted Display", "Passive String", "Mixed Reality Experiences", "Haptic String", "Head Coordinated Touch Interaction", "Touch Performance", "Mixed Reality Scenarios", "Grand Haptic Sphere", "Walking Conditions", "Sitting Onditions", "Resists", "Force", "Virtual Reality", "Force Feedback", "Neck", "Legged Locomotion", "Human Centered Computing X 2014 Visualization X 2014 Visualization Techniques X 2014 Treemaps", "Human Centered Computing X 2014 Visualization X 2014 Visualization Design And Evaluation Methods" ], "authors": [ { "affiliation": "National Chiao Tung University, Taiwan", "fullName": "Chiu-Hsuan Wang", "givenName": "Chiu-Hsuan", "surname": "Wang", "__typename": "ArticleAuthorType" }, { "affiliation": "National Chiao Tung University, Taiwan", "fullName": "Chen-Yuan Hsieh", "givenName": "Chen-Yuan", "surname": "Hsieh", "__typename": "ArticleAuthorType" }, { "affiliation": "NTUST, Taiwan", "fullName": "Neng-Hao Yu", "givenName": "Neng-Hao", "surname": "Yu", "__typename": "ArticleAuthorType" }, { "affiliation": "KAIST, Korea", "fullName": "Andrea Bianchi", "givenName": "Andrea", "surname": "Bianchi", "__typename": "ArticleAuthorType" }, { "affiliation": "National Chiao Tung University, Taiwan", "fullName": "Liwei Chan", "givenName": "Liwei", "surname": "Chan", "__typename": "ArticleAuthorType" } ], "idPrefix": "vr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2019-03-01T00:00:00", "pubType": "proceedings", "pages": "331-339", "year": "2019", "issn": null, "isbn": "978-1-7281-1377-7", "notes": null, 
"notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "08798205", "articleId": "1cJ1bY8RJIc", "__typename": "AdjacentArticleType" }, "next": { "fno": "08798036", "articleId": "1cJ15HGOeqc", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/3dui/2015/6886/0/07131766", "title": "A multi-touch finger gesture based low-fatigue VR travel framework", "doi": null, "abstractUrl": "/proceedings-article/3dui/2015/07131766/12OmNzayNeN", "parentPublication": { "id": "proceedings/3dui/2015/6886/0", "title": "2015 IEEE Symposium on 3D User Interfaces (3DUI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2018/3365/0/08446563", "title": "Redirected Walking in Irregularly Shaped Physical Environments with Dynamic Obstacles", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08446563/13bd1eW2l9A", "parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2018/3365/0/08446300", "title": "Human Compensation Strategies for Orientation Drifts", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08446300/13bd1fdV4lD", "parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2018/3365/0/08446574", "title": "Cognitive and Touch Performance Effects of Mismatched 3D Physical and Visual Perceptions", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08446574/13bd1sx4Zt2", "parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/05/08645818", "title": "Multi-User Redirected Walking and Resetting Using Artificial Potential Fields", "doi": null, "abstractUrl": "/journal/tg/2019/05/08645818/17PYEiVyc2v", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/09961901", "title": "Transferable Virtual-Physical Environmental Alignment with Redirected Walking", "doi": null, "abstractUrl": "/journal/tg/5555/01/09961901/1IxvZ4KZbri", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08798260", "title": "Obstacles Awareness Methods from Occupancy Map for Free Walking in VR", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08798260/1cJ0GIFMerK", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08797740", "title": "Towards Utilizing Touch-sensitive Physical Keyboards for Text Entry in Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08797740/1cJ196OGdJm", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2019/4765/0/476500a387", "title": "Evaluating Text Entry in Virtual Reality using a Touch-sensitive Physical Keyboard", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2019/476500a387/1gyslQzq07K", "parentPublication": { "id": 
"proceedings/ismar-adjunct/2019/4765/0", "title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ec/2022/02/09364750", "title": "Multi-Technique Redirected Walking Method", "doi": null, "abstractUrl": "/journal/ec/2022/02/09364750/1rxdpzgvsxG", "parentPublication": { "id": "trans/ec", "title": "IEEE Transactions on Emerging Topics in Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1cI6akLvAuQ", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "acronym": "vr", "groupId": "1000791", "volume": "0", "displayVolume": "0", "year": "2019", "__typename": "ProceedingType" }, "article": { "id": "1cJ1aPwr8l2", "doi": "10.1109/VR.2019.8798319", "title": "Simulation and Evaluation of Three-User Redirected Walking Algorithm in Shared Physical Spaces", "normalizedTitle": "Simulation and Evaluation of Three-User Redirected Walking Algorithm in Shared Physical Spaces", "abstract": "Shifting from single-person experiences to multi-user interactions is an inevitable trend of virtual reality technology. Existing methods primarily address the problem of one- or two-user redirected walking and do not respond to additional challenges related to potential collisions among three or more users who are moving both virtually and physically. To apply redirected walking to multiple users who are immersed in virtual reality experiences, we present a novel algorithm of three-user redirected walking in shared physical spaces. In addition, we present the steps to apply three-user redirected walking to multiplayer VR scene, where the users are divided into different groups based on the users' motion states. Therefore, this strategy can be applied to each group to address the challenges of redirected walking when there are more than three users. The results show that sharing a space using our three-user redirected walking algorithm is completely feasible.", "abstracts": [ { "abstractType": "Regular", "content": "Shifting from single-person experiences to multi-user interactions is an inevitable trend of virtual reality technology. Existing methods primarily address the problem of one- or two-user redirected walking and do not respond to additional challenges related to potential collisions among three or more users who are moving both virtually and physically. 
To apply redirected walking to multiple users who are immersed in virtual reality experiences, we present a novel algorithm of three-user redirected walking in shared physical spaces. In addition, we present the steps to apply three-user redirected walking to multiplayer VR scene, where the users are divided into different groups based on the users' motion states. Therefore, this strategy can be applied to each group to address the challenges of redirected walking when there are more than three users. The results show that sharing a space using our three-user redirected walking algorithm is completely feasible.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Shifting from single-person experiences to multi-user interactions is an inevitable trend of virtual reality technology. Existing methods primarily address the problem of one- or two-user redirected walking and do not respond to additional challenges related to potential collisions among three or more users who are moving both virtually and physically. To apply redirected walking to multiple users who are immersed in virtual reality experiences, we present a novel algorithm of three-user redirected walking in shared physical spaces. In addition, we present the steps to apply three-user redirected walking to multiplayer VR scene, where the users are divided into different groups based on the users' motion states. Therefore, this strategy can be applied to each group to address the challenges of redirected walking when there are more than three users. 
The results show that sharing a space using our three-user redirected walking algorithm is completely feasible.", "fno": "08798319", "keywords": [ "User Interfaces", "Virtual Reality", "Three User Redirected Walking Algorithm", "Single Person Experiences", "Multiuser Interactions", "Virtual Reality Technology", "Two User Redirected Walking", "Virtual Reality Experiences", "Legged Locomotion", "Collision Avoidance", "Navigation", "Virtual Environments", "Computer Simulation", "Approximation Algorithms", "Virtual Reality", "Redirected Walking", "Multi User", "Collision Avoidance", "Locomotion", "H 5 1 Information Interfaces And Presentation Multimedia Information Systems Virtual Reality", "I 3 6 Computer Graphics Methodology And Techniques Interaction Techniques" ], "authors": [ { "affiliation": "Zhejiang University of Technology, China", "fullName": "Tianyang Dong", "givenName": "Tianyang", "surname": "Dong", "__typename": "ArticleAuthorType" }, { "affiliation": "Zhejiang University of Technology, China", "fullName": "Yifan Song", "givenName": "Yifan", "surname": "Song", "__typename": "ArticleAuthorType" }, { "affiliation": "Zhejiang University of Technology, China", "fullName": "Yuqi Shen", "givenName": "Yuqi", "surname": "Shen", "__typename": "ArticleAuthorType" }, { "affiliation": "Zhejiang University of Technology, China", "fullName": "Jing Fan", "givenName": "Jing", "surname": "Fan", "__typename": "ArticleAuthorType" } ], "idPrefix": "vr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2019-03-01T00:00:00", "pubType": "proceedings", "pages": "894-895", "year": "2019", "issn": null, "isbn": "978-1-7281-1377-7", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "08797918", "articleId": "1cJ14ZjqmCQ", "__typename": "AdjacentArticleType" }, "next": { "fno": "08798344", "articleId": "1cJ0TtALobe", "__typename": "AdjacentArticleType" }, 
"__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/vr/2013/4795/0/06549395", "title": "Flexible and general redirected walking for head-mounted displays", "doi": null, "abstractUrl": "/proceedings-article/vr/2013/06549395/12OmNxFJXN3", "parentPublication": { "id": "proceedings/vr/2013/4795/0", "title": "2013 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2017/6647/0/07892235", "title": "An evaluation of strategies for two-user redirected walking in shared physical spaces", "doi": null, "abstractUrl": "/proceedings-article/vr/2017/07892235/12OmNy87Qwg", "parentPublication": { "id": "proceedings/vr/2017/6647/0", "title": "2017 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2018/3365/0/08446579", "title": "Leveraging Configuration Spaces and Navigation Functions for Redirected Walking", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08446579/13bd1fdV4lq", "parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2018/3365/0/08446167", "title": "Redirected Spaces: Going Beyond Borders", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08446167/13bd1fph1xv", "parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2015/04/07036075", "title": "Cognitive Resource Demands of Redirected Walking", "doi": null, "abstractUrl": "/journal/tg/2015/04/07036075/13rRUxcKzVm", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/05/09715721", "title": "Validating Simulation-Based Evaluation of Redirected Walking Systems", "doi": null, "abstractUrl": "/journal/tg/2022/05/09715721/1B4hxt06P9m", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/09961901", "title": "Transferable Virtual-Physical Environmental Alignment with Redirected Walking", "doi": null, "abstractUrl": "/journal/tg/5555/01/09961901/1IxvZ4KZbri", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2023/05/10049692", "title": "FREE-RDW: A Multi-user Redirected Walking Method for Supporting Non-forward Steps", "doi": null, "abstractUrl": "/journal/tg/2023/05/10049692/1KYopXwY5Vu", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2020/6532/0/09090595", "title": "Reactive Alignment of Virtual and Physical Environments Using Redirected Walking", "doi": null, "abstractUrl": "/proceedings-article/vrw/2020/09090595/1jIxm1j8B2w", "parentPublication": { "id": "proceedings/vrw/2020/6532/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ec/2022/02/09364750", "title": "Multi-Technique Redirected Walking Method", "doi": null, "abstractUrl": "/journal/ec/2022/02/09364750/1rxdpzgvsxG", "parentPublication": { "id": "trans/ec", "title": "IEEE Transactions on Emerging Topics in Computing", "__typename": "ParentPublication" }, 
"__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1cI6akLvAuQ", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "acronym": "vr", "groupId": "1000791", "volume": "0", "displayVolume": "0", "year": "2019", "__typename": "ProceedingType" }, "article": { "id": "1cJ1eRI8HqE", "doi": "10.1109/VR.2019.8797763", "title": "Physiological Effectivity and User Experience of Immersive Gait Rehabilitation", "normalizedTitle": "Physiological Effectivity and User Experience of Immersive Gait Rehabilitation", "abstract": "Gait impairments from neurological injuries require repeated and exhaustive physical exercises for rehabilitation. Prolonged physical training in clinical environments can easily become frustrating and de-motivating for various reasons which in turn risks to decrease efficiency during the healing process. This paper introduces an immersive VR system for gait rehabilitation which targets user experience and increase of motivation while evoking comparable physiological responses needed for successful training effects. The system provides a virtual environment consisting of open fields, forest, mountains, waterfalls, animals, and a beach for inspiring strolls and is able to include a virtual trainer as a companion during the walks. We evaluated the ecological validity of the system with healthy subjects before performing the clinical trial. We assessed the system's target qualities with a longitudinal study with 45 healthy participants in three consecutive days in comparison to a baseline non-VR condition. The system was able to evoke similar physiological responses. The workload was increased for the VR condition but the system also elicited a higher enjoyment and motivation which was the main goal. The latter benefits slightly decreased over time (as did workload) while they were still higher than in the non-VR condition. The virtual trainer did not show to be beneficial, the corresponding implications are discussed. 
Overall, the approach shows promising results which renders the system a viable alternative for the given use case while it motivates interesting direction for future work.", "abstracts": [ { "abstractType": "Regular", "content": "Gait impairments from neurological injuries require repeated and exhaustive physical exercises for rehabilitation. Prolonged physical training in clinical environments can easily become frustrating and de-motivating for various reasons which in turn risks to decrease efficiency during the healing process. This paper introduces an immersive VR system for gait rehabilitation which targets user experience and increase of motivation while evoking comparable physiological responses needed for successful training effects. The system provides a virtual environment consisting of open fields, forest, mountains, waterfalls, animals, and a beach for inspiring strolls and is able to include a virtual trainer as a companion during the walks. We evaluated the ecological validity of the system with healthy subjects before performing the clinical trial. We assessed the system's target qualities with a longitudinal study with 45 healthy participants in three consecutive days in comparison to a baseline non-VR condition. The system was able to evoke similar physiological responses. The workload was increased for the VR condition but the system also elicited a higher enjoyment and motivation which was the main goal. The latter benefits slightly decreased over time (as did workload) while they were still higher than in the non-VR condition. The virtual trainer did not show to be beneficial, the corresponding implications are discussed. 
Overall, the approach shows promising results which renders the system a viable alternative for the given use case while it motivates interesting direction for future work.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Gait impairments from neurological injuries require repeated and exhaustive physical exercises for rehabilitation. Prolonged physical training in clinical environments can easily become frustrating and de-motivating for various reasons which in turn risks to decrease efficiency during the healing process. This paper introduces an immersive VR system for gait rehabilitation which targets user experience and increase of motivation while evoking comparable physiological responses needed for successful training effects. The system provides a virtual environment consisting of open fields, forest, mountains, waterfalls, animals, and a beach for inspiring strolls and is able to include a virtual trainer as a companion during the walks. We evaluated the ecological validity of the system with healthy subjects before performing the clinical trial. We assessed the system's target qualities with a longitudinal study with 45 healthy participants in three consecutive days in comparison to a baseline non-VR condition. The system was able to evoke similar physiological responses. The workload was increased for the VR condition but the system also elicited a higher enjoyment and motivation which was the main goal. The latter benefits slightly decreased over time (as did workload) while they were still higher than in the non-VR condition. The virtual trainer did not show to be beneficial, the corresponding implications are discussed. 
Overall, the approach shows promising results which renders the system a viable alternative for the given use case while it motivates interesting direction for future work.", "fno": "08797763", "keywords": [ "Gait Analysis", "Injuries", "Medical Computing", "Neurophysiology", "Patient Rehabilitation", "Virtual Reality", "Healing Process", "Immersive VR System", "User Experience", "Virtual Environment", "Virtual Trainer", "Clinical Trial", "Baseline Non VR Condition", "VR Condition", "Physiological Effectivity", "Immersive Gait Rehabilitation", "Gait Impairments", "Neurological Injuries", "Exhaustive Physical Exercises", "Prolonged Physical Training", "Clinical Environments", "Physiological Responses", "Ecological Validity", "Training", "Legged Locomotion", "Virtual Environments", "Stroke Medical Condition", "Task Analysis", "Forestry", "Human Centered Computing", "Human Computer Interaction HCI", "Interaction Paradigms", "Virtual Reality" ], "authors": [ { "affiliation": "Human-Computer Interaction, Julius-Maximilians-Universität, Würzburg, Germany", "fullName": "Negin Hamzeheinejad", "givenName": "Negin", "surname": "Hamzeheinejad", "__typename": "ArticleAuthorType" }, { "affiliation": "Human-Computer Interaction, Julius-Maximilians-Universität, Würzburg, Germany", "fullName": "Daniel Roth", "givenName": "Daniel", "surname": "Roth", "__typename": "ArticleAuthorType" }, { "affiliation": "Human-Computer Interaction, Julius-Maximilians-Universität, Würzburg, Germany", "fullName": "Daniel Götz", "givenName": "Daniel", "surname": "Götz", "__typename": "ArticleAuthorType" }, { "affiliation": "Klinik Bavaria, Bad Kissingen, Germany", "fullName": "Franz Weilbach", "givenName": "Franz", "surname": "Weilbach", "__typename": "ArticleAuthorType" }, { "affiliation": "Human-Computer Interaction, Julius-Maximilians-Universität, Würzburg, Germany", "fullName": "Marc Erich Latoschik", "givenName": "Marc Erich", "surname": "Latoschik", "__typename": "ArticleAuthorType" } ], 
"idPrefix": "vr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2019-03-01T00:00:00", "pubType": "proceedings", "pages": "1421-1429", "year": "2019", "issn": null, "isbn": "978-1-7281-1377-7", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "08798133", "articleId": "1cJ16mQ4Tw4", "__typename": "AdjacentArticleType" }, "next": { "fno": "08797729", "articleId": "1cJ0R5bFWRW", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/3dui/2016/0842/0/07460072", "title": "Gaitzilla: A game to study the effects of virtual embodiment in gait rehabilitation", "doi": null, "abstractUrl": "/proceedings-article/3dui/2016/07460072/12OmNBqv2nN", "parentPublication": { "id": "proceedings/3dui/2016/0842/0", "title": "2016 IEEE Symposium on 3D User Interfaces (3DUI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ngmast/2016/0949/0/07801465", "title": "Immersive Virtual Reality as a Supplement in the Rehabilitation Program of Post-Stroke Patients", "doi": null, "abstractUrl": "/proceedings-article/ngmast/2016/07801465/12OmNrMZpyR", "parentPublication": { "id": "proceedings/ngmast/2016/0949/0", "title": "2016 10th International Conference on Next-Generation Mobile Applications, Security and Technologies (NGMAST)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2018/3365/0/08446432", "title": "Inducing Compensatory Changes in Gait Similar to External Perturbations Using an Immersive Head Mounted Display", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08446432/13bd1fKQxs2", "parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "proceedings/vr/2018/3365/0/08446125", "title": "Immersive Robot-Assisted Virtual Reality Therapy for Neurologically-Caused Gait Impairments", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08446125/13bd1fWcuDE", "parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/i-span/2018/8534/0/853400a253", "title": "An Upper Extremity Rehabilitation System Using Virtual Reality Technology", "doi": null, "abstractUrl": "/proceedings-article/i-span/2018/853400a253/17D45WWzW5h", "parentPublication": { "id": "proceedings/i-span/2018/8534/0", "title": "2018 15th International Symposium on Pervasive Systems, Algorithms and Networks (I-SPAN)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ai4i/2018/9209/0/08665677", "title": "Gait Recognition with Smart Devices Assisting Postoperative Rehabilitation in a Clinical Setting", "doi": null, "abstractUrl": "/proceedings-article/ai4i/2018/08665677/18qc12wasN2", "parentPublication": { "id": "proceedings/ai4i/2018/9209/0", "title": "2018 First International Conference on Artificial Intelligence for Industries (AI4I)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/percom-workshops/2017/4338/0/07917600", "title": "Measuring changes in gait and vehicle transfer ability during inpatient rehabilitation with wearable inertial sensors", "doi": null, "abstractUrl": "/proceedings-article/percom-workshops/2017/07917600/19wAFWJqdEs", "parentPublication": { "id": "proceedings/percom-workshops/2017/4338/0", "title": "2017 IEEE International Conference on Pervasive Computing and Communications: Workshops (PerCom Workshops)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/vr/2019/1377/0/08797828", "title": "Immersive Virtual Reality and Gamification Within Procedurally Generated Environments to Increase Motivation During Gait Rehabilitation", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08797828/1cJ13n6aEsE", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08797872", "title": "[DC] VR Simulation as a Motivator in Gait Rehabilitation", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08797872/1cJ15Qs6tnG", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2021/1838/0/255600a382", "title": "The Impact of Implicit and Explicit Feedback on Performance and Experience during VR-Supported Motor Rehabilitation", "doi": null, "abstractUrl": "/proceedings-article/vr/2021/255600a382/1tuAsJYxQ8E", "parentPublication": { "id": "proceedings/vr/2021/1838/0", "title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNxV4itF", "title": "2017 IEEE Virtual Reality (VR)", "acronym": "vr", "groupId": "1000791", "volume": "0", "displayVolume": "0", "year": "2017", "__typename": "ProceedingType" }, "article": { "id": "12OmNApcu9b", "doi": "10.1109/VR.2017.7892335", "title": "Designing intentional impossible spaces in virtual reality narratives: A case study", "normalizedTitle": "Designing intentional impossible spaces in virtual reality narratives: A case study", "abstract": "Natural movement and locomotion in Virtual Environments (VE) is constrained by the user's immediate physical space. To overcome this obstacle, researchers have established the use of impossible spaces. This work illustrates how impossible spaces can be utilized to enhance the aesthetics of, and presence within, an interactive narrative. This is done by creating impossible spaces with a narrative intent. First, locomotion and impossible spaces in VR are surveyed; second, the benefits of using intentional impossible spaces from a narrative design perspective is presented; third, a VR narrative called Ares is put forth as a prototype; and fourth, a user study is explored. Impossible spaces with a narrative intent intertwines narratology with the world's aesthetics to enhance dramatic agency.", "abstracts": [ { "abstractType": "Regular", "content": "Natural movement and locomotion in Virtual Environments (VE) is constrained by the user's immediate physical space. To overcome this obstacle, researchers have established the use of impossible spaces. This work illustrates how impossible spaces can be utilized to enhance the aesthetics of, and presence within, an interactive narrative. This is done by creating impossible spaces with a narrative intent. 
First, locomotion and impossible spaces in VR are surveyed; second, the benefits of using intentional impossible spaces from a narrative design perspective is presented; third, a VR narrative called Ares is put forth as a prototype; and fourth, a user study is explored. Impossible spaces with a narrative intent intertwines narratology with the world's aesthetics to enhance dramatic agency.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Natural movement and locomotion in Virtual Environments (VE) is constrained by the user's immediate physical space. To overcome this obstacle, researchers have established the use of impossible spaces. This work illustrates how impossible spaces can be utilized to enhance the aesthetics of, and presence within, an interactive narrative. This is done by creating impossible spaces with a narrative intent. First, locomotion and impossible spaces in VR are surveyed; second, the benefits of using intentional impossible spaces from a narrative design perspective is presented; third, a VR narrative called Ares is put forth as a prototype; and fourth, a user study is explored. Impossible spaces with a narrative intent intertwines narratology with the world's aesthetics to enhance dramatic agency.", "fno": "07892335", "keywords": [ "Virtual Environments", "Navigation", "Teleportation", "Geometry", "Buildings", "Computers", "Human Centered Computing", "Virtual Reality", "Interaction Design Theory", "Design" ], "authors": [ { "affiliation": "Georgia Institute of Technology, USA", "fullName": "Joshua A. 
Fisher", "givenName": "Joshua A.", "surname": "Fisher", "__typename": "ArticleAuthorType" }, { "affiliation": "Georgia Institute of Technology, USA", "fullName": "Amit Garg", "givenName": "Amit", "surname": "Garg", "__typename": "ArticleAuthorType" }, { "affiliation": "Georgia Institute of Technology, USA", "fullName": "Karan Pratap Singh", "givenName": "Karan Pratap", "surname": "Singh", "__typename": "ArticleAuthorType" }, { "affiliation": "Georgia Institute of Technology, USA", "fullName": "Wesley Wang", "givenName": "Wesley", "surname": "Wang", "__typename": "ArticleAuthorType" } ], "idPrefix": "vr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2017-01-01T00:00:00", "pubType": "proceedings", "pages": "379-380", "year": "2017", "issn": "2375-5334", "isbn": "978-1-5090-6647-6", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "07892334", "articleId": "12OmNxFaLDm", "__typename": "AdjacentArticleType" }, "next": { "fno": "07892336", "articleId": "12OmNyU63tX", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/vr/2013/4795/0/06549386", "title": "Flexible spaces: A virtual step outside of reality", "doi": null, "abstractUrl": "/proceedings-article/vr/2013/06549386/12OmNBOllfZ", "parentPublication": { "id": "proceedings/vr/2013/4795/0", "title": "2013 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dui/2013/6097/0/06550194", "title": "Flexible spaces: Dynamic layout generation for infinite walking in virtual environments", "doi": null, "abstractUrl": "/proceedings-article/3dui/2013/06550194/12OmNyFU75b", "parentPublication": { "id": "proceedings/3dui/2013/6097/0", "title": "2013 IEEE Symposium on 3D User Interfaces (3DUI)", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "proceedings/vr/2018/3365/0/08446167", "title": "Redirected Spaces: Going Beyond Borders", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08446167/13bd1fph1xv", "parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2012/04/ttg2012040555", "title": "Impossible Spaces: Maximizing Natural Walking in Virtual Environments with Self-Overlapping Architecture", "doi": null, "abstractUrl": "/journal/tg/2012/04/ttg2012040555/13rRUygBwhF", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/09737429", "title": "Intentional Head-Motion Assisted Locomotion for Reducing Cybersickness", "doi": null, "abstractUrl": "/journal/tg/5555/01/09737429/1BQidPzNjBS", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2022/8402/0/840200a622", "title": "Minimaps for Impossible Spaces: Improving Spatial Cognition in Self-Overlapping Virtual Rooms", "doi": null, "abstractUrl": "/proceedings-article/vrw/2022/840200a622/1CJe2Dfxj8Y", "parentPublication": { "id": "proceedings/vrw/2022/8402/0", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2022/5365/0/536500a501", "title": "Exploring Three-Dimensional Locomotion Techniques in Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a501/1J7WrBbMYEg", "parentPublication": { "id": 
"proceedings/ismar-adjunct/2022/5365/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2022/5365/0/536500a530", "title": "The Evaluation of Gait-Free Locomotion Methods with Eye Movement in Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a530/1J7WtHqguHu", "parentPublication": { "id": "proceedings/ismar-adjunct/2022/5365/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2020/6532/0/09090660", "title": "Relative Room Size Judgments in Impossible Spaces", "doi": null, "abstractUrl": "/proceedings-article/vrw/2020/09090660/1jIxqsUN6ik", "parentPublication": { "id": "proceedings/vrw/2020/6532/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2021/4057/0/405700a389", "title": "Impossible Open Spaces: Exploring the Effects of Occlusion on the Noticeability of Self-Overlapping Virtual Environments", "doi": null, "abstractUrl": "/proceedings-article/vrw/2021/405700a389/1tnWx88Rxuw", "parentPublication": { "id": "proceedings/vrw/2021/4057/0", "title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNxE2mWh", "title": "2013 IEEE Virtual Reality (VR)", "acronym": "vr", "groupId": "1000791", "volume": "0", "displayVolume": "0", "year": "2013", "__typename": "ProceedingType" }, "article": { "id": "12OmNBOllfZ", "doi": "10.1109/VR.2013.6549386", "title": "Flexible spaces: A virtual step outside of reality", "normalizedTitle": "Flexible spaces: A virtual step outside of reality", "abstract": "In this paper we introduce the concept of flexible spaces - a novel redirection technique that generalizes the use of overlapping (impossible) spaces and change blindness in an algorithm for dynamic layout generation. Flexible spaces is an impossible environment that violates the real world constancy in favor of providing the experience of seamless, unrestricted natural walking over a large-scale virtual environment (VE).", "abstracts": [ { "abstractType": "Regular", "content": "In this paper we introduce the concept of flexible spaces - a novel redirection technique that generalizes the use of overlapping (impossible) spaces and change blindness in an algorithm for dynamic layout generation. Flexible spaces is an impossible environment that violates the real world constancy in favor of providing the experience of seamless, unrestricted natural walking over a large-scale virtual environment (VE).", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In this paper we introduce the concept of flexible spaces - a novel redirection technique that generalizes the use of overlapping (impossible) spaces and change blindness in an algorithm for dynamic layout generation. 
Flexible spaces is an impossible environment that violates the real world constancy in favor of providing the experience of seamless, unrestricted natural walking over a large-scale virtual environment (VE).", "fno": "06549386", "keywords": [ "Legged Locomotion", "Layout", "Blindness", "Virtual Environments", "Navigation", "Educational Institutions", "Reorientation Techniques", "Virtual Reality", "Walking", "Locomotion" ], "authors": [ { "affiliation": null, "fullName": "Khrystyna Vasylevska", "givenName": "Khrystyna", "surname": "Vasylevska", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Hannes Kaufmann", "givenName": "Hannes", "surname": "Kaufmann", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Mark Bolas", "givenName": "Mark", "surname": "Bolas", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Evan A. Suma", "givenName": "Evan A.", "surname": "Suma", "__typename": "ArticleAuthorType" } ], "idPrefix": "vr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2013-03-01T00:00:00", "pubType": "proceedings", "pages": "109-110", "year": "2013", "issn": "1087-8270", "isbn": "978-1-4673-4795-2", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "06549385", "articleId": "12OmNyqzLWL", "__typename": "AdjacentArticleType" }, "next": { "fno": "06549387", "articleId": "12OmNCxL9TV", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/vr/2017/6647/0/07892335", "title": "Designing intentional impossible spaces in virtual reality narratives: A case study", "doi": null, "abstractUrl": "/proceedings-article/vr/2017/07892335/12OmNApcu9b", "parentPublication": { "id": "proceedings/vr/2017/6647/0", "title": "2017 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "proceedings/3dui/2013/6097/0/06550194", "title": "Flexible spaces: Dynamic layout generation for infinite walking in virtual environments", "doi": null, "abstractUrl": "/proceedings-article/3dui/2013/06550194/12OmNyFU75b", "parentPublication": { "id": "proceedings/3dui/2013/6097/0", "title": "2013 IEEE Symposium on 3D User Interfaces (3DUI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2018/3365/0/08446167", "title": "Redirected Spaces: Going Beyond Borders", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08446167/13bd1fph1xv", "parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2022/8402/0/840200a636", "title": "Heuristic Short-term Path Prediction for Spontaneous Human Locomotion in Virtual Open Spaces", "doi": null, "abstractUrl": "/proceedings-article/vrw/2022/840200a636/1CJcDwjfPvW", "parentPublication": { "id": "proceedings/vrw/2022/8402/0", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2022/8402/0/840200a622", "title": "Minimaps for Impossible Spaces: Improving Spatial Cognition in Self-Overlapping Virtual Rooms", "doi": null, "abstractUrl": "/proceedings-article/vrw/2022/840200a622/1CJe2Dfxj8Y", "parentPublication": { "id": "proceedings/vrw/2022/8402/0", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/09785918", "title": "Redirected Walking for Exploring Immersive Virtual Spaces with HMD: A Comprehensive Review and Recent Advances", "doi": null, 
"abstractUrl": "/journal/tg/5555/01/09785918/1DPaEdHg6KQ", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/09893374", "title": "A Segmented Redirection Mapping Method for Roadmaps of Large Constrained Virtual Environments", "doi": null, "abstractUrl": "/journal/tg/5555/01/09893374/1GGLIh8KmSA", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2022/5365/0/536500a554", "title": "Short-term Path Prediction for Spontaneous Human Locomotion in Arbitrary Virtual Spaces", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a554/1J7WabiAcYE", "parentPublication": { "id": "proceedings/ismar-adjunct/2022/5365/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2020/6532/0/09090660", "title": "Relative Room Size Judgments in Impossible Spaces", "doi": null, "abstractUrl": "/proceedings-article/vrw/2020/09090660/1jIxqsUN6ik", "parentPublication": { "id": "proceedings/vrw/2020/6532/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2021/4057/0/405700a389", "title": "Impossible Open Spaces: Exploring the Effects of Occlusion on the Noticeability of Self-Overlapping Virtual Environments", "doi": null, "abstractUrl": "/proceedings-article/vrw/2021/405700a389/1tnWx88Rxuw", "parentPublication": { "id": "proceedings/vrw/2021/4057/0", "title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts 
and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNwl8GHU", "title": "2013 IEEE Symposium on 3D User Interfaces (3DUI)", "acronym": "3dui", "groupId": "1001623", "volume": "0", "displayVolume": "0", "year": "2013", "__typename": "ProceedingType" }, "article": { "id": "12OmNyFU75b", "doi": "10.1109/3DUI.2013.6550194", "title": "Flexible spaces: Dynamic layout generation for infinite walking in virtual environments", "normalizedTitle": "Flexible spaces: Dynamic layout generation for infinite walking in virtual environments", "abstract": "Redirected walking techniques enable natural locomotion through immersive virtual environments (VEs) that are larger than the real world workspace. Most existing techniques rely upon manipulating the mapping between physical and virtual motions while the layout of the environment remains constant. However, if the primary focus of the experience is on the virtual world's content, rather than on its spatial layout, then the goal of redirected walking can be achieved through an entirely different strategy. In this paper, we introduce flexible spaces - a novel redirection technique that enables infinite real walking in virtual environments that do not require replication of real world layouts. Flexible spaces overcome the limitations and generalize the use of overlapping (impossible) spaces and change blindness by employing procedural layout generation. Our approach allows VE designers to focus on the content of the virtual world independent of the implementation details imposed by real walking, thereby making spatial manipulation techniques more practical for use in a variety of application domains.", "abstracts": [ { "abstractType": "Regular", "content": "Redirected walking techniques enable natural locomotion through immersive virtual environments (VEs) that are larger than the real world workspace. Most existing techniques rely upon manipulating the mapping between physical and virtual motions while the layout of the environment remains constant. 
However, if the primary focus of the experience is on the virtual world's content, rather than on its spatial layout, then the goal of redirected walking can be achieved through an entirely different strategy. In this paper, we introduce flexible spaces - a novel redirection technique that enables infinite real walking in virtual environments that do not require replication of real world layouts. Flexible spaces overcome the limitations and generalize the use of overlapping (impossible) spaces and change blindness by employing procedural layout generation. Our approach allows VE designers to focus on the content of the virtual world independent of the implementation details imposed by real walking, thereby making spatial manipulation techniques more practical for use in a variety of application domains.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Redirected walking techniques enable natural locomotion through immersive virtual environments (VEs) that are larger than the real world workspace. Most existing techniques rely upon manipulating the mapping between physical and virtual motions while the layout of the environment remains constant. However, if the primary focus of the experience is on the virtual world's content, rather than on its spatial layout, then the goal of redirected walking can be achieved through an entirely different strategy. In this paper, we introduce flexible spaces - a novel redirection technique that enables infinite real walking in virtual environments that do not require replication of real world layouts. Flexible spaces overcome the limitations and generalize the use of overlapping (impossible) spaces and change blindness by employing procedural layout generation. 
Our approach allows VE designers to focus on the content of the virtual world independent of the implementation details imposed by real walking, thereby making spatial manipulation techniques more practical for use in a variety of application domains.", "fno": "06550194", "keywords": [ "Legged Locomotion", "Layout", "Virtual Environments", "Navigation", "Blindness", "Heuristic Algorithms", "Tracking", "Reorientation Techniques", "Virtual Reality", "Walking", "Locomotion" ], "authors": [ { "affiliation": "Interactive Media Syst. Group, Vienna Univ. of Technol., Vienna, Austria", "fullName": "Khrystyna Vasylevska", "givenName": "Khrystyna", "surname": "Vasylevska", "__typename": "ArticleAuthorType" }, { "affiliation": "Interactive Media Syst. Group, Vienna Univ. of Technol., Vienna, Austria", "fullName": "Hannes Kaufmann", "givenName": "Hannes", "surname": "Kaufmann", "__typename": "ArticleAuthorType" }, { "affiliation": "Inst. for Creative Technol., Univ. of Southern California, Los Angeles, CA, USA", "fullName": "Mark Bolas", "givenName": "Mark", "surname": "Bolas", "__typename": "ArticleAuthorType" }, { "affiliation": "Inst. for Creative Technol., Univ. of Southern California, Los Angeles, CA, USA", "fullName": "Evan A. 
Suma", "givenName": "Evan A.", "surname": "Suma", "__typename": "ArticleAuthorType" } ], "idPrefix": "3dui", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2013-03-01T00:00:00", "pubType": "proceedings", "pages": "39-42", "year": "2013", "issn": null, "isbn": "978-1-4673-6097-5", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "06550193", "articleId": "12OmNAnMuyq", "__typename": "AdjacentArticleType" }, "next": { "fno": "06550195", "articleId": "12OmNzh5z5L", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/vr/2013/4795/0/06549386", "title": "Flexible spaces: A virtual step outside of reality", "doi": null, "abstractUrl": "/proceedings-article/vr/2013/06549386/12OmNBOllfZ", "parentPublication": { "id": "proceedings/vr/2013/4795/0", "title": "2013 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2014/2871/0/06802053", "title": "An enhanced steering algorithm for redirected walking in virtual environments", "doi": null, "abstractUrl": "/proceedings-article/vr/2014/06802053/12OmNCbU2Wt", "parentPublication": { "id": "proceedings/vr/2014/2871/0", "title": "2014 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2013/4795/0/06549395", "title": "Flexible and general redirected walking for head-mounted displays", "doi": null, "abstractUrl": "/proceedings-article/vr/2013/06549395/12OmNxFJXN3", "parentPublication": { "id": "proceedings/vr/2013/4795/0", "title": "2013 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2018/3365/0/08448288", "title": "Experiencing an Invisible World War I Battlefield Through Narrative-Driven 
Redirected Walking in Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08448288/13bd1fZBGdu", "parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2018/3365/0/08446579", "title": "Leveraging Configuration Spaces and Navigation Functions for Redirected Walking", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08446579/13bd1fdV4lq", "parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2018/3365/0/08446167", "title": "Redirected Spaces: Going Beyond Borders", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08446167/13bd1fph1xv", "parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2015/04/07036075", "title": "Cognitive Resource Demands of Redirected Walking", "doi": null, "abstractUrl": "/journal/tg/2015/04/07036075/13rRUxcKzVm", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2022/8402/0/840200a622", "title": "Minimaps for Impossible Spaces: Improving Spatial Cognition in Self-Overlapping Virtual Rooms", "doi": null, "abstractUrl": "/proceedings-article/vrw/2022/840200a622/1CJe2Dfxj8Y", "parentPublication": { "id": "proceedings/vrw/2022/8402/0", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08798319", "title": "Simulation and Evaluation of Three-User Redirected Walking Algorithm in Shared Physical Spaces", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08798319/1cJ1aPwr8l2", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/11/09523832", "title": "Redirected Walking in Static and Dynamic Scenes Using Visibility Polygons", "doi": null, "abstractUrl": "/journal/tg/2021/11/09523832/1wpqjiNuSqY", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "13bd1eJgoia", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "acronym": "vr", "groupId": "1000791", "volume": "0", "displayVolume": "0", "year": "2018", "__typename": "ProceedingType" }, "article": { "id": "13bd1fph1xv", "doi": "10.1109/VR.2018.8446167", "title": "Redirected Spaces: Going Beyond Borders", "normalizedTitle": "Redirected Spaces: Going Beyond Borders", "abstract": "Real walking in virtual reality (VR) is a promising locomotion technique since it offers multi-modal feedback to the user. Unfortunately, the virtual environment (VE) is limited by the available space in the physical world. So far, several techniques were developed to overcome this problem, e. g. redirected walking (RDW) and the use of impossible spaces. RDW subtly manipulates the viewpoint of the user to reorient her walking direction. Impossible spaces are based on subtle changes of the VE to reuse the same physical space for different virtual spaces. In this research demonstration, we show how these two approaches of redirected walking and impossible spaces can be combined. In particular, for our implementation we focus on the use of curved corridors that benefits both methods.", "abstracts": [ { "abstractType": "Regular", "content": "Real walking in virtual reality (VR) is a promising locomotion technique since it offers multi-modal feedback to the user. Unfortunately, the virtual environment (VE) is limited by the available space in the physical world. So far, several techniques were developed to overcome this problem, e. g. redirected walking (RDW) and the use of impossible spaces. RDW subtly manipulates the viewpoint of the user to reorient her walking direction. Impossible spaces are based on subtle changes of the VE to reuse the same physical space for different virtual spaces. In this research demonstration, we show how these two approaches of redirected walking and impossible spaces can be combined. 
In particular, for our implementation we focus on the use of curved corridors that benefits both methods.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Real walking in virtual reality (VR) is a promising locomotion technique since it offers multi-modal feedback to the user. Unfortunately, the virtual environment (VE) is limited by the available space in the physical world. So far, several techniques were developed to overcome this problem, e. g. redirected walking (RDW) and the use of impossible spaces. RDW subtly manipulates the viewpoint of the user to reorient her walking direction. Impossible spaces are based on subtle changes of the VE to reuse the same physical space for different virtual spaces. In this research demonstration, we show how these two approaches of redirected walking and impossible spaces can be combined. In particular, for our implementation we focus on the use of curved corridors that benefits both methods.", "fno": "08446167", "keywords": [ "Feedback", "Virtual Reality", "Redirected Walking", "Redirected Spaces", "Virtual Reality", "Multimodal Feedback", "Physical World", "RDW", "Walking Direction", "Physical Space", "Locomotion Technique", "Virtual Spaces", "Legged Locomotion", "Virtual Environments", "Visualization", "Electronic Mail", "Three Dimensional Displays", "User Interfaces", "Human Centered Computing Human Computer Interaction HCI Interaction Paradigms Virtual Reality", "Human Centered Computing Human Computer Interaction HCI Interaction Techniques" ], "authors": [ { "affiliation": "Universität hamburg, Human-Computer Interaction", "fullName": "Eike Langbehn", "givenName": "Eike", "surname": "Langbehn", "__typename": "ArticleAuthorType" }, { "affiliation": "Universität hamburg, Human-Computer Interaction", "fullName": "Paul Lubos", "givenName": "Paul", "surname": "Lubos", "__typename": "ArticleAuthorType" }, { "affiliation": "Universität hamburg, Human-Computer Interaction", "fullName": "Frank Steinicke", 
"givenName": "Frank", "surname": "Steinicke", "__typename": "ArticleAuthorType" } ], "idPrefix": "vr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2018-03-01T00:00:00", "pubType": "proceedings", "pages": "767-768", "year": "2018", "issn": null, "isbn": "978-1-5386-3365-6", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "08446524", "articleId": "13bd1fdV4l2", "__typename": "AdjacentArticleType" }, "next": { "fno": "08446553", "articleId": "13bd1eOELLT", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/vr/2017/6647/0/07892373", "title": "Application of redirected walking in room-scale VR", "doi": null, "abstractUrl": "/proceedings-article/vr/2017/07892373/12OmNxG1ySA", "parentPublication": { "id": "proceedings/vr/2017/6647/0", "title": "2017 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2018/3365/0/08446479", "title": "Adopting the Roll Manipulation for Redirected Walking", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08446479/13bd1eSlys4", "parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2018/7459/0/745900a115", "title": "Rethinking Redirected Walking: On the Use of Curvature Gains Beyond Perceptual Limitations and Revisiting Bending Gains", "doi": null, "abstractUrl": "/proceedings-article/ismar/2018/745900a115/17D45WK5AlG", "parentPublication": { "id": "proceedings/ismar/2018/7459/0", "title": "2018 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/dasc-picom-cbdcom-cyberscitech/2021/2174/0/217400a349", "title": "A Redirected Walking Toolkit for Exploring Large-Scale Virtual Environments", "doi": null, "abstractUrl": "/proceedings-article/dasc-picom-cbdcom-cyberscitech/2021/217400a349/1BLnzoFxHHy", "parentPublication": { "id": "proceedings/dasc-picom-cbdcom-cyberscitech/2021/2174/0", "title": "2021 IEEE Intl Conf on Dependable, Autonomic and Secure Computing, Intl Conf on Pervasive Intelligence and Computing, Intl Conf on Cloud and Big Data Computing, Intl Conf on Cyber Science and Technology Congress (DASC/PiCom/CBDCom/CyberSciTech)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2022/9617/0/961700a524", "title": "The Chaotic Behavior of Redirection &#x2013; Revisiting Simulations in Redirected Walking", "doi": null, "abstractUrl": "/proceedings-article/vr/2022/961700a524/1CJc4FECUko", "parentPublication": { "id": "proceedings/vr/2022/9617/0", "title": "2022 IEEE on Conference Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/09785918", "title": "Redirected Walking for Exploring Immersive Virtual Spaces with HMD: A Comprehensive Review and Recent Advances", "doi": null, "abstractUrl": "/journal/tg/5555/01/09785918/1DPaEdHg6KQ", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2023/4815/0/481500a053", "title": "Redirected Walking Based on Historical User Walking Data", "doi": null, "abstractUrl": "/proceedings-article/vr/2023/481500a053/1MNgUnNG7Ju", "parentPublication": { "id": "proceedings/vr/2023/4815/0", "title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/vr/2019/1377/0/08798319", "title": "Simulation and Evaluation of Three-User Redirected Walking Algorithm in Shared Physical Spaces", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08798319/1cJ1aPwr8l2", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2020/5608/0/09089554", "title": "Shaking Hands in Virtual Space: Recovery in Redirected Walking for Direct Interaction between Two Users", "doi": null, "abstractUrl": "/proceedings-article/vr/2020/09089554/1jIxaOIHjaw", "parentPublication": { "id": "proceedings/vr/2020/5608/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2021/0158/0/015800a184", "title": "A Reinforcement Learning Approach to Redirected Walking with Passive Haptic Feedback", "doi": null, "abstractUrl": "/proceedings-article/ismar/2021/015800a184/1yeCXhKVTXy", "parentPublication": { "id": "proceedings/ismar/2021/0158/0", "title": "2021 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }