data
dict
{ "issue": { "id": "1BtbeKGFJzW", "title": "April", "year": "2022", "issueNum": "04", "idPrefix": "tp", "pubType": "journal", "volume": "44", "label": "April", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1ogEwfwfCjC", "doi": "10.1109/TPAMI.2020.3034267", "abstract": "Although generative adversarial networks (GANs) have made significant progress in face synthesis, there lacks enough understanding of what GANs have learned in the latent representation to map a random code to a photo-realistic image. In this work, we propose a framework called InterFaceGAN to interpret the disentangled face representation learned by the state-of-the-art GAN models and study the properties of the facial semantics encoded in the latent space. We first find that GANs learn various semantics in some linear subspaces of the latent space. After identifying these subspaces, we can realistically manipulate the corresponding facial attributes without retraining the model. We then conduct a detailed study on the correlation between different semantics and manage to better disentangle them via subspace projection, resulting in more precise control of the attribute manipulation. Besides manipulating the gender, age, expression, and presence of eyeglasses, we can even alter the face pose and fix the artifacts accidentally made by GANs. Furthermore, we perform an in-depth face identity analysis and a layer-wise analysis to evaluate the editing results quantitatively. Finally, we apply our approach to real face editing by employing GAN inversion approaches and explicitly training feed-forward models based on the synthetic data established by InterFaceGAN. 
Extensive experimental results suggest that learning to synthesize faces spontaneously brings a disentangled and controllable face representation.", "abstracts": [ { "abstractType": "Regular", "content": "Although generative adversarial networks (GANs) have made significant progress in face synthesis, there lacks enough understanding of what GANs have learned in the latent representation to map a random code to a photo-realistic image. In this work, we propose a framework called InterFaceGAN to interpret the disentangled face representation learned by the state-of-the-art GAN models and study the properties of the facial semantics encoded in the latent space. We first find that GANs learn various semantics in some linear subspaces of the latent space. After identifying these subspaces, we can realistically manipulate the corresponding facial attributes without retraining the model. We then conduct a detailed study on the correlation between different semantics and manage to better disentangle them via subspace projection, resulting in more precise control of the attribute manipulation. Besides manipulating the gender, age, expression, and presence of eyeglasses, we can even alter the face pose and fix the artifacts accidentally made by GANs. Furthermore, we perform an in-depth face identity analysis and a layer-wise analysis to evaluate the editing results quantitatively. Finally, we apply our approach to real face editing by employing GAN inversion approaches and explicitly training feed-forward models based on the synthetic data established by InterFaceGAN. 
Extensive experimental results suggest that learning to synthesize faces spontaneously brings a disentangled and controllable face representation.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Although generative adversarial networks (GANs) have made significant progress in face synthesis, there lacks enough understanding of what GANs have learned in the latent representation to map a random code to a photo-realistic image. In this work, we propose a framework called InterFaceGAN to interpret the disentangled face representation learned by the state-of-the-art GAN models and study the properties of the facial semantics encoded in the latent space. We first find that GANs learn various semantics in some linear subspaces of the latent space. After identifying these subspaces, we can realistically manipulate the corresponding facial attributes without retraining the model. We then conduct a detailed study on the correlation between different semantics and manage to better disentangle them via subspace projection, resulting in more precise control of the attribute manipulation. Besides manipulating the gender, age, expression, and presence of eyeglasses, we can even alter the face pose and fix the artifacts accidentally made by GANs. Furthermore, we perform an in-depth face identity analysis and a layer-wise analysis to evaluate the editing results quantitatively. Finally, we apply our approach to real face editing by employing GAN inversion approaches and explicitly training feed-forward models based on the synthetic data established by InterFaceGAN. 
Extensive experimental results suggest that learning to synthesize faces spontaneously brings a disentangled and controllable face representation.", "title": "InterFaceGAN: Interpreting the Disentangled Face Representation Learned by GANs", "normalizedTitle": "InterFaceGAN: Interpreting the Disentangled Face Representation Learned by GANs", "fno": "09241434", "hasPdf": true, "idPrefix": "tp", "keywords": [ "Face Recognition", "Image Representation", "Learning Artificial Intelligence", "Neural Nets", "Realistic Images", "Inter Face GAN", "Disentangled Face Representation", "GAN", "Generative Adversarial Networks", "Face Synthesis", "Latent Representation", "Photo Realistic Image", "State Of The Art GAN Models", "Facial Semantics", "Latent Space", "Corresponding Facial Attributes", "Attribute Manipulation", "Face Pose", "In Depth Face Identity Analysis", "Face Editing", "GAN Inversion Approaches", "Controllable Face Representation", "Semantics", "Faces", "Gallium Nitride", "Generative Adversarial Networks", "Generators", "Aerospace Electronics", "Facial Features", "Generative Adversarial Network", "Face Editing", "Interpretability", "Explainable Artificial Intelligence", "Disentanglement" ], "authors": [ { "givenName": "Yujun", "surname": "Shen", "fullName": "Yujun Shen", "affiliation": "Department of Information Engineering, The Chinese University of Hong Kong, Hong Kong", "__typename": "ArticleAuthorType" }, { "givenName": "Ceyuan", "surname": "Yang", "fullName": "Ceyuan Yang", "affiliation": "Department of Information Engineering, The Chinese University of Hong Kong, Hong Kong", "__typename": "ArticleAuthorType" }, { "givenName": "Xiaoou", "surname": "Tang", "fullName": "Xiaoou Tang", "affiliation": "Department of Information Engineering, The Chinese University of Hong Kong, Hong Kong", "__typename": "ArticleAuthorType" }, { "givenName": "Bolei", "surname": "Zhou", "fullName": "Bolei Zhou", "affiliation": "Department of Information Engineering, The Chinese 
University of Hong Kong, Hong Kong", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "04", "pubDate": "2022-04-01 00:00:00", "pubType": "trans", "pages": "2004-2018", "year": "2022", "issn": "0162-8828", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/cvpr/2017/0457/0/0457b283", "title": "Disentangled Representation Learning GAN for Pose-Invariant Face Recognition", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2017/0457b283/12OmNy314jC", "parentPublication": { "id": "proceedings/cvpr/2017/0457/0", "title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2018/4886/0/488601a719", "title": "Structured GANs", "doi": null, "abstractUrl": "/proceedings-article/wacv/2018/488601a719/12OmNzaQomr", "parentPublication": { "id": "proceedings/wacv/2018/4886/0", "title": "2018 IEEE Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2018/3788/0/08545633", "title": "Facial Attribute Editing by Latent Space Adversarial Variational Autoencoders", "doi": null, "abstractUrl": "/proceedings-article/icpr/2018/08545633/17D45XfSEVf", "parentPublication": { "id": "proceedings/icpr/2018/3788/0", "title": "2018 24th International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2019/2506/0/250600c371", "title": "Face Synthesis and Recognition Using Disentangled Representation-Learning Wasserstein GAN", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2019/250600c371/1iTvl3KxDBS", "parentPublication": { "id": "proceedings/cvprw/2019/2506/0", "title": "2019 
IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/fg/2020/3079/0/307900a283", "title": "Taking Control of Intra-class Variation in Conditional GANs Under Weak Supervision", "doi": null, "abstractUrl": "/proceedings-article/fg/2020/307900a283/1kecI6Mh9Sg", "parentPublication": { "id": "proceedings/fg/2020/3079/0/", "title": "2020 15th IEEE International Conference on Automatic Face and Gesture Recognition (FG 2020) (FG)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2020/7168/0/716800f153", "title": "Disentangled and Controllable Face Image Generation via 3D Imitative-Contrastive Learning", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2020/716800f153/1m3nE7TQglW", "parentPublication": { "id": "proceedings/cvpr/2020/7168/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2020/7168/0/716800j240", "title": "Interpreting the Latent Space of GANs for Semantic Face Editing", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2020/716800j240/1m3nRsHWYJa", "parentPublication": { "id": "proceedings/cvpr/2020/7168/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2020/7168/0/716800g677", "title": "Cross-Domain Face Presentation Attack Detection via Multi-Domain Disentangled Representation Learning", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2020/716800g677/1m3nzaJSkDu", "parentPublication": { "id": "proceedings/cvpr/2020/7168/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "proceedings/ictai/2020/9228/0/922800a647", "title": "Dialog Driven Face Construction using GANs", "doi": null, "abstractUrl": "/proceedings-article/ictai/2020/922800a647/1pP3uWLzXLq", "parentPublication": { "id": "proceedings/ictai/2020/9228/0", "title": "2020 IEEE 32nd International Conference on Tools with Artificial Intelligence (ICTAI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ai/2021/01/09399843", "title": "Interpreting the Latent Space of GANs via Measuring Decoupling", "doi": null, "abstractUrl": "/journal/ai/2021/01/09399843/1sF3JUaqvAY", "parentPublication": { "id": "trans/ai", "title": "IEEE Transactions on Artificial Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "09204804", "articleId": "1nmdPM6cDpm", "__typename": "AdjacentArticleType" }, "next": { "fno": "09214476", "articleId": "1nHNEVsfYTm", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "1BtbgXQkKiI", "name": "ttp202204-09241434s1-supp1-3034267.pdf", "location": "https://www.computer.org/csdl/api/v1/extra/ttp202204-09241434s1-supp1-3034267.pdf", "extension": "pdf", "size": "1.17 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "1J9y2mtpt3a", "title": "Jan.", "year": "2023", "issueNum": "01", "idPrefix": "tg", "pubType": "journal", "volume": "29", "label": "Jan.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1H2libIERoY", "doi": "10.1109/TVCG.2022.3209379", "abstract": "Music mood classification has been a challenging problem in comparison with other music classification problems (e.g., genre, composer, or period). One solution for addressing this challenge is to use an ensemble of machine learning models. In this paper, we show that visualization techniques can effectively convey the popular prediction as well as uncertainty at different music sections along the temporal axis while enabling the analysis of individual ML models in conjunction with their application to different musical data. In addition to the traditional visual designs, such as stacked line graph, ThemeRiver, and pixel-based visualization, we introduce a new variant of ThemeRiver, called “dual-flux ThemeRiver”, which allows viewers to observe and measure the most popular prediction more easily than stacked line graph and ThemeRiver. Together with pixel-based visualization, dual-flux ThemeRiver plots can also assist in model-development workflows, in addition to annotating music using ensemble model predictions.", "abstracts": [ { "abstractType": "Regular", "content": "Music mood classification has been a challenging problem in comparison with other music classification problems (e.g., genre, composer, or period). One solution for addressing this challenge is to use an ensemble of machine learning models. In this paper, we show that visualization techniques can effectively convey the popular prediction as well as uncertainty at different music sections along the temporal axis while enabling the analysis of individual ML models in conjunction with their application to different musical data. 
In addition to the traditional visual designs, such as stacked line graph, ThemeRiver, and pixel-based visualization, we introduce a new variant of ThemeRiver, called “dual-flux ThemeRiver”, which allows viewers to observe and measure the most popular prediction more easily than stacked line graph and ThemeRiver. Together with pixel-based visualization, dual-flux ThemeRiver plots can also assist in model-development workflows, in addition to annotating music using ensemble model predictions.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Music mood classification has been a challenging problem in comparison with other music classification problems (e.g., genre, composer, or period). One solution for addressing this challenge is to use an ensemble of machine learning models. In this paper, we show that visualization techniques can effectively convey the popular prediction as well as uncertainty at different music sections along the temporal axis while enabling the analysis of individual ML models in conjunction with their application to different musical data. In addition to the traditional visual designs, such as stacked line graph, ThemeRiver, and pixel-based visualization, we introduce a new variant of ThemeRiver, called “dual-flux ThemeRiver”, which allows viewers to observe and measure the most popular prediction more easily than stacked line graph and ThemeRiver. 
Together with pixel-based visualization, dual-flux ThemeRiver plots can also assist in model-development workflows, in addition to annotating music using ensemble model predictions.", "title": "Visualizing Ensemble Predictions of Music Mood", "normalizedTitle": "Visualizing Ensemble Predictions of Music Mood", "fno": "09905423", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Data Visualisation", "Learning Artificial Intelligence", "Music", "Composer", "Different Music Sections", "Different Musical Data", "Ensemble Model Predictions", "Ensemble Predictions", "Genre", "Individual ML Models", "Machine Learning Models", "Model Development Workflows", "Music Classification Problems", "Music Mood Classification", "Pixel Based Visualization", "Popular Prediction", "Stacked Line Graph", "Temporal Axis", "Theme River Plots", "Traditional Visual Designs", "Visualization Techniques", "Mood", "Data Visualization", "Visualization", "Predictive Models", "Data Models", "Machine Learning", "Uncertainty", "Time Series Visualization", "Ensemble Learning", "Music Mood Classification" ], "authors": [ { "givenName": "Zelin", "surname": "Ye", "fullName": "Zelin Ye", "affiliation": "University of Oxford, United Kingdom", "__typename": "ArticleAuthorType" }, { "givenName": "Min", "surname": "Chen", "fullName": "Min Chen", "affiliation": "University of Oxford, United Kingdom", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "01", "pubDate": "2023-01-01 00:00:00", "pubType": "trans", "pages": "864-874", "year": "2023", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/ncm/2009/3769/0/3769b485", "title": "Acquiring Mood Information from Songs in Large Music Database", "doi": null, "abstractUrl": "/proceedings-article/ncm/2009/3769b485/12OmNB836TX", "parentPublication": { "id": 
"proceedings/ncm/2009/3769/0", "title": "Networked Computing and Advanced Information Management, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ams/2012/4730/0/4730a007", "title": "Automatic Mood Classification Model for Indian Popular Music", "doi": null, "abstractUrl": "/proceedings-article/ams/2012/4730a007/12OmNqAU6DC", "parentPublication": { "id": "proceedings/ams/2012/4730/0", "title": "Asia International Conference on Modelling & Simulation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdmw/2015/8493/0/8493b241", "title": "Music Mood Classification via Deep Belief Network", "doi": null, "abstractUrl": "/proceedings-article/icdmw/2015/8493b241/12OmNqNG3iO", "parentPublication": { "id": "proceedings/icdmw/2015/8493/0", "title": "2015 IEEE International Conference on Data Mining Workshop (ICDMW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cisp/2008/3119/5/3119e148", "title": "Discriminating Mood Taxonomy of Chinese Traditional Music and Western Classical Music with Content Feature Sets", "doi": null, "abstractUrl": "/proceedings-article/cisp/2008/3119e148/12OmNvSbBkI", "parentPublication": { "id": "proceedings/cisp/2008/3119/5", "title": "International Congress on Image and Signal Processing (CISP 2008)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/jcdl/2014/5569/0/06970230", "title": "Cross-cultural mood regression for music digital libraries", "doi": null, "abstractUrl": "/proceedings-article/jcdl/2014/06970230/12OmNwNwzFl", "parentPublication": { "id": "proceedings/jcdl/2014/5569/0", "title": "2014 IEEE/ACM Joint Conference on Digital Libraries (JCDL)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2004/8603/3/01394664", "title": "Graphical 
expression of the mood of music", "doi": null, "abstractUrl": "/proceedings-article/icme/2004/01394664/12OmNwc3wBv", "parentPublication": { "id": "proceedings/icme/2004/8603/3", "title": "2004 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmete/2016/3411/0/07938927", "title": "Mood Based Classification of Music by Analyzing Lyrical Data Using Text Mining", "doi": null, "abstractUrl": "/proceedings-article/icmete/2016/07938927/12OmNxwnct5", "parentPublication": { "id": "proceedings/icmete/2016/3411/0", "title": "2016 International Conference on Micro-Electronics and Telecommunication Engineering (ICMETE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2011/348/0/06012116", "title": "Smoodi: Mood-based music recommendation player", "doi": null, "abstractUrl": "/proceedings-article/icme/2011/06012116/12OmNyS6RHE", "parentPublication": { "id": "proceedings/icme/2011/348/0", "title": "2011 IEEE International Conference on Multimedia and Expo", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2019/2838/0/283800a352", "title": "Visualizing the Semantics of Music", "doi": null, "abstractUrl": "/proceedings-article/iv/2019/283800a352/1cMFaBDrMKA", "parentPublication": { "id": "proceedings/iv/2019/2838/0", "title": "2019 23rd International Conference Information Visualisation (IV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/transai/2020/8699/0/869900a009", "title": "Play it again IMuCo! 
Music Composition to Match your Mood", "doi": null, "abstractUrl": "/proceedings-article/transai/2020/869900a009/1oJ0ts7x4jK", "parentPublication": { "id": "proceedings/transai/2020/8699/0", "title": "2020 Second International Conference on Transdisciplinary AI (TransAI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "09904444", "articleId": "1H0GdNRvIEE", "__typename": "AdjacentArticleType" }, "next": { "fno": "09904435", "articleId": "1H1ghDpBufu", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "1J9yPiWKkco", "name": "ttg202301-09905423s1-supp1-3209379.pdf", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202301-09905423s1-supp1-3209379.pdf", "extension": "pdf", "size": "71.2 kB", "__typename": "WebExtraType" }, { "id": "1J9yOXKdWPC", "name": "ttg202301-09905423s1-supp2-3209379.mp4", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202301-09905423s1-supp2-3209379.mp4", "extension": "mp4", "size": "5.96 MB", "__typename": "WebExtraType" }, { "id": "1J9yPc7wpgc", "name": "ttg202301-09905423s1-supp3-3209379.mp4", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202301-09905423s1-supp3-3209379.mp4", "extension": "mp4", "size": "43.3 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "1M2IpVB2R3i", "title": "May", "year": "2023", "issueNum": "05", "idPrefix": "tp", "pubType": "journal", "volume": "45", "label": "May", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1Gjwzjh5yhi", "doi": "10.1109/TPAMI.2022.3203516", "abstract": "Due to the rise of spherical cameras, monocular 360<inline-formula><tex-math notation=\"LaTeX\">Z_$^\\circ$_Z</tex-math></inline-formula> depth estimation becomes an important technique for many applications (e.g., autonomous systems). Thus, state-of-the-art frameworks for monocular 360<inline-formula><tex-math notation=\"LaTeX\">Z_$^\\circ$_Z</tex-math></inline-formula> depth estimation such as bi-projection fusion in BiFuse are proposed. To train such a framework, a large number of panoramas along with the corresponding depth ground truths captured by laser sensors are required, which highly increases the cost of data collection. Moreover, since such a data collection procedure is time-consuming, the scalability of extending these methods to different scenes becomes a challenge. To this end, self-training a network for monocular depth estimation from 360<inline-formula><tex-math notation=\"LaTeX\">Z_$^\\circ$_Z</tex-math></inline-formula> videos is one way to alleviate this issue. However, there are no existing frameworks that incorporate bi-projection fusion into the self-training scheme, which highly limits the self-supervised performance since bi-projection fusion can leverage information from different projection types. In this paper, we propose BiFuse++ to explore the combination of bi-projection fusion and the self-training scenario. To be specific, we propose a new fusion module and Contrast-Aware Photometric Loss to improve the performance of BiFuse and increase the stability of self-training on real-world videos. 
We conduct both supervised and self-supervised experiments on benchmark datasets and achieve state-of-the-art performance.", "abstracts": [ { "abstractType": "Regular", "content": "Due to the rise of spherical cameras, monocular 360<inline-formula><tex-math notation=\"LaTeX\">$^\\circ$</tex-math><alternatives><mml:math><mml:msup><mml:mrow/><mml:mo>&#x2218;</mml:mo></mml:msup></mml:math><inline-graphic xlink:href=\"wang-ieq1-3203516.gif\"/></alternatives></inline-formula> depth estimation becomes an important technique for many applications (e.g., autonomous systems). Thus, state-of-the-art frameworks for monocular 360<inline-formula><tex-math notation=\"LaTeX\">$^\\circ$</tex-math><alternatives><mml:math><mml:msup><mml:mrow/><mml:mo>&#x2218;</mml:mo></mml:msup></mml:math><inline-graphic xlink:href=\"wang-ieq2-3203516.gif\"/></alternatives></inline-formula> depth estimation such as bi-projection fusion in BiFuse are proposed. To train such a framework, a large number of panoramas along with the corresponding depth ground truths captured by laser sensors are required, which highly increases the cost of data collection. Moreover, since such a data collection procedure is time-consuming, the scalability of extending these methods to different scenes becomes a challenge. To this end, self-training a network for monocular depth estimation from 360<inline-formula><tex-math notation=\"LaTeX\">$^\\circ$</tex-math><alternatives><mml:math><mml:msup><mml:mrow/><mml:mo>&#x2218;</mml:mo></mml:msup></mml:math><inline-graphic xlink:href=\"wang-ieq3-3203516.gif\"/></alternatives></inline-formula> videos is one way to alleviate this issue. However, there are no existing frameworks that incorporate bi-projection fusion into the self-training scheme, which highly limits the self-supervised performance since bi-projection fusion can leverage information from different projection types. 
In this paper, we propose BiFuse++ to explore the combination of bi-projection fusion and the self-training scenario. To be specific, we propose a new fusion module and Contrast-Aware Photometric Loss to improve the performance of BiFuse and increase the stability of self-training on real-world videos. We conduct both supervised and self-supervised experiments on benchmark datasets and achieve state-of-the-art performance.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Due to the rise of spherical cameras, monocular 360- depth estimation becomes an important technique for many applications (e.g., autonomous systems). Thus, state-of-the-art frameworks for monocular 360- depth estimation such as bi-projection fusion in BiFuse are proposed. To train such a framework, a large number of panoramas along with the corresponding depth ground truths captured by laser sensors are required, which highly increases the cost of data collection. Moreover, since such a data collection procedure is time-consuming, the scalability of extending these methods to different scenes becomes a challenge. To this end, self-training a network for monocular depth estimation from 360- videos is one way to alleviate this issue. However, there are no existing frameworks that incorporate bi-projection fusion into the self-training scheme, which highly limits the self-supervised performance since bi-projection fusion can leverage information from different projection types. In this paper, we propose BiFuse++ to explore the combination of bi-projection fusion and the self-training scenario. To be specific, we propose a new fusion module and Contrast-Aware Photometric Loss to improve the performance of BiFuse and increase the stability of self-training on real-world videos. 
We conduct both supervised and self-supervised experiments on benchmark datasets and achieve state-of-the-art performance.", "title": "BiFuse++: Self-Supervised and Efficient Bi-Projection Fusion for 360&#x00B0; Depth Estimation", "normalizedTitle": "BiFuse++: Self-Supervised and Efficient Bi-Projection Fusion for 360° Depth Estimation", "fno": "09874253", "hasPdf": true, "idPrefix": "tp", "keywords": [ "Image Fusion", "Image Motion Analysis", "Image Sensors", "Supervised Learning", "Video Signal Processing", "Bi Fuse", "Contrast Aware Photometric Loss", "Corresponding Depth Ground Truths", "Data Collection Procedure", "Different Projection Types", "Efficient Biprojection Fusion", "Incorporate Biprojection Fusion", "Laser Sensors", "Monocular 360 X 00 B 0 Depth Estimation", "Monocular Depth Estimation", "Spherical Cameras", "State Of The Art Frameworks", "Estimation", "Cameras", "Training", "Sensors", "Distortion", "Videos", "Neural Networks", "360", "Omnidirectional Vision", "Monocular Depth Estimation" ], "authors": [ { "givenName": "Fu-En", "surname": "Wang", "fullName": "Fu-En Wang", "affiliation": "Department of Electrical Engineering, National Tsing Hua University, Hsinchu, Taiwan", "__typename": "ArticleAuthorType" }, { "givenName": "Yu-Hsuan", "surname": "Yeh", "fullName": "Yu-Hsuan Yeh", "affiliation": "Department of Computer Science, National Yang Ming Chiao Tung University, Hsinchu, Taiwan", "__typename": "ArticleAuthorType" }, { "givenName": "Yi-Hsuan", "surname": "Tsai", "fullName": "Yi-Hsuan Tsai", "affiliation": "Phiar Technologies, Redwood City, CA, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Wei-Chen", "surname": "Chiu", "fullName": "Wei-Chen Chiu", "affiliation": "Department of Computer Science, National Yang Ming Chiao Tung University, Hsinchu, Taiwan", "__typename": "ArticleAuthorType" }, { "givenName": "Min", "surname": "Sun", "fullName": "Min Sun", "affiliation": "Department of Electrical Engineering, National Tsing Hua 
University, Hsinchu, Taiwan", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "05", "pubDate": "2023-05-01 00:00:00", "pubType": "trans", "pages": "5448-5460", "year": "2023", "issn": "0162-8828", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "trans/td/2022/12/09732663", "title": "Optimal Convex Hull Formation on a Grid by Asynchronous Robots With Lights", "doi": null, "abstractUrl": "/journal/td/2022/12/09732663/1BD8Qcr91gQ", "parentPublication": { "id": "trans/td", "title": "IEEE Transactions on Parallel & Distributed Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/06/08933488", "title": "Dynamic Voronoi Diagram for Moving Disks", "doi": null, "abstractUrl": "/journal/tg/2021/06/08933488/1fOf96QTKQE", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tc/2020/06/08976264", "title": "Algorithms for Inversion Mod &#x3C;inline-formula&#x3E;&#x3C;tex-math notation=&#x22;LaTeX&#x22;&#x3E;Z_$p^k$_Z&#x3C;/tex-math&#x3E;&#x3C;/inline-formula&#x3E;", "doi": null, "abstractUrl": "/journal/tc/2020/06/08976264/1h0W7qmGRHO", "parentPublication": { "id": "trans/tc", "title": "IEEE Transactions on Computers", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tk/2022/01/09039684", "title": "HyperMinHash: MinHash in LogLog Space", "doi": null, "abstractUrl": "/journal/tk/2022/01/09039684/1igS2G8DNfi", "parentPublication": { "id": "trans/tk", "title": "IEEE Transactions on Knowledge & Data Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2022/03/09185018", "title": "Fuzzy-Match Repair Guided by Quality 
Estimation", "doi": null, "abstractUrl": "/journal/tp/2022/03/09185018/1mNmWk2JvZ6", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/08/09321103", "title": "Perceptual Quality Assessment of Omnidirectional Images as Moving Camera Videos", "doi": null, "abstractUrl": "/journal/tg/2022/08/09321103/1qkwGXVtyQo", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2022/10/09497715", "title": "Spherical DNNs and Their Applications in 360<inline-formula><tex-math notation=\"LaTeX\">Z_$^\\circ$_Z</tex-math></inline-formula> Images and Videos", "doi": null, "abstractUrl": "/journal/tp/2022/10/09497715/1vzY9kuYnwA", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2022/11/09541093", "title": "Learning Spherical Convolution for <inline-formula><tex-math notation=\"LaTeX\">Z_$360^{\\circ }$_Z</tex-math></inline-formula> Recognition", "doi": null, "abstractUrl": "/journal/tp/2022/11/09541093/1x3fMiX57S8", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tm/2023/05/09606582", "title": "HearFit<sup>+</sup>: Personalized Fitness Monitoring via Audio Signals on Smart Speakers", "doi": null, "abstractUrl": "/journal/tm/2023/05/09606582/1ymESBnYucM", "parentPublication": { "id": "trans/tm", "title": "IEEE Transactions on Mobile Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2023/04/09672741", 
"title": "Multisensory 360&#x00B0; Videos Under Varying Resolution Levels Enhance Presence", "doi": null, "abstractUrl": "/journal/tg/2023/04/09672741/1zWzJCeaeGc", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "09912362", "articleId": "1HeiINuN2bm", "__typename": "AdjacentArticleType" }, "next": { "fno": "09870558", "articleId": "1GgcMrZ2dPi", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "1txPs9C3tok", "title": "June", "year": "2021", "issueNum": "06", "idPrefix": "tg", "pubType": "journal", "volume": "27", "label": "June", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1fOf96QTKQE", "doi": "10.1109/TVCG.2019.2959321", "abstract": "Voronoi diagrams are powerful for understanding spatial properties. However, few reports have been made for moving generators despite their important applications. We present a topology-oriented event-increment (TOI-E) algorithm for constructing a Voronoi diagram of moving circular disks in the plane over the time horizon <inline-formula><tex-math notation=\"LaTeX\">Z_$[0, t^{\\infty })$_Z</tex-math></inline-formula>. The proposed TOI-E algorithm computes the event history of the Voronoi diagram over the entire time horizon in <inline-formula><tex-math notation=\"LaTeX\">Z_$O(k_F \\log n + k_C n \\log n)$_Z</tex-math></inline-formula> time with <inline-formula><tex-math notation=\"LaTeX\">Z_$O(n \\log n)$_Z</tex-math></inline-formula> preprocessing time and <inline-formula><tex-math notation=\"LaTeX\">Z_$O(n + k_F + k_C)$_Z</tex-math></inline-formula> memory for <inline-formula><tex-math notation=\"LaTeX\">Z_$n$_Z</tex-math></inline-formula> disk generators, <inline-formula><tex-math notation=\"LaTeX\">Z_$k_F$_Z</tex-math></inline-formula> edge flips, and <inline-formula><tex-math notation=\"LaTeX\">Z_$k_C$_Z</tex-math></inline-formula> disk collisions during the time horizon. 
Given an event history, the Voronoi diagram of an arbitrary moment <inline-formula><tex-math notation=\"LaTeX\">Z_$t^{\\ast}&#x003C;t^{\\infty }$_Z</tex-math></inline-formula> can be constructed in <inline-formula><tex-math notation=\"LaTeX\">Z_$O(k^{\\ast} + n)$_Z</tex-math></inline-formula> time where <inline-formula><tex-math notation=\"LaTeX\">Z_$k^{\\ast}$_Z</tex-math></inline-formula> represents the number of events in <inline-formula><tex-math notation=\"LaTeX\">Z_$[0, t^{\\ast})$_Z</tex-math></inline-formula>. An example of the collision avoidance problem among moving disks is given by predicting future conjunctions among the disks using the proposed algorithm. Dynamic Voronoi diagrams will be very useful as a platform for the planning and management of the traffics of unmanned vehicles such as cars on street, vessels on surface, drones and airplanes in air, and satellites in geospace.", "abstracts": [ { "abstractType": "Regular", "content": "Voronoi diagrams are powerful for understanding spatial properties. However, few reports have been made for moving generators despite their important applications. We present a topology-oriented event-increment (TOI-E) algorithm for constructing a Voronoi diagram of moving circular disks in the plane over the time horizon <inline-formula><tex-math notation=\"LaTeX\">$[0, t^{\\infty })$</tex-math><alternatives><mml:math><mml:mrow><mml:mo>[</mml:mo><mml:mn>0</mml:mn><mml:mo>,</mml:mo><mml:msup><mml:mi>t</mml:mi><mml:mi>&#x221E;</mml:mi></mml:msup><mml:mo>)</mml:mo></mml:mrow></mml:math><inline-graphic xlink:href=\"kim-ieq1-2959321.gif\"/></alternatives></inline-formula>. 
The proposed TOI-E algorithm computes the event history of the Voronoi diagram over the entire time horizon in <inline-formula><tex-math notation=\"LaTeX\">$O(k_F \\log n + k_C n \\log n)$</tex-math><alternatives><mml:math><mml:mrow><mml:mi>O</mml:mi><mml:mo>(</mml:mo><mml:msub><mml:mi>k</mml:mi><mml:mi>F</mml:mi></mml:msub><mml:mo form=\"prefix\">log</mml:mo><mml:mi>n</mml:mi><mml:mo>+</mml:mo><mml:msub><mml:mi>k</mml:mi><mml:mi>C</mml:mi></mml:msub><mml:mi>n</mml:mi><mml:mo form=\"prefix\">log</mml:mo><mml:mi>n</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:math><inline-graphic xlink:href=\"kim-ieq2-2959321.gif\"/></alternatives></inline-formula> time with <inline-formula><tex-math notation=\"LaTeX\">$O(n \\log n)$</tex-math><alternatives><mml:math><mml:mrow><mml:mi>O</mml:mi><mml:mo>(</mml:mo><mml:mi>n</mml:mi><mml:mo form=\"prefix\">log</mml:mo><mml:mi>n</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:math><inline-graphic xlink:href=\"kim-ieq3-2959321.gif\"/></alternatives></inline-formula> preprocessing time and <inline-formula><tex-math notation=\"LaTeX\">$O(n + k_F + k_C)$</tex-math><alternatives><mml:math><mml:mrow><mml:mi>O</mml:mi><mml:mo>(</mml:mo><mml:mi>n</mml:mi><mml:mo>+</mml:mo><mml:msub><mml:mi>k</mml:mi><mml:mi>F</mml:mi></mml:msub><mml:mo>+</mml:mo><mml:msub><mml:mi>k</mml:mi><mml:mi>C</mml:mi></mml:msub><mml:mo>)</mml:mo></mml:mrow></mml:math><inline-graphic xlink:href=\"kim-ieq4-2959321.gif\"/></alternatives></inline-formula> memory for <inline-formula><tex-math notation=\"LaTeX\">$n$</tex-math><alternatives><mml:math><mml:mi>n</mml:mi></mml:math><inline-graphic xlink:href=\"kim-ieq5-2959321.gif\"/></alternatives></inline-formula> disk generators, <inline-formula><tex-math notation=\"LaTeX\">$k_F$</tex-math><alternatives><mml:math><mml:msub><mml:mi>k</mml:mi><mml:mi>F</mml:mi></mml:msub></mml:math><inline-graphic xlink:href=\"kim-ieq6-2959321.gif\"/></alternatives></inline-formula> edge flips, and <inline-formula><tex-math 
notation=\"LaTeX\">$k_C$</tex-math><alternatives><mml:math><mml:msub><mml:mi>k</mml:mi><mml:mi>C</mml:mi></mml:msub></mml:math><inline-graphic xlink:href=\"kim-ieq7-2959321.gif\"/></alternatives></inline-formula> disk collisions during the time horizon. Given an event history, the Voronoi diagram of an arbitrary moment <inline-formula><tex-math notation=\"LaTeX\">$t^{\\ast}&#x003C;t^{\\infty }$</tex-math><alternatives><mml:math><mml:mrow><mml:msup><mml:mi>t</mml:mi><mml:mo>&#x002A;</mml:mo></mml:msup><mml:mo>&#x003C;</mml:mo><mml:msup><mml:mi>t</mml:mi><mml:mi>&#x221E;</mml:mi></mml:msup></mml:mrow></mml:math><inline-graphic xlink:href=\"kim-ieq8-2959321.gif\"/></alternatives></inline-formula> can be constructed in <inline-formula><tex-math notation=\"LaTeX\">$O(k^{\\ast} + n)$</tex-math><alternatives><mml:math><mml:mrow><mml:mi>O</mml:mi><mml:mo>(</mml:mo><mml:msup><mml:mi>k</mml:mi><mml:mo>&#x002A;</mml:mo></mml:msup><mml:mo>+</mml:mo><mml:mi>n</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:math><inline-graphic xlink:href=\"kim-ieq9-2959321.gif\"/></alternatives></inline-formula> time where <inline-formula><tex-math notation=\"LaTeX\">$k^{\\ast}$</tex-math><alternatives><mml:math><mml:msup><mml:mi>k</mml:mi><mml:mo>&#x002A;</mml:mo></mml:msup></mml:math><inline-graphic xlink:href=\"kim-ieq10-2959321.gif\"/></alternatives></inline-formula> represents the number of events in <inline-formula><tex-math notation=\"LaTeX\">$[0, t^{\\ast})$</tex-math><alternatives><mml:math><mml:mrow><mml:mo>[</mml:mo><mml:mn>0</mml:mn><mml:mo>,</mml:mo><mml:msup><mml:mi>t</mml:mi><mml:mo>&#x002A;</mml:mo></mml:msup><mml:mo>)</mml:mo></mml:mrow></mml:math><inline-graphic xlink:href=\"kim-ieq11-2959321.gif\"/></alternatives></inline-formula>. An example of the collision avoidance problem among moving disks is given by predicting future conjunctions among the disks using the proposed algorithm. 
Dynamic Voronoi diagrams will be very useful as a platform for the planning and management of the traffics of unmanned vehicles such as cars on street, vessels on surface, drones and airplanes in air, and satellites in geospace.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Voronoi diagrams are powerful for understanding spatial properties. However, few reports have been made for moving generators despite their important applications. We present a topology-oriented event-increment (TOI-E) algorithm for constructing a Voronoi diagram of moving circular disks in the plane over the time horizon -. The proposed TOI-E algorithm computes the event history of the Voronoi diagram over the entire time horizon in - time with - preprocessing time and - memory for - disk generators, - edge flips, and - disk collisions during the time horizon. Given an event history, the Voronoi diagram of an arbitrary moment - can be constructed in - time where - represents the number of events in -. An example of the collision avoidance problem among moving disks is given by predicting future conjunctions among the disks using the proposed algorithm. 
Dynamic Voronoi diagrams will be very useful as a platform for the planning and management of the traffics of unmanned vehicles such as cars on street, vessels on surface, drones and airplanes in air, and satellites in geospace.", "title": "Dynamic Voronoi Diagram for Moving Disks", "normalizedTitle": "Dynamic Voronoi Diagram for Moving Disks", "fno": "08933488", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Drones", "Heuristic Algorithms", "Vehicle Dynamics", "Generators", "Prediction Algorithms", "Probes", "Collision Avoidance", "Unmanned Vehicles", "Moving Vehicles", "Path Planning", "Collision Avoidance", "Topology Event", "Weighted Voronoi Diagram" ], "authors": [ { "givenName": "Chanyoung", "surname": "Song", "fullName": "Chanyoung Song", "affiliation": "School of Mechanical Engineering, Hanyang Univeristy, Seoul, South Korea", "__typename": "ArticleAuthorType" }, { "givenName": "Jehyun", "surname": "Cha", "fullName": "Jehyun Cha", "affiliation": "School of Mechanical Engineering, Hanyang Univeristy, Seoul, South Korea", "__typename": "ArticleAuthorType" }, { "givenName": "Mokwon", "surname": "Lee", "fullName": "Mokwon Lee", "affiliation": "School of Mechanical Engineering, Hanyang Univeristy, Seoul, South Korea", "__typename": "ArticleAuthorType" }, { "givenName": "Deok-Soo", "surname": "Kim", "fullName": "Deok-Soo Kim", "affiliation": "Voronoi Diagram Research Center and HYU-HPSTAR-CIS Global High Pressure Research Center and School of Mechanical Engineering, Hanyang Univeristy, Seoul, South Korea", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": false, "showRecommendedArticles": true, "isOpenAccess": true, "issueNum": "06", "pubDate": "2021-06-01 00:00:00", "pubType": "trans", "pages": "2923-2940", "year": "2021", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "trans/tb/2019/06/08371302", "title": "Efficient Algorithms for Finding the Closest 
<inline-formula><tex-math notation=\"LaTeX\">Z_$l$_Z</tex-math></inline-formula>-Mers in Biological Data", "doi": null, "abstractUrl": "/journal/tb/2019/06/08371302/13rRUxlgyai", "parentPublication": { "id": "trans/tb", "title": "IEEE/ACM Transactions on Computational Biology and Bioinformatics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/td/2019/04/08468118", "title": "Online Job Scheduling with Redundancy and Opportunistic Checkpointing: A Speedup-Function-Based Analysis", "doi": null, "abstractUrl": "/journal/td/2019/04/08468118/18l6N8lHxpS", "parentPublication": { "id": "trans/td", "title": "IEEE Transactions on Parallel & Distributed Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tk/2023/05/09712197", "title": "Fast LDP-MST: An Efficient Density-Peak-Based Clustering Method for Large-Size Datasets", "doi": null, "abstractUrl": "/journal/tk/2023/05/09712197/1AUkecqbRok", "parentPublication": { "id": "trans/tk", "title": "IEEE Transactions on Knowledge & Data Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tc/2020/06/08976264", "title": "Algorithms for Inversion Mod &#x3C;inline-formula&#x3E;&#x3C;tex-math notation=&#x22;LaTeX&#x22;&#x3E;Z_$p^k$_Z&#x3C;/tex-math&#x3E;&#x3C;/inline-formula&#x3E;", "doi": null, "abstractUrl": "/journal/tc/2020/06/08976264/1h0W7qmGRHO", "parentPublication": { "id": "trans/tc", "title": "IEEE Transactions on Computers", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tk/2022/01/09037115", "title": "Aligning Points to Lines: Provable Approximations", "doi": null, "abstractUrl": "/journal/tk/2022/01/09037115/1igMO6tI3Is", "parentPublication": { "id": "trans/tk", "title": "IEEE Transactions on Knowledge & Data Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"trans/tk/2022/04/09121771", "title": "Continuous Monitoring of Maximum Clique Over Dynamic Graphs", "doi": null, "abstractUrl": "/journal/tk/2022/04/09121771/1kMT4CxqinC", "parentPublication": { "id": "trans/tk", "title": "IEEE Transactions on Knowledge & Data Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tk/2022/07/09199134", "title": "Computing K-Cores in Large Uncertain Graphs: An Index-Based Optimal Approach", "doi": null, "abstractUrl": "/journal/tk/2022/07/09199134/1naBq7vTUIw", "parentPublication": { "id": "trans/tk", "title": "IEEE Transactions on Knowledge & Data Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tm/2022/06/09242286", "title": "Wireless Powered Mobile Edge Computing: Dynamic Resource Allocation and Throughput Maximization", "doi": null, "abstractUrl": "/journal/tm/2022/06/09242286/1oijtIm7Ecg", "parentPublication": { "id": "trans/tm", "title": "IEEE Transactions on Mobile Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tk/2022/08/09244575", "title": "Distributed Density Peaks Clustering Revisited", "doi": null, "abstractUrl": "/journal/tk/2022/08/09244575/1ojYk1yEY1i", "parentPublication": { "id": "trans/tk", "title": "IEEE Transactions on Knowledge & Data Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2023/04/09650532", "title": "A Variational Framework for Curve Shortening in Various Geometric Domains", "doi": null, "abstractUrl": "/journal/tg/2023/04/09650532/1zkoVsoJeow", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "09349198", "articleId": "1qYmbJluuBi", "__typename": "AdjacentArticleType" }, "next": { "fno": 
"08930077", "articleId": "1fCCO10cYW4", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "1txPFLtNApW", "name": "ttg202106-08933488s1-tvcg-2959321-mm.zip", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202106-08933488s1-tvcg-2959321-mm.zip", "extension": "zip", "size": "82.5 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "1ECXHMu0OWc", "title": "Aug.", "year": "2022", "issueNum": "08", "idPrefix": "tg", "pubType": "journal", "volume": "28", "label": "Aug.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1qkwGXVtyQo", "doi": "10.1109/TVCG.2021.3050888", "abstract": "Omnidirectional images (also referred to as static 360<inline-formula><tex-math notation=\"LaTeX\">Z_$^{\\circ }$_Z</tex-math></inline-formula> panoramas) impose viewing conditions much different from those of regular 2D images. How do humans perceive image distortions in immersive virtual reality (VR) environments is an important problem which receives less attention. We argue that, apart from the distorted panorama itself, two types of VR viewing conditions are crucial in determining the viewing behaviors of users and the perceived quality of the panorama: the starting point and the exploration time. We first carry out a psychophysical experiment to investigate the interplay among the VR viewing conditions, the user viewing behaviors, and the perceived quality of 360<inline-formula><tex-math notation=\"LaTeX\">Z_$^{\\circ }$_Z</tex-math></inline-formula> images. Then, we provide a thorough analysis of the collected human data, leading to several interesting findings. Moreover, we propose a computational framework for objective quality assessment of 360<inline-formula><tex-math notation=\"LaTeX\">Z_$^{\\circ }$_Z</tex-math></inline-formula> images, embodying viewing conditions and behaviors in a delightful way. Specifically, we first transform an omnidirectional image to several video representations using different user viewing behaviors under different viewing conditions. We then leverage advanced 2D full-reference video quality models to compute the perceived quality. 
We construct a set of specific quality measures within the proposed framework, and demonstrate their promises on three VR quality databases.", "abstracts": [ { "abstractType": "Regular", "content": "Omnidirectional images (also referred to as static 360<inline-formula><tex-math notation=\"LaTeX\">$^{\\circ }$</tex-math><alternatives><mml:math><mml:msup><mml:mrow/><mml:mo>&#x2218;</mml:mo></mml:msup></mml:math><inline-graphic xlink:href=\"fang-ieq1-3050888.gif\"/></alternatives></inline-formula> panoramas) impose viewing conditions much different from those of regular 2D images. How do humans perceive image distortions in immersive virtual reality (VR) environments is an important problem which receives less attention. We argue that, apart from the distorted panorama itself, two types of VR viewing conditions are crucial in determining the viewing behaviors of users and the perceived quality of the panorama: the starting point and the exploration time. We first carry out a psychophysical experiment to investigate the interplay among the VR viewing conditions, the user viewing behaviors, and the perceived quality of 360<inline-formula><tex-math notation=\"LaTeX\">$^{\\circ }$</tex-math><alternatives><mml:math><mml:msup><mml:mrow/><mml:mo>&#x2218;</mml:mo></mml:msup></mml:math><inline-graphic xlink:href=\"fang-ieq2-3050888.gif\"/></alternatives></inline-formula> images. Then, we provide a thorough analysis of the collected human data, leading to several interesting findings. Moreover, we propose a computational framework for objective quality assessment of 360<inline-formula><tex-math notation=\"LaTeX\">$^{\\circ }$</tex-math><alternatives><mml:math><mml:msup><mml:mrow/><mml:mo>&#x2218;</mml:mo></mml:msup></mml:math><inline-graphic xlink:href=\"fang-ieq3-3050888.gif\"/></alternatives></inline-formula> images, embodying viewing conditions and behaviors in a delightful way. 
Specifically, we first transform an omnidirectional image to several video representations using different user viewing behaviors under different viewing conditions. We then leverage advanced 2D full-reference video quality models to compute the perceived quality. We construct a set of specific quality measures within the proposed framework, and demonstrate their promises on three VR quality databases.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Omnidirectional images (also referred to as static 360- panoramas) impose viewing conditions much different from those of regular 2D images. How do humans perceive image distortions in immersive virtual reality (VR) environments is an important problem which receives less attention. We argue that, apart from the distorted panorama itself, two types of VR viewing conditions are crucial in determining the viewing behaviors of users and the perceived quality of the panorama: the starting point and the exploration time. We first carry out a psychophysical experiment to investigate the interplay among the VR viewing conditions, the user viewing behaviors, and the perceived quality of 360- images. Then, we provide a thorough analysis of the collected human data, leading to several interesting findings. Moreover, we propose a computational framework for objective quality assessment of 360- images, embodying viewing conditions and behaviors in a delightful way. Specifically, we first transform an omnidirectional image to several video representations using different user viewing behaviors under different viewing conditions. We then leverage advanced 2D full-reference video quality models to compute the perceived quality. 
We construct a set of specific quality measures within the proposed framework, and demonstrate their promises on three VR quality databases.", "title": "Perceptual Quality Assessment of Omnidirectional Images as Moving Camera Videos", "normalizedTitle": "Perceptual Quality Assessment of Omnidirectional Images as Moving Camera Videos", "fno": "09321103", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Video Coding", "Video Signal Processing", "Virtual Reality", "Visual Perception", "Perceptual Quality Assessment", "Omnidirectional Image", "Camera Videos", "Static 360 X 00 B 0 Panoramas", "Regular 2 D Images", "Image Distortions", "Immersive Virtual Reality Environments", "Distorted Panorama", "VR Viewing Conditions", "Perceived Quality", "User Viewing Behaviors", "Collected Human Data", "Objective Quality Assessment", "Different User Viewing", "Different Viewing Conditions", "Full Reference Video Quality Models", "Specific Quality Measures", "VR Quality Databases", "Two Dimensional Displays", "Quality Assessment", "Distortion", "Videos", "Image Coding", "Computational Modeling", "Visualization", "Omnidirectional Images", "Perceptual Quality Assessment", "Virtual Reality" ], "authors": [ { "givenName": "Xiangjie", "surname": "Sui", "fullName": "Xiangjie Sui", "affiliation": "School of Information Management, Jiangxi University of Finance and Economics, Nanchang, Jiangxi, China", "__typename": "ArticleAuthorType" }, { "givenName": "Kede", "surname": "Ma", "fullName": "Kede Ma", "affiliation": "Department of Computer Science, City University of Hong Kong, Kowloon, Hong Kong", "__typename": "ArticleAuthorType" }, { "givenName": "Yiru", "surname": "Yao", "fullName": "Yiru Yao", "affiliation": "School of Information Management, Jiangxi University of Finance and Economics, Nanchang, Jiangxi, China", "__typename": "ArticleAuthorType" }, { "givenName": "Yuming", "surname": "Fang", "fullName": "Yuming Fang", "affiliation": "School of Information Management, Jiangxi 
University of Finance and Economics, Nanchang, Jiangxi, China", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "08", "pubDate": "2022-08-01 00:00:00", "pubType": "trans", "pages": "3022-3034", "year": "2022", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "trans/td/2022/12/09732663", "title": "Optimal Convex Hull Formation on a Grid by Asynchronous Robots With Lights", "doi": null, "abstractUrl": "/journal/td/2022/12/09732663/1BD8Qcr91gQ", "parentPublication": { "id": "trans/td", "title": "IEEE Transactions on Parallel & Distributed Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2023/05/09874253", "title": "BiFuse++: Self-Supervised and Efficient Bi-Projection Fusion for 360&#x00B0; Depth Estimation", "doi": null, "abstractUrl": "/journal/tp/2023/05/09874253/1Gjwzjh5yhi", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/06/08933488", "title": "Dynamic Voronoi Diagram for Moving Disks", "doi": null, "abstractUrl": "/journal/tg/2021/06/08933488/1fOf96QTKQE", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tc/2020/06/08976264", "title": "Algorithms for Inversion Mod &#x3C;inline-formula&#x3E;&#x3C;tex-math notation=&#x22;LaTeX&#x22;&#x3E;Z_$p^k$_Z&#x3C;/tex-math&#x3E;&#x3C;/inline-formula&#x3E;", "doi": null, "abstractUrl": "/journal/tc/2020/06/08976264/1h0W7qmGRHO", "parentPublication": { "id": "trans/tc", "title": "IEEE Transactions on Computers", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "trans/tk/2022/01/09037115", "title": "Aligning Points to Lines: Provable Approximations", "doi": null, "abstractUrl": "/journal/tk/2022/01/09037115/1igMO6tI3Is", "parentPublication": { "id": "trans/tk", "title": "IEEE Transactions on Knowledge & Data Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2022/03/09185018", "title": "Fuzzy-Match Repair Guided by Quality Estimation", "doi": null, "abstractUrl": "/journal/tp/2022/03/09185018/1mNmWk2JvZ6", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2022/10/09497715", "title": "Spherical DNNs and Their Applications in 360<inline-formula><tex-math notation=\"LaTeX\">Z_$^\\circ$_Z</tex-math></inline-formula> Images and Videos", "doi": null, "abstractUrl": "/journal/tp/2022/10/09497715/1vzY9kuYnwA", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tk/2023/03/09525250", "title": "Fast Reachability Queries Answering Based on <inline-formula><tex-math notation=\"LaTeX\">Z_$\\mathsf{RCN}$_Z</tex-math></inline-formula> Reduction", "doi": null, "abstractUrl": "/journal/tk/2023/03/09525250/1wuoOp439OU", "parentPublication": { "id": "trans/tk", "title": "IEEE Transactions on Knowledge & Data Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2022/11/09541093", "title": "Learning Spherical Convolution for <inline-formula><tex-math notation=\"LaTeX\">Z_$360^{\\circ }$_Z</tex-math></inline-formula> Recognition", "doi": null, "abstractUrl": "/journal/tp/2022/11/09541093/1x3fMiX57S8", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern 
Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2023/04/09672741", "title": "Multisensory 360&#x00B0; Videos Under Varying Resolution Levels Enhance Presence", "doi": null, "abstractUrl": "/journal/tg/2023/04/09672741/1zWzJCeaeGc", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "09286680", "articleId": "1por35qBdQs", "__typename": "AdjacentArticleType" }, "next": { "fno": "09293401", "articleId": "1pyonpfZjoY", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "1GF6jMpqNjy", "title": "Oct.", "year": "2022", "issueNum": "10", "idPrefix": "tp", "pubType": "journal", "volume": "44", "label": "Oct.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1vzY9kuYnwA", "doi": "10.1109/TPAMI.2021.3100259", "abstract": "Spherical images or videos, as typical non-euclidean data, are usually stored in the form of 2D panoramas obtained through an equirectangular projection, which is neither equal area nor conformal. The distortion caused by the projection limits the performance of vanilla Deep Neural Networks (DNNs) designed for traditional euclidean data. In this paper, we design a novel Spherical Deep Neural Network (DNN) to deal with the distortion caused by the equirectangular projection. Specifically, we customize a set of components, including a spherical convolution, a spherical pooling, a spherical ConvLSTM cell and a spherical MSE loss, as the replacements of their counterparts in vanilla DNNs for spherical data. The core idea is to change the identical behavior of the conventional operations in vanilla DNNs across different feature patches so that they will be adjusted to the distortion caused by the variance of sampling rate among different feature patches. We demonstrate the effectiveness of our Spherical DNNs for saliency detection and gaze estimation in <inline-formula><tex-math notation=\"LaTeX\">Z_$360^\\circ$_Z</tex-math></inline-formula> videos. For saliency detection, we take the temporal coherence of an observer&#x2019;s viewing process into consideration and propose to use a Spherical U-Net and a Spherical ConvLSTM to predict the saliency maps for each frame sequentially. As for gaze prediction, we propose to leverage a Spherical Encoder Module to extract spatial panoramic features, then we combine them with the gaze trajectory feature extracted by an LSTM for future gaze prediction. 
To facilitate the study of the <inline-formula><tex-math notation=\"LaTeX\">Z_$360^\\circ$_Z</tex-math></inline-formula> videos saliency detection, we further construct a large-scale <inline-formula><tex-math notation=\"LaTeX\">Z_$360^\\circ$_Z</tex-math></inline-formula> video saliency detection dataset that consists of 104 <inline-formula><tex-math notation=\"LaTeX\">Z_$360^\\circ$_Z</tex-math></inline-formula> videos viewed by 20+ human subjects. Comprehensive experiments validate the effectiveness of our proposed Spherical DNNs for 360<inline-formula><tex-math notation=\"LaTeX\">Z_$^\\circ$_Z</tex-math></inline-formula> handwritten digit classification and sport classification, saliency detection and gaze tracking in <inline-formula><tex-math notation=\"LaTeX\">Z_$360^\\circ$_Z</tex-math></inline-formula> videos. We also visualize the regions contributing to the classification decisions in our proposed Spherical DNNs via the Grad-CAM technique in the classification task, and the results show that our Spherical DNNs constantly leverage reasonable and important regions for decision making, regardless the large distortions. All codes and dataset are available on <uri>https://github.com/svip-lab/SphericalDNNs</uri>.", "abstracts": [ { "abstractType": "Regular", "content": "Spherical images or videos, as typical non-euclidean data, are usually stored in the form of 2D panoramas obtained through an equirectangular projection, which is neither equal area nor conformal. The distortion caused by the projection limits the performance of vanilla Deep Neural Networks (DNNs) designed for traditional euclidean data. In this paper, we design a novel Spherical Deep Neural Network (DNN) to deal with the distortion caused by the equirectangular projection. 
Specifically, we customize a set of components, including a spherical convolution, a spherical pooling, a spherical ConvLSTM cell and a spherical MSE loss, as the replacements of their counterparts in vanilla DNNs for spherical data. The core idea is to change the identical behavior of the conventional operations in vanilla DNNs across different feature patches so that they will be adjusted to the distortion caused by the variance of sampling rate among different feature patches. We demonstrate the effectiveness of our Spherical DNNs for saliency detection and gaze estimation in <inline-formula><tex-math notation=\"LaTeX\">$360^\\circ$</tex-math><alternatives><mml:math><mml:msup><mml:mn>360</mml:mn><mml:mo>&#x2218;</mml:mo></mml:msup></mml:math><inline-graphic xlink:href=\"xu-ieq2-3100259.gif\"/></alternatives></inline-formula> videos. For saliency detection, we take the temporal coherence of an observer&#x2019;s viewing process into consideration and propose to use a Spherical U-Net and a Spherical ConvLSTM to predict the saliency maps for each frame sequentially. As for gaze prediction, we propose to leverage a Spherical Encoder Module to extract spatial panoramic features, then we combine them with the gaze trajectory feature extracted by an LSTM for future gaze prediction. 
To facilitate the study of the <inline-formula><tex-math notation=\"LaTeX\">$360^\\circ$</tex-math><alternatives><mml:math><mml:msup><mml:mn>360</mml:mn><mml:mo>&#x2218;</mml:mo></mml:msup></mml:math><inline-graphic xlink:href=\"xu-ieq3-3100259.gif\"/></alternatives></inline-formula> videos saliency detection, we further construct a large-scale <inline-formula><tex-math notation=\"LaTeX\">$360^\\circ$</tex-math><alternatives><mml:math><mml:msup><mml:mn>360</mml:mn><mml:mo>&#x2218;</mml:mo></mml:msup></mml:math><inline-graphic xlink:href=\"xu-ieq4-3100259.gif\"/></alternatives></inline-formula> video saliency detection dataset that consists of 104 <inline-formula><tex-math notation=\"LaTeX\">$360^\\circ$</tex-math><alternatives><mml:math><mml:msup><mml:mn>360</mml:mn><mml:mo>&#x2218;</mml:mo></mml:msup></mml:math><inline-graphic xlink:href=\"xu-ieq5-3100259.gif\"/></alternatives></inline-formula> videos viewed by 20+ human subjects. Comprehensive experiments validate the effectiveness of our proposed Spherical DNNs for 360<inline-formula><tex-math notation=\"LaTeX\">$^\\circ$</tex-math><alternatives><mml:math><mml:msup><mml:mrow/><mml:mo>&#x2218;</mml:mo></mml:msup></mml:math><inline-graphic xlink:href=\"xu-ieq6-3100259.gif\"/></alternatives></inline-formula> handwritten digit classification and sport classification, saliency detection and gaze tracking in <inline-formula><tex-math notation=\"LaTeX\">$360^\\circ$</tex-math><alternatives><mml:math><mml:msup><mml:mn>360</mml:mn><mml:mo>&#x2218;</mml:mo></mml:msup></mml:math><inline-graphic xlink:href=\"xu-ieq7-3100259.gif\"/></alternatives></inline-formula> videos. We also visualize the regions contributing to the classification decisions in our proposed Spherical DNNs via the Grad-CAM technique in the classification task, and the results show that our Spherical DNNs constantly leverage reasonable and important regions for decision making, regardless the large distortions. 
All codes and dataset are available on <uri>https://github.com/svip-lab/SphericalDNNs</uri>.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Spherical images or videos, as typical non-euclidean data, are usually stored in the form of 2D panoramas obtained through an equirectangular projection, which is neither equal area nor conformal. The distortion caused by the projection limits the performance of vanilla Deep Neural Networks (DNNs) designed for traditional euclidean data. In this paper, we design a novel Spherical Deep Neural Network (DNN) to deal with the distortion caused by the equirectangular projection. Specifically, we customize a set of components, including a spherical convolution, a spherical pooling, a spherical ConvLSTM cell and a spherical MSE loss, as the replacements of their counterparts in vanilla DNNs for spherical data. The core idea is to change the identical behavior of the conventional operations in vanilla DNNs across different feature patches so that they will be adjusted to the distortion caused by the variance of sampling rate among different feature patches. We demonstrate the effectiveness of our Spherical DNNs for saliency detection and gaze estimation in - videos. For saliency detection, we take the temporal coherence of an observer’s viewing process into consideration and propose to use a Spherical U-Net and a Spherical ConvLSTM to predict the saliency maps for each frame sequentially. As for gaze prediction, we propose to leverage a Spherical Encoder Module to extract spatial panoramic features, then we combine them with the gaze trajectory feature extracted by an LSTM for future gaze prediction. To facilitate the study of the - videos saliency detection, we further construct a large-scale - video saliency detection dataset that consists of 104 - videos viewed by 20+ human subjects. 
Comprehensive experiments validate the effectiveness of our proposed Spherical DNNs for 360- handwritten digit classification and sport classification, saliency detection and gaze tracking in - videos. We also visualize the regions contributing to the classification decisions in our proposed Spherical DNNs via the Grad-CAM technique in the classification task, and the results show that our Spherical DNNs constantly leverage reasonable and important regions for decision making, regardless the large distortions. All codes and dataset are available on https://github.com/svip-lab/SphericalDNNs.", "title": "Spherical DNNs and Their Applications in 360<inline-formula><tex-math notation=\"LaTeX\">Z_$^\\circ$_Z</tex-math></inline-formula> Images and Videos", "normalizedTitle": "Spherical DNNs and Their Applications in 360- Images and Videos", "fno": "09497715", "hasPdf": true, "idPrefix": "tp", "keywords": [ "Computer Vision", "Convolutional Neural Nets", "Deep Learning Artificial Intelligence", "Feature Extraction", "Image Classification", "Image Colour Analysis", "Image Motion Analysis", "Image Sequences", "Image Texture", "Object Detection", "Recurrent Neural Nets", "Video Signal Processing", "Vanilla DN Ns", "Feature Patches", "Large Scale 360 X 00 B 0 Video Saliency Detection Dataset", "Noneuclidean Data", "Equirectangular Projection", "Vanilla Deep Neural Networks", "Spherical Convolution", "Spherical Pooling", "Spherical Conv LSTM Cell", "Spherical MSE Loss", "Spherical DN Ns", "Spherical Encoder Module", "Spherical Images", "Spherical Deep Neural Network", "Gaze Trajectory Feature Extraction", "Spatial Panoramic Features", "Videos", "Saliency Detection", "Distortion", "Convolution", "Task Analysis", "Feature Extraction", "Kernel", "Spherical Deep Neural Networks", "Saliency Detection", "Gaze Prediction", "360<named-content xmlns:xlink=\"http://www.w3.org/1999/xlink\" xmlns:ali=\"http://www.niso.org/schemas/ali/1.0/\" xmlns:mml=\"http://www.w3.org/1998/Math/MathML\" 
xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" content-type=\"math\" xlink:type=\"simple\"> <inline-formula> <tex-math notation=\"LaTeX\">Z_$^\\circ$_Z</tex-math> </inline-formula> </named-content> videos" ], "authors": [ { "givenName": "Yanyu", "surname": "Xu", "fullName": "Yanyu Xu", "affiliation": "Institute of High Performance Computing (IHPC), ASTAR, Singapore, Singapore", "__typename": "ArticleAuthorType" }, { "givenName": "Ziheng", "surname": "Zhang", "fullName": "Ziheng Zhang", "affiliation": "AI-Prime Co., Ltd, Shanghai, China", "__typename": "ArticleAuthorType" }, { "givenName": "Shenghua", "surname": "Gao", "fullName": "Shenghua Gao", "affiliation": "Shanghai Engineering Research Center of Intelligent Vision and Imaging, and Shanghai Engineering Research Center of Energy Efficient and Custom AI IC, ShanghaiTech University, Shanghai, China", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "10", "pubDate": "2022-10-01 00:00:00", "pubType": "trans", "pages": "7235-7252", "year": "2022", "issn": "0162-8828", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "trans/tg/2022/06/09723546", "title": "Multicriteria Scalable Graph Drawing via Stochastic Gradient Descent, <inline-formula><tex-math notation=\"LaTeX\">Z_$(SGD)^{2}$_Z</tex-math></inline-formula>", "doi": null, "abstractUrl": "/journal/tg/2022/06/09723546/1BocJwdaFYk", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tc/2020/06/08976264", "title": "Algorithms for Inversion Mod &#x3C;inline-formula&#x3E;&#x3C;tex-math notation=&#x22;LaTeX&#x22;&#x3E;Z_$p^k$_Z&#x3C;/tex-math&#x3E;&#x3C;/inline-formula&#x3E;", "doi": null, "abstractUrl": "/journal/tc/2020/06/08976264/1h0W7qmGRHO", 
"parentPublication": { "id": "trans/tc", "title": "IEEE Transactions on Computers", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tm/2021/05/08985262", "title": "<italic>Motion-Fi<inline-formula><tex-math notation=\"LaTeX\">Z_$^+$_Z</tex-math></inline-formula></italic>: Recognizing and Counting Repetitive Motions With Wireless Backscattering", "doi": null, "abstractUrl": "/journal/tm/2021/05/08985262/1hcyDuse3Qc", "parentPublication": { "id": "trans/tm", "title": "IEEE Transactions on Mobile Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2022/08/09373914", "title": "A Practical <inline-formula><tex-math notation=\"LaTeX\">Z_$O(N^2)$_Z</tex-math></inline-formula> Outlier Removal Method for Correspondence-Based Point Cloud Registration", "doi": null, "abstractUrl": "/journal/tp/2022/08/09373914/1rPt9ICFlCw", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tk/2022/12/09385890", "title": "Personalized Route Recommendation With Neural Network Enhanced Search Algorithm<inline-formula><tex-math notation=\"LaTeX\"/></inline-formula>", "doi": null, "abstractUrl": "/journal/tk/2022/12/09385890/1seiheAM4vK", "parentPublication": { "id": "trans/tk", "title": "IEEE Transactions on Knowledge & Data Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2022/09/09395242", "title": "TRACK: A New Method From a Re-Examination of Deep Architectures for Head Motion Prediction in 360<inline-formula><tex-math notation=\"LaTeX\">Z_${}^{\\circ }$_Z</tex-math></inline-formula> Videos", "doi": null, "abstractUrl": "/journal/tp/2022/09/09395242/1syq55q0W7C", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", 
"__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2022/11/09524471", "title": "A(DP)<inline-formula><tex-math notation=\"LaTeX\">Z_$^2$_Z</tex-math></inline-formula>SGD: Asynchronous Decentralized Parallel Stochastic Gradient Descent With Differential Privacy", "doi": null, "abstractUrl": "/journal/tp/2022/11/09524471/1wpq5To7ikU", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tk/2023/03/09525250", "title": "Fast Reachability Queries Answering Based on <inline-formula><tex-math notation=\"LaTeX\">Z_$\\mathsf{RCN}$_Z</tex-math></inline-formula> Reduction", "doi": null, "abstractUrl": "/journal/tk/2023/03/09525250/1wuoOp439OU", "parentPublication": { "id": "trans/tk", "title": "IEEE Transactions on Knowledge & Data Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2022/11/09541093", "title": "Learning Spherical Convolution for <inline-formula><tex-math notation=\"LaTeX\">Z_$360^{\\circ }$_Z</tex-math></inline-formula> Recognition", "doi": null, "abstractUrl": "/journal/tp/2022/11/09541093/1x3fMiX57S8", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/td/2022/07/09585362", "title": "A Fast <inline-formula><tex-math notation=\"LaTeX\">Z_$f(r,k+1)/k$_Z</tex-math></inline-formula>-Diagnosis for Interconnection Networks Under MM* Model", "doi": null, "abstractUrl": "/journal/td/2022/07/09585362/1y11LlQdiGk", "parentPublication": { "id": "trans/td", "title": "IEEE Transactions on Parallel & Distributed Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "09449976", 
"articleId": "1uiiNgsKInK", "__typename": "AdjacentArticleType" }, "next": { "fno": "09454290", "articleId": "1uqBdfvmYc8", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "1GF6qnPfxcY", "name": "ttp202210-09497715s1-supp1-3100259.pdf", "location": "https://www.computer.org/csdl/api/v1/extra/ttp202210-09497715s1-supp1-3100259.pdf", "extension": "pdf", "size": "12.2 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "1IXUpNdkyWs", "title": "Dec.", "year": "2022", "issueNum": "12", "idPrefix": "ts", "pubType": "journal", "volume": "48", "label": "Dec.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1A4Sz68iffO", "doi": "10.1109/TSE.2022.3141758", "abstract": "Software often produces biased outputs. In particular, machine learning (ML) based software is known to produce erroneous predictions when processing <italic>discriminatory inputs</italic>. Such unfair program behavior can be caused by societal bias. In the last few years, Amazon, Microsoft and Google have provided software services that produce unfair outputs, mostly due to societal bias (e.g., gender or race). In such events, developers are saddled with the task of conducting <italic>fairness testing</italic>. Fairness testing is challenging; developers are tasked with <italic>generating discriminatory inputs that reveal and explain biases</italic>. We propose a <italic>grammar-based fairness testing approach</italic> (called <sc>Astraea</sc>) which leverages context-free grammars to generate discriminatory inputs that <italic>reveal fairness violations</italic> in software systems. Using probabilistic grammars, <sc>Astraea</sc> also provides fault diagnosis by <italic>isolating the cause</italic> of observed software bias. <sc>Astraea</sc>&#x2019;s diagnoses facilitate the improvement of ML fairness. <sc>Astraea</sc> was evaluated on 18 software systems that provide three major <italic>natural language processing</italic> (NLP) services. In our evaluation, <sc>Astraea</sc> generated fairness violations at a rate of about 18&#x0025;. <sc>Astraea</sc> generated over 573K discriminatory test cases and found over 102K fairness violations. 
Furthermore, <sc>Astraea</sc> improves software fairness by about 76&#x0025; via model-retraining, on average.", "abstracts": [ { "abstractType": "Regular", "content": "Software often produces biased outputs. In particular, machine learning (ML) based software is known to produce erroneous predictions when processing <italic>discriminatory inputs</italic>. Such unfair program behavior can be caused by societal bias. In the last few years, Amazon, Microsoft and Google have provided software services that produce unfair outputs, mostly due to societal bias (e.g., gender or race). In such events, developers are saddled with the task of conducting <italic>fairness testing</italic>. Fairness testing is challenging; developers are tasked with <italic>generating discriminatory inputs that reveal and explain biases</italic>. We propose a <italic>grammar-based fairness testing approach</italic> (called <sc>Astraea</sc>) which leverages context-free grammars to generate discriminatory inputs that <italic>reveal fairness violations</italic> in software systems. Using probabilistic grammars, <sc>Astraea</sc> also provides fault diagnosis by <italic>isolating the cause</italic> of observed software bias. <sc>Astraea</sc>&#x2019;s diagnoses facilitate the improvement of ML fairness. <sc>Astraea</sc> was evaluated on 18 software systems that provide three major <italic>natural language processing</italic> (NLP) services. In our evaluation, <sc>Astraea</sc> generated fairness violations at a rate of about 18&#x0025;. <sc>Astraea</sc> generated over 573K discriminatory test cases and found over 102K fairness violations. Furthermore, <sc>Astraea</sc> improves software fairness by about 76&#x0025; via model-retraining, on average.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Software often produces biased outputs. In particular, machine learning (ML) based software is known to produce erroneous predictions when processing discriminatory inputs. 
Such unfair program behavior can be caused by societal bias. In the last few years, Amazon, Microsoft and Google have provided software services that produce unfair outputs, mostly due to societal bias (e.g., gender or race). In such events, developers are saddled with the task of conducting fairness testing. Fairness testing is challenging; developers are tasked with generating discriminatory inputs that reveal and explain biases. We propose a grammar-based fairness testing approach (called Astraea) which leverages context-free grammars to generate discriminatory inputs that reveal fairness violations in software systems. Using probabilistic grammars, Astraea also provides fault diagnosis by isolating the cause of observed software bias. Astraea’s diagnoses facilitate the improvement of ML fairness. Astraea was evaluated on 18 software systems that provide three major natural language processing (NLP) services. In our evaluation, Astraea generated fairness violations at a rate of about 18%. Astraea generated over 573K discriminatory test cases and found over 102K fairness violations. 
Furthermore, Astraea improves software fairness by about 76% via model-retraining, on average.", "title": "<sc>Astraea</sc>: Grammar-Based Fairness Testing", "normalizedTitle": "Astraea: Grammar-Based Fairness Testing", "fno": "09678017", "hasPdf": true, "idPrefix": "ts", "keywords": [ "Context Free Grammars", "Fault Diagnosis", "Learning Artificial Intelligence", "Natural Language Processing", "Astraea", "Context Free Grammars", "Gender", "Grammar Based Fairness Testing", "Machine Learning Based Software", "ML Fairness", "Natural Language Processing", "Probabilistic Grammars", "Program Behavior", "Societal Bias", "Software Services", "Testing", "Grammar", "Task Analysis", "Sentiment Analysis", "Test Pattern Generators", "Software Testing", "Software Systems", "Software Fairness", "Machine Learning", "Natural Language Processing", "Software Testing", "Program Debugging" ], "authors": [ { "givenName": "Ezekiel", "surname": "Soremekun", "fullName": "Ezekiel Soremekun", "affiliation": "Interdisciplinary Centre for Security, Reliability and Trust (SnT), University of Luxembourg, Esch-sur-Alzette, Luxembourg", "__typename": "ArticleAuthorType" }, { "givenName": "Sakshi", "surname": "Udeshi", "fullName": "Sakshi Udeshi", "affiliation": "Singapore University of Technology and Design, Singapore", "__typename": "ArticleAuthorType" }, { "givenName": "Sudipta", "surname": "Chattopadhyay", "fullName": "Sudipta Chattopadhyay", "affiliation": "Singapore University of Technology and Design, Singapore", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "12", "pubDate": "2022-12-01 00:00:00", "pubType": "trans", "pages": "5188-5211", "year": "2022", "issn": "0098-5589", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/icse/2022/9221/0/922100a871", "title": "Explanation-Guided Fairness Testing through Genetic 
Algorithm", "doi": null, "abstractUrl": "/proceedings-article/icse/2022/922100a871/1EmrUEKRdte", "parentPublication": { "id": "proceedings/icse/2022/9221/0", "title": "2022 IEEE/ACM 44th International Conference on Software Engineering (ICSE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ts/2023/04/09847081", "title": "Neural Network Guided Evolutionary Fuzzing for Finding Traffic Violations of Autonomous Vehicles", "doi": null, "abstractUrl": "/journal/ts/2023/04/09847081/1Fu4NBuPCak", "parentPublication": { "id": "trans/ts", "title": "IEEE Transactions on Software Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ts/2023/04/09951398", "title": "FairMask: Better Fairness via Model-Based Rebalancing of Protected Attributes", "doi": null, "abstractUrl": "/journal/ts/2023/04/09951398/1Ik4KtcTGLe", "parentPublication": { "id": "trans/ts", "title": "IEEE Transactions on Software Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ts/2021/10/08862860", "title": "<sc>IntRepair</sc>: Informed Repairing of Integer Overflows", "doi": null, "abstractUrl": "/journal/ts/2021/10/08862860/1dXEPR9d6M0", "parentPublication": { "id": "trans/ts", "title": "IEEE Transactions on Software Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ts/2021/11/08907363", "title": "Grammar Based Directed Testing of Machine Learning Systems", "doi": null, "abstractUrl": "/journal/ts/2021/11/08907363/1f75VuEDS7K", "parentPublication": { "id": "trans/ts", "title": "IEEE Transactions on Software Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ase/2018/5937/0/09000070", "title": "Automated Directed Fairness Testing", "doi": null, "abstractUrl": "/proceedings-article/ase/2018/09000070/1htBMG2tTji", "parentPublication": { 
"id": "proceedings/ase/2018/5937/0", "title": "2018 33rd IEEE/ACM International Conference on Automated Software Engineering (ASE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2021/02/09238399", "title": "<sc>Cartolabe</sc>: A Web-Based Scalable Visualization of Large Document Collections", "doi": null, "abstractUrl": "/magazine/cg/2021/02/09238399/1oa1KJAPKOA", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icse/2020/7121/0/712100a949", "title": "White-box Fairness Testing through Adversarial Sampling", "doi": null, "abstractUrl": "/proceedings-article/icse/2020/712100a949/1pK5ic5lre8", "parentPublication": { "id": "proceedings/icse/2020/7121/0", "title": "2020 IEEE/ACM 42nd International Conference on Software Engineering (ICSE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/trustcom/2020/4380/0/438000a121", "title": "Fairness Testing of Machine Learning Models Using Deep Reinforcement Learning", "doi": null, "abstractUrl": "/proceedings-article/trustcom/2020/438000a121/1r54mhPqDni", "parentPublication": { "id": "proceedings/trustcom/2020/4380/0", "title": "2020 IEEE 19th International Conference on Trust, Security and Privacy in Computing and Communications (TrustCom)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ts/2022/09/09478582", "title": "<sc>Trimmer</sc>: An Automated System for Configuration-Based Software Debloating", "doi": null, "abstractUrl": "/journal/ts/2022/09/09478582/1v55PcR6GyY", "parentPublication": { "id": "trans/ts", "title": "IEEE Transactions on Software Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "09733807", "articleId": "1BJIo3KeKWI", "__typename": 
"AdjacentArticleType" }, "next": null, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "1IXUuAgPMAw", "name": "tts202212-09678017s1-supp1-3141758.pdf", "location": "https://www.computer.org/csdl/api/v1/extra/tts202212-09678017s1-supp1-3141758.pdf", "extension": "pdf", "size": "287 kB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "12OmNvkpkSQ", "title": "PrePrints", "year": "5555", "issueNum": "01", "idPrefix": "ta", "pubType": "journal", "volume": null, "label": "PrePrints", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1M80CWMQIvK", "doi": "10.1109/TAFFC.2023.3265072", "abstract": "How can we reliably transfer affect models trained in controlled laboratory conditions (<italic>in-vitro</italic>) to uncontrolled real-world settings (<italic>in-vivo</italic>)? The information gap between in-vitro and in-vivo applications defines a core challenge of affective computing. This gap is caused by limitations related to affect sensing including intrusiveness, hardware malfunctions and availability of sensors. As a response to these limitations, we introduce the concept of privileged information for operating affect models in real-world scenarios (in the wild). Privileged information enables affect models to be trained across multiple modalities available in a lab, and ignore, without significant performance drops, those modalities that are not available when they operate in the wild. Our approach is tested in two multimodal affect databases one of which is designed for testing models of affect in the wild. By training our affect models using all modalities and then using solely raw footage frames for testing the models, we reach the performance of models that fuse all available modalities for both training and testing. The results are robust across both classification and regression affect modeling tasks which are dominant paradigms in affective computing. Our findings make a decisive step towards realizing affect interaction in the wild.", "abstracts": [ { "abstractType": "Regular", "content": "How can we reliably transfer affect models trained in controlled laboratory conditions (<italic>in-vitro</italic>) to uncontrolled real-world settings (<italic>in-vivo</italic>)? 
The information gap between in-vitro and in-vivo applications defines a core challenge of affective computing. This gap is caused by limitations related to affect sensing including intrusiveness, hardware malfunctions and availability of sensors. As a response to these limitations, we introduce the concept of privileged information for operating affect models in real-world scenarios (in the wild). Privileged information enables affect models to be trained across multiple modalities available in a lab, and ignore, without significant performance drops, those modalities that are not available when they operate in the wild. Our approach is tested in two multimodal affect databases one of which is designed for testing models of affect in the wild. By training our affect models using all modalities and then using solely raw footage frames for testing the models, we reach the performance of models that fuse all available modalities for both training and testing. The results are robust across both classification and regression affect modeling tasks which are dominant paradigms in affective computing. Our findings make a decisive step towards realizing affect interaction in the wild.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "How can we reliably transfer affect models trained in controlled laboratory conditions (in-vitro) to uncontrolled real-world settings (in-vivo)? The information gap between in-vitro and in-vivo applications defines a core challenge of affective computing. This gap is caused by limitations related to affect sensing including intrusiveness, hardware malfunctions and availability of sensors. As a response to these limitations, we introduce the concept of privileged information for operating affect models in real-world scenarios (in the wild). 
Privileged information enables affect models to be trained across multiple modalities available in a lab, and ignore, without significant performance drops, those modalities that are not available when they operate in the wild. Our approach is tested in two multimodal affect databases one of which is designed for testing models of affect in the wild. By training our affect models using all modalities and then using solely raw footage frames for testing the models, we reach the performance of models that fuse all available modalities for both training and testing. The results are robust across both classification and regression affect modeling tasks which are dominant paradigms in affective computing. Our findings make a decisive step towards realizing affect interaction in the wild.", "title": "From the Lab to the Wild: Affect Modeling Via Privileged Information", "normalizedTitle": "From the Lab to the Wild: Affect Modeling Via Privileged Information", "fno": "10094004", "hasPdf": true, "idPrefix": "ta", "keywords": [ "Computational Modeling", "Visualization", "Sensors", "Data Models", "Brain Modeling", "Testing", "Emotion Recognition", "Affect Modelling", "Arousal", "Machine Learning", "Physiology", "Pixels", "Privileged Information", "Valence" ], "authors": [ { "givenName": "Konstantinos", "surname": "Makantasis", "fullName": "Konstantinos Makantasis", "affiliation": "Department of Artificial Intelligence, Univerisity of Malta, Msida, Malta", "__typename": "ArticleAuthorType" }, { "givenName": "Kosmas", "surname": "Pinitas", "fullName": "Kosmas Pinitas", "affiliation": "Institute of Digital Games, Univerisity of Malta, Msida, Malta", "__typename": "ArticleAuthorType" }, { "givenName": "Antonios", "surname": "Liapis", "fullName": "Antonios Liapis", "affiliation": "Institute of Digital Games, Univerisity of Malta, Msida, Malta", "__typename": "ArticleAuthorType" }, { "givenName": "Georgios N.", "surname": "Yannakakis", "fullName": "Georgios N. 
Yannakakis", "affiliation": "Institute of Digital Games, Univerisity of Malta, Msida, Malta", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "01", "pubDate": "2023-04-01 00:00:00", "pubType": "trans", "pages": "1-13", "year": "5555", "issn": "1949-3045", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/cvprw/2016/1437/0/1437b487", "title": "Facial Affect “In-the-Wild”: A Survey and a New Database", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2016/1437b487/12OmNBBhN9l", "parentPublication": { "id": "proceedings/cvprw/2016/1437/0", "title": "2016 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/fg/2018/2335/0/233501a492", "title": "Linear and Non-Linear Multimodal Fusion for Continuous Affect Estimation In-the-Wild", "doi": null, "abstractUrl": "/proceedings-article/fg/2018/233501a492/12OmNzZEAvH", "parentPublication": { "id": "proceedings/fg/2018/2335/0", "title": "2018 13th IEEE International Conference on Automatic Face & Gesture Recognition (FG 2018)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ta/2020/02/08249871", "title": "Deep Physiological Affect Network for the Recognition of Human Emotions", "doi": null, "abstractUrl": "/journal/ta/2020/02/08249871/13rRUwjoNvg", "parentPublication": { "id": "trans/ta", "title": "IEEE Transactions on Affective Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ta/5555/01/09726856", "title": "Audio-Visual Gated-Sequenced Neural Networks for Affect Recognition", "doi": null, "abstractUrl": "/journal/ta/5555/01/09726856/1BrwhfZgXL2", "parentPublication": { "id": "trans/ta", "title": "IEEE Transactions on 
Affective Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2022/8739/0/873900c381", "title": "Time-Continuous Audiovisual Fusion with Recurrence vs Attention for In-The-Wild Affect Recognition", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2022/873900c381/1G4F6CuA7ok", "parentPublication": { "id": "proceedings/cvprw/2022/8739/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ta/5555/01/09999489", "title": "Smart Affect Monitoring with Wearables in the Wild: An Unobtrusive Mood-Aware Emotion Recognition System", "doi": null, "abstractUrl": "/journal/ta/5555/01/09999489/1JrMxVamoeI", "parentPublication": { "id": "trans/ta", "title": "IEEE Transactions on Affective Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/fg/2023/4544/0/10042676", "title": "Towards Intercultural Affect Recognition: Audio-Visual Affect Recognition in the Wild Across Six Cultures", "doi": null, "abstractUrl": "/proceedings-article/fg/2023/10042676/1KOuYEXpVSw", "parentPublication": { "id": "proceedings/fg/2023/4544/0", "title": "2023 IEEE 17th International Conference on Automatic Face and Gesture Recognition (FG)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ta/2022/02/09064942", "title": "BReG-NeXt: Facial Affect Computing Using Adaptive Residual Networks With Bounded Gradient", "doi": null, "abstractUrl": "/journal/ta/2022/02/09064942/1iZGpZ99nA4", "parentPublication": { "id": "trans/ta", "title": "IEEE Transactions on Affective Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/fg/2020/3079/0/307900a600", "title": "Multi-modal Sequence-to-sequence Model for Continuous Affect Prediction in the Wild 
Using Deep 3D Features", "doi": null, "abstractUrl": "/proceedings-article/fg/2020/307900a600/1kecIKwGzao", "parentPublication": { "id": "proceedings/fg/2020/3079/0/", "title": "2020 15th IEEE International Conference on Automatic Face and Gesture Recognition (FG 2020) (FG)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/acii/2021/0019/0/09597417", "title": "Privileged Information for Modeling Affect In The Wild", "doi": null, "abstractUrl": "/proceedings-article/acii/2021/09597417/1yylcbPFACQ", "parentPublication": { "id": "proceedings/acii/2021/0019/0", "title": "2021 9th International Conference on Affective Computing and Intelligent Interaction (ACII)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "10091157", "articleId": "1M2HZjkToE8", "__typename": "AdjacentArticleType" }, "next": { "fno": "10094033", "articleId": "1M80D4Up7gc", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "1DQPlKUprk4", "title": "April-June", "year": "2022", "issueNum": "02", "idPrefix": "ta", "pubType": "journal", "volume": "13", "label": "April-June", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1iaendfpjig", "doi": "10.1109/TAFFC.2020.2980275", "abstract": "One of the most effective ways to improve quality of life in dementia is by exposing people to meaningful activities. The study of engagement is crucial to identify which activities are significant for persons with dementia and customize them. Previous work has mainly focused on developing assessment tools and the only available model of engagement for people with dementia focused on factors influencing engagement or influenced by engagement. This article focuses on the internal functioning of engagement and presents the development and testing of a model specifying the components of engagement, their measures, and the relationships they entertain. We collected behavioral and physiological data while participants with dementia (N &#x003D; 14) were involved in six sessions of play, three of game-based cognitive stimulation and three of robot-based free play. We tested the concurrent validity of the measures employed to gauge engagement and ran factorial analysis and Structural Equation Modeling to determine whether the components of engagement and their relationships were those hypothesized. The model we constructed, which we call the ENGAGE-DEM, achieved excellent goodness of fit and can be considered a scaffold to the development of affective computing frameworks for measuring engagement online and offline, especially in HCI and HRI.", "abstracts": [ { "abstractType": "Regular", "content": "One of the most effective ways to improve quality of life in dementia is by exposing people to meaningful activities. 
The study of engagement is crucial to identify which activities are significant for persons with dementia and customize them. Previous work has mainly focused on developing assessment tools and the only available model of engagement for people with dementia focused on factors influencing engagement or influenced by engagement. This article focuses on the internal functioning of engagement and presents the development and testing of a model specifying the components of engagement, their measures, and the relationships they entertain. We collected behavioral and physiological data while participants with dementia (N &#x003D; 14) were involved in six sessions of play, three of game-based cognitive stimulation and three of robot-based free play. We tested the concurrent validity of the measures employed to gauge engagement and ran factorial analysis and Structural Equation Modeling to determine whether the components of engagement and their relationships were those hypothesized. The model we constructed, which we call the ENGAGE-DEM, achieved excellent goodness of fit and can be considered a scaffold to the development of affective computing frameworks for measuring engagement online and offline, especially in HCI and HRI.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "One of the most effective ways to improve quality of life in dementia is by exposing people to meaningful activities. The study of engagement is crucial to identify which activities are significant for persons with dementia and customize them. Previous work has mainly focused on developing assessment tools and the only available model of engagement for people with dementia focused on factors influencing engagement or influenced by engagement. This article focuses on the internal functioning of engagement and presents the development and testing of a model specifying the components of engagement, their measures, and the relationships they entertain. 
We collected behavioral and physiological data while participants with dementia (N = 14) were involved in six sessions of play, three of game-based cognitive stimulation and three of robot-based free play. We tested the concurrent validity of the measures employed to gauge engagement and ran factorial analysis and Structural Equation Modeling to determine whether the components of engagement and their relationships were those hypothesized. The model we constructed, which we call the ENGAGE-DEM, achieved excellent goodness of fit and can be considered a scaffold to the development of affective computing frameworks for measuring engagement online and offline, especially in HCI and HRI.", "title": "ENGAGE-DEM: A Model of Engagement of People With Dementia", "normalizedTitle": "ENGAGE-DEM: A Model of Engagement of People With Dementia", "fno": "09035425", "hasPdf": true, "idPrefix": "ta", "keywords": [ "Affective Computing", "Behavioural Sciences Computing", "Cognition", "Computer Games", "Diseases", "Internet", "Social Aspects Of Automation", "Statistical Analysis", "User Interfaces", "ENGAGE DEM", "Dementia", "People Engagement", "Behavioral Data", "Physiological Data", "Game Based Cognitive Stimulation", "Robot Based Free Play", "Factorial Analysis", "Structural Equation Modeling", "Affective Computing Framework", "Dementia", "Computational Modeling", "Mathematical Model", "Task Analysis", "Physiology", "Games", "Atmospheric Measurements", "Modelling Human Emotion", "Nonverbal Signals", "Physiological Measures", "Health Care", "Social Agents Robotics" ], "authors": [ { "givenName": "Giulia", "surname": "Perugia", "fullName": "Giulia Perugia", "affiliation": "Department of Information Technology, Uppsala University, Uppsala, Sweden", "__typename": "ArticleAuthorType" }, { "givenName": "Marta", "surname": "Díaz-Boladeras", "fullName": "Marta Díaz-Boladeras", "affiliation": "Technical Research Centre for Dependency Care and Autonomous Living CETpD, Universitat 
Politècnica de Catalunya (UPC), Barcelona, Spain", "__typename": "ArticleAuthorType" }, { "givenName": "Andreu", "surname": "Català-Mallofré", "fullName": "Andreu Català-Mallofré", "affiliation": "Technical Research Centre for Dependency Care and Autonomous Living CETpD, Universitat Politècnica de Catalunya (UPC), Barcelona, Spain", "__typename": "ArticleAuthorType" }, { "givenName": "Emilia I.", "surname": "Barakova", "fullName": "Emilia I. Barakova", "affiliation": "Department of Industrial Design, Technische Universiteit Eindhoven (TU/e), Eindhoven, AZ, Netherlands", "__typename": "ArticleAuthorType" }, { "givenName": "Matthias", "surname": "Rauterberg", "fullName": "Matthias Rauterberg", "affiliation": "Department of Industrial Design, Technische Universiteit Eindhoven (TU/e), Eindhoven, AZ, Netherlands", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": false, "showRecommendedArticles": true, "isOpenAccess": true, "issueNum": "02", "pubDate": "2022-04-01 00:00:00", "pubType": "trans", "pages": "926-943", "year": "2022", "issn": "1949-3045", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/gtsd/2016/3638/0/3638a171", "title": "Data Mining the Co-Morbid Associations between Dementia and Various Kinds of Illnesses Using a Medicine Database", "doi": null, "abstractUrl": "/proceedings-article/gtsd/2016/3638a171/12OmNC8MsAL", "parentPublication": { "id": "proceedings/gtsd/2016/3638/0", "title": "2016 3rd International Conference on Green Technology and Sustainable Development (GTSD)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bibmw/2010/8303/0/05703836", "title": "Monitoring and analysis of sleep pattern for people with early dementia", "doi": null, "abstractUrl": "/proceedings-article/bibmw/2010/05703836/12OmNx8OumU", "parentPublication": { "id": "proceedings/bibmw/2010/8303/0", "title": "2010 IEEE 
International Conference on Bioinformatics and Biomedicine Workshops (BIBMW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bibe/2017/1324/0/132401a574", "title": "Gamification in Social Networking: A Platform for People Living with Dementia and their Caregivers", "doi": null, "abstractUrl": "/proceedings-article/bibe/2017/132401a574/12OmNzBwGKG", "parentPublication": { "id": "proceedings/bibe/2017/1324/0", "title": "2017 IEEE 17th International Conference on Bioinformatics and Bioengineering (BIBE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icoin/2015/8342/0/07057910", "title": "Considerations and design on apps for elderly with mild-to-moderate dementia", "doi": null, "abstractUrl": "/proceedings-article/icoin/2015/07057910/12OmNzC5TnX", "parentPublication": { "id": "proceedings/icoin/2015/8342/0", "title": "2015 International Conference on Information Networking (ICOIN)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/percomw/2018/3227/0/08480263", "title": "Discovery of Causal Relations in the Challenging Behaviour of People with Dementia", "doi": null, "abstractUrl": "/proceedings-article/percomw/2018/08480263/17D45VTRori", "parentPublication": { "id": "proceedings/percomw/2018/3227/0", "title": "2018 IEEE International Conference on Pervasive Computing and Communications Workshops (PerCom Workshops)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/avss/2018/9294/0/08639371", "title": "A Vision-based Transfer Learning Approach for Recognizing Behavioral Symptoms in People with Dementia", "doi": null, "abstractUrl": "/proceedings-article/avss/2018/08639371/17PYElRdthE", "parentPublication": { "id": "proceedings/avss/2018/9294/0", "title": "2018 15th IEEE International Conference on Advanced Video and Signal Based Surveillance (AVSS)", 
"__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aciiw/2021/0021/0/09666306", "title": "A Multimodal Engagement-Aware Recommender System for People with Dementia", "doi": null, "abstractUrl": "/proceedings-article/aciiw/2021/09666306/1A3hSAjxpVC", "parentPublication": { "id": "proceedings/aciiw/2021/0021/0", "title": "2021 9th International Conference on Affective Computing and Intelligent Interaction Workshops and Demos (ACIIW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iiai-aai/2022/9755/0/975500a077", "title": "Designing Animal Robot Recreation to Increase the Amount of Communication for Elderly People with Dementia", "doi": null, "abstractUrl": "/proceedings-article/iiai-aai/2022/975500a077/1GU6R50xuow", "parentPublication": { "id": "proceedings/iiai-aai/2022/9755/0", "title": "2022 12th International Congress on Advanced Applied Informatics (IIAI-AAI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ta/2022/02/08859245", "title": "FaceEngage: Robust Estimation of Gameplay Engagement from User-Contributed (YouTube) Videos", "doi": null, "abstractUrl": "/journal/ta/2022/02/08859245/1dR0QohRi9y", "parentPublication": { "id": "trans/ta", "title": "IEEE Transactions on Affective Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iiai-aai/2020/7397/0/739700a826", "title": "Development and Evaluation of Robot Teaching Materials for Learning to Cope with Elderly People with Dementia", "doi": null, "abstractUrl": "/proceedings-article/iiai-aai/2020/739700a826/1tGctE4tMzK", "parentPublication": { "id": "proceedings/iiai-aai/2020/7397/0", "title": "2020 9th International Congress on Advanced Applied Informatics (IIAI-AAI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "09028119", 
"articleId": "1i3AKComau4", "__typename": "AdjacentArticleType" }, "next": { "fno": "09037266", "articleId": "1ifd4ywuBO0", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "1DQPrhVe8DK", "name": "tta202202-09035425s1-supp1-2980275.pdf", "location": "https://www.computer.org/csdl/api/v1/extra/tta202202-09035425s1-supp1-2980275.pdf", "extension": "pdf", "size": "321 kB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "1ugDQjSl8Jy", "title": "July", "year": "2021", "issueNum": "07", "idPrefix": "tp", "pubType": "journal", "volume": "43", "label": "July", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1t8VREDi7Cg", "doi": "10.1109/TPAMI.2021.3075978", "abstract": "Under-panel cameras provide an intriguing way to maximize the display area for a mobile device. An under-panel camera images a scene via the openings in the display panel; hence, a captured photograph is noisy as well as endowed with a large diffractive blur as the display acts as an aperture on the lens. Unfortunately, the pattern of openings commonly found in current LED displays are not conducive to high-quality deblurring. This paper redesigns the layout of openings in the display to engineer a blur kernel that is robustly invertible in the presence of noise. We first provide a basic analysis using Fourier optics that indicates that the nature of the blur is critically affected by the periodicity of the display openings as well as the shape of the opening at each individual display pixel. Armed with this insight, we provide a suite of modifications to the pixel layout that promote the invertibility of the blur kernels. We evaluate the proposed layouts with photomasks placed in front of a cellphone camera, thereby emulating an under-panel camera. A key takeaway is that optimizing the display layout does indeed produce significant improvements.", "abstracts": [ { "abstractType": "Regular", "content": "Under-panel cameras provide an intriguing way to maximize the display area for a mobile device. An under-panel camera images a scene via the openings in the display panel; hence, a captured photograph is noisy as well as endowed with a large diffractive blur as the display acts as an aperture on the lens. 
Unfortunately, the pattern of openings commonly found in current LED displays are not conducive to high-quality deblurring. This paper redesigns the layout of openings in the display to engineer a blur kernel that is robustly invertible in the presence of noise. We first provide a basic analysis using Fourier optics that indicates that the nature of the blur is critically affected by the periodicity of the display openings as well as the shape of the opening at each individual display pixel. Armed with this insight, we provide a suite of modifications to the pixel layout that promote the invertibility of the blur kernels. We evaluate the proposed layouts with photomasks placed in front of a cellphone camera, thereby emulating an under-panel camera. A key takeaway is that optimizing the display layout does indeed produce significant improvements.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Under-panel cameras provide an intriguing way to maximize the display area for a mobile device. An under-panel camera images a scene via the openings in the display panel; hence, a captured photograph is noisy as well as endowed with a large diffractive blur as the display acts as an aperture on the lens. Unfortunately, the pattern of openings commonly found in current LED displays are not conducive to high-quality deblurring. This paper redesigns the layout of openings in the display to engineer a blur kernel that is robustly invertible in the presence of noise. We first provide a basic analysis using Fourier optics that indicates that the nature of the blur is critically affected by the periodicity of the display openings as well as the shape of the opening at each individual display pixel. Armed with this insight, we provide a suite of modifications to the pixel layout that promote the invertibility of the blur kernels. We evaluate the proposed layouts with photomasks placed in front of a cellphone camera, thereby emulating an under-panel camera. 
A key takeaway is that optimizing the display layout does indeed produce significant improvements.", "title": "Designing Display Pixel Layouts for Under-Panel Cameras", "normalizedTitle": "Designing Display Pixel Layouts for Under-Panel Cameras", "fno": "09416801", "hasPdf": true, "idPrefix": "tp", "keywords": [ "Cameras", "Fourier Transform Optics", "Image Restoration", "Image Sensors", "LED Displays", "Liquid Crystal Displays", "Optical Design Techniques", "Designing Display Pixel Layouts", "Display Area", "Under Panel Camera Images", "Opening", "Display Panel", "Diffractive Blur", "Current LED Displays", "Blur Kernel", "Display Openings", "Individual Display Pixel", "Pixel Layout", "Cellphone Camera", "Display Layout", "Cameras", "Apertures", "Layout", "Lenses", "Organic Light Emitting Diodes", "Optics", "Shape", "Computational Photography", "Under Panel Cameras", "Deblurring" ], "authors": [ { "givenName": "Anqi", "surname": "Yang", "fullName": "Anqi Yang", "affiliation": "Department of Electrical and Computer Engineering, Carnegie Mellon University, Pittsburgh, PA, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Aswin C.", "surname": "Sankaranarayanan", "fullName": "Aswin C. 
Sankaranarayanan", "affiliation": "Department of Electrical and Computer Engineering, Carnegie Mellon University, Pittsburgh, PA, USA", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "07", "pubDate": "2021-07-01 00:00:00", "pubType": "trans", "pages": "2245-2256", "year": "2021", "issn": "0162-8828", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/isvri/2011/0054/0/05759666", "title": "A scent-emitting video display system", "doi": null, "abstractUrl": "/proceedings-article/isvri/2011/05759666/12OmNALUoyw", "parentPublication": { "id": "proceedings/isvri/2011/0054/0", "title": "2011 IEEE International Symposium on VR Innovation (ISVRI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2008/1971/0/04480768", "title": "Automultiscopic display by revolving flat-panel displays", "doi": null, "abstractUrl": "/proceedings-article/vr/2008/04480768/12OmNAolGTH", "parentPublication": { "id": "proceedings/vr/2008/1971/0", "title": "IEEE Virtual Reality 2008", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/memsys/1997/3744/0/00581854", "title": "Full color LED display panel fabricated on a silicon microreflector", "doi": null, "abstractUrl": "/proceedings-article/memsys/1997/00581854/12OmNyLiuAu", "parentPublication": { "id": "proceedings/memsys/1997/3744/0", "title": "Proceedings IEEE The Tenth Annual International Workshop on Micro Electro Mechanical Systems. 
An Investigation of Micro Structures, Sensors, Actuators, Machines and Robots", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/mipr/2018/1857/0/185701a309", "title": "Power Constrained Contrast Enhancement by Joint L2,1-norm Regularized Sparse Coding for OLED Display", "doi": null, "abstractUrl": "/proceedings-article/mipr/2018/185701a309/12OmNzd7bEl", "parentPublication": { "id": "proceedings/mipr/2018/1857/0", "title": "2018 IEEE Conference on Multimedia Information Processing and Retrieval (MIPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ic3/2015/7947/0/07346727", "title": "Content aware targeted image manipulation to reduce power consumption in OLED panels", "doi": null, "abstractUrl": "/proceedings-article/ic3/2015/07346727/12OmNzmclDS", "parentPublication": { "id": "proceedings/ic3/2015/7947/0", "title": "2015 Eighth International Conference on Contemporary Computing (IC3)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/si/2011/06/05428768", "title": "A novel pixel design for AM-OLED displays using nanocrystalline silicon TFTs", "doi": null, "abstractUrl": "/journal/si/2011/06/05428768/13rRUwd9CJ4", "parentPublication": { "id": "trans/si", "title": "IEEE Transactions on Very Large Scale Integration (VLSI) Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08798364", "title": "Color Moir&#x00E9; Reduction Method for Thin Integral 3D Displays", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08798364/1cJ0XcgYa1W", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/02/09146716", "title": "Volumetric Head-Mounted Display With Locally 
Adaptive Focal Blocks", "doi": null, "abstractUrl": "/journal/tg/2022/02/09146716/1lHjPSqVrpK", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2021/4509/0/450900j175", "title": "Image Restoration for Under-Display Camera", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2021/450900j175/1yeKP5vm9qM", "parentPublication": { "id": "proceedings/cvpr/2021/4509/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2021/4509/0/450900a662", "title": "Removing Diffraction Image Artifacts in Under-Display Camera via Dynamic Skip Connection Network", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2021/450900a662/1yeLj4UKSXe", "parentPublication": { "id": "proceedings/cvpr/2021/4509/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "09415174", "articleId": "1t2ieevWpnG", "__typename": "AdjacentArticleType" }, "next": { "fno": "09416824", "articleId": "1t8VOdQnK5G", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "1ugE0MCQwjC", "name": "ttp202107-09416801s1-supp1-3075978.pdf", "location": "https://www.computer.org/csdl/api/v1/extra/ttp202107-09416801s1-supp1-3075978.pdf", "extension": "pdf", "size": "20.9 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "12OmNBCZnTW", "title": "July/August", "year": "1994", "issueNum": "04", "idPrefix": "cg", "pubType": "magazine", "volume": "14", "label": "July/August", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUwInvnd", "doi": "10.1109/38.291531", "abstract": "The computational expense of volume rendering motivates the development of parallel implementations on multicomputers. Parallelism achieves higher frame rates, which provide more natural viewing control and enhanced comprehension of 3D structure. Although many parallel implementations exist, we have no framework to compare their relative merits independent of host hardware. The article attempts to establish that framework by enumerating and classifying parallel volume-rendering algorithms suitable for multicomputers with distributed memory and a communication network. It determined the communication costs for classes of parallel algorithms by considering their inherent communication requirements.", "abstracts": [ { "abstractType": "Regular", "content": "The computational expense of volume rendering motivates the development of parallel implementations on multicomputers. Parallelism achieves higher frame rates, which provide more natural viewing control and enhanced comprehension of 3D structure. Although many parallel implementations exist, we have no framework to compare their relative merits independent of host hardware. The article attempts to establish that framework by enumerating and classifying parallel volume-rendering algorithms suitable for multicomputers with distributed memory and a communication network. 
It determined the communication costs for classes of parallel algorithms by considering their inherent communication requirements.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "The computational expense of volume rendering motivates the development of parallel implementations on multicomputers. Parallelism achieves higher frame rates, which provide more natural viewing control and enhanced comprehension of 3D structure. Although many parallel implementations exist, we have no framework to compare their relative merits independent of host hardware. The article attempts to establish that framework by enumerating and classifying parallel volume-rendering algorithms suitable for multicomputers with distributed memory and a communication network. It determined the communication costs for classes of parallel algorithms by considering their inherent communication requirements.", "title": "Communication Costs for Parallel Volume-Rendering Algorithms", "normalizedTitle": "Communication Costs for Parallel Volume-Rendering Algorithms", "fno": "mcg1994040049", "hasPdf": true, "idPrefix": "cg", "keywords": [], "authors": [ { "givenName": "Ulrich", "surname": "Neumann", "fullName": "Ulrich Neumann", "affiliation": null, "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": false, "isOpenAccess": false, "issueNum": "04", "pubDate": "1994-07-01 00:00:00", "pubType": "mags", "pages": "49-58", "year": "1994", "issn": "0272-1716", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [], "adjacentArticles": { "previous": { "fno": "mcg1994040041", "articleId": "13rRUxE04nf", "__typename": "AdjacentArticleType" }, "next": { "fno": "mcg1994040059", "articleId": "13rRUxE04ng", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNBCZnTW", "title": "July/August", "year": "1994", "issueNum": "04", "idPrefix": "cg", "pubType": "magazine", "volume": "14", "label": "July/August", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUy3gmXs", "doi": "10.1109/38.291528", "abstract": "We describe a classification scheme that we believe provides a more structured framework for reasoning about parallel rendering. The scheme is based on where the sort from object coordinates to screen coordinates occurs, which we believe is fundamental whenever both geometry processing and rasterization are performed in parallel. This classification scheme supports the analysis of computational and communication costs, and encompasses the bulk of current and proposed highly parallel renderers - both hardware and software. We begin by reviewing the standard feed-forward rendering pipeline, showing how different ways of parallelizing it lead to three classes of rendering algorithms. Next, we consider each of these classes in detail, analyzing their aggregate processing and communication costs, possible variations, and constraints they may impose on rendering applications. Finally, we use these analyses to compare the classes and identify when each is likely to be preferable.", "abstracts": [ { "abstractType": "Regular", "content": "We describe a classification scheme that we believe provides a more structured framework for reasoning about parallel rendering. The scheme is based on where the sort from object coordinates to screen coordinates occurs, which we believe is fundamental whenever both geometry processing and rasterization are performed in parallel. This classification scheme supports the analysis of computational and communication costs, and encompasses the bulk of current and proposed highly parallel renderers - both hardware and software. 
We begin by reviewing the standard feed-forward rendering pipeline, showing how different ways of parallelizing it lead to three classes of rendering algorithms. Next, we consider each of these classes in detail, analyzing their aggregate processing and communication costs, possible variations, and constraints they may impose on rendering applications. Finally, we use these analyses to compare the classes and identify when each is likely to be preferable.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We describe a classification scheme that we believe provides a more structured framework for reasoning about parallel rendering. The scheme is based on where the sort from object coordinates to screen coordinates occurs, which we believe is fundamental whenever both geometry processing and rasterization are performed in parallel. This classification scheme supports the analysis of computational and communication costs, and encompasses the bulk of current and proposed highly parallel renderers - both hardware and software. We begin by reviewing the standard feed-forward rendering pipeline, showing how different ways of parallelizing it lead to three classes of rendering algorithms. Next, we consider each of these classes in detail, analyzing their aggregate processing and communication costs, possible variations, and constraints they may impose on rendering applications. 
Finally, we use these analyses to compare the classes and identify when each is likely to be preferable.", "title": "A Sorting Classification of Parallel Rendering", "normalizedTitle": "A Sorting Classification of Parallel Rendering", "fno": "mcg1994040023", "hasPdf": true, "idPrefix": "cg", "keywords": [], "authors": [ { "givenName": "Steven", "surname": "Molnar", "fullName": "Steven Molnar", "affiliation": null, "__typename": "ArticleAuthorType" }, { "givenName": "Michael", "surname": "Cox", "fullName": "Michael Cox", "affiliation": null, "__typename": "ArticleAuthorType" }, { "givenName": "David", "surname": "Ellsworth", "fullName": "David Ellsworth", "affiliation": null, "__typename": "ArticleAuthorType" }, { "givenName": "Henry", "surname": "Fuchs", "fullName": "Henry Fuchs", "affiliation": null, "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": false, "isOpenAccess": false, "issueNum": "04", "pubDate": "1994-07-01 00:00:00", "pubType": "mags", "pages": "23-32", "year": "1994", "issn": "0272-1716", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [], "adjacentArticles": { "previous": { "fno": "mcg1994040021", "articleId": "13rRUygBw1Y", "__typename": "AdjacentArticleType" }, "next": { "fno": "mcg1994040033", "articleId": "13rRUwgQpkR", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNrFBPWq", "title": "September-October", "year": "2006", "issueNum": "05", "idPrefix": "tg", "pubType": "journal", "volume": "12", "label": "September-October", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUwkxc5i", "doi": "10.1109/TVCG.2006.164", "abstract": "Time-varying, multi-variate, and comparative data sets are not easily visualized due to the amount of data that is presented to the user at once. By combining several volumes together with different operators into one visualized volume, the user is able to compare values from different data sets in space over time, run, or field without having to mentally switch between different renderings of individual data sets. In this paper, we propose using a volume shader where the user is given the ability to easily select and operate on many data volumes to create comparison relationships. The user specifies an expression with set and numerical operations and her data to see relationships between data fields. Furthermore, we render the contextual information of the volume shader by converting it to a volume tree. We visualize the different levels and nodes of the volume tree so that the user can see the results of suboperations. This gives the user a deeper understanding of the final visualization, by seeing how the parts of the whole are operationally constructed.", "abstracts": [ { "abstractType": "Regular", "content": "Time-varying, multi-variate, and comparative data sets are not easily visualized due to the amount of data that is presented to the user at once. By combining several volumes together with different operators into one visualized volume, the user is able to compare values from different data sets in space over time, run, or field without having to mentally switch between different renderings of individual data sets. 
In this paper, we propose using a volume shader where the user is given the ability to easily select and operate on many data volumes to create comparison relationships. The user specifies an expression with set and numerical operations and her data to see relationships between data fields. Furthermore, we render the contextual information of the volume shader by converting it to a volume tree. We visualize the different levels and nodes of the volume tree so that the user can see the results of suboperations. This gives the user a deeper understanding of the final visualization, by seeing how the parts of the whole are operationally constructed.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Time-varying, multi-variate, and comparative data sets are not easily visualized due to the amount of data that is presented to the user at once. By combining several volumes together with different operators into one visualized volume, the user is able to compare values from different data sets in space over time, run, or field without having to mentally switch between different renderings of individual data sets. In this paper, we propose using a volume shader where the user is given the ability to easily select and operate on many data volumes to create comparison relationships. The user specifies an expression with set and numerical operations and her data to see relationships between data fields. Furthermore, we render the contextual information of the volume shader by converting it to a volume tree. We visualize the different levels and nodes of the volume tree so that the user can see the results of suboperations. 
This gives the user a deeper understanding of the final visualization, by seeing how the parts of the whole are operationally constructed.", "title": "Multi-variate, Time Varying, and Comparative Visualization with Contextual Cues", "normalizedTitle": "Multi-variate, Time Varying, and Comparative Visualization with Contextual Cues", "fno": "v0909", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Data Visualization", "Animation", "Switches", "Computer Science", "Humans", "Transfer Functions", "Filling", "Focus Context", "Multi Variate", "Time Varying", "Comparative" ], "authors": [ { "givenName": "Jonathan", "surname": "Woodring", "fullName": "Jonathan Woodring", "affiliation": "Dept. of Comput. Sci. & Eng., Ohio State Univ., Columbus, OH", "__typename": "ArticleAuthorType" }, { "givenName": "Han-Wei", "surname": "Shen", "fullName": "Han-Wei Shen", "affiliation": "Dept. of Comput. Sci. & Eng., Ohio State Univ., Columbus, OH", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "05", "pubDate": "2006-09-01 00:00:00", "pubType": "trans", "pages": "909-916", "year": "2006", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/visual/1990/2083/0/00146362", "title": "A procedural interface for volume rendering", "doi": null, "abstractUrl": "/proceedings-article/visual/1990/00146362/12OmNApLGMS", "parentPublication": { "id": "proceedings/visual/1990/2083/0", "title": "1990 First IEEE Conference on Visualization", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/pacificvis/2017/5738/0/08031593", "title": "Using interactive particle-based rendering to visualize a large-scale time-varying unstructured volume with mixed cell types", "doi": null, "abstractUrl": "/proceedings-article/pacificvis/2017/08031593/12OmNBRKwBs", 
"parentPublication": { "id": "proceedings/pacificvis/2017/5738/0", "title": "2017 IEEE Pacific Visualization Symposium (PacificVis)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/visual/1996/864/0/00568133", "title": "Three dimensional visualization of proteins in cellular interactions", "doi": null, "abstractUrl": "/proceedings-article/visual/1996/00568133/12OmNBSSVcj", "parentPublication": { "id": "proceedings/visual/1996/864/0", "title": "Proceedings of Seventh Annual IEEE Visualization '96", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/2003/2030/0/20300055", "title": "High Dimensional Direct Rendering of Time-Varying Volumetric Data", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2003/20300055/12OmNqyUUDX", "parentPublication": { "id": "proceedings/ieee-vis/2003/2030/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vis/2005/9462/0/01532857", "title": "Illustration-inspired techniques for visualizing time-varying data", "doi": null, "abstractUrl": "/proceedings-article/vis/2005/01532857/12OmNwFicZ4", "parentPublication": { "id": "proceedings/vis/2005/9462/0", "title": "IEEE Visualization 2005", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vg/2005/26/0/01500520", "title": "Visualization of time-varying volumetric data using differential time-histogram table", "doi": null, "abstractUrl": "/proceedings-article/vg/2005/01500520/12OmNx9FhRn", "parentPublication": { "id": "proceedings/vg/2005/26/0", "title": "Volume Graphics 2005", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/2005/2766/0/01532805", "title": "High performance volume splatting for visualization of neurovascular data", "doi": null, "abstractUrl": 
"/proceedings-article/ieee-vis/2005/01532805/12OmNyQGS8K", "parentPublication": { "id": "proceedings/ieee-vis/2005/2766/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2006/02/v0208", "title": "Visualization of Boundaries in Volumetric Data Sets Using LH Histograms", "doi": null, "abstractUrl": "/journal/tg/2006/02/v0208/13rRUwbs2aT", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2012/12/ttg2012122345", "title": "Automatic Tuning of Spatially Varying Transfer Functions for Blood Vessel Visualization", "doi": null, "abstractUrl": "/journal/tg/2012/12/ttg2012122345/13rRUx0xPIE", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/01/08467383", "title": "Interactive obstruction-free lensing for volumetric data visualization", "doi": null, "abstractUrl": "/journal/tg/2019/01/08467383/17D45WnnFYV", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "v0901", "articleId": "13rRUyoPSOX", "__typename": "AdjacentArticleType" }, "next": { "fno": "v0917", "articleId": "13rRUwcAqq5", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNCy2L3z", "title": "Oct.", "year": "2012", "issueNum": "10", "idPrefix": "tg", "pubType": "journal", "volume": "18", "label": "Oct.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUEgs2BU", "doi": "10.1109/TVCG.2011.152", "abstract": "This paper proposes an algorithm to build a set of orthogonal Point-Based Manifold Harmonic Bases (PB-MHB) for spectral analysis over point-sampled manifold surfaces. To ensure that PB-MHB are orthogonal to each other, it is necessary to have symmetrizable discrete Laplace-Beltrami Operator (LBO) over the surfaces. Existing converging discrete LBO for point clouds, as proposed by Belkin et al. [CHECK END OF SENTENCE], is not guaranteed to be symmetrizable. We build a new point-wisely discrete LBO over the point-sampled surface that is guaranteed to be symmetrizable, and prove its convergence. By solving the eigen problem related to the new operator, we define a set of orthogonal bases over the point cloud. Experiments show that the new operator is converging better than other symmetrizable discrete Laplacian operators (such as graph Laplacian) defined on point-sampled surfaces, and can provide orthogonal bases for further spectral geometric analysis and processing tasks.", "abstracts": [ { "abstractType": "Regular", "content": "This paper proposes an algorithm to build a set of orthogonal Point-Based Manifold Harmonic Bases (PB-MHB) for spectral analysis over point-sampled manifold surfaces. To ensure that PB-MHB are orthogonal to each other, it is necessary to have symmetrizable discrete Laplace-Beltrami Operator (LBO) over the surfaces. Existing converging discrete LBO for point clouds, as proposed by Belkin et al. [CHECK END OF SENTENCE], is not guaranteed to be symmetrizable. 
We build a new point-wisely discrete LBO over the point-sampled surface that is guaranteed to be symmetrizable, and prove its convergence. By solving the eigen problem related to the new operator, we define a set of orthogonal bases over the point cloud. Experiments show that the new operator is converging better than other symmetrizable discrete Laplacian operators (such as graph Laplacian) defined on point-sampled surfaces, and can provide orthogonal bases for further spectral geometric analysis and processing tasks.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "This paper proposes an algorithm to build a set of orthogonal Point-Based Manifold Harmonic Bases (PB-MHB) for spectral analysis over point-sampled manifold surfaces. To ensure that PB-MHB are orthogonal to each other, it is necessary to have symmetrizable discrete Laplace-Beltrami Operator (LBO) over the surfaces. Existing converging discrete LBO for point clouds, as proposed by Belkin et al. [CHECK END OF SENTENCE], is not guaranteed to be symmetrizable. We build a new point-wisely discrete LBO over the point-sampled surface that is guaranteed to be symmetrizable, and prove its convergence. By solving the eigen problem related to the new operator, we define a set of orthogonal bases over the point cloud. 
Experiments show that the new operator is converging better than other symmetrizable discrete Laplacian operators (such as graph Laplacian) defined on point-sampled surfaces, and can provide orthogonal bases for further spectral geometric analysis and processing tasks.", "title": "Point-Based Manifold Harmonics", "normalizedTitle": "Point-Based Manifold Harmonics", "fno": "ttg2012101693", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Manifolds", "Symmetric Matrices", "Eigenvalues And Eigenfunctions", "Harmonic Analysis", "Convergence", "Laplace Equations", "Approximation Methods", "Eigenfunction", "Point Sampled Surface", "Laplace Beltrami Operator" ], "authors": [ { "givenName": "Yang", "surname": "Liu", "fullName": "Yang Liu", "affiliation": "University of Texas at Dallas, Richardson", "__typename": "ArticleAuthorType" }, { "givenName": "Balakrishnan", "surname": "Prabhakaran", "fullName": "Balakrishnan Prabhakaran", "affiliation": "University of Texas at Dallas, Richardson", "__typename": "ArticleAuthorType" }, { "givenName": "Xiaohu", "surname": "Guo", "fullName": "Xiaohu Guo", "affiliation": "University of Texas at Dallas, Richardson", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "10", "pubDate": "2012-10-01 00:00:00", "pubType": "trans", "pages": "1693-1703", "year": "2012", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/gmp/2002/1674/0/16740119", "title": "Fair Triangle Mesh Generation with Discrete Elastica", "doi": null, "abstractUrl": "/proceedings-article/gmp/2002/16740119/12OmNAhxjFi", "parentPublication": { "id": "proceedings/gmp/2002/1674/0", "title": "Geometric Modeling and Processing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2012/1226/0/028P1A28", "title": "Geometric understanding of 
point clouds using Laplace-Beltrami operator", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2012/028P1A28/12OmNApcub3", "parentPublication": { "id": "proceedings/cvpr/2012/1226/0", "title": "2012 IEEE Conference on Computer Vision and Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/smi/2010/7259/0/05521461", "title": "Point Cloud Skeletons via Laplacian Based Contraction", "doi": null, "abstractUrl": "/proceedings-article/smi/2010/05521461/12OmNCvLXYS", "parentPublication": { "id": "proceedings/smi/2010/7259/0", "title": "Shape Modeling International (SMI 2010)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icicic/2008/3161/0/31610453", "title": "Measurement of Harmonics and Inter-Harmonics Based on DWFFT", "doi": null, "abstractUrl": "/proceedings-article/icicic/2008/31610453/12OmNqBKTUi", "parentPublication": { "id": "proceedings/icicic/2008/3161/0", "title": "Innovative Computing ,Information and Control, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cgiv/2011/4484/0/4484a001", "title": "The DSO Feature Based Point Cloud Simplification", "doi": null, "abstractUrl": "/proceedings-article/cgiv/2011/4484a001/12OmNrH1PBa", "parentPublication": { "id": "proceedings/cgiv/2011/4484/0", "title": "2011 Eighth International Conference Computer Graphics, Imaging and Visualization", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cgi/2001/1007/0/10070275", "title": "A New Approach of Point-Based Rendering", "doi": null, "abstractUrl": "/proceedings-article/cgi/2001/10070275/12OmNvSbBxL", "parentPublication": { "id": "proceedings/cgi/2001/1007/0", "title": "Proceedings. 
Computer Graphics International 2001", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2014/5118/0/5118a867", "title": "Continuous Manifold Based Adaptation for Evolving Visual Domains", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2014/5118a867/12OmNwwd2Ue", "parentPublication": { "id": "proceedings/cvpr/2014/5118/0", "title": "2014 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icassp/2009/2353/0/04959579", "title": "Morphing of transient sounds based on shift-invariant discrete wavelet transform and singular value decomposition", "doi": null, "abstractUrl": "/proceedings-article/icassp/2009/04959579/12OmNy3RRBc", "parentPublication": { "id": "proceedings/icassp/2009/2353/0", "title": "Acoustics, Speech, and Signal Processing, IEEE International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2019/3131/0/313100a047", "title": "Effective Rotation-Invariant Point CNN with Spherical Harmonics Kernels", "doi": null, "abstractUrl": "/proceedings-article/3dv/2019/313100a047/1ezREpXIpZC", "parentPublication": { "id": "proceedings/3dv/2019/3131/0", "title": "2019 International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2020/1331/0/09102861", "title": "3d Dynamic Point Cloud Inpainting Via Temporal Consistency On Graphs", "doi": null, "abstractUrl": "/proceedings-article/icme/2020/09102861/1kwripvFDZm", "parentPublication": { "id": "proceedings/icme/2020/1331/0", "title": "2020 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "ttg2012101678", "articleId": "13rRUxBJhFt", "__typename": 
"AdjacentArticleType" }, "next": { "fno": "ttg2012101704", "articleId": "13rRUNvgyWl", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "17ShDTXFgLV", "name": "ttg2012101693s.pdf", "location": "https://www.computer.org/csdl/api/v1/extra/ttg2012101693s.pdf", "extension": "pdf", "size": "218 kB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "12OmNro0HSY", "title": "Sept.", "year": "2013", "issueNum": "09", "idPrefix": "tp", "pubType": "journal", "volume": "35", "label": "Sept.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUyeTVjh", "doi": "10.1109/TPAMI.2012.275", "abstract": "This paper presents a new distance for measuring shape dissimilarity between objects. Recent publications introduced the use of eigenvalues of the Laplace operator as compact shape descriptors. Here, we revisit the eigenvalues to define a proper distance, called Weighted Spectral Distance (WESD), for quantifying shape dissimilarity. The definition of WESD is derived through analyzing the heat trace. This analysis provides the proposed distance with an intuitive meaning and mathematically links it to the intrinsic geometry of objects. We analyze the resulting distance definition, present and prove its important theoretical properties. Some of these properties include: 1) WESD is defined over the entire sequence of eigenvalues yet it is guaranteed to converge, 2) it is a pseudometric, 3) it is accurately approximated with a finite number of eigenvalues, and 4) it can be mapped to the ([0,1)) interval. Last, experiments conducted on synthetic and real objects are presented. These experiments highlight the practical benefits of WESD for applications in vision and medical image analysis.", "abstracts": [ { "abstractType": "Regular", "content": "This paper presents a new distance for measuring shape dissimilarity between objects. Recent publications introduced the use of eigenvalues of the Laplace operator as compact shape descriptors. Here, we revisit the eigenvalues to define a proper distance, called Weighted Spectral Distance (WESD), for quantifying shape dissimilarity. The definition of WESD is derived through analyzing the heat trace. 
This analysis provides the proposed distance with an intuitive meaning and mathematically links it to the intrinsic geometry of objects. We analyze the resulting distance definition, present and prove its important theoretical properties. Some of these properties include: 1) WESD is defined over the entire sequence of eigenvalues yet it is guaranteed to converge, 2) it is a pseudometric, 3) it is accurately approximated with a finite number of eigenvalues, and 4) it can be mapped to the ([0,1)) interval. Last, experiments conducted on synthetic and real objects are presented. These experiments highlight the practical benefits of WESD for applications in vision and medical image analysis.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "This paper presents a new distance for measuring shape dissimilarity between objects. Recent publications introduced the use of eigenvalues of the Laplace operator as compact shape descriptors. Here, we revisit the eigenvalues to define a proper distance, called Weighted Spectral Distance (WESD), for quantifying shape dissimilarity. The definition of WESD is derived through analyzing the heat trace. This analysis provides the proposed distance with an intuitive meaning and mathematically links it to the intrinsic geometry of objects. We analyze the resulting distance definition, present and prove its important theoretical properties. Some of these properties include: 1) WESD is defined over the entire sequence of eigenvalues yet it is guaranteed to converge, 2) it is a pseudometric, 3) it is accurately approximated with a finite number of eigenvalues, and 4) it can be mapped to the ([0,1)) interval. Last, experiments conducted on synthetic and real objects are presented. 
These experiments highlight the practical benefits of WESD for applications in vision and medical image analysis.", "title": "WESD--Weighted Spectral Distance for Measuring Shape Dissimilarity", "normalizedTitle": "WESD--Weighted Spectral Distance for Measuring Shape Dissimilarity", "fno": "ttp2013092284", "hasPdf": true, "idPrefix": "tp", "keywords": [ "Shape", "Eigenvalues And Eigenfunctions", "Geometry", "Laplace Equations", "Heating", "Equations", "Global Positioning System", "Medical Images", "Shape Distance", "Spectral Distance", "Laplace Operator", "Laplace Spectrum", "Segmentations", "Label Maps" ], "authors": [ { "givenName": "E.", "surname": "Konukoglu", "fullName": "E. Konukoglu", "affiliation": "Med. Sch., Athinoula A. Martinos Center for Biomed. Imaging, Harvard Univ., Cambridge, MA, USA", "__typename": "ArticleAuthorType" }, { "givenName": "B.", "surname": "Glocker", "fullName": "B. Glocker", "affiliation": "Microsoft Res. Cambridge, Cambridge, UK", "__typename": "ArticleAuthorType" }, { "givenName": "A.", "surname": "Criminisi", "fullName": "A. Criminisi", "affiliation": "Microsoft Res. Cambridge, Cambridge, UK", "__typename": "ArticleAuthorType" }, { "givenName": "K. M.", "surname": "Pohl", "fullName": "K. M. Pohl", "affiliation": "Univ. 
of Pennsylvania, Philadelphia, PA, USA", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "09", "pubDate": "2013-09-01 00:00:00", "pubType": "trans", "pages": "2284-2297", "year": "2013", "issn": "0162-8828", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/icvrv/2014/6854/0/6854a290", "title": "A Scale-Invariant Diffusion Distance for Non-rigid Shape Analysis", "doi": null, "abstractUrl": "/proceedings-article/icvrv/2014/6854a290/12OmNAnMuwS", "parentPublication": { "id": "proceedings/icvrv/2014/6854/0", "title": "2014 International Conference on Virtual Reality and Visualization (ICVRV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2014/5118/0/5118c313", "title": "Stable and Informative Spectral Signatures for Graph Matching", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2014/5118c313/12OmNAnuTkg", "parentPublication": { "id": "proceedings/cvpr/2014/5118/0", "title": "2014 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cad-graphics/2013/2576/0/06814992", "title": "GBI-SA: GBI Feature with Subtle Adjustment for Robust Non-rigid 3D Shape Retrieval", "doi": null, "abstractUrl": "/proceedings-article/cad-graphics/2013/06814992/12OmNB8Cjas", "parentPublication": { "id": "proceedings/cad-graphics/2013/2576/0", "title": "2013 International Conference on Computer-Aided Design and Computer Graphics (CAD/Graphics)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdm/2017/3835/0/3835a949", "title": "Fast Compressive Spectral Clustering", "doi": null, "abstractUrl": "/proceedings-article/icdm/2017/3835a949/12OmNC3Xhju", "parentPublication": { "id": 
"proceedings/icdm/2017/3835/0", "title": "2017 IEEE International Conference on Data Mining (ICDM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2008/2174/0/04761245", "title": "Object recognition using graph spectral invariants", "doi": null, "abstractUrl": "/proceedings-article/icpr/2008/04761245/12OmNwErpPj", "parentPublication": { "id": "proceedings/icpr/2008/2174/0", "title": "ICPR 2008 19th International Conference on Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2011/0529/0/05981743", "title": "Laplace-Beltrami eigenfunction metrics and geodesic shape distance features for shape matching in synthetic aperture sonar", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2011/05981743/12OmNwkhTgo", "parentPublication": { "id": "proceedings/cvprw/2011/0529/0", "title": "CVPR 2011 WORKSHOPS", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2016/5407/0/5407a499", "title": "SpectroMeter: Amortized Sublinear Spectral Approximation of Distance on Graphs", "doi": null, "abstractUrl": "/proceedings-article/3dv/2016/5407a499/12OmNzdoMNK", "parentPublication": { "id": "proceedings/3dv/2016/5407/0", "title": "2016 Fourth International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/02/08449121", "title": "Hamiltonian Operator for Spectral Shape Analysis", "doi": null, "abstractUrl": "/journal/tg/2020/02/08449121/13rRUxlgxOs", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2014/01/ttp2014010171", "title": "Learning Spectral Descriptors for Deformable Shape Correspondence", "doi": null, "abstractUrl": 
"/journal/tp/2014/01/ttp2014010171/13rRUxly8YE", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bracis/2019/4253/0/425300a783", "title": "A Probabilistic Algorithm to Estimate the Spectral Moments of Large Undirected Weighted Graphs", "doi": null, "abstractUrl": "/proceedings-article/bracis/2019/425300a783/1fHkFVCOVd6", "parentPublication": { "id": "proceedings/bracis/2019/4253/0", "title": "2019 8th Brazilian Conference on Intelligent Systems (BRACIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "ttp2013092270", "articleId": "13rRUwvT9hr", "__typename": "AdjacentArticleType" }, "next": { "fno": "ttp2013092298", "articleId": "13rRUB7a12h", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "17ShDTXWRIO", "name": "ttp2013092284s.pdf", "location": "https://www.computer.org/csdl/api/v1/extra/ttp2013092284s.pdf", "extension": "pdf", "size": "121 kB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "1MTOUEFAeT6", "title": "June", "year": "2023", "issueNum": "06", "idPrefix": "tp", "pubType": "journal", "volume": "45", "label": "June", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1IxvTvt5Qty", "doi": "10.1109/TPAMI.2022.3224253", "abstract": "This article presents a novel intrinsic image transfer (IIT) algorithm for image illumination manipulation, which creates a local image translation between two illumination surfaces. This model is built on an optimization-based framework composed of illumination, reflectance and content photo-realistic losses, respectively. Each loss is first defined on the corresponding sub-layers factorized by an intrinsic image decomposition and then reduced under the well-known spatial-varying illumination illumination-invariant reflectance prior knowledge. We illustrate that all losses, with the aid of an &#x201C;exemplar&#x201D; image, can be directly defined on images without the necessity of taking an intrinsic image decomposition, thereby giving a closed-form solution to image illumination manipulation. We also demonstrate its versatility and benefits to several illumination-related tasks: illumination compensation, image enhancement and tone mapping, and high dynamic range (HDR) image compression, and show their high-quality results on natural image datasets.", "abstracts": [ { "abstractType": "Regular", "content": "This article presents a novel intrinsic image transfer (IIT) algorithm for image illumination manipulation, which creates a local image translation between two illumination surfaces. This model is built on an optimization-based framework composed of illumination, reflectance and content photo-realistic losses, respectively. 
Each loss is first defined on the corresponding sub-layers factorized by an intrinsic image decomposition and then reduced under the well-known spatial-varying illumination illumination-invariant reflectance prior knowledge. We illustrate that all losses, with the aid of an &#x201C;exemplar&#x201D; image, can be directly defined on images without the necessity of taking an intrinsic image decomposition, thereby giving a closed-form solution to image illumination manipulation. We also demonstrate its versatility and benefits to several illumination-related tasks: illumination compensation, image enhancement and tone mapping, and high dynamic range (HDR) image compression, and show their high-quality results on natural image datasets.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "This article presents a novel intrinsic image transfer (IIT) algorithm for image illumination manipulation, which creates a local image translation between two illumination surfaces. This model is built on an optimization-based framework composed of illumination, reflectance and content photo-realistic losses, respectively. Each loss is first defined on the corresponding sub-layers factorized by an intrinsic image decomposition and then reduced under the well-known spatial-varying illumination illumination-invariant reflectance prior knowledge. We illustrate that all losses, with the aid of an “exemplar” image, can be directly defined on images without the necessity of taking an intrinsic image decomposition, thereby giving a closed-form solution to image illumination manipulation. 
We also demonstrate its versatility and benefits to several illumination-related tasks: illumination compensation, image enhancement and tone mapping, and high dynamic range (HDR) image compression, and show their high-quality results on natural image datasets.", "title": "Intrinsic Image Transfer for Illumination Manipulation", "normalizedTitle": "Intrinsic Image Transfer for Illumination Manipulation", "fno": "09961945", "hasPdf": true, "idPrefix": "tp", "keywords": [ "Lighting", "Image Decomposition", "Task Analysis", "Image Coding", "Visualization", "Image Color Analysis", "Computational Modeling", "Image Illumination And Reflectance", "Intrinsic Image Decomposation", "Intrinsic Images Transfer", "Illumination Manipulation" ], "authors": [ { "givenName": "Junqing", "surname": "Huang", "fullName": "Junqing Huang", "affiliation": "Department of Mathematics: Analysis, Logic and Discrete Mathematics, Faculty of Sciences, Ghent University, Ghent, Belgium", "__typename": "ArticleAuthorType" }, { "givenName": "Michael", "surname": "Ruzhansky", "fullName": "Michael Ruzhansky", "affiliation": "Department of Mathematics: Analysis, Logic and Discrete Mathematics, Faculty of Sciences, Ghent University, Ghent, Belgium", "__typename": "ArticleAuthorType" }, { "givenName": "Qianying", "surname": "Zhang", "fullName": "Qianying Zhang", "affiliation": "Department of Liberal Arts, Shenzhen Institute of Information Technology, Shenzhen, China", "__typename": "ArticleAuthorType" }, { "givenName": "Haihui", "surname": "Wang", "fullName": "Haihui Wang", "affiliation": "School of Mathematical Sciences, Beihang University, Beijing, China", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "06", "pubDate": "2023-06-01 00:00:00", "pubType": "trans", "pages": "7444-7456", "year": "2023", "issn": "0162-8828", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, 
"recommendedArticles": [ { "id": "proceedings/iccv/2015/8391/0/8391d469", "title": "Learning Data-Driven Reflectance Priors for Intrinsic Image Decomposition", "doi": null, "abstractUrl": "/proceedings-article/iccv/2015/8391d469/12OmNBoNrqU", "parentPublication": { "id": "proceedings/iccv/2015/8391/0", "title": "2015 IEEE International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2011/0394/0/05995507", "title": "Intrinsic images using optimization", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2011/05995507/12OmNCbU3cE", "parentPublication": { "id": "proceedings/cvpr/2011/0394/0", "title": "CVPR 2011", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iihmsp/2006/2745/0/04041690", "title": "Recovering Intrinsic Images from Weighted Edge Maps", "doi": null, "abstractUrl": "/proceedings-article/iihmsp/2006/04041690/12OmNCfjeyV", "parentPublication": { "id": "proceedings/iihmsp/2006/2745/0", "title": "2006 International Conference on Intelligent Information Hiding and Multimedia", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2015/8391/0/8391a172", "title": "A Comprehensive Multi-Illuminant Dataset for Benchmarking of the Intrinsic Image Algorithms", "doi": null, "abstractUrl": "/proceedings-article/iccv/2015/8391a172/12OmNyQpgMj", "parentPublication": { "id": "proceedings/iccv/2015/8391/0", "title": "2015 IEEE International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2015/8391/0/8391a433", "title": "Intrinsic Decomposition of Image Sequences from Local Temporal Variations", "doi": null, "abstractUrl": "/proceedings-article/iccv/2015/8391a433/12OmNzC5Tdg", "parentPublication": { "id": "proceedings/iccv/2015/8391/0", "title": "2015 IEEE International 
Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2018/6420/0/642000g430", "title": "Multispectral Image Intrinsic Decomposition via Subspace Constraint", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2018/642000g430/17D45XeKgro", "parentPublication": { "id": "proceedings/cvpr/2018/6420/0", "title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600t9758", "title": "PIE-Net: Photometric Invariant Edge Guided Network for Intrinsic Image Decomposition", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600t9758/1H0N3uaU7mM", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2020/7168/0/716800d245", "title": "Unsupervised Learning for Intrinsic Image Decomposition From a Single Image", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2020/716800d245/1m3obXljyCI", "parentPublication": { "id": "proceedings/cvpr/2020/7168/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2022/12/09625763", "title": "Unsupervised Intrinsic Image Decomposition Using Internal Self-Similarity Cues", "doi": null, "abstractUrl": "/journal/tp/2022/12/09625763/1yLTnG9Uisw", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2021/4509/0/450900q6362", "title": "Intrinsic Image Harmonization", "doi": null, "abstractUrl": 
"/proceedings-article/cvpr/2021/450900q6362/1yeIEpOSHgA", "parentPublication": { "id": "proceedings/cvpr/2021/4509/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "09964439", "articleId": "1IFEEJ0hSCs", "__typename": "AdjacentArticleType" }, "next": { "fno": "09933726", "articleId": "1HVshVy8jYI", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "1I6Nvxq2hxe", "title": "Dec.", "year": "2022", "issueNum": "12", "idPrefix": "tp", "pubType": "journal", "volume": "44", "label": "Dec.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1yLTnG9Uisw", "doi": "10.1109/TPAMI.2021.3129795", "abstract": "Recent learning-based intrinsic image decomposition methods have achieved remarkable progress. However, they usually require massive ground truth intrinsic images for supervised learning, which limits their applicability on real-world images since obtaining ground truth intrinsic decomposition for natural images is very challenging. In this paper, we present an unsupervised framework that is able to learn the decomposition effectively from a single natural image by training solely with the image itself. Our approach is built upon the observations that the reflectance of a natural image typically has high internal self-similarity of patches, and a convolutional generation network tends to boost the self-similarity of an image when trained for image reconstruction. Based on the observations, an unsupervised intrinsic decomposition network (UIDNet) consisting of two fully convolutional encoder-decoder sub-networks, i.e., reflectance prediction network (RPN) and shading prediction network (SPN), is devised to decompose an image into reflectance and shading by promoting the internal self-similarity of the reflectance component, in a way that jointly trains RPN and SPN to reproduce the given image. A novel loss function is also designed to make effective the training for intrinsic decomposition. Experimental results on three benchmark real-world datasets demonstrate the superiority of the proposed method.", "abstracts": [ { "abstractType": "Regular", "content": "Recent learning-based intrinsic image decomposition methods have achieved remarkable progress. 
However, they usually require massive ground truth intrinsic images for supervised learning, which limits their applicability on real-world images since obtaining ground truth intrinsic decomposition for natural images is very challenging. In this paper, we present an unsupervised framework that is able to learn the decomposition effectively from a single natural image by training solely with the image itself. Our approach is built upon the observations that the reflectance of a natural image typically has high internal self-similarity of patches, and a convolutional generation network tends to boost the self-similarity of an image when trained for image reconstruction. Based on the observations, an unsupervised intrinsic decomposition network (UIDNet) consisting of two fully convolutional encoder-decoder sub-networks, i.e., reflectance prediction network (RPN) and shading prediction network (SPN), is devised to decompose an image into reflectance and shading by promoting the internal self-similarity of the reflectance component, in a way that jointly trains RPN and SPN to reproduce the given image. A novel loss function is also designed to make effective the training for intrinsic decomposition. Experimental results on three benchmark real-world datasets demonstrate the superiority of the proposed method.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Recent learning-based intrinsic image decomposition methods have achieved remarkable progress. However, they usually require massive ground truth intrinsic images for supervised learning, which limits their applicability on real-world images since obtaining ground truth intrinsic decomposition for natural images is very challenging. In this paper, we present an unsupervised framework that is able to learn the decomposition effectively from a single natural image by training solely with the image itself. 
Our approach is built upon the observations that the reflectance of a natural image typically has high internal self-similarity of patches, and a convolutional generation network tends to boost the self-similarity of an image when trained for image reconstruction. Based on the observations, an unsupervised intrinsic decomposition network (UIDNet) consisting of two fully convolutional encoder-decoder sub-networks, i.e., reflectance prediction network (RPN) and shading prediction network (SPN), is devised to decompose an image into reflectance and shading by promoting the internal self-similarity of the reflectance component, in a way that jointly trains RPN and SPN to reproduce the given image. A novel loss function is also designed to make effective the training for intrinsic decomposition. Experimental results on three benchmark real-world datasets demonstrate the superiority of the proposed method.", "title": "Unsupervised Intrinsic Image Decomposition Using Internal Self-Similarity Cues", "normalizedTitle": "Unsupervised Intrinsic Image Decomposition Using Internal Self-Similarity Cues", "fno": "09625763", "hasPdf": true, "idPrefix": "tp", "keywords": [ "Convolutional Neural Nets", "Image Classification", "Image Reconstruction", "Image Texture", "Unsupervised Learning", "Convolutional Generation Network", "Fully Convolutional Encoder Decoder Subnetworks", "Ground Truth Intrinsic Decomposition", "Image Reconstruction", "Internal Self Similarity", "Internal Self Similarity Cues", "Learning Based Intrinsic Image Decomposition Methods", "Massive Ground Truth Intrinsic Images", "Natural Image", "Real World Images", "Reflectance Prediction Network", "Shading Prediction Network", "Supervised Learning", "Unsupervised Framework", "Unsupervised Intrinsic Decomposition Network", "Unsupervised Intrinsic Image Decomposition", "Training", "Lighting", "Image Reconstruction", "Image Decomposition", "Surface Acoustic Waves", "Image Sequences", "Annotations", "Intrinsic Images", 
"Reflectance", "Shading" ], "authors": [ { "givenName": "Qing", "surname": "Zhang", "fullName": "Qing Zhang", "affiliation": "School of Computer Science and Engineering, Sun Yat-Sen University, Guangzhou, Guangdong, China", "__typename": "ArticleAuthorType" }, { "givenName": "Jin", "surname": "Zhou", "fullName": "Jin Zhou", "affiliation": "School of Electronics and Information Technology, Sun Yat-sen University, Guangzhou, China", "__typename": "ArticleAuthorType" }, { "givenName": "Lei", "surname": "Zhu", "fullName": "Lei Zhu", "affiliation": "Hong Kong University of Science and Technology (Guangzhou), Hong Kong", "__typename": "ArticleAuthorType" }, { "givenName": "Wei", "surname": "Sun", "fullName": "Wei Sun", "affiliation": "School of Electronics and Information Technology, Sun Yat-sen University, Guangzhou, China", "__typename": "ArticleAuthorType" }, { "givenName": "Chunxia", "surname": "Xiao", "fullName": "Chunxia Xiao", "affiliation": "School of Computer Science, Wuhan University, Wuhan, Hubei, China", "__typename": "ArticleAuthorType" }, { "givenName": "Wei-Shi", "surname": "Zheng", "fullName": "Wei-Shi Zheng", "affiliation": "School of Computer Science and Engineering, Sun Yat-Sen University, Guangzhou, Guangdong, China", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "12", "pubDate": "2022-12-01 00:00:00", "pubType": "trans", "pages": "9669-9686", "year": "2022", "issn": "0162-8828", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/icme/2014/4761/0/06890318", "title": "L0 co-intrinsic images decomposition", "doi": null, "abstractUrl": "/proceedings-article/icme/2014/06890318/12OmNAoUTnl", "parentPublication": { "id": "proceedings/icme/2014/4761/0", "title": "2014 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "proceedings/cvpr/2008/2242/0/04587660", "title": "Intrinsic image decomposition with non-local texture cues", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2008/04587660/12OmNBCHMMC", "parentPublication": { "id": "proceedings/cvpr/2008/2242/0", "title": "2008 IEEE Conference on Computer Vision and Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2015/8391/0/8391d469", "title": "Learning Data-Driven Reflectance Priors for Intrinsic Image Decomposition", "doi": null, "abstractUrl": "/proceedings-article/iccv/2015/8391d469/12OmNBoNrqU", "parentPublication": { "id": "proceedings/iccv/2015/8391/0", "title": "2015 IEEE International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2015/8391/0/8391a433", "title": "Intrinsic Decomposition of Image Sequences from Local Temporal Variations", "doi": null, "abstractUrl": "/proceedings-article/iccv/2015/8391a433/12OmNzC5Tdg", "parentPublication": { "id": "proceedings/iccv/2015/8391/0", "title": "2015 IEEE International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2013/12/ttp2013122904", "title": "Intrinsic Image Decomposition Using a Sparse Representation of Reflectance", "doi": null, "abstractUrl": "/journal/tp/2013/12/ttp2013122904/13rRUxOdD3R", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2018/6420/0/642000g430", "title": "Multispectral Image Intrinsic Decomposition via Subspace Constraint", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2018/642000g430/17D45XeKgro", "parentPublication": { "id": "proceedings/cvpr/2018/6420/0", "title": "2018 
IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2022/8739/0/873900a312", "title": "HSI-Guided Intrinsic Image Decomposition for Outdoor Scenes", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2022/873900a312/1G56nWipNPa", "parentPublication": { "id": "proceedings/cvprw/2022/8739/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2019/4803/0/480300h819", "title": "GLoSH: Global-Local Spherical Harmonics for Intrinsic Image Decomposition", "doi": null, "abstractUrl": "/proceedings-article/iccv/2019/480300h819/1hQqy771H9u", "parentPublication": { "id": "proceedings/iccv/2019/4803/0", "title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2019/4803/0/480300c521", "title": "Non-Local Intrinsic Decomposition With Near-Infrared Priors", "doi": null, "abstractUrl": "/proceedings-article/iccv/2019/480300c521/1hVluc7QzBK", "parentPublication": { "id": "proceedings/iccv/2019/4803/0", "title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2020/7168/0/716800d245", "title": "Unsupervised Learning for Intrinsic Image Decomposition From a Single Image", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2020/716800d245/1m3obXljyCI", "parentPublication": { "id": "proceedings/cvpr/2020/7168/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "09583771", "articleId": "1xSHN8Dq0HS", 
"__typename": "AdjacentArticleType" }, "next": { "fno": "09628041", "articleId": "1yXvHRI3JRK", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "1I6NX1ujhGE", "name": "ttp202212-09625763s1-supp1-3129795.pdf", "location": "https://www.computer.org/csdl/api/v1/extra/ttp202212-09625763s1-supp1-3129795.pdf", "extension": "pdf", "size": "14.5 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "1rCbPpaC8GA", "title": "March", "year": "2021", "issueNum": "01", "idPrefix": "bd", "pubType": "journal", "volume": "7", "label": "March", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "18Nk65SQcWA", "doi": "10.1109/TBDATA.2019.2908178", "abstract": "K-nearest neighbor (kNN) search is an important problem in data mining and knowledge discovery. Inspired by the huge success of tree-based methodology and ensemble methods over the last decades, we propose a new method for kNN search, random projection forests (rpForests). rpForests finds nearest neighbors by combining multiple kNN-sensitive trees with each constructed recursively through a series of random projections. As demonstrated by experiments on a wide collection of real datasets, our method achieves a remarkable accuracy in terms of fast decaying missing rate of kNNs and that of discrepancy in the k-th nearest neighbor distances. rpForests has a very low computational complexity as a tree-based methodology. The ensemble nature of rpForests makes it easily parallelized to run on clustered or multicore computers; the running time is expected to be nearly inversely proportional to the number of cores or machines. We give theoretical insights on rpForests by showing the exponential decay of neighboring points being separated by ensemble random projection trees when the ensemble size increases. Our theory can also be used to refine the choice of random projections in the growth of rpForests; experiments show that the effect is remarkable.", "abstracts": [ { "abstractType": "Regular", "content": "K-nearest neighbor (kNN) search is an important problem in data mining and knowledge discovery. Inspired by the huge success of tree-based methodology and ensemble methods over the last decades, we propose a new method for kNN search, random projection forests (rpForests). 
rpForests finds nearest neighbors by combining multiple kNN-sensitive trees with each constructed recursively through a series of random projections. As demonstrated by experiments on a wide collection of real datasets, our method achieves a remarkable accuracy in terms of fast decaying missing rate of kNNs and that of discrepancy in the k-th nearest neighbor distances. rpForests has a very low computational complexity as a tree-based methodology. The ensemble nature of rpForests makes it easily parallelized to run on clustered or multicore computers; the running time is expected to be nearly inversely proportional to the number of cores or machines. We give theoretical insights on rpForests by showing the exponential decay of neighboring points being separated by ensemble random projection trees when the ensemble size increases. Our theory can also be used to refine the choice of random projections in the growth of rpForests; experiments show that the effect is remarkable.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "K-nearest neighbor (kNN) search is an important problem in data mining and knowledge discovery. Inspired by the huge success of tree-based methodology and ensemble methods over the last decades, we propose a new method for kNN search, random projection forests (rpForests). rpForests finds nearest neighbors by combining multiple kNN-sensitive trees with each constructed recursively through a series of random projections. As demonstrated by experiments on a wide collection of real datasets, our method achieves a remarkable accuracy in terms of fast decaying missing rate of kNNs and that of discrepancy in the k-th nearest neighbor distances. rpForests has a very low computational complexity as a tree-based methodology. The ensemble nature of rpForests makes it easily parallelized to run on clustered or multicore computers; the running time is expected to be nearly inversely proportional to the number of cores or machines. 
We give theoretical insights on rpForests by showing the exponential decay of neighboring points being separated by ensemble random projection trees when the ensemble size increases. Our theory can also be used to refine the choice of random projections in the growth of rpForests; experiments show that the effect is remarkable.", "title": "K-Nearest Neighbor Search by Random Projection Forests", "normalizedTitle": "K-Nearest Neighbor Search by Random Projection Forests", "fno": "08676336", "hasPdf": true, "idPrefix": "bd", "keywords": [ "Computational Complexity", "Data Mining", "Nearest Neighbour Methods", "Search Problems", "Trees Mathematics", "K Nearest Neighbor Search", "Computational Complexity", "K NN Sensitive Trees", "Nearest Neighbors", "K NN Search", "Tree Based Methodology", "Knowledge Discovery", "Data Mining", "Random Projection Forests", "Random Projections", "Ensemble Size Increases", "Ensemble Random Projection Trees", "Rp Forests", "Big Data", "Vegetation", "Computational Complexity", "Forestry", "Data Mining", "Computers", "Search Problems", "K Nearest Neighbors", "Random Projection Forests", "Ensemble", "Unsupervised Learning" ], "authors": [ { "givenName": "Donghui", "surname": "Yan", "fullName": "Donghui Yan", "affiliation": "Department of Mathematics and Program in Data Science, University of Massachusetts, Dartmouth, MA, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Yingjie", "surname": "Wang", "fullName": "Yingjie Wang", "affiliation": "Department of Electrical and Computer Engineering, University of Massachusetts, Dartmouth, MA, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Jin", "surname": "Wang", "fullName": "Jin Wang", "affiliation": "Department of Electrical and Computer Engineering, University of Massachusetts, Dartmouth, MA, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Honggang", "surname": "Wang", "fullName": "Honggang Wang", "affiliation": "Department of Electrical and Computer Engineering, 
University of Massachusetts, Dartmouth, MA, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Zhenpeng", "surname": "Li", "fullName": "Zhenpeng Li", "affiliation": "Department of Mathematics and Computer Sciences, Dali University, Dali, Yunnan, China", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "01", "pubDate": "2021-01-01 00:00:00", "pubType": "trans", "pages": "147-157", "year": "2021", "issn": "2332-7790", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/dsia/2017/2198/0/08339084", "title": "A progressive k-d tree for approximate k-nearest neighbors", "doi": null, "abstractUrl": "/proceedings-article/dsia/2017/08339084/12OmNAXPymK", "parentPublication": { "id": "proceedings/dsia/2017/2198/0", "title": "2017 IEEE Workshop on Data Systems for Interactive Analysis (DSIA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icde/2014/2555/0/06816688", "title": "Practical k nearest neighbor queries with location privacy", "doi": null, "abstractUrl": "/proceedings-article/icde/2014/06816688/12OmNAkWvIe", "parentPublication": { "id": "proceedings/icde/2014/2555/0", "title": "2014 IEEE 30th International Conference on Data Engineering (ICDE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmew/2016/1552/0/07574742", "title": "A K-Nearest-Neighbor-Pooling method for graph matching", "doi": null, "abstractUrl": "/proceedings-article/icmew/2016/07574742/12OmNBqv2mn", "parentPublication": { "id": "proceedings/icmew/2016/1552/0", "title": "2016 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdm/2017/3835/0/3835a853", "title": "Online Nearest Neighbor Search in 
Binary Space", "doi": null, "abstractUrl": "/proceedings-article/icdm/2017/3835a853/12OmNrJiCR2", "parentPublication": { "id": "proceedings/icdm/2017/3835/0", "title": "2017 IEEE International Conference on Data Mining (ICDM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/snpd/2016/2239/0/07515874", "title": "Projection search for approximate nearest neighbor", "doi": null, "abstractUrl": "/proceedings-article/snpd/2016/07515874/12OmNwwd2NU", "parentPublication": { "id": "proceedings/snpd/2016/2239/0", "title": "2016 17th IEEE/ACIS International Conference on Software Engineering, Artificial Intelligence, Networking and Parallel/Distributed Computing (SNPD)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icde/2016/2020/0/07498401", "title": "Metric all-k-nearest-neighbor search", "doi": null, "abstractUrl": "/proceedings-article/icde/2016/07498401/12OmNy6Zs1r", "parentPublication": { "id": "proceedings/icde/2016/2020/0", "title": "2016 IEEE 32nd International Conference on Data Engineering (ICDE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/nbis/2014/4224/0/4224a615", "title": "A Simple Routing Method for Reverse k-Nearest Neighbor Queries in Spatial Networks", "doi": null, "abstractUrl": "/proceedings-article/nbis/2014/4224a615/12OmNyrIaAH", "parentPublication": { "id": "proceedings/nbis/2014/4224/0", "title": "2014 17th International Conference on Network-Based Information Systems (NBiS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/big-data/2018/5035/0/08622307", "title": "K-nearest Neighbor Search by Random Projection Forests", "doi": null, "abstractUrl": "/proceedings-article/big-data/2018/08622307/17D45XacGim", "parentPublication": { "id": "proceedings/big-data/2018/5035/0", "title": "2018 IEEE International Conference on Big Data (Big 
Data)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icedcs/2022/5541/0/554100a104", "title": "A Kernel-Based Local Mean k-Nearest Centroid Neighbor Method for Classification", "doi": null, "abstractUrl": "/proceedings-article/icedcs/2022/554100a104/1JC1u0GMjHG", "parentPublication": { "id": "proceedings/icedcs/2022/5541/0", "title": "2022 International Conference on Electronics and Devices, Computational Science (ICEDCS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bdacs/2021/2561/0/256100a225", "title": "k-Nearest Neighbor algorithm based on feature subspace", "doi": null, "abstractUrl": "/proceedings-article/bdacs/2021/256100a225/1wiRuV5ox4k", "parentPublication": { "id": "proceedings/bdacs/2021/2561/0", "title": "2021 International Conference on Big Data Analysis and Computer Science (BDACS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "08676277", "articleId": "18Nk6OAnAYw", "__typename": "AdjacentArticleType" }, "next": { "fno": "08676337", "articleId": "18Nk6pc8fAI", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNy7Qfqa", "title": "Feb.", "year": "2013", "issueNum": "02", "idPrefix": "tg", "pubType": "journal", "volume": "19", "label": "Feb.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUEgarjt", "doi": "10.1109/TVCG.2012.109", "abstract": "Harmonic functions are the critical points of a Dirichlet energy functional, the linear projections of conformal maps. They play an important role in computer graphics, particularly for gradient-domain image processing and shape-preserving geometric computation. We propose Poisson coordinates, a novel transfinite interpolation scheme based on the Poisson integral formula, as a rapid way to estimate a harmonic function on a certain domain with desired boundary values. Poisson coordinates are an extension of the Mean Value coordinates (MVCs) which inherit their linear precision, smoothness, and kernel positivity. We give explicit formulas for Poisson coordinates in both continuous and 2D discrete forms. Superior to MVCs, Poisson coordinates are proved to be pseudoharmonic (i.e., they reproduce harmonic functions on n-dimensional balls). Our experimental results show that Poisson coordinates have lower Dirichlet energies than MVCs on a number of typical 2D domains (particularly convex domains). As well as presenting a formula, our approach provides useful insights for further studies on coordinates-based interpolation and fast estimation of harmonic functions.", "abstracts": [ { "abstractType": "Regular", "content": "Harmonic functions are the critical points of a Dirichlet energy functional, the linear projections of conformal maps. They play an important role in computer graphics, particularly for gradient-domain image processing and shape-preserving geometric computation. 
We propose Poisson coordinates, a novel transfinite interpolation scheme based on the Poisson integral formula, as a rapid way to estimate a harmonic function on a certain domain with desired boundary values. Poisson coordinates are an extension of the Mean Value coordinates (MVCs) which inherit their linear precision, smoothness, and kernel positivity. We give explicit formulas for Poisson coordinates in both continuous and 2D discrete forms. Superior to MVCs, Poisson coordinates are proved to be pseudoharmonic (i.e., they reproduce harmonic functions on n-dimensional balls). Our experimental results show that Poisson coordinates have lower Dirichlet energies than MVCs on a number of typical 2D domains (particularly convex domains). As well as presenting a formula, our approach provides useful insights for further studies on coordinates-based interpolation and fast estimation of harmonic functions.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Harmonic functions are the critical points of a Dirichlet energy functional, the linear projections of conformal maps. They play an important role in computer graphics, particularly for gradient-domain image processing and shape-preserving geometric computation. We propose Poisson coordinates, a novel transfinite interpolation scheme based on the Poisson integral formula, as a rapid way to estimate a harmonic function on a certain domain with desired boundary values. Poisson coordinates are an extension of the Mean Value coordinates (MVCs) which inherit their linear precision, smoothness, and kernel positivity. We give explicit formulas for Poisson coordinates in both continuous and 2D discrete forms. Superior to MVCs, Poisson coordinates are proved to be pseudoharmonic (i.e., they reproduce harmonic functions on n-dimensional balls). Our experimental results show that Poisson coordinates have lower Dirichlet energies than MVCs on a number of typical 2D domains (particularly convex domains). 
As well as presenting a formula, our approach provides useful insights for further studies on coordinates-based interpolation and fast estimation of harmonic functions.", "title": "Poisson Coordinates", "normalizedTitle": "Poisson Coordinates", "fno": "ttg2013020344", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Stochastic Processes", "Computational Geometry", "Computer Graphics", "Gradient Methods", "Interpolation", "Coordinates Based Interpolation", "Poisson Coordinates", "Harmonic Functions", "Dirichlet Energy Functional", "Linear Projections", "Conformal Maps", "Computer Graphics", "Gradient Domain Image Processing", "Shape Preserving Geometric Computation", "Transfinite Interpolation Scheme", "Poisson Integral Formula", "Mean Value Coordinates", "MVC", "2 D Discrete Forms", "Dirichlet Energies", "Interpolation", "Harmonic Analysis", "Kernel", "Equations", "Integral Equations", "Closed Form Solutions", "Image Processing", "Pseudoharmonic", "Poisson Integral Formula", "Transfinite Interpolation", "Barycentric Coordinates" ], "authors": [ { "givenName": null, "surname": "Xian-Ying Li", "fullName": "Xian-Ying Li", "affiliation": "Dept. of Comput. Sci. & Technol., Tsinghua Univ., Beijing, China", "__typename": "ArticleAuthorType" }, { "givenName": null, "surname": "Shi-Min Hu", "fullName": "Shi-Min Hu", "affiliation": "Dept. of Comput. Sci. 
& Technol., Tsinghua Univ., Beijing, China", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "02", "pubDate": "2013-02-01 00:00:00", "pubType": "trans", "pages": "344-352", "year": "2013", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/ettandgrs/2008/3563/2/3563b340", "title": "Improving Algorithm to Compute Geodetic Coordinates", "doi": null, "abstractUrl": "/proceedings-article/ettandgrs/2008/3563b340/12OmNqH9hhh", "parentPublication": { "id": "ettandgrs/2008/3563/2", "title": "Education Technology and Training &amp; Geoscience and Remote Sensing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdh/2012/4899/0/4899a443", "title": "2D Shape Manipulations with Holomorphic Coordinates", "doi": null, "abstractUrl": "/proceedings-article/icdh/2012/4899a443/12OmNs5rkRH", "parentPublication": { "id": "proceedings/icdh/2012/4899/0", "title": "4th International Conference on Digital Home (ICDH)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdh/2012/4899/0/4899a278", "title": "Mesh Merging with Mean Value Coordinates", "doi": null, "abstractUrl": "/proceedings-article/icdh/2012/4899a278/12OmNx19jRY", "parentPublication": { "id": "proceedings/icdh/2012/4899/0", "title": "4th International Conference on Digital Home (ICDH)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iih-msp/2008/3278/0/3278a397", "title": "Image Editing without Color Inconsistency Using Modified Poisson Equation", "doi": null, "abstractUrl": "/proceedings-article/iih-msp/2008/3278a397/12OmNyoiYXH", "parentPublication": { "id": "proceedings/iih-msp/2008/3278/0", "title": "2008 Fourth International Conference on Intelligent Information Hiding and 
Multimedia Signal Processing (IIH-MSP)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ca/1997/7984/0/79840093", "title": "Dirichlet Free-Form Deformations and their Application to Hand Simulation", "doi": null, "abstractUrl": "/proceedings-article/ca/1997/79840093/12OmNz5JChG", "parentPublication": { "id": "proceedings/ca/1997/7984/0", "title": "Computer Animation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdm/2010/4256/0/4256a148", "title": "Sequential Latent Dirichlet Allocation: Discover Underlying Topic Structures within a Document", "doi": null, "abstractUrl": "/proceedings-article/icdm/2010/4256a148/12OmNzdGnxE", "parentPublication": { "id": "proceedings/icdm/2010/4256/0", "title": "2010 IEEE International Conference on Data Mining", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icicta/2010/4077/3/4077e977", "title": "The Study for Algorithm of Linear Interpolation Based on Bipolar Coordinates", "doi": null, "abstractUrl": "/proceedings-article/icicta/2010/4077e977/12OmNzkMlGe", "parentPublication": { "id": "proceedings/icicta/2010/4077/3", "title": "Intelligent Computation Technology and Automation, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2009/06/ttg2009061531", "title": "Continuous Parallel Coordinates", "doi": null, "abstractUrl": "/journal/tg/2009/06/ttg2009061531/13rRUxZRbnX", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/02/08449116", "title": "Poisson Vector Graphics (PVG)", "doi": null, "abstractUrl": "/journal/tg/2020/02/08449116/13rRUyeCkaq", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer 
Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icisce/2018/5500/0/550000b153", "title": "Mixed Tensor Product of q-Bezier-Poisson Surfaces", "doi": null, "abstractUrl": "/proceedings-article/icisce/2018/550000b153/17D45WgziRw", "parentPublication": { "id": "proceedings/icisce/2018/5500/0", "title": "2018 5th International Conference on Information Science and Control Engineering (ICISCE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "ttg2013020331", "articleId": "13rRUytF41x", "__typename": "AdjacentArticleType" }, "next": null, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "17ShDTXFgGC", "name": "ttg2013020344s.pdf", "location": "https://www.computer.org/csdl/api/v1/extra/ttg2013020344s.pdf", "extension": "pdf", "size": "84.2 kB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "1zdLz0NqD7O", "title": "Nov.-Dec.", "year": "2021", "issueNum": "06", "idPrefix": "cg", "pubType": "magazine", "volume": "41", "label": "Nov.-Dec.", "downloadables": { "hasCover": true, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1ndVAyjqoVO", "doi": "10.1109/MCG.2020.3024870", "abstract": "This article presents a simple yet effective algorithm for automatically transferring face colors in portrait videos. We extract the facial features and vectorize the faces in the input video using Poisson vector graphics, which encodes the low-frequency colors as the boundary colors of diffusion curves, and the high-frequency colors as Poisson regions. Then, we transfer the face color of a reference image/video to the first frame of the input video by applying optimal mass transport between the boundary colors of diffusion curves. Next the boundary color of the first frame is transferred to the subsequent frames by matching the curves. Finally, with the original or modified Poisson regions, we render the video using an efficient random-access Poisson solver. Thanks to our efficient diffusion curve matching algorithm, transferring colors for the vectorized video takes less than 1 millisecond per frame. Our method is particularly desired for frequent transfer from multiple references due to its information reuse nature. The simple diffusion curve matching also greatly improves the performance of video vectorization, since we only need to solve an optimization problem for the first frame. Since our method does not require correspondence between the reference image/video and the input video, it is flexible and robust to handle faces with significantly different geometries and postures, which often pose challenges to the existing methods. Moreover, by manipulating Poisson regions, we can enhance or reduce the highlight and contrast so that the reference color can fit into the input video naturally. 
We demonstrate the efficacy of our method on image-to-video transfer and color swap in videos.", "abstracts": [ { "abstractType": "Regular", "content": "This article presents a simple yet effective algorithm for automatically transferring face colors in portrait videos. We extract the facial features and vectorize the faces in the input video using Poisson vector graphics, which encodes the low-frequency colors as the boundary colors of diffusion curves, and the high-frequency colors as Poisson regions. Then, we transfer the face color of a reference image/video to the first frame of the input video by applying optimal mass transport between the boundary colors of diffusion curves. Next the boundary color of the first frame is transferred to the subsequent frames by matching the curves. Finally, with the original or modified Poisson regions, we render the video using an efficient random-access Poisson solver. Thanks to our efficient diffusion curve matching algorithm, transferring colors for the vectorized video takes less than 1 millisecond per frame. Our method is particularly desired for frequent transfer from multiple references due to its information reuse nature. The simple diffusion curve matching also greatly improves the performance of video vectorization, since we only need to solve an optimization problem for the first frame. Since our method does not require correspondence between the reference image/video and the input video, it is flexible and robust to handle faces with significantly different geometries and postures, which often pose challenges to the existing methods. Moreover, by manipulating Poisson regions, we can enhance or reduce the highlight and contrast so that the reference color can fit into the input video naturally. 
We demonstrate the efficacy of our method on image-to-video transfer and color swap in videos.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "This article presents a simple yet effective algorithm for automatically transferring face colors in portrait videos. We extract the facial features and vectorize the faces in the input video using Poisson vector graphics, which encodes the low-frequency colors as the boundary colors of diffusion curves, and the high-frequency colors as Poisson regions. Then, we transfer the face color of a reference image/video to the first frame of the input video by applying optimal mass transport between the boundary colors of diffusion curves. Next the boundary color of the first frame is transferred to the subsequent frames by matching the curves. Finally, with the original or modified Poisson regions, we render the video using an efficient random-access Poisson solver. Thanks to our efficient diffusion curve matching algorithm, transferring colors for the vectorized video takes less than 1 millisecond per frame. Our method is particularly desired for frequent transfer from multiple references due to its information reuse nature. The simple diffusion curve matching also greatly improves the performance of video vectorization, since we only need to solve an optimization problem for the first frame. Since our method does not require correspondence between the reference image/video and the input video, it is flexible and robust to handle faces with significantly different geometries and postures, which often pose challenges to the existing methods. Moreover, by manipulating Poisson regions, we can enhance or reduce the highlight and contrast so that the reference color can fit into the input video naturally. 
We demonstrate the efficacy of our method on image-to-video transfer and color swap in videos.", "title": "Poisson Vector Graphics (PVG)-Guided Face Color Transfer in Videos", "normalizedTitle": "Poisson Vector Graphics (PVG)-Guided Face Color Transfer in Videos", "fno": "09200660", "hasPdf": true, "idPrefix": "cg", "keywords": [ "Feature Extraction", "Image Colour Analysis", "Image Matching", "Optimisation", "Rendering Computer Graphics", "Stochastic Processes", "Color Swap", "Poisson Vector Graphics Guided Face Color Transfer", "Automatically Transferring Face Colors", "Portrait Videos", "Input Video", "Low Frequency Colors", "Boundary Color", "Diffusion Curves", "High Frequency Colors", "Subsequent Frames", "Original Modified Poisson Regions", "Random Access Poisson Solver", "Efficient Diffusion Curve Matching Algorithm", "Vectorized Video", "Frequent Transfer", "Simple Diffusion Curve", "Video Vectorization", "Manipulating Poisson Regions", "Reference Color", "Image To Video Transfer", "Image Color Analysis", "Videos", "Face Recognition", "Feature Extraction", "Graphics", "Poisson Equations", "Facial Features", "Portrait Videos", "Color Transfer", "Optimal Mass Transportation", "Diffusion Curves", "Poisson Vector Graphics" ], "authors": [ { "givenName": "Qian", "surname": "Fu", "fullName": "Qian Fu", "affiliation": "Nanyang Technological University, Singapore, Singapore", "__typename": "ArticleAuthorType" }, { "givenName": "Ying", "surname": "He", "fullName": "Ying He", "affiliation": "Nanyang Technological University, Singapore, Singapore", "__typename": "ArticleAuthorType" }, { "givenName": "Fei", "surname": "Hou", "fullName": "Fei Hou", "affiliation": "Chinese Academy of Sciences, Beijing, China", "__typename": "ArticleAuthorType" }, { "givenName": "Qian", "surname": "Sun", "fullName": "Qian Sun", "affiliation": "Tianjin University, Tianjin, China", "__typename": "ArticleAuthorType" }, { "givenName": "Anxiang", "surname": "Zeng", "fullName": "Anxiang Zeng", 
"affiliation": "Alibaba Group, Beijing, China", "__typename": "ArticleAuthorType" }, { "givenName": "Zhenchuan", "surname": "Huang", "fullName": "Zhenchuan Huang", "affiliation": "Alibaba Group, Beijing, China", "__typename": "ArticleAuthorType" }, { "givenName": "Juyong", "surname": "Zhang", "fullName": "Juyong Zhang", "affiliation": "University of Science and Technology of China, Anhui, China", "__typename": "ArticleAuthorType" }, { "givenName": "Yong-Jin", "surname": "Liu", "fullName": "Yong-Jin Liu", "affiliation": "Tsinghua University, Beijing, China", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "06", "pubDate": "2021-11-01 00:00:00", "pubType": "mags", "pages": "152-163", "year": "2021", "issn": "0272-1716", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/gmp/2000/0562/0/05620141", "title": "Poisson Approximation", "doi": null, "abstractUrl": "/proceedings-article/gmp/2000/05620141/12OmNA1mbdW", "parentPublication": { "id": "proceedings/gmp/2000/0562/0", "title": "Geometric Modeling and Processing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icgciot/2015/7910/0/07380559", "title": "Automatic hair color de-identification", "doi": null, "abstractUrl": "/proceedings-article/icgciot/2015/07380559/12OmNANBZo5", "parentPublication": { "id": "proceedings/icgciot/2015/7910/0", "title": "2015 International Conference on Green Computing and Internet of Things (ICGCIoT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iciii/2011/4523/3/4523c278", "title": "Fractional Diffusion Models of European Option with Poisson Process", "doi": null, "abstractUrl": "/proceedings-article/iciii/2011/4523c278/12OmNAkEU6w", "parentPublication": { "id": "proceedings/iciii/2011/4523/3", "title": 
"International Conference on Information Management, Innovation Management and Industrial Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iiaiaai/2014/4174/0/06913374", "title": "An Unsupervised Emotional Scene Retrieval Framework for Lifelog Videos", "doi": null, "abstractUrl": "/proceedings-article/iiaiaai/2014/06913374/12OmNBtl1Eg", "parentPublication": { "id": "proceedings/iiaiaai/2014/4174/0", "title": "2014 IIAI 3rd International Conference on Advanced Applied Informatics (IIAIAAI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/gmap/2000/0562/0/00838246", "title": "Poisson approximation", "doi": null, "abstractUrl": "/proceedings-article/gmap/2000/00838246/12OmNvStcUa", "parentPublication": { "id": "proceedings/gmap/2000/0562/0", "title": "Proceedings Geometric Modeling and Processing 2000. Theory and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/snpd/2013/5005/0/5005a444", "title": "Impressive Scene Detection from Lifelog Videos by Unsupervised Facial Expression Recognition", "doi": null, "abstractUrl": "/proceedings-article/snpd/2013/5005a444/12OmNx57HO5", "parentPublication": { "id": "proceedings/snpd/2013/5005/0", "title": "2013 14th ACIS International Conference on Software Engineering, Artificial Intelligence, Networking and Parallel/Distributed Computing (SNPD)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2013/02/ttg2013020344", "title": "Poisson Coordinates", "doi": null, "abstractUrl": "/journal/tg/2013/02/ttg2013020344/13rRUEgarjt", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ts/1992/07/e0624", "title": "Compound-Poisson Software Reliability Model", 
"doi": null, "abstractUrl": "/journal/ts/1992/07/e0624/13rRUxly979", "parentPublication": { "id": "trans/ts", "title": "IEEE Transactions on Software Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/02/08449116", "title": "Poisson Vector Graphics (PVG)", "doi": null, "abstractUrl": "/journal/tg/2020/02/08449116/13rRUyeCkaq", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2018/3788/0/08545283", "title": "Deep Age Estimation Model Stabilization from Images to Videos", "doi": null, "abstractUrl": "/proceedings-article/icpr/2018/08545283/17D45WXIkDh", "parentPublication": { "id": "proceedings/icpr/2018/3788/0", "title": "2018 24th International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "09646541", "articleId": "1zdLGlshqoM", "__typename": "AdjacentArticleType" }, "next": { "fno": "09646533", "articleId": "1zdLEz8z0ac", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "1zdLDKGvqLe", "name": "mcg202106-09200660s1-supp1-3024870.mp4", "location": "https://www.computer.org/csdl/api/v1/extra/mcg202106-09200660s1-supp1-3024870.mp4", "extension": "mp4", "size": "15.6 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "12OmNwHhp0D", "title": "Nov.-Dec.", "year": "2019", "issueNum": "06", "idPrefix": "tb", "pubType": "journal", "volume": "16", "label": "Nov.-Dec.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUwI5Uey", "doi": "10.1109/TCBB.2018.2824814", "abstract": "Machine vision for plant phenotyping is an emerging research area for producing high throughput in agriculture and crop science applications. Since 2D based approaches have their inherent limitations, 3D plant analysis is becoming state of the art for current phenotyping technologies. We present an automated system for analyzing plant growth in indoor conditions. A gantry robot system is used to perform scanning tasks in an automated manner throughout the lifetime of the plant. A 3D laser scanner mounted as the robot's payload captures the surface point cloud data of the plant from multiple views. The plant is monitored from the vegetative to reproductive stages in light/dark cycles inside a controllable growth chamber. An efficient 3D reconstruction algorithm is used, by which multiple scans are aligned together to obtain a 3D mesh of the plant, followed by surface area and volume computations. The whole system, including the programmable growth chamber, robot, scanner, data transfer, and analysis is fully automated in such a way that a naive user can, in theory, start the system with a mouse click and get back the growth analysis results at the end of the lifetime of the plant with no intermediate intervention. As evidence of its functionality, we show and analyze quantitative results of the rhythmic growth patterns of the dicot Arabidopsis thaliana (L.), and the monocot barley (Hordeum vulgare L.) 
plants under their diurnal light/dark cycles.", "abstracts": [ { "abstractType": "Regular", "content": "Machine vision for plant phenotyping is an emerging research area for producing high throughput in agriculture and crop science applications. Since 2D based approaches have their inherent limitations, 3D plant analysis is becoming state of the art for current phenotyping technologies. We present an automated system for analyzing plant growth in indoor conditions. A gantry robot system is used to perform scanning tasks in an automated manner throughout the lifetime of the plant. A 3D laser scanner mounted as the robot's payload captures the surface point cloud data of the plant from multiple views. The plant is monitored from the vegetative to reproductive stages in light/dark cycles inside a controllable growth chamber. An efficient 3D reconstruction algorithm is used, by which multiple scans are aligned together to obtain a 3D mesh of the plant, followed by surface area and volume computations. The whole system, including the programmable growth chamber, robot, scanner, data transfer, and analysis is fully automated in such a way that a naive user can, in theory, start the system with a mouse click and get back the growth analysis results at the end of the lifetime of the plant with no intermediate intervention. As evidence of its functionality, we show and analyze quantitative results of the rhythmic growth patterns of the dicot Arabidopsis thaliana (L.), and the monocot barley (Hordeum vulgare L.) plants under their diurnal light/dark cycles.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Machine vision for plant phenotyping is an emerging research area for producing high throughput in agriculture and crop science applications. Since 2D based approaches have their inherent limitations, 3D plant analysis is becoming state of the art for current phenotyping technologies. 
We present an automated system for analyzing plant growth in indoor conditions. A gantry robot system is used to perform scanning tasks in an automated manner throughout the lifetime of the plant. A 3D laser scanner mounted as the robot's payload captures the surface point cloud data of the plant from multiple views. The plant is monitored from the vegetative to reproductive stages in light/dark cycles inside a controllable growth chamber. An efficient 3D reconstruction algorithm is used, by which multiple scans are aligned together to obtain a 3D mesh of the plant, followed by surface area and volume computations. The whole system, including the programmable growth chamber, robot, scanner, data transfer, and analysis is fully automated in such a way that a naive user can, in theory, start the system with a mouse click and get back the growth analysis results at the end of the lifetime of the plant with no intermediate intervention. As evidence of its functionality, we show and analyze quantitative results of the rhythmic growth patterns of the dicot Arabidopsis thaliana (L.), and the monocot barley (Hordeum vulgare L.) 
plants under their diurnal light/dark cycles.", "title": "Machine Vision System for 3D Plant Phenotyping", "normalizedTitle": "Machine Vision System for 3D Plant Phenotyping", "fno": "08334629", "hasPdf": true, "idPrefix": "tb", "keywords": [ "Biology Computing", "Botany", "Crops", "Image Reconstruction", "Optical Scanners", "Robot Vision", "Solid Modelling", "Vegetation", "Plant Growth", "3 D Reconstruction Algorithm", "Controllable Growth Chamber", "Surface Point Cloud Data", "3 D Laser Scanner", "Scanning Tasks", "Gantry Robot System", "Phenotyping Technologies", "2 D Based Approaches", "Crop Science Applications", "Agriculture", "Plant Phenotyping", "Machine Vision System", "Rhythmic Growth Patterns", "Surface Area", "Three Dimensional Displays", "Robots", "Plants Biology", "Image Reconstruction", "Solid Modeling", "Agriculture", "Machine Vision", "Robotic Imaging", "Arabidopsis Thaliana", "Barley", "3 D Plant Growth", "Multi View Reconstruction", "Diurnal Growth Pattern", "Phenotyping" ], "authors": [ { "givenName": "Ayan", "surname": "Chaudhury", "fullName": "Ayan Chaudhury", "affiliation": "Department of Computer Science, University of Western Ontario, London, ON, Canada", "__typename": "ArticleAuthorType" }, { "givenName": "Christopher", "surname": "Ward", "fullName": "Christopher Ward", "affiliation": "Canadian Surgical Technologies & Advanced Robotics, University of Western Ontario, London, ON, Canada", "__typename": "ArticleAuthorType" }, { "givenName": "Ali", "surname": "Talasaz", "fullName": "Ali Talasaz", "affiliation": "Stryker Mako Surgical Corporation, Fort Lauderdale, FL", "__typename": "ArticleAuthorType" }, { "givenName": "Alexander G.", "surname": "Ivanov", "fullName": "Alexander G. 
Ivanov", "affiliation": "Institute of Biophysics & Biomedical Engineering, Bulgarian Academy of Sciences, Sofia, Bulgaria", "__typename": "ArticleAuthorType" }, { "givenName": "Mark", "surname": "Brophy", "fullName": "Mark Brophy", "affiliation": "Department of Computer Science, University of Western Ontario, London, ON, Canada", "__typename": "ArticleAuthorType" }, { "givenName": "Bernard", "surname": "Grodzinski", "fullName": "Bernard Grodzinski", "affiliation": "Department of Plant Agriculture, University of Guelph, Guelph, ON, Canada", "__typename": "ArticleAuthorType" }, { "givenName": "Norman P. A.", "surname": "Hüner", "fullName": "Norman P. A. Hüner", "affiliation": "Department of Biology & The Biotron Centre for Experimental Climate Change Research, University of Western Ontario, London, ON, Canada", "__typename": "ArticleAuthorType" }, { "givenName": "Rajnikant V.", "surname": "Patel", "fullName": "Rajnikant V. Patel", "affiliation": "Canadian Surgical Technologies & Advanced Robotics, University of Western Ontario, London, ON, Canada", "__typename": "ArticleAuthorType" }, { "givenName": "John L.", "surname": "Barron", "fullName": "John L. 
Barron", "affiliation": "Department of Computer Science, University of Western Ontario, London, ON, Canada", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "06", "pubDate": "2019-11-01 00:00:00", "pubType": "trans", "pages": "2009-2022", "year": "2019", "issn": "1545-5963", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/iccvw/2017/1034/0/1034c038", "title": "An Easy-to-Setup 3D Phenotyping Platform for KOMATSUNA Dataset", "doi": null, "abstractUrl": "/proceedings-article/iccvw/2017/1034c038/12OmNCvcLIC", "parentPublication": { "id": "proceedings/iccvw/2017/1034/0", "title": "2017 IEEE International Conference on Computer Vision Workshop (ICCVW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icsc/2017/4284/0/4284a445", "title": "Prerequisite Study for the Development of Embedded Instrumentation for Plant Phenotyping Using Computational Vision", "doi": null, "abstractUrl": "/proceedings-article/icsc/2017/4284a445/12OmNqHqSwj", "parentPublication": { "id": "proceedings/icsc/2017/4284/0", "title": "2017 IEEE 11th International Conference on Semantic Computing (ICSC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cloudtech/2017/1115/0/08284718", "title": "Cloud architecture for digital phenotyping and automation", "doi": null, "abstractUrl": "/proceedings-article/cloudtech/2017/08284718/12OmNwswg7F", "parentPublication": { "id": "proceedings/cloudtech/2017/1115/0", "title": "2017 3rd International Conference of Cloud Computing Technologies and Applications (CloudTech)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccvw/2017/1034/0/1034c055", "title": "Deep Learning for Multi-task Plant Phenotyping", "doi": null, "abstractUrl": 
"/proceedings-article/iccvw/2017/1034c055/12OmNxWcHbl", "parentPublication": { "id": "proceedings/iccvw/2017/1034/0", "title": "2017 IEEE International Conference on Computer Vision Workshop (ICCVW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/crv/2015/1986/0/1986a290", "title": "Computer Vision Based Autonomous Robotic System for 3D Plant Growth Measurement", "doi": null, "abstractUrl": "/proceedings-article/crv/2015/1986a290/12OmNyuy9Mq", "parentPublication": { "id": "proceedings/crv/2015/1986/0", "title": "2015 12th Conference on Computer and Robot Vision (CRV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aipr/2017/1235/0/08457935", "title": "Unsupervised Learning Method for Plant and Leaf Segmentation", "doi": null, "abstractUrl": "/proceedings-article/aipr/2017/08457935/13xI8B2zWrD", "parentPublication": { "id": "proceedings/aipr/2017/1235/0", "title": "2017 IEEE Applied Imagery Pattern Recognition Workshop (AIPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/big-data/2018/5035/0/08622428", "title": "3D Reconstruction of Plant Leaves for High-Throughput Phenotyping", "doi": null, "abstractUrl": "/proceedings-article/big-data/2018/08622428/17D45WrVg2O", "parentPublication": { "id": "proceedings/big-data/2018/5035/0", "title": "2018 IEEE International Conference on Big Data (Big Data)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tb/2020/06/08698802", "title": "Active Vision and Surface Reconstruction for 3D Plant Shoot Modelling", "doi": null, "abstractUrl": "/journal/tb/2020/06/08698802/19w8qhKJM1G", "parentPublication": { "id": "trans/tb", "title": "IEEE/ACM Transactions on Computational Biology and Bioinformatics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2022/0915/0/091500c968", 
"title": "In-Field Phenotyping Based on Crop Leaf and Plant Instance Segmentation", "doi": null, "abstractUrl": "/proceedings-article/wacv/2022/091500c968/1B13rO8epPO", "parentPublication": { "id": "proceedings/wacv/2022/0915/0", "title": "2022 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccvw/2019/5023/0/502300c149", "title": "3D Shape Reconstruction of Plant Roots in a Cylindrical Tank From Multiview Images", "doi": null, "abstractUrl": "/proceedings-article/iccvw/2019/502300c149/1i5mpQZEfa8", "parentPublication": { "id": "proceedings/iccvw/2019/5023/0", "title": "2019 IEEE/CVF International Conference on Computer Vision Workshop (ICCVW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "08335791", "articleId": "13rRUwbs1R4", "__typename": "AdjacentArticleType" }, "next": { "fno": "08350295", "articleId": "13rRUynHuhC", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNvmXJ4h", "title": "Nov.-Dec.", "year": "2020", "issueNum": "06", "idPrefix": "tb", "pubType": "journal", "volume": "17", "label": "Nov.-Dec.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "19w8qhKJM1G", "doi": "10.1109/TCBB.2019.2896908", "abstract": "Plant phenotyping is the quantitative description of a plant's physiological, biochemical, and anatomical status which can be used in trait selection and helps to provide mechanisms to link underlying genetics with yield. Here, an active vision- based pipeline is presented which aims to contribute to reducing the bottleneck associated with phenotyping of architectural traits. The pipeline provides a fully automated response to photometric data acquisition and the recovery of three-dimensional (3D) models of plants without the dependency of botanical expertise, whilst ensuring a non-intrusive and non-destructive approach. Access to complete and accurate 3D models of plants supports computation of a wide variety of structural measurements. An Active Vision Cell (AVC) consisting of a camera-mounted robot arm plus combined software interface and a novel surface reconstruction algorithm is proposed. This pipeline provides a robust, flexible, and accurate method for automating the 3D reconstruction of plants. The reconstruction algorithm can reduce noise and provides a promising and extendable framework for high throughput phenotyping, improving current state-of-the-art methods. 
Furthermore, the pipeline can be applied to any plant species or form due to the application of an active vision framework combined with the automatic selection of key parameters for surface reconstruction.", "abstracts": [ { "abstractType": "Regular", "content": "Plant phenotyping is the quantitative description of a plant's physiological, biochemical, and anatomical status which can be used in trait selection and helps to provide mechanisms to link underlying genetics with yield. Here, an active vision- based pipeline is presented which aims to contribute to reducing the bottleneck associated with phenotyping of architectural traits. The pipeline provides a fully automated response to photometric data acquisition and the recovery of three-dimensional (3D) models of plants without the dependency of botanical expertise, whilst ensuring a non-intrusive and non-destructive approach. Access to complete and accurate 3D models of plants supports computation of a wide variety of structural measurements. An Active Vision Cell (AVC) consisting of a camera-mounted robot arm plus combined software interface and a novel surface reconstruction algorithm is proposed. This pipeline provides a robust, flexible, and accurate method for automating the 3D reconstruction of plants. The reconstruction algorithm can reduce noise and provides a promising and extendable framework for high throughput phenotyping, improving current state-of-the-art methods. Furthermore, the pipeline can be applied to any plant species or form due to the application of an active vision framework combined with the automatic selection of key parameters for surface reconstruction.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Plant phenotyping is the quantitative description of a plant's physiological, biochemical, and anatomical status which can be used in trait selection and helps to provide mechanisms to link underlying genetics with yield. 
Here, an active vision- based pipeline is presented which aims to contribute to reducing the bottleneck associated with phenotyping of architectural traits. The pipeline provides a fully automated response to photometric data acquisition and the recovery of three-dimensional (3D) models of plants without the dependency of botanical expertise, whilst ensuring a non-intrusive and non-destructive approach. Access to complete and accurate 3D models of plants supports computation of a wide variety of structural measurements. An Active Vision Cell (AVC) consisting of a camera-mounted robot arm plus combined software interface and a novel surface reconstruction algorithm is proposed. This pipeline provides a robust, flexible, and accurate method for automating the 3D reconstruction of plants. The reconstruction algorithm can reduce noise and provides a promising and extendable framework for high throughput phenotyping, improving current state-of-the-art methods. Furthermore, the pipeline can be applied to any plant species or form due to the application of an active vision framework combined with the automatic selection of key parameters for surface reconstruction.", "title": "Active Vision and Surface Reconstruction for 3D Plant Shoot Modelling", "normalizedTitle": "Active Vision and Surface Reconstruction for 3D Plant Shoot Modelling", "fno": "08698802", "hasPdf": true, "idPrefix": "tb", "keywords": [ "Active Vision", "Biology Computing", "Botany", "Cameras", "Cellular Biophysics", "Data Acquisition", "Genetics", "Image Reconstruction", "Robot Vision", "Solid Modelling", "Camera Mounted Robot Arm", "Combined Software Interface", "Surface Reconstruction Algorithm", "Robust Method", "High Throughput Phenotyping", "Plant Species", "Active Vision Framework", "3 D Plant Shoot Modelling", "Plant Phenotyping", "Trait Selection", "Underlying Genetics", "Architectural Traits", "Fully Automated Response", "Photometric Data Acquisition", "Botanical Expertise", "Nondestructive 
Approach", "Active Vision Cell", "Active Vision Based Pipeline", "AVC", "Three Dimensional Displays", "Calibration", "Solid Modeling", "Robots", "Computational Modeling", "Surface Reconstruction", "3 D Reconstruction", "Active Vision", "Calibration", "Plant Phenotyping" ], "authors": [ { "givenName": "Jonathon A.", "surname": "Gibbs", "fullName": "Jonathon A. Gibbs", "affiliation": "School of Computer Science, University of Nottingham, Nottingham, United Kingdom", "__typename": "ArticleAuthorType" }, { "givenName": "Michael P.", "surname": "Pound", "fullName": "Michael P. Pound", "affiliation": "School of Computer Science, University of Nottingham, Nottingham, United Kingdom", "__typename": "ArticleAuthorType" }, { "givenName": "Andrew P.", "surname": "French", "fullName": "Andrew P. French", "affiliation": "School of Computer Science, University of Nottingham, Nottingham, United Kingdom", "__typename": "ArticleAuthorType" }, { "givenName": "Darren M.", "surname": "Wells", "fullName": "Darren M. Wells", "affiliation": "School of Biosciences, University of Nottingham, Nottingham, United Kingdom", "__typename": "ArticleAuthorType" }, { "givenName": "Erik H.", "surname": "Murchie", "fullName": "Erik H. Murchie", "affiliation": "School of Biosciences, University of Nottingham, Nottingham, United Kingdom", "__typename": "ArticleAuthorType" }, { "givenName": "Tony P.", "surname": "Pridmore", "fullName": "Tony P. 
Pridmore", "affiliation": "School of Computer Science, University of Nottingham, Nottingham, United Kingdom", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": false, "showRecommendedArticles": true, "isOpenAccess": true, "issueNum": "06", "pubDate": "2020-11-01 00:00:00", "pubType": "trans", "pages": "1907-1917", "year": "2020", "issn": "1545-5963", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/iccvw/2017/1034/0/1034c427", "title": "Computer Vision Meets Geometric Modeling: Multi-view Reconstruction of Surface Points and Normals Using Affine Correspondences", "doi": null, "abstractUrl": "/proceedings-article/iccvw/2017/1034c427/12OmNxeut0E", "parentPublication": { "id": "proceedings/iccvw/2017/1034/0", "title": "2017 IEEE International Conference on Computer Vision Workshop (ICCVW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/irc/2018/4652/0/465201a278", "title": "Surface Reconstruction from Arbitrarily Large Point Clouds", "doi": null, "abstractUrl": "/proceedings-article/irc/2018/465201a278/12OmNyKrH7P", "parentPublication": { "id": "proceedings/irc/2018/4652/0", "title": "2018 Second IEEE International Conference on Robotic Computing (IRC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/crv/2015/1986/0/1986a290", "title": "Computer Vision Based Autonomous Robotic System for 3D Plant Growth Measurement", "doi": null, "abstractUrl": "/proceedings-article/crv/2015/1986a290/12OmNyuy9Mq", "parentPublication": { "id": "proceedings/crv/2015/1986/0", "title": "2015 12th Conference on Computer and Robot Vision (CRV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccvw/2017/1034/0/1034c046", "title": "Drought Stress Classification Using 3D Plant Models", "doi": null, "abstractUrl": 
"/proceedings-article/iccvw/2017/1034c046/12OmNzA6GKz", "parentPublication": { "id": "proceedings/iccvw/2017/1034/0", "title": "2017 IEEE International Conference on Computer Vision Workshop (ICCVW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tb/2019/06/08334629", "title": "Machine Vision System for 3D Plant Phenotyping", "doi": null, "abstractUrl": "/journal/tb/2019/06/08334629/13rRUwI5Uey", "parentPublication": { "id": "trans/tb", "title": "IEEE/ACM Transactions on Computational Biology and Bioinformatics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2013/10/ttg2013101700", "title": "Cylinder Detection in Large-Scale Point Cloud of Pipeline Plant", "doi": null, "abstractUrl": "/journal/tg/2013/10/ttg2013101700/13rRUygT7yc", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/big-data/2018/5035/0/08622428", "title": "3D Reconstruction of Plant Leaves for High-Throughput Phenotyping", "doi": null, "abstractUrl": "/proceedings-article/big-data/2018/08622428/17D45WrVg2O", "parentPublication": { "id": "proceedings/big-data/2018/5035/0", "title": "2018 IEEE International Conference on Big Data (Big Data)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/big-data/2019/0858/0/09006497", "title": "Plant Event Detection from Time-Varying Point Clouds", "doi": null, "abstractUrl": "/proceedings-article/big-data/2019/09006497/1hJrP2OrOwM", "parentPublication": { "id": "proceedings/big-data/2019/0858/0", "title": "2019 IEEE International Conference on Big Data (Big Data)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2021/4509/0/450900l1647", "title": "Deep Active Surface Models", "doi": null, "abstractUrl": 
"/proceedings-article/cvpr/2021/450900l1647/1yeIkxNx0pa", "parentPublication": { "id": "proceedings/cvpr/2021/4509/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2021/4509/0/450900f753", "title": "Polka Lines: Learning Structured Illumination and Reconstruction for Active Stereo", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2021/450900f753/1yeKVia6bTi", "parentPublication": { "id": "proceedings/cvpr/2021/4509/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "08664185", "articleId": "1poqLQTlEKQ", "__typename": "AdjacentArticleType" }, "next": { "fno": "08692651", "articleId": "19hcqimFeCY", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "1CpcG1DISYM", "title": "May", "year": "2022", "issueNum": "05", "idPrefix": "tg", "pubType": "journal", "volume": "28", "label": "May", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1B0Y24wmlm8", "doi": "10.1109/TVCG.2022.3150503", "abstract": "In optical see-through augmented reality (AR), information is often distributed between real and virtual contexts, and often appears at different distances from the user. To integrate information, users must repeatedly switch context and change focal distance. If the user&#x0027;s task is conducted under time pressure, they may attempt to integrate information while their eye is still changing focal distance, a phenomenon we term <italic>transient focal blur</italic>. Previously, Gabbard, Mehra, and Swan (2018) examined these issues, using a text-based visual search task on a one-eye optical see-through AR display. This paper reports an experiment that partially replicates and extends this task on a custom-built AR Haploscope. The experiment examined the effects of context switching, focal switching distance, binocular and monocular viewing, and transient focal blur on task performance and eye fatigue. Context switching increased eye fatigue but did not decrease performance. Increasing focal switching distance increased eye fatigue and decreased performance. Monocular viewing also increased eye fatigue and decreased performance. The transient focal blur effect resulted in additional performance decrements, and is an addition to knowledge about AR user interface design issues.", "abstracts": [ { "abstractType": "Regular", "content": "In optical see-through augmented reality (AR), information is often distributed between real and virtual contexts, and often appears at different distances from the user. To integrate information, users must repeatedly switch context and change focal distance. 
If the user&#x0027;s task is conducted under time pressure, they may attempt to integrate information while their eye is still changing focal distance, a phenomenon we term <italic>transient focal blur</italic>. Previously, Gabbard, Mehra, and Swan (2018) examined these issues, using a text-based visual search task on a one-eye optical see-through AR display. This paper reports an experiment that partially replicates and extends this task on a custom-built AR Haploscope. The experiment examined the effects of context switching, focal switching distance, binocular and monocular viewing, and transient focal blur on task performance and eye fatigue. Context switching increased eye fatigue but did not decrease performance. Increasing focal switching distance increased eye fatigue and decreased performance. Monocular viewing also increased eye fatigue and decreased performance. The transient focal blur effect resulted in additional performance decrements, and is an addition to knowledge about AR user interface design issues.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In optical see-through augmented reality (AR), information is often distributed between real and virtual contexts, and often appears at different distances from the user. To integrate information, users must repeatedly switch context and change focal distance. If the user's task is conducted under time pressure, they may attempt to integrate information while their eye is still changing focal distance, a phenomenon we term transient focal blur. Previously, Gabbard, Mehra, and Swan (2018) examined these issues, using a text-based visual search task on a one-eye optical see-through AR display. This paper reports an experiment that partially replicates and extends this task on a custom-built AR Haploscope. The experiment examined the effects of context switching, focal switching distance, binocular and monocular viewing, and transient focal blur on task performance and eye fatigue. 
Context switching increased eye fatigue but did not decrease performance. Increasing focal switching distance increased eye fatigue and decreased performance. Monocular viewing also increased eye fatigue and decreased performance. The transient focal blur effect resulted in additional performance decrements, and is an addition to knowledge about AR user interface design issues.", "title": "The Effect of Context Switching, Focal Switching Distance, Binocular and Monocular Viewing, and Transient Focal Blur on Human Performance in Optical See-Through Augmented Reality", "normalizedTitle": "The Effect of Context Switching, Focal Switching Distance, Binocular and Monocular Viewing, and Transient Focal Blur on Human Performance in Optical See-Through Augmented Reality", "fno": "09714039", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Optical Switches", "Task Analysis", "Monitoring", "Transient Analysis", "Fatigue", "Augmented Reality", "Meters", "Augmented Reality", "Context Switching", "Focal Distance Switching", "Transient Focal Blur", "Accommodation" ], "authors": [ { "givenName": "Mohammed Safayet", "surname": "Arefin", "fullName": "Mohammed Safayet Arefin", "affiliation": "Mississippi State University, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Nate", "surname": "Phillips", "fullName": "Nate Phillips", "affiliation": "Mississippi State University, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Alexander", "surname": "Plopski", "fullName": "Alexander Plopski", "affiliation": "University of Otago, New Zealand", "__typename": "ArticleAuthorType" }, { "givenName": "Joseph L.", "surname": "Gabbard", "fullName": "Joseph L. Gabbard", "affiliation": "Virginia Tech, USA", "__typename": "ArticleAuthorType" }, { "givenName": "J. Edward", "surname": "Swan", "fullName": "J. 
Edward Swan", "affiliation": "Mississippi State University, USA", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "05", "pubDate": "2022-05-01 00:00:00", "pubType": "trans", "pages": "2014-2025", "year": "2022", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/ismarw/2016/3740/0/07836523", "title": "Human Attention and fatigue for AR Head-Up Displays", "doi": null, "abstractUrl": "/proceedings-article/ismarw/2016/07836523/12OmNwFidbp", "parentPublication": { "id": "proceedings/ismarw/2016/3740/0", "title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2008/2840/0/04637321", "title": "An optical see-through head mounted display with addressable focal planes", "doi": null, "abstractUrl": "/proceedings-article/ismar/2008/04637321/12OmNwe2IAw", "parentPublication": { "id": "proceedings/ismar/2008/2840/0", "title": "2008 7th IEEE/ACM International Symposium on Mixed and Augmented Reality", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/hicss/2015/7367/0/7367a324", "title": "Understanding the Influences of Trend and Fatigue in Individuals' SNS Switching Intention", "doi": null, "abstractUrl": "/proceedings-article/hicss/2015/7367a324/12OmNzICEKp", "parentPublication": { "id": "proceedings/hicss/2015/7367/0", "title": "2015 48th Hawaii International Conference on System Sciences (HICSS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/06/08353823", "title": "Effects of AR Display Context Switching and Focal Distance Switching on Human Performance", "doi": null, "abstractUrl": "/journal/tg/2019/06/08353823/13rRUwInvBe", 
"parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/02/08462792", "title": "The Effect of Focal Distance, Age, and Brightness on Near-Field Augmented Reality Depth Matching", "doi": null, "abstractUrl": "/journal/tg/2020/02/08462792/13w3loWnQPK", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cw/2018/7315/0/731500a207", "title": "Investigation on the Correlation between Eye Movement and Reaction Time under Mental Fatigue Influence", "doi": null, "abstractUrl": "/proceedings-article/cw/2018/731500a207/17D45WHONlB", "parentPublication": { "id": "proceedings/cw/2018/7315/0", "title": "2018 International Conference on Cyberworlds (CW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/mipr/2022/9548/0/954800a246", "title": "Comparison of Virtual-Real Integration Efficiency between Light Field and Conventional Near-Eye AR Displays", "doi": null, "abstractUrl": "/proceedings-article/mipr/2022/954800a246/1GvditqC14Q", "parentPublication": { "id": "proceedings/mipr/2022/9548/0", "title": "2022 IEEE 5th International Conference on Multimedia Information Processing and Retrieval (MIPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08798340", "title": "Augmented Reality Map Navigation with Freehand Gestures", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08798340/1cJ1fg0gjAY", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/vrw/2020/6532/0/09090479", "title": "Impact of AR Display Context Switching and Focal Distance Switching on Human Performance: Replication on an AR Haploscope", "doi": null, "abstractUrl": "/proceedings-article/vrw/2020/09090479/1jIxlrWEUmc", "parentPublication": { "id": "proceedings/vrw/2020/6532/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2021/1298/0/129800a096", "title": "Effects of a Distracting Background and Focal Switching Distance in an Augmented Reality System", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2021/129800a096/1yeQC2Aw0De", "parentPublication": { "id": "proceedings/ismar-adjunct/2021/1298/0", "title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "09714046", "articleId": "1B0Y1GfEIQ8", "__typename": "AdjacentArticleType" }, "next": { "fno": "09714041", "articleId": "1B0XXsRVUIM", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "1CubRG8reQ8", "name": "ttg202205-09714039s1-supp1-3150503.pdf", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202205-09714039s1-supp1-3150503.pdf", "extension": "pdf", "size": "55.3 kB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "1I6No9Att7y", "title": "Dec.", "year": "2022", "issueNum": "12", "idPrefix": "tk", "pubType": "journal", "volume": "34", "label": "Dec.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1rNOi7erMzu", "doi": "10.1109/TKDE.2021.3064233", "abstract": "WeChat is the largest social instant messaging platform in China, with 1.1 billion monthly active users. &#x201C;Top Stories&#x201D; is a novel friend-enhanced recommendation engine in WeChat, in which users can read articles based on preferences of both their own and their friends. Specifically, when a user reads an article by opening it, the &#x201C;click&#x201D; behavior is private. Moreover, if the user clicks the &#x201C;wow&#x201D; button, (only) her/his direct connections will be aware of this action/preference. Based on the unique WeChat data, we aim to understand user preferences and &#x201C;wow&#x201D; diffusion in Top Stories at different levels. We have made some interesting discoveries. For instance, the &#x201C;wow&#x201D; probability of one user is negatively correlated with the number of connected components that are formed by her/his active friends, but the click probability is the opposite. We further study to what extent users&#x2019; &#x201C;wow&#x201D; and click behavior can be predicted from their social connections. To address this problem, we present a hierarchical graph representation learning based model DiffuseGNN, which is capable of capturing the structure-based social observations discovered above. Our experiments show that the proposed method can significantly improve the prediction performance compared with alternative methods.", "abstracts": [ { "abstractType": "Regular", "content": "WeChat is the largest social instant messaging platform in China, with 1.1 billion monthly active users. 
&#x201C;Top Stories&#x201D; is a novel friend-enhanced recommendation engine in WeChat, in which users can read articles based on preferences of both their own and their friends. Specifically, when a user reads an article by opening it, the &#x201C;click&#x201D; behavior is private. Moreover, if the user clicks the &#x201C;wow&#x201D; button, (only) her/his direct connections will be aware of this action/preference. Based on the unique WeChat data, we aim to understand user preferences and &#x201C;wow&#x201D; diffusion in Top Stories at different levels. We have made some interesting discoveries. For instance, the &#x201C;wow&#x201D; probability of one user is negatively correlated with the number of connected components that are formed by her/his active friends, but the click probability is the opposite. We further study to what extent users&#x2019; &#x201C;wow&#x201D; and click behavior can be predicted from their social connections. To address this problem, we present a hierarchical graph representation learning based model DiffuseGNN, which is capable of capturing the structure-based social observations discovered above. Our experiments show that the proposed method can significantly improve the prediction performance compared with alternative methods.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "WeChat is the largest social instant messaging platform in China, with 1.1 billion monthly active users. “Top Stories” is a novel friend-enhanced recommendation engine in WeChat, in which users can read articles based on preferences of both their own and their friends. Specifically, when a user reads an article by opening it, the “click” behavior is private. Moreover, if the user clicks the “wow” button, (only) her/his direct connections will be aware of this action/preference. Based on the unique WeChat data, we aim to understand user preferences and “wow” diffusion in Top Stories at different levels. We have made some interesting discoveries. 
For instance, the “wow” probability of one user is negatively correlated with the number of connected components that are formed by her/his active friends, but the click probability is the opposite. We further study to what extent users’ “wow” and click behavior can be predicted from their social connections. To address this problem, we present a hierarchical graph representation learning based model DiffuseGNN, which is capable of capturing the structure-based social observations discovered above. Our experiments show that the proposed method can significantly improve the prediction performance compared with alternative methods.", "title": "Understanding WeChat User Preferences and &#x201C;Wow&#x201D; Diffusion", "normalizedTitle": "Understanding WeChat User Preferences and “Wow” Diffusion", "fno": "09372844", "hasPdf": true, "idPrefix": "tk", "keywords": [ "Data Privacy", "Electronic Messaging", "Graph Theory", "Social Networking Online", "Active Friends", "Billion Monthly Active Users", "Click Behavior", "Click Probability", "Connected Components", "Direct Connections", "Extent Users", "Largest Social Instant Messaging Platform", "Novel Friend Enhanced Recommendation Engine", "Social Connections", "Structure Based Social Observations", "Unique We Chat Data", "User Preferences", "Wow Button", "Wow Probability", "Social Networking Online", "Message Service", "Knowledge Engineering", "Computer Science", "Terminology", "Instant Messaging", "IEEE Fellows", "Social Networks", "Social Influence", "Information Diffusion", "User Behavior", "User Modeling" ], "authors": [ { "givenName": "Fanjin", "surname": "Zhang", "fullName": "Fanjin Zhang", "affiliation": "Department of Computer Science and Technology, Tsinghua University, Beijing, China", "__typename": "ArticleAuthorType" }, { "givenName": "Jie", "surname": "Tang", "fullName": "Jie Tang", "affiliation": "Tsinghua-Bosch Joint ML Center, Department of Computer Science and Technology, Tsinghua National Laboratory for 
Information Science and Technology (TNList), Tsinghua University, Beijing, China", "__typename": "ArticleAuthorType" }, { "givenName": "Xueyi", "surname": "Liu", "fullName": "Xueyi Liu", "affiliation": "Department of Computer Science and Technology, Tsinghua University, Beijing, China", "__typename": "ArticleAuthorType" }, { "givenName": "Zhenyu", "surname": "Hou", "fullName": "Zhenyu Hou", "affiliation": "Department of Computer Science and Technology, Tsinghua University, Beijing, China", "__typename": "ArticleAuthorType" }, { "givenName": "Yuxiao", "surname": "Dong", "fullName": "Yuxiao Dong", "affiliation": "Microsoft Research, Redmond, WA, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Jing", "surname": "Zhang", "fullName": "Jing Zhang", "affiliation": "Information School, Renmin University of China, Beijing, China", "__typename": "ArticleAuthorType" }, { "givenName": "Xiao", "surname": "Liu", "fullName": "Xiao Liu", "affiliation": "Department of Computer Science and Technology, Tsinghua University, Beijing, China", "__typename": "ArticleAuthorType" }, { "givenName": "Ruobing", "surname": "Xie", "fullName": "Ruobing Xie", "affiliation": "WeChat Search Application Department, Tencent, Beijing, China", "__typename": "ArticleAuthorType" }, { "givenName": "Kai", "surname": "Zhuang", "fullName": "Kai Zhuang", "affiliation": "WeChat Search Application Department, Tencent, Beijing, China", "__typename": "ArticleAuthorType" }, { "givenName": "Xu", "surname": "Zhang", "fullName": "Xu Zhang", "affiliation": "WeChat Search Application Department, Tencent, Beijing, China", "__typename": "ArticleAuthorType" }, { "givenName": "Leyu", "surname": "Lin", "fullName": "Leyu Lin", "affiliation": "WeChat Search Application Department, Tencent, Beijing, China", "__typename": "ArticleAuthorType" }, { "givenName": "Philip S.", "surname": "Yu", "fullName": "Philip S. 
Yu", "affiliation": "Department of Computer Science, University of Illinois at Chicago, Chicago, IL, USA", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "12", "pubDate": "2022-12-01 00:00:00", "pubType": "trans", "pages": "6033-6046", "year": "2022", "issn": "1041-4347", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/hpcc-smartcity-dss/2018/6614/0/661400b147", "title": "Research on Group Social Function and User Differentiation – A Case Study of WeChat and QQ", "doi": null, "abstractUrl": "/proceedings-article/hpcc-smartcity-dss/2018/661400b147/183rAduG426", "parentPublication": { "id": "proceedings/hpcc-smartcity-dss/2018/6614/0", "title": "2018 IEEE 20th International Conference on High Performance Computing and Communications; IEEE 16th International Conference on Smart City; IEEE 4th International Conference on Data Science and Systems (HPCC/SmartCity/DSS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ictech/2022/9694/0/969400a503", "title": "Design and Implementation of Shared Book System Based on Wechat Applet", "doi": null, "abstractUrl": "/proceedings-article/ictech/2022/969400a503/1FWmBgVJMo8", "parentPublication": { "id": "proceedings/ictech/2022/9694/0", "title": "2022 11th International Conference of Information and Communication Technology (ICTech)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iscv/2020/8041/0/09204232", "title": "&#x201C;Stress-free&#x201D; mobile app for Moroccan university students: &#x201C;relaxation program&#x201D; validation", "doi": null, "abstractUrl": "/proceedings-article/iscv/2020/09204232/1nmidsDQbdu", "parentPublication": { "id": "proceedings/iscv/2020/8041/0", "title": "2020 International Conference on Intelligent Systems and Computer 
Vision (ISCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/isctt/2020/8575/0/857500a060", "title": "University Library WeChat Public Account Operation Analysis Based on the &#x201C;New Ranking Index&#x201D; Data Platform - A Case Study of Wuhan University", "doi": null, "abstractUrl": "/proceedings-article/isctt/2020/857500a060/1rHeOBioSZy", "parentPublication": { "id": "proceedings/isctt/2020/8575/0", "title": "2020 5th International Conference on Information Science, Computer Technology and Transportation (ISCTT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cipae/2020/8223/0/822300a364", "title": "Research on Project-based Curriculum and &#x201C;Mini-Class&#x201D; Teaching Method", "doi": null, "abstractUrl": "/proceedings-article/cipae/2020/822300a364/1rSRhnBVSIo", "parentPublication": { "id": "proceedings/cipae/2020/8223/0", "title": "2020 International Conference on Computers, Information Processing and Advanced Education (CIPAE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmeim/2020/9623/0/962300a406", "title": "The Connection among &#x201C;Threat&#x201D;, &#x201C;Enticement&#x201D;, &#x201C;Fraud&#x201D; and &#x201C;Psychological Coercion&#x201D; on the basis of big data: Centered on Paragraph 2 of Article 40 of the Supervision Law", "doi": null, "abstractUrl": "/proceedings-article/icmeim/2020/962300a406/1syvdfbiH1m", "parentPublication": { "id": "proceedings/icmeim/2020/9623/0", "title": "2020 International Conference on Modern Education and Information Management (ICMEIM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmeim/2020/9623/0/962300a421", "title": "An Empirical Study on the Communication Effect of Mental Health Related WeChat Official Account Take &#x201C;Yi Xinli&#x201D; as An Example", "doi": null, "abstractUrl": 
"/proceedings-article/icmeim/2020/962300a421/1syvhj1L3ck", "parentPublication": { "id": "proceedings/icmeim/2020/9623/0", "title": "2020 International Conference on Modern Education and Information Management (ICMEIM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieit/2021/2563/0/256300a132", "title": "New Media Communication Research of &#x201C;Dingxiang Doctor&#x201D;", "doi": null, "abstractUrl": "/proceedings-article/ieit/2021/256300a132/1wHKk8lC73a", "parentPublication": { "id": "proceedings/ieit/2021/2563/0", "title": "2021 International Conference on Internet, Education and Information Technology (IEIT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icaie/2021/2492/0/249200a287", "title": "Analysis of WeChat Application Strategy in Parent-Kindergarten Cooperation", "doi": null, "abstractUrl": "/proceedings-article/icaie/2021/249200a287/1wV1LKzhMxq", "parentPublication": { "id": "proceedings/icaie/2021/2492/0", "title": "2021 2nd International Conference on Artificial Intelligence and Education (ICAIE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/eimss/2021/2707/0/270700a342", "title": "The Design of &#x201C;Cloud Teaching&#x201D; Process for &#x201C;Business and Finance Integration&#x201D; Course", "doi": null, "abstractUrl": "/proceedings-article/eimss/2021/270700a342/1yEZJhAJN7i", "parentPublication": { "id": "proceedings/eimss/2021/2707/0", "title": "2021 International Conference on Education, Information Management and Service Science (EIMSS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "09360523", "articleId": "1rqztKfC0pO", "__typename": "AdjacentArticleType" }, "next": { "fno": "09392297", "articleId": "1sq7pCMZtyU", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": 
[], "articleVideos": [] }
{ "issue": { "id": "12OmNxvO04X", "title": "PrePrints", "year": "5555", "issueNum": "01", "idPrefix": "tp", "pubType": "journal", "volume": null, "label": "PrePrints", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1JrMA4xh8o8", "doi": "10.1109/TPAMI.2022.3232502", "abstract": "The recently proposed neural radiance fields (NeRF) use a continuous function formulated as a multi-layer perceptron (MLP) to model the appearance and geometry of a 3D scene. This enables realistic synthesis of novel views, even for scenes with view dependent appearance. Many follow-up works have since extended NeRFs in different ways. However, a fundamental restriction of the method remains that it requires a large number of images captured from densely placed viewpoints for high-quality synthesis and the quality of the results quickly degrades when the number of captured views is insufficient. To address this problem, we propose a novel NeRF-based framework capable of high-quality view synthesis using only a sparse set of RGB-D images, which can be easily captured using cameras and LiDAR sensors on current consumer devices. First, a geometric proxy of the scene is reconstructed from the captured RGB-D images. Renderings of the reconstructed scene along with precise camera parameters can then be used to pre-train a network. Finally, the network is fine-tuned with a small number of real captured images. We further introduce a patch discriminator to supervise the network under novel views during fine-tuning, as well as a 3D color prior to improve synthesis quality. We demonstrate that our method can generate arbitrary novel views of a 3D scene from as few as 6 RGB-D images. 
Extensive experiments show the improvements of our method compared with the existing NeRF-based methods, including approaches that also aim to reduce the number of input images.", "abstracts": [ { "abstractType": "Regular", "content": "The recently proposed neural radiance fields (NeRF) use a continuous function formulated as a multi-layer perceptron (MLP) to model the appearance and geometry of a 3D scene. This enables realistic synthesis of novel views, even for scenes with view dependent appearance. Many follow-up works have since extended NeRFs in different ways. However, a fundamental restriction of the method remains that it requires a large number of images captured from densely placed viewpoints for high-quality synthesis and the quality of the results quickly degrades when the number of captured views is insufficient. To address this problem, we propose a novel NeRF-based framework capable of high-quality view synthesis using only a sparse set of RGB-D images, which can be easily captured using cameras and LiDAR sensors on current consumer devices. First, a geometric proxy of the scene is reconstructed from the captured RGB-D images. Renderings of the reconstructed scene along with precise camera parameters can then be used to pre-train a network. Finally, the network is fine-tuned with a small number of real captured images. We further introduce a patch discriminator to supervise the network under novel views during fine-tuning, as well as a 3D color prior to improve synthesis quality. We demonstrate that our method can generate arbitrary novel views of a 3D scene from as few as 6 RGB-D images. 
Extensive experiments show the improvements of our method compared with the existing NeRF-based methods, including approaches that also aim to reduce the number of input images.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "The recently proposed neural radiance fields (NeRF) use a continuous function formulated as a multi-layer perceptron (MLP) to model the appearance and geometry of a 3D scene. This enables realistic synthesis of novel views, even for scenes with view dependent appearance. Many follow-up works have since extended NeRFs in different ways. However, a fundamental restriction of the method remains that it requires a large number of images captured from densely placed viewpoints for high-quality synthesis and the quality of the results quickly degrades when the number of captured views is insufficient. To address this problem, we propose a novel NeRF-based framework capable of high-quality view synthesis using only a sparse set of RGB-D images, which can be easily captured using cameras and LiDAR sensors on current consumer devices. First, a geometric proxy of the scene is reconstructed from the captured RGB-D images. Renderings of the reconstructed scene along with precise camera parameters can then be used to pre-train a network. Finally, the network is fine-tuned with a small number of real captured images. We further introduce a patch discriminator to supervise the network under novel views during fine-tuning, as well as a 3D color prior to improve synthesis quality. We demonstrate that our method can generate arbitrary novel views of a 3D scene from as few as 6 RGB-D images. 
Extensive experiments show the improvements of our method compared with the existing NeRF-based methods, including approaches that also aim to reduce the number of input images.", "title": "Neural Radiance Fields from Sparse RGB-D Images for High-Quality View Synthesis", "normalizedTitle": "Neural Radiance Fields from Sparse RGB-D Images for High-Quality View Synthesis", "fno": "09999509", "hasPdf": true, "idPrefix": "tp", "keywords": [ "Rendering Computer Graphics", "Geometry", "Cameras", "Training", "Image Reconstruction", "Three Dimensional Displays", "Task Analysis", "Novel View Synthesis", "Neural Rendering", "Neural Radiance Fields" ], "authors": [ { "givenName": "Yu-Jie", "surname": "Yuan", "fullName": "Yu-Jie Yuan", "affiliation": "Beijing Key Laboratory of Mobile Computing and Pervasive Device, Institute of Computing Technology, Chinese Academy of Sciences, China", "__typename": "ArticleAuthorType" }, { "givenName": "Yu-Kun", "surname": "Lai", "fullName": "Yu-Kun Lai", "affiliation": "School of Computer Science & Informatics, Cardiff University, UK", "__typename": "ArticleAuthorType" }, { "givenName": "Yi-Hua", "surname": "Huang", "fullName": "Yi-Hua Huang", "affiliation": "Beijing Key Laboratory of Mobile Computing and Pervasive Device, Institute of Computing Technology, Chinese Academy of Sciences, China", "__typename": "ArticleAuthorType" }, { "givenName": "Leif", "surname": "Kobbelt", "fullName": "Leif Kobbelt", "affiliation": "Institute for Computer Graphics and Multimedia, RWTH Aachen University, Germany", "__typename": "ArticleAuthorType" }, { "givenName": "Lin", "surname": "Gao", "fullName": "Lin Gao", "affiliation": "Beijing Key Laboratory of Mobile Computing and Pervasive Device, Institute of Computing Technology, Chinese Academy of Sciences, China", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "01", "pubDate": "2022-12-01 00:00:00", "pubType": 
"trans", "pages": "1-16", "year": "5555", "issn": "0162-8828", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/iccv/2021/2812/0/281200f569", "title": "UNISURF: Unifying Neural Implicit Surfaces and Radiance Fields for Multi-View Reconstruction", "doi": null, "abstractUrl": "/proceedings-article/iccv/2021/281200f569/1BmEEU96fmg", "parentPublication": { "id": "proceedings/iccv/2021/2812/0", "title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2021/2812/0/281200f845", "title": "Nerfies: Deformable Neural Radiance Fields", "doi": null, "abstractUrl": "/proceedings-article/iccv/2021/281200f845/1BmL0KETWzm", "parentPublication": { "id": "proceedings/iccv/2021/2812/0", "title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600m2882", "title": "Dense Depth Priors for Neural Radiance Fields from Sparse Input Views", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600m2882/1H0Nhx2wMqQ", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600f460", "title": "Mip-NeRF 360: Unbounded Anti-Aliased Neural Radiance Fields", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600f460/1H0OphoghaM", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600m2922", "title": "Urban Radiance 
Fields", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600m2922/1H1iyxjTNmg", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600p5170", "title": "Aug-NeRF: Training Stronger Neural Radiance Fields with Triple-Level Physically-Grounded Augmentations", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600p5170/1H1jhjLRpRu", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600f470", "title": "RegNeRF: Regularizing Neural Radiance Fields for View Synthesis from Sparse Inputs", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600f470/1H1mpdxQEq4", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600s8388", "title": "NeRFReN: Neural Radiance Fields with Reflections", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600s8388/1H1nhdo3vFe", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2023/9346/0/934600a795", "title": "Beyond RGB: Scene-Property Synthesis with Neural Radiance Fields", "doi": null, "abstractUrl": "/proceedings-article/wacv/2023/934600a795/1KxVhi7yhR6", "parentPublication": { "id": "proceedings/wacv/2023/9346/0", "title": "2023 IEEE/CVF Winter 
Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2021/4509/0/450900k0313", "title": "D-NeRF: Neural Radiance Fields for Dynamic Scenes", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2021/450900k0313/1yeLrBwGgik", "parentPublication": { "id": "proceedings/cvpr/2021/4509/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "09999492", "articleId": "1JrMzLkNsRy", "__typename": "AdjacentArticleType" }, "next": { "fno": "10002302", "articleId": "1JtvFymXfA4", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "1JDoYDGZFmM", "name": "ttp555501-09999509s1-supp1-3232502.pdf", "location": "https://www.computer.org/csdl/api/v1/extra/ttp555501-09999509s1-supp1-3232502.pdf", "extension": "pdf", "size": "27.6 MB", "__typename": "WebExtraType" }, { "id": "1JDoYgC5KFy", "name": "ttp555501-09999509s1-supp2-3232502.mp4", "location": "https://www.computer.org/csdl/api/v1/extra/ttp555501-09999509s1-supp2-3232502.mp4", "extension": "mp4", "size": "218 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "12OmNvqEvRo", "title": "PrePrints", "year": "5555", "issueNum": "01", "idPrefix": "tg", "pubType": "journal", "volume": null, "label": "PrePrints", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1LH8EZ3NEGI", "doi": "10.1109/TVCG.2023.3260001", "abstract": "During traditional surgeries, planning and instrument guidance is displayed on an external screen. Recent developments of augmented reality (AR) techniques can overcome obstacles including hand-eye discoordination and heavy mental load. Among these AR technologies, optical see-through (OST) schemes with stereoscopic displays can provide depth perception and retain the physical scene for safety considerations. However, limitations still exist in certain AR systems and the influence of these factors on surgical performance is yet to explore. To this end, experiments of multi-scale surgical tasks were carried out to compare head-mounted display (HMD) AR and autostereoscopic image overlay (AIO) AR, concerning objective performance and subjective evaluation. To solely analyze effects brought by display techniques, the tracking system in each included display system was identical and similar tracking accuracy was proved by a preliminary experiment. Focus and context rendering was utilized to enhance in-situ visualization for surgical guidance. Latency values of all display systems were assessed and a delay experiment proved the latency differences had no significant impact on user performance. Results of multi-scale surgical tasks showed that HMD outperformed in detailed operations probably due to stable resolution along the depth axis, while AIO had better performance in larger-scale operations for better depth perception. 
This paper helps point out the critical limitations of current OST AR techniques and potentially promotes the progress of AR applications in surgical guidance.", "abstracts": [ { "abstractType": "Regular", "content": "During traditional surgeries, planning and instrument guidance is displayed on an external screen. Recent developments of augmented reality (AR) techniques can overcome obstacles including hand-eye discoordination and heavy mental load. Among these AR technologies, optical see-through (OST) schemes with stereoscopic displays can provide depth perception and retain the physical scene for safety considerations. However, limitations still exist in certain AR systems and the influence of these factors on surgical performance is yet to explore. To this end, experiments of multi-scale surgical tasks were carried out to compare head-mounted display (HMD) AR and autostereoscopic image overlay (AIO) AR, concerning objective performance and subjective evaluation. To solely analyze effects brought by display techniques, the tracking system in each included display system was identical and similar tracking accuracy was proved by a preliminary experiment. Focus and context rendering was utilized to enhance in-situ visualization for surgical guidance. Latency values of all display systems were assessed and a delay experiment proved the latency differences had no significant impact on user performance. Results of multi-scale surgical tasks showed that HMD outperformed in detailed operations probably due to stable resolution along the depth axis, while AIO had better performance in larger-scale operations for better depth perception. This paper helps point out the critical limitations of current OST AR techniques and potentially promotes the progress of AR applications in surgical guidance.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "During traditional surgeries, planning and instrument guidance is displayed on an external screen. 
Recent developments of augmented reality (AR) techniques can overcome obstacles including hand-eye discoordination and heavy mental load. Among these AR technologies, optical see-through (OST) schemes with stereoscopic displays can provide depth perception and retain the physical scene for safety considerations. However, limitations still exist in certain AR systems and the influence of these factors on surgical performance is yet to explore. To this end, experiments of multi-scale surgical tasks were carried out to compare head-mounted display (HMD) AR and autostereoscopic image overlay (AIO) AR, concerning objective performance and subjective evaluation. To solely analyze effects brought by display techniques, the tracking system in each included display system was identical and similar tracking accuracy was proved by a preliminary experiment. Focus and context rendering was utilized to enhance in-situ visualization for surgical guidance. Latency values of all display systems were assessed and a delay experiment proved the latency differences had no significant impact on user performance. Results of multi-scale surgical tasks showed that HMD outperformed in detailed operations probably due to stable resolution along the depth axis, while AIO had better performance in larger-scale operations for better depth perception. 
This paper helps point out the critical limitations of current OST AR techniques and potentially promotes the progress of AR applications in surgical guidance.", "title": "A Comparative Evaluation of Optical See-through Augmented Reality in Surgical Guidance", "normalizedTitle": "A Comparative Evaluation of Optical See-through Augmented Reality in Surgical Guidance", "fno": "10077744", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Surgery", "Resists", "Navigation", "Task Analysis", "Optical Imaging", "Display Systems", "Biomedical Optical Imaging", "Augmented Reality", "Autostereoscopic Image Overlay", "Head Mounted Display", "Optical See Through", "Surgical Guidance" ], "authors": [ { "givenName": "Ruiyang", "surname": "Li", "fullName": "Ruiyang Li", "affiliation": "Department of Biomedical Engineering, Tsinghua University, Beijing, China", "__typename": "ArticleAuthorType" }, { "givenName": "Boxuan", "surname": "Han", "fullName": "Boxuan Han", "affiliation": "Department of Biomedical Engineering, Tsinghua University, Beijing, China", "__typename": "ArticleAuthorType" }, { "givenName": "Haowei", "surname": "Li", "fullName": "Haowei Li", "affiliation": "Department of Biomedical Engineering, Tsinghua University, Beijing, China", "__typename": "ArticleAuthorType" }, { "givenName": "Longfei", "surname": "Ma", "fullName": "Longfei Ma", "affiliation": "Department of Biomedical Engineering, Tsinghua University, Beijing, China", "__typename": "ArticleAuthorType" }, { "givenName": "Xinran", "surname": "Zhang", "fullName": "Xinran Zhang", "affiliation": "Department of Biomedical Engineering, Tsinghua University, Beijing, China", "__typename": "ArticleAuthorType" }, { "givenName": "Zhe", "surname": "Zhao", "fullName": "Zhe Zhao", "affiliation": "Department of Orthopaedics, Tsinghua University, Beijing Tsinghua Changgung Hospital. 
School of Clinical Medicine, Beijing, China", "__typename": "ArticleAuthorType" }, { "givenName": "Hongen", "surname": "Liao", "fullName": "Hongen Liao", "affiliation": "Department of Biomedical Engineering, Tsinghua University, Beijing, China", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "01", "pubDate": "2023-03-01 00:00:00", "pubType": "trans", "pages": "1-13", "year": "5555", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/ismar/2015/7660/0/7660a043", "title": "Simultaneous Direct and Augmented View Distortion Calibration of Optical See-Through Head-Mounted Displays", "doi": null, "abstractUrl": "/proceedings-article/ismar/2015/7660a043/12OmNC1oT64", "parentPublication": { "id": "proceedings/ismar/2015/7660/0", "title": "2015 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2017/6327/0/6327a052", "title": "[POSTER] Hybrid Video/Optical See-Through HMD", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2017/6327a052/12OmNy4r3Ph", "parentPublication": { "id": "proceedings/ismar-adjunct/2017/6327/0", "title": "2017 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cbms/2012/2049/0/06266298", "title": "Inexpensive monocular pico-projector-based augmented reality display for surgical microscope", "doi": null, "abstractUrl": "/proceedings-article/cbms/2012/06266298/12OmNzvhvy0", "parentPublication": { "id": "proceedings/cbms/2012/2049/0", "title": "2012 25th IEEE International Symposium on Computer-Based Medical Systems (CBMS)", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "proceedings/var4good/2018/5977/0/08576884", "title": "Augmented Visual Instruction for Surgical Practice and Training", "doi": null, "abstractUrl": "/proceedings-article/var4good/2018/08576884/17D45WODasn", "parentPublication": { "id": "proceedings/var4good/2018/5977/0", "title": "2018 IEEE Workshop on Augmented and Virtual Realities for Good (VAR4Good)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2022/8402/0/840200a402", "title": "AR-Assisted Surgical Guidance System for Ventriculostomy", "doi": null, "abstractUrl": "/proceedings-article/vrw/2022/840200a402/1CJdTYykk5W", "parentPublication": { "id": "proceedings/vrw/2022/8402/0", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08797682", "title": "Interactive and Multimodal-based Augmented Reality for Remote Assistance using a Digital Surgical Microscope", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08797682/1cJ12jTP75S", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/05/08998139", "title": "Factored Occlusion: Single Spatial Light Modulator Occlusion-capable Optical See-through Augmented Reality Display", "doi": null, "abstractUrl": "/journal/tg/2020/05/08998139/1hrXe0Hbv0I", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2020/8508/0/850800a080", "title": "Can Retinal Projection Displays Improve Spatial Perception in Augmented Reality?", "doi": null, "abstractUrl": 
"/proceedings-article/ismar/2020/850800a080/1pysvYTZF6w", "parentPublication": { "id": "proceedings/ismar/2020/8508/0", "title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2021/4057/0/405700a454", "title": "Augmented Reality based Surgical Navigation for Percutaneous Endoscopic Transforaminal Discectomy", "doi": null, "abstractUrl": "/proceedings-article/vrw/2021/405700a454/1tnWxe3BhxS", "parentPublication": { "id": "proceedings/vrw/2021/4057/0", "title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2021/4057/0/405700a456", "title": "Augmented Reality based Surgical Navigation for Percutaneous Endoscopic Transforaminal Discectomy", "doi": null, "abstractUrl": "/proceedings-article/vrw/2021/405700a456/1tnXaPRVToI", "parentPublication": { "id": "proceedings/vrw/2021/4057/0", "title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "10077087", "articleId": "1LFQ7zitdtK", "__typename": "AdjacentArticleType" }, "next": { "fno": "10081444", "articleId": "1LRbQOpNdTO", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "1LINp9BoKBy", "name": "ttg555501-010077744s1-supp1-3260001.mp4", "location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-010077744s1-supp1-3260001.mp4", "extension": "mp4", "size": "21.2 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "12OmNBl6EKh", "title": "April", "year": "2017", "issueNum": "04", "idPrefix": "tg", "pubType": "journal", "volume": "23", "label": "April", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUwcS1D1", "doi": "10.1109/TVCG.2017.2657058", "abstract": "Accommodative depth cues, a wide field of view, and ever-higher resolutions all present major hardware design challenges for near-eye displays. Optimizing a design to overcome one of these challenges typically leads to a trade-off in the others. We tackle this problem by introducing an all-in-one solution - a new wide field of view, gaze-tracked near-eye display for augmented reality applications. The key component of our solution is the use of a single see-through, varifocal deformable membrane mirror for each eye reflecting a display. They are controlled by airtight cavities and change the effective focal power to present a virtual image at a target depth plane which is determined by the gaze tracker. The benefits of using the membranes include wide field of view (100&#x00B0; diagonal) and fast depth switching (from 20 cm to infinity within 300 ms). Our subjective experiment verifies the prototype and demonstrates its potential benefits for near-eye see-through displays.", "abstracts": [ { "abstractType": "Regular", "content": "Accommodative depth cues, a wide field of view, and ever-higher resolutions all present major hardware design challenges for near-eye displays. Optimizing a design to overcome one of these challenges typically leads to a trade-off in the others. We tackle this problem by introducing an all-in-one solution - a new wide field of view, gaze-tracked near-eye display for augmented reality applications. The key component of our solution is the use of a single see-through, varifocal deformable membrane mirror for each eye reflecting a display. 
They are controlled by airtight cavities and change the effective focal power to present a virtual image at a target depth plane which is determined by the gaze tracker. The benefits of using the membranes include wide field of view (100&#x00B0; diagonal) and fast depth switching (from 20 cm to infinity within 300 ms). Our subjective experiment verifies the prototype and demonstrates its potential benefits for near-eye see-through displays.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Accommodative depth cues, a wide field of view, and ever-higher resolutions all present major hardware design challenges for near-eye displays. Optimizing a design to overcome one of these challenges typically leads to a trade-off in the others. We tackle this problem by introducing an all-in-one solution - a new wide field of view, gaze-tracked near-eye display for augmented reality applications. The key component of our solution is the use of a single see-through, varifocal deformable membrane mirror for each eye reflecting a display. They are controlled by airtight cavities and change the effective focal power to present a virtual image at a target depth plane which is determined by the gaze tracker. The benefits of using the membranes include wide field of view (100° diagonal) and fast depth switching (from 20 cm to infinity within 300 ms). 
Our subjective experiment verifies the prototype and demonstrates its potential benefits for near-eye see-through displays.", "title": "Wide Field Of View Varifocal Near-Eye Display Using See-Through Deformable Membrane Mirrors", "normalizedTitle": "Wide Field Of View Varifocal Near-Eye Display Using See-Through Deformable Membrane Mirrors", "fno": "07829412", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Augmented Reality", "Gaze Tracking", "Image Processing", "Mirrors", "Field Of View", "FOV", "Near Eye Display", "NED", "Varifocal Deformable Membrane Mirror", "Gaze Tracking", "Augmented Reality", "AR", "Virtual Image", "Mirrors", "Image Resolution", "Prototypes", "Optical Imaging", "Holography", "Holographic Optical Components", "Augmented Reality", "Displays", "Focus Accommodation", "Perception", "User Study" ], "authors": [ { "givenName": "David", "surname": "Dunn", "fullName": "David Dunn", "affiliation": "UNC, Chapel Hill", "__typename": "ArticleAuthorType" }, { "givenName": "Cary", "surname": "Tippets", "fullName": "Cary Tippets", "affiliation": "UNC, Chapel Hill", "__typename": "ArticleAuthorType" }, { "givenName": "Kent", "surname": "Torell", "fullName": "Kent Torell", "affiliation": "UNC, Chapel Hill", "__typename": "ArticleAuthorType" }, { "givenName": "Petr", "surname": "Kellnhofer", "fullName": "Petr Kellnhofer", "affiliation": "MPI Informatik", "__typename": "ArticleAuthorType" }, { "givenName": "Kaan", "surname": "Akşit", "fullName": "Kaan Akşit", "affiliation": "NVIDIA Research", "__typename": "ArticleAuthorType" }, { "givenName": "Piotr", "surname": "Didyk", "fullName": "Piotr Didyk", "affiliation": "MMCISaarland University", "__typename": "ArticleAuthorType" }, { "givenName": "Karol", "surname": "Myszkowski", "fullName": "Karol Myszkowski", "affiliation": "MPI Informatik", "__typename": "ArticleAuthorType" }, { "givenName": "David", "surname": "Luebke", "fullName": "David Luebke", "affiliation": "NVIDIA Research", "__typename": 
"ArticleAuthorType" }, { "givenName": "Henry", "surname": "Fuchs", "fullName": "Henry Fuchs", "affiliation": "UNC, Chapel Hill", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "04", "pubDate": "2017-04-01 00:00:00", "pubType": "trans", "pages": "1322-1331", "year": "2017", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/iccp/2010/7023/0/05585090", "title": "Computational photography and compressive holography", "doi": null, "abstractUrl": "/proceedings-article/iccp/2010/05585090/12OmNvonIOE", "parentPublication": { "id": "proceedings/iccp/2010/7023/0", "title": "2010 IEEE International Conference on Computational Photography (ICCP 2010)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icii/2001/7010/4/00983826", "title": "Research on principles of combined polygon scanner for bar code identification", "doi": null, "abstractUrl": "/proceedings-article/icii/2001/00983826/12OmNz61cXo", "parentPublication": { "id": "icii/2001/7010/4", "title": "2001 International Conferences on Info-tech and Info-net. 
Proceedings", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2016/11/07523375", "title": "Gaussian Light Field: Estimation of Viewpoint-Dependent Blur for Optical See-Through Head-Mounted Displays", "doi": null, "abstractUrl": "/journal/tg/2016/11/07523375/13rRUxYINfi", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/dasc-picom-cbdcom-cyberscitech/2021/2174/0/217400a912", "title": "Eye Movement Driving Analysis during Parallel Parking along Roadways: Comparison of Experienced and Novice Drivers", "doi": null, "abstractUrl": "/proceedings-article/dasc-picom-cbdcom-cyberscitech/2021/217400a912/1BLnrXODDri", "parentPublication": { "id": "proceedings/dasc-picom-cbdcom-cyberscitech/2021/2174/0", "title": "2021 IEEE Intl Conf on Dependable, Autonomic and Secure Computing, Intl Conf on Pervasive Intelligence and Computing, Intl Conf on Cloud and Big Data Computing, Intl Conf on Cyber Science and Technology Congress (DASC/PiCom/CBDCom/CyberSciTech)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2022/9617/0/961700a746", "title": "Metameric Varifocal Holograms", "doi": null, "abstractUrl": "/proceedings-article/vr/2022/961700a746/1CJcc750PQI", "parentPublication": { "id": "proceedings/vr/2022/9617/0", "title": "2022 IEEE on Conference Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2023/4815/0/481500a418", "title": "Realistic Defocus Blur for Multiplane Computer-Generated Holography", "doi": null, "abstractUrl": "/proceedings-article/vr/2023/481500a418/1MNgFZaCqiI", "parentPublication": { "id": "proceedings/vr/2023/4815/0", "title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08798273", "title": "Required Accuracy of Gaze Tracking for Varifocal Displays", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08798273/1cJ0T4CUJTq", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/11/08794584", "title": "Towards a Switchable AR/VR Near-eye Display with Accommodation-Vergence and Eyeglass Prescription Support", "doi": null, "abstractUrl": "/journal/tg/2019/11/08794584/1dNHlOrNW5W", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2020/8508/0/850800a312", "title": "Towards Eyeglass-style Holographic Near-eye Displays with Statically", "doi": null, "abstractUrl": "/proceedings-article/ismar/2020/850800a312/1pysyaCOe76", "parentPublication": { "id": "proceedings/ismar/2020/8508/0", "title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/05/09383112", "title": "Beaming Displays", "doi": null, "abstractUrl": "/journal/tg/2021/05/09383112/1saZzKxYSqI", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "07835276", "articleId": "13rRUILLkvw", "__typename": "AdjacentArticleType" }, "next": { "fno": "07833188", "articleId": "13rRUxBrGh6", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "17ShDTXFgG4", "name": "ttg201704-07829412s1.zip", 
"location": "https://www.computer.org/csdl/api/v1/extra/ttg201704-07829412s1.zip", "extension": "zip", "size": "836 kB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "12OmNvAiSp1", "title": "Nov.", "year": "2018", "issueNum": "11", "idPrefix": "tg", "pubType": "journal", "volume": "24", "label": "Nov.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "14M3DXKXjwc", "doi": "10.1109/TVCG.2018.2868532", "abstract": "We describe a system which corrects dynamically for the focus of the real world surrounding the near-eye display of the user and simultaneously the internal display for augmented synthetic imagery, with an aim of completely replacing the user prescription eyeglasses. The ability to adjust focus for both real and virtual stimuli will be useful for a wide variety of users, but especially for users over 40 years of age who have limited accommodation range. Our proposed solution employs a tunable-focus lens for dynamic prescription vision correction, and a varifocal internal display for setting the virtual imagery at appropriate spatially registered depths. We also demonstrate a proof of concept prototype to verify our design and discuss the challenges to building an auto-focus augmented reality eyeglasses for both real and virtual.", "abstracts": [ { "abstractType": "Regular", "content": "We describe a system which corrects dynamically for the focus of the real world surrounding the near-eye display of the user and simultaneously the internal display for augmented synthetic imagery, with an aim of completely replacing the user prescription eyeglasses. The ability to adjust focus for both real and virtual stimuli will be useful for a wide variety of users, but especially for users over 40 years of age who have limited accommodation range. Our proposed solution employs a tunable-focus lens for dynamic prescription vision correction, and a varifocal internal display for setting the virtual imagery at appropriate spatially registered depths. 
We also demonstrate a proof of concept prototype to verify our design and discuss the challenges to building an auto-focus augmented reality eyeglasses for both real and virtual.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We describe a system which corrects dynamically for the focus of the real world surrounding the near-eye display of the user and simultaneously the internal display for augmented synthetic imagery, with an aim of completely replacing the user prescription eyeglasses. The ability to adjust focus for both real and virtual stimuli will be useful for a wide variety of users, but especially for users over 40 years of age who have limited accommodation range. Our proposed solution employs a tunable-focus lens for dynamic prescription vision correction, and a varifocal internal display for setting the virtual imagery at appropriate spatially registered depths. We also demonstrate a proof of concept prototype to verify our design and discuss the challenges to building an auto-focus augmented reality eyeglasses for both real and virtual.", "title": "FocusAR: Auto-focus Augmented Reality Eyeglasses for both Real World and Virtual Imagery", "normalizedTitle": "FocusAR: Auto-focus Augmented Reality Eyeglasses for both Real World and Virtual Imagery", "fno": "08458263", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Lenses", "Meters", "Liquids", "Prototypes", "Augmented Reality", "Glass", "Apertures", "Augmented Reality", "Displays", "Auto Focus", "Focus Accommodation", "Prescription Correction" ], "authors": [ { "givenName": "Praneeth", "surname": "Chakravarthula", "fullName": "Praneeth Chakravarthula", "affiliation": "UNC, Chapel Hill", "__typename": "ArticleAuthorType" }, { "givenName": "David", "surname": "Dunn", "fullName": "David Dunn", "affiliation": "UNC, Chapel Hill", "__typename": "ArticleAuthorType" }, { "givenName": "Kaan", "surname": "Akşit", "fullName": "Kaan Akşit", "affiliation": "NVIDIA Research", "__typename": 
"ArticleAuthorType" }, { "givenName": "Henry", "surname": "Fuchs", "fullName": "Henry Fuchs", "affiliation": "UNC, Chapel Hill", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "11", "pubDate": "2018-11-01 00:00:00", "pubType": "trans", "pages": "2906-2916", "year": "2018", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/iccvw/2009/4442/0/05457520", "title": "Single image focus editing", "doi": null, "abstractUrl": "/proceedings-article/iccvw/2009/05457520/12OmNxwWozJ", "parentPublication": { "id": "proceedings/iccvw/2009/4442/0", "title": "2009 IEEE 12th International Conference on Computer Vision Workshops, ICCV Workshops", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2017/04/mcg2017040084", "title": "A Virtual Try-On System for Prescription Eyeglasses", "doi": null, "abstractUrl": "/magazine/cg/2017/04/mcg2017040084/13rRUxcsYRn", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2018/1737/0/08486588", "title": "Depth Aware Portrait Segmentation Using Dual Focus Images", "doi": null, "abstractUrl": "/proceedings-article/icme/2018/08486588/14jQfP3PmvM", "parentPublication": { "id": "proceedings/icme/2018/1737/0", "title": "2018 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/05/09714124", "title": "Video See-Through Mixed Reality with Focus Cues", "doi": null, "abstractUrl": "/journal/tg/2022/05/09714124/1B0XWyWo5KE", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, 
"__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08797780", "title": "[DC] Auto-focus Augmented Reality Eyeglasses for both Real World and Virtual Imagery", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08797780/1cJ0O5YpzLW", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/11/08794584", "title": "Towards a Switchable AR/VR Near-eye Display with Accommodation-Vergence and Eyeglass Prescription Support", "doi": null, "abstractUrl": "/journal/tg/2019/11/08794584/1dNHlOrNW5W", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/03/08868217", "title": "Computational Phase-Modulated Eyeglasses", "doi": null, "abstractUrl": "/journal/tg/2021/03/08868217/1e7BZyDZnvq", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2019/4765/0/476500a245", "title": "Compact Light Field Augmented Reality Display with Eliminated Stray Light using Discrete Structures", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2019/476500a245/1gysiY6ymKQ", "parentPublication": { "id": "proceedings/ismar-adjunct/2019/4765/0", "title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/05/08999805", "title": "Illuminated Focus: Vision Augmentation using Spatial Defocusing via Focal Sweep Eyeglasses and High-Speed Projector", "doi": null, "abstractUrl": 
"/journal/tg/2020/05/08999805/1hpPCtKIAaA", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2020/7168/0/716800c542", "title": "Blur Aware Calibration of Multi-Focus Plenoptic Camera", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2020/716800c542/1m3nIj7S0cU", "parentPublication": { "id": "proceedings/cvpr/2020/7168/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "08492363", "articleId": "14M3E1hwrFS", "__typename": "AdjacentArticleType" }, "next": { "fno": "08462799", "articleId": "14M3DYGRu3m", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNy5hRch", "title": "Nov.", "year": "2019", "issueNum": "11", "idPrefix": "tg", "pubType": "journal", "volume": "25", "label": "Nov.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1dgvaPxmhbi", "doi": "10.1109/TVCG.2019.2933120", "abstract": "Optical see-through augmented reality (AR) systems are a next-generation computing platform that offer unprecedented user experiences by seamlessly combining physical and digital content. Many of the traditional challenges of these displays have been significantly improved over the last few years, but AR experiences offered by today's systems are far from seamless and perceptually realistic. Mutually consistent occlusions between physical and digital objects are typically not supported. When mutual occlusion is supported, it is only supported for a fixed depth. We propose a new optical see-through AR display system that renders mutual occlusion in a depth-dependent, perceptually realistic manner. To this end, we introduce varifocal occlusion displays based on focus-tunable optics, which comprise a varifocal lens system and spatial light modulators that enable depth-corrected hard-edge occlusions for AR experiences. We derive formal optimization methods and closed-form solutions for driving this tunable lens system and demonstrate a monocular varifocal occlusion-capable optical see-through AR display capable of perceptually realistic occlusion across a large depth range.", "abstracts": [ { "abstractType": "Regular", "content": "Optical see-through augmented reality (AR) systems are a next-generation computing platform that offer unprecedented user experiences by seamlessly combining physical and digital content. 
Many of the traditional challenges of these displays have been significantly improved over the last few years, but AR experiences offered by today's systems are far from seamless and perceptually realistic. Mutually consistent occlusions between physical and digital objects are typically not supported. When mutual occlusion is supported, it is only supported for a fixed depth. We propose a new optical see-through AR display system that renders mutual occlusion in a depth-dependent, perceptually realistic manner. To this end, we introduce varifocal occlusion displays based on focus-tunable optics, which comprise a varifocal lens system and spatial light modulators that enable depth-corrected hard-edge occlusions for AR experiences. We derive formal optimization methods and closed-form solutions for driving this tunable lens system and demonstrate a monocular varifocal occlusion-capable optical see-through AR display capable of perceptually realistic occlusion across a large depth range.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Optical see-through augmented reality (AR) systems are a next-generation computing platform that offer unprecedented user experiences by seamlessly combining physical and digital content. Many of the traditional challenges of these displays have been significantly improved over the last few years, but AR experiences offered by today's systems are far from seamless and perceptually realistic. Mutually consistent occlusions between physical and digital objects are typically not supported. When mutual occlusion is supported, it is only supported for a fixed depth. We propose a new optical see-through AR display system that renders mutual occlusion in a depth-dependent, perceptually realistic manner. To this end, we introduce varifocal occlusion displays based on focus-tunable optics, which comprise a varifocal lens system and spatial light modulators that enable depth-corrected hard-edge occlusions for AR experiences. 
We derive formal optimization methods and closed-form solutions for driving this tunable lens system and demonstrate a monocular varifocal occlusion-capable optical see-through AR display capable of perceptually realistic occlusion across a large depth range.", "title": "Varifocal Occlusion-Capable Optical See-through Augmented Reality Display based on Focus-tunable Optics", "normalizedTitle": "Varifocal Occlusion-Capable Optical See-through Augmented Reality Display based on Focus-tunable Optics", "fno": "08827571", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Augmented Reality", "Computer Displays", "Lenses", "Optimisation", "Spatial Light Modulators", "Perceptually Realistic Occlusion", "Monocular Varifocal Occlusion Capable", "Tunable Lens System", "Depth Corrected Hard Edge Occlusions", "Varifocal Lens System", "Varifocal Occlusion Displays", "Perceptually Realistic Manner", "AR Display System", "Fixed Depth", "Mutual Occlusion", "Digital Objects", "Physical Objects", "Mutually Consistent Occlusions", "AR Experiences", "Digital Content", "Physical Content", "Focus Tunable Optics", "Augmented Reality Display", "Optical Imaging", "Adaptive Optics", "Optical Design", "Optical Distortion", "Lenses", "Optical Diffraction", "Augmented Reality", "Computational Displays", "Varifocal Display", "Occlusion" ], "authors": [ { "givenName": "Kishore", "surname": "Rathinavel", "fullName": "Kishore Rathinavel", "affiliation": "UNC Chapel Hill", "__typename": "ArticleAuthorType" }, { "givenName": "Gordon", "surname": "Wetzstein", "fullName": "Gordon Wetzstein", "affiliation": "Stanford University", "__typename": "ArticleAuthorType" }, { "givenName": "Henry", "surname": "Fuchs", "fullName": "Henry Fuchs", "affiliation": "UNC Chapel Hill", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "11", "pubDate": "2019-11-01 00:00:00", "pubType": "trans", "pages": "3125-3134", "year": 
"2019", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/vr/2016/0836/0/07504749", "title": "SharpView: Improved clarity of defocussed content on optical see-through head-mounted displays", "doi": null, "abstractUrl": "/proceedings-article/vr/2016/07504749/12OmNBBhN9g", "parentPublication": { "id": "proceedings/vr/2016/0836/0", "title": "2016 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2012/4660/0/06402574", "title": "Occlusion capable optical see-through head-mounted display using freeform optics", "doi": null, "abstractUrl": "/proceedings-article/ismar/2012/06402574/12OmNBEpnEt", "parentPublication": { "id": "proceedings/ismar/2012/4660/0", "title": "2012 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dui/2016/0842/0/07460049", "title": "SharpView: Improved clarity of defocused content on optical see-through head-mounted displays", "doi": null, "abstractUrl": "/proceedings-article/3dui/2016/07460049/12OmNBWzHQi", "parentPublication": { "id": "proceedings/3dui/2016/0842/0", "title": "2016 IEEE Symposium on 3D User Interfaces (3DUI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2015/1727/0/07223394", "title": "A procedure for accurate calibration of a tabletop haploscope AR environment", "doi": null, "abstractUrl": "/proceedings-article/vr/2015/07223394/12OmNBh8gZM", "parentPublication": { "id": "proceedings/vr/2015/1727/0", "title": "2015 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2015/04/07064856", "title": "Light-Field Correction for Spatial Calibration of Optical See-Through Head-Mounted Displays", "doi": null, 
"abstractUrl": "/journal/tg/2015/04/07064856/13rRUwjGoG5", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/05/08676155", "title": "Varifocal Occlusion for Optical See-Through Head-Mounted Displays using a Slide Occlusion Mask", "doi": null, "abstractUrl": "/journal/tg/2019/05/08676155/18LFfGhc49i", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2023/05/10050791", "title": "Add-on Occlusion: Turning Off-the-Shelf Optical See-through Head-mounted Displays Occlusion-capable", "doi": null, "abstractUrl": "/journal/tg/2023/05/10050791/1L039oS5wDm", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2023/4815/0/481500a237", "title": "A Compact Photochromic Occlusion Capable See-through Display with Holographic Lenses", "doi": null, "abstractUrl": "/proceedings-article/vr/2023/481500a237/1MNgTZ7ZNLO", "parentPublication": { "id": "proceedings/vr/2023/4815/0", "title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/05/08998139", "title": "Factored Occlusion: Single Spatial Light Modulator Occlusion-capable Optical See-through Augmented Reality Display", "doi": null, "abstractUrl": "/journal/tg/2020/05/08998139/1hrXe0Hbv0I", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2020/8508/0/850800a301", "title": "Super 
Wide-view Optical See-through Head Mounted Displays with Per-pixel Occlusion Capability", "doi": null, "abstractUrl": "/proceedings-article/ismar/2020/850800a301/1pysxIK95Yc", "parentPublication": { "id": "proceedings/ismar/2020/8508/0", "title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "08794584", "articleId": "1dNHlOrNW5W", "__typename": "AdjacentArticleType" }, "next": { "fno": "08794519", "articleId": "1cr2Z4zxQ9q", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "1LUpyYLBfeo", "title": "May", "year": "2023", "issueNum": "05", "idPrefix": "tg", "pubType": "journal", "volume": "29", "label": "May", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1KYovYmwfpm", "doi": "10.1109/TVCG.2023.3247072", "abstract": "Augmented Reality (AR) and Virtual Reality (VR) are pushing from the labs towards consumers, especially with social applications. These applications require visual representations of humans and intelligent entities. However, displaying and animating photo-realistic models comes with a high technical cost while low-fidelity representations may evoke eeriness and overall could degrade an experience. Thus, it is important to carefully select what kind of avatar to display. This article investigates the effects of rendering style and visible body parts in AR and VR by adopting a systematic literature review. We analyzed 72 papers that compare various avatar representations. Our analysis includes an outline of the research published between 2015 and 2022 on the topic of avatars and agents in AR and VR displayed using head-mounted displays, covering aspects like visible body parts (e.g., hands only, hands and head, full-body) and rendering style (e.g., abstract, cartoon, realistic); an overview of collected objective and subjective measures (e.g., task performance, presence, user experience, body ownership); and a classification of tasks where avatars and agents were used into task domains (physical activity, hand interaction, communication, game-like scenarios, and education/training). 
We discuss and synthesize our results within the context of today's AR and VR ecosystem, provide guidelines for practitioners, and finally identify and present promising research opportunities to encourage future research of avatars and agents in AR/VR environments.", "abstracts": [ { "abstractType": "Regular", "content": "Augmented Reality (AR) and Virtual Reality (VR) are pushing from the labs towards consumers, especially with social applications. These applications require visual representations of humans and intelligent entities. However, displaying and animating photo-realistic models comes with a high technical cost while low-fidelity representations may evoke eeriness and overall could degrade an experience. Thus, it is important to carefully select what kind of avatar to display. This article investigates the effects of rendering style and visible body parts in AR and VR by adopting a systematic literature review. We analyzed 72 papers that compare various avatar representations. Our analysis includes an outline of the research published between 2015 and 2022 on the topic of avatars and agents in AR and VR displayed using head-mounted displays, covering aspects like visible body parts (e.g., hands only, hands and head, full-body) and rendering style (e.g., abstract, cartoon, realistic); an overview of collected objective and subjective measures (e.g., task performance, presence, user experience, body ownership); and a classification of tasks where avatars and agents were used into task domains (physical activity, hand interaction, communication, game-like scenarios, and education/training). 
We discuss and synthesize our results within the context of today's AR and VR ecosystem, provide guidelines for practitioners, and finally identify and present promising research opportunities to encourage future research of avatars and agents in AR/VR environments.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Augmented Reality (AR) and Virtual Reality (VR) are pushing from the labs towards consumers, especially with social applications. These applications require visual representations of humans and intelligent entities. However, displaying and animating photo-realistic models comes with a high technical cost while low-fidelity representations may evoke eeriness and overall could degrade an experience. Thus, it is important to carefully select what kind of avatar to display. This article investigates the effects of rendering style and visible body parts in AR and VR by adopting a systematic literature review. We analyzed 72 papers that compare various avatar representations. Our analysis includes an outline of the research published between 2015 and 2022 on the topic of avatars and agents in AR and VR displayed using head-mounted displays, covering aspects like visible body parts (e.g., hands only, hands and head, full-body) and rendering style (e.g., abstract, cartoon, realistic); an overview of collected objective and subjective measures (e.g., task performance, presence, user experience, body ownership); and a classification of tasks where avatars and agents were used into task domains (physical activity, hand interaction, communication, game-like scenarios, and education/training). 
We discuss and synthesize our results within the context of today's AR and VR ecosystem, provide guidelines for practitioners, and finally identify and present promising research opportunities to encourage future research of avatars and agents in AR/VR environments.", "title": "A Systematic Review on the Visualization of Avatars and Agents in AR &#x0026; VR displayed using Head-Mounted Displays", "normalizedTitle": "A Systematic Review on the Visualization of Avatars and Agents in AR & VR displayed using Head-Mounted Displays", "fno": "10049669", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Augmented Reality", "Avatars", "Computer Animation", "Helmet Mounted Displays", "Rendering Computer Graphics", "Virtual Reality", "Abstract Cartoon Realistic", "AR Amp VR", "Augmented Reality", "Avatar Representations", "Avatars", "Body Ownership", "Cartoon Cartoon Realistic", "Head Mounted Displays", "High Technical Cost", "Low Fidelity Representations", "Photo Realistic Models", "Rendering Style", "Social Applications", "Systematic Literature Review", "Systematic Review", "Virtual Reality", "Visible Body Parts", "VR Ecosystem", "Avatars", "Rendering Computer Graphics", "Task Analysis", "Visualization", "Systematics", "Head Mounted Displays", "Databases", "Virtual Reality", "Augmented Reality", "Avatars", "Visualization" ], "authors": [ { "givenName": "Florian", "surname": "Weidner", "fullName": "Florian Weidner", "affiliation": "Virtual Worlds and Digital Games Group, Technische Universität Ilmenau, Germany", "__typename": "ArticleAuthorType" }, { "givenName": "Gerd", "surname": "Boettcher", "fullName": "Gerd Boettcher", "affiliation": "Virtual Worlds and Digital Games Group, Technische Universität Ilmenau, Germany", "__typename": "ArticleAuthorType" }, { "givenName": "Stephanie Arevalo", "surname": "Arboleda", "fullName": "Stephanie Arevalo Arboleda", "affiliation": "Audiovisual Technology Group, Technische Universität Ilmenau, Germany", "__typename": "ArticleAuthorType" 
}, { "givenName": "Chenyao", "surname": "Diao", "fullName": "Chenyao Diao", "affiliation": "Audiovisual Technology Group, Technische Universität Ilmenau, Germany", "__typename": "ArticleAuthorType" }, { "givenName": "Luljeta", "surname": "Sinani", "fullName": "Luljeta Sinani", "affiliation": "Audiovisual Technology Group, Technische Universität Ilmenau, Germany", "__typename": "ArticleAuthorType" }, { "givenName": "Christian", "surname": "Kunert", "fullName": "Christian Kunert", "affiliation": "Virtual Worlds and Digital Games Group, Technische Universität Ilmenau, Germany", "__typename": "ArticleAuthorType" }, { "givenName": "Christoph", "surname": "Gerhardt", "fullName": "Christoph Gerhardt", "affiliation": "Virtual Worlds and Digital Games Group, Technische Universität Ilmenau, Germany", "__typename": "ArticleAuthorType" }, { "givenName": "Wolfgang", "surname": "Broll", "fullName": "Wolfgang Broll", "affiliation": "Virtual Worlds and Digital Games Group, Technische Universität Ilmenau, Germany", "__typename": "ArticleAuthorType" }, { "givenName": "Alexander", "surname": "Raake", "fullName": "Alexander Raake", "affiliation": "Audiovisual Technology Group, Technische Universität Ilmenau, Germany", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "05", "pubDate": "2023-05-01 00:00:00", "pubType": "trans", "pages": "2596-2606", "year": "2023", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "trans/tg/2019/05/08648222", "title": "The Virtual Caliper: Rapid Creation of Metrically Accurate Avatars from 3D Measurements", "doi": null, "abstractUrl": "/journal/tg/2019/05/08648222/17QjJf0qqr2", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/vr/2022/9617/0/961700a350", "title": "Exploring Presence, Avatar Embodiment, and Body Perception with a Holographic Augmented Reality Mirror", "doi": null, "abstractUrl": "/proceedings-article/vr/2022/961700a350/1CJcn3q3J5K", "parentPublication": { "id": "proceedings/vr/2022/9617/0", "title": "2022 IEEE on Conference Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600s8632", "title": "Neural Head Avatars from Monocular RGB Videos", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600s8632/1H1htwlAaNa", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2022/5325/0/532500a666", "title": "Investigating User Embodiment of Inverse-Kinematic Avatars in Smartphone Augmented Reality", "doi": null, "abstractUrl": "/proceedings-article/ismar/2022/532500a666/1JrR5i5jDhe", "parentPublication": { "id": "proceedings/ismar/2022/5325/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08798169", "title": "Stand-alone, Wearable System for Full Body VR Avatars: Towards Physics-based 3D Interaction", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08798169/1cJ126EVaVi", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2019/4765/0/476500a026", "title": "The Kuroko Paradigm: The Implications of Augmenting Physical Interaction with AR Avatars", "doi": null, "abstractUrl": 
"/proceedings-article/ismar-adjunct/2019/476500a026/1gysn4uy67C", "parentPublication": { "id": "proceedings/ismar-adjunct/2019/4765/0", "title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aivr/2020/7463/0/746300a060", "title": "Photorealistic avatars to enhance the efficacy of Selfattachment psychotherapy", "doi": null, "abstractUrl": "/proceedings-article/aivr/2020/746300a060/1qpzCwDcDKM", "parentPublication": { "id": "proceedings/aivr/2020/7463/0", "title": "2020 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2021/4057/0/405700a715", "title": "[DC] Privacy in VR: Empowering Users with Emotional Privacy from Verbal and Non-verbal Behavior of Their Avatars", "doi": null, "abstractUrl": "/proceedings-article/vrw/2021/405700a715/1tnXsX6EMBa", "parentPublication": { "id": "proceedings/vrw/2021/4057/0", "title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2021/1838/0/255600a065", "title": "The Embodiment of Photorealistic Avatars Influences Female Body Weight Perception in Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/vr/2021/255600a065/1tuAAOZpdoQ", "parentPublication": { "id": "proceedings/vr/2021/1838/0", "title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/11/09523831", "title": "Avatars for Teleconsultation: Effects of Avatar Embodiment Techniques on User Perception in 3D Asymmetric Telepresence", "doi": null, "abstractUrl": "/journal/tg/2021/11/09523831/1wpqru2GjIY", "parentPublication": { 
"id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "10050341", "articleId": "1KYorVtFExW", "__typename": "AdjacentArticleType" }, "next": { "fno": "10049736", "articleId": "1KYowRibw1q", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNvqEvRo", "title": "PrePrints", "year": "5555", "issueNum": "01", "idPrefix": "tg", "pubType": "journal", "volume": null, "label": "PrePrints", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1E5LEepCqTC", "doi": "10.1109/TVCG.2022.3181262", "abstract": "In virtual reality, VR sickness resulting from continuous locomotion via controllers or joysticks is still a significant problem. In this paper, we present a set of algorithms to mitigate VR sickness that dynamically modulate the user&#x2019;s field of view by modifying the contrast of the periphery based on movement, color, and depth. In contrast with previous work, this vision modulator is a shader that is triggered by specific motions known to cause VR sickness, such as acceleration, strafing, and linear velocity. Moreover, the algorithm is governed by delta velocity, delta angle, and average color of the view. We ran two experiments with different washout periods to investigate the effectiveness of dynamic modulation on the symptoms of VR sickness, in which we compared this approach against baseline and pitch-black field-of-view restrictors. Our first experiment made use of a just-noticeable-sickness design, which can be useful for building experiments with a short washout period.", "abstracts": [ { "abstractType": "Regular", "content": "In virtual reality, VR sickness resulting from continuous locomotion via controllers or joysticks is still a significant problem. In this paper, we present a set of algorithms to mitigate VR sickness that dynamically modulate the user&#x2019;s field of view by modifying the contrast of the periphery based on movement, color, and depth. In contrast with previous work, this vision modulator is a shader that is triggered by specific motions known to cause VR sickness, such as acceleration, strafing, and linear velocity. 
Moreover, the algorithm is governed by delta velocity, delta angle, and average color of the view. We ran two experiments with different washout periods to investigate the effectiveness of dynamic modulation on the symptoms of VR sickness, in which we compared this approach against baseline and pitch-black field-of-view restrictors. Our first experiment made use of a just-noticeable-sickness design, which can be useful for building experiments with a short washout period.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In virtual reality, VR sickness resulting from continuous locomotion via controllers or joysticks is still a significant problem. In this paper, we present a set of algorithms to mitigate VR sickness that dynamically modulate the user’s field of view by modifying the contrast of the periphery based on movement, color, and depth. In contrast with previous work, this vision modulator is a shader that is triggered by specific motions known to cause VR sickness, such as acceleration, strafing, and linear velocity. Moreover, the algorithm is governed by delta velocity, delta angle, and average color of the view. We ran two experiments with different washout periods to investigate the effectiveness of dynamic modulation on the symptoms of VR sickness, in which we compared this approach against baseline and pitch-black field-of-view restrictors. 
Our first experiment made use of a just-noticeable-sickness design, which can be useful for building experiments with a short washout period.", "title": "Mitigation of VR Sickness during Locomotion with a Motion-Based Dynamic Vision Modulator", "normalizedTitle": "Mitigation of VR Sickness during Locomotion with a Motion-Based Dynamic Vision Modulator", "fno": "09793626", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Visualization", "Optical Flow", "Modulation", "Angular Velocity", "Image Color Analysis", "Teleportation", "Legged Locomotion", "VR Sickness", "Contrast Manipulation", "Vision Modulation", "Shading And Rendering" ], "authors": [ { "givenName": "Guanghan", "surname": "Zhao", "fullName": "Guanghan Zhao", "affiliation": "Osaka University, Japan", "__typename": "ArticleAuthorType" }, { "givenName": "Jason", "surname": "Orlosky", "fullName": "Jason Orlosky", "affiliation": "Osaka University, Japan", "__typename": "ArticleAuthorType" }, { "givenName": "Steven", "surname": "Feiner", "fullName": "Steven Feiner", "affiliation": "Columbia University, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Photchara", "surname": "Ratsamee", "fullName": "Photchara Ratsamee", "affiliation": "Osaka University, Japan", "__typename": "ArticleAuthorType" }, { "givenName": "Yuki", "surname": "Uranishi", "fullName": "Yuki Uranishi", "affiliation": "Osaka University, Japan", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "01", "pubDate": "2022-06-01 00:00:00", "pubType": "trans", "pages": "1-13", "year": "5555", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/vr/2017/6647/0/07892348", "title": "Steering locomotion by vestibular perturbation in room-scale VR", "doi": null, "abstractUrl": "/proceedings-article/vr/2017/07892348/12OmNvrMUgU", "parentPublication": { "id": 
"proceedings/vr/2017/6647/0", "title": "2017 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2018/3365/0/08446130", "title": "Rapid, Continuous Movement Between Nodes as an Accessible Virtual Reality Locomotion Technique", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08446130/13bd1f3HvEx", "parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cost/2022/6248/0/624800a169", "title": "Development of VR Motion Sickness Test Platform Based on UE", "doi": null, "abstractUrl": "/proceedings-article/cost/2022/624800a169/1H2pqPKjkAg", "parentPublication": { "id": "proceedings/cost/2022/6248/0", "title": "2022 International Conference on Culture-Oriented Science and Technology (CoST)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/10075482", "title": "An Evaluation of View Rotation Techniques for Seated Navigation in Virtual Reality", "doi": null, "abstractUrl": "/journal/tg/5555/01/10075482/1LAuCOR3RE4", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2023/4815/0/481500a094", "title": "An EEG-based Experiment on VR Sickness and Postural Instability While Walking in Virtual Environments", "doi": null, "abstractUrl": "/proceedings-article/vr/2023/481500a094/1MNgWtYsR5S", "parentPublication": { "id": "proceedings/vr/2023/4815/0", "title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08798158", "title": "PhantomLegs: Reducing Virtual Reality 
Sickness Using Head-Worn Haptic Devices", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08798158/1cJ16zT3GdW", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08798136", "title": "VR Sickness in Continuous Exposure to Live-action 180&#x00B0;Video", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08798136/1cJ1gPJX2og", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/11/08798880", "title": "Sick Moves! Motion Parameters as Indicators of Simulator Sickness", "doi": null, "abstractUrl": "/journal/tg/2019/11/08798880/1cumZbd4qNG", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2021/4057/0/405700a735", "title": "[DC] Towards Universal VR Sickness Mitigation Strategies", "doi": null, "abstractUrl": "/proceedings-article/vrw/2021/405700a735/1tnXDI2lhHq", "parentPublication": { "id": "proceedings/vrw/2021/4057/0", "title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2021/4057/0/405700a380", "title": "Evaluating VR Sickness in VR Locomotion Techniques", "doi": null, "abstractUrl": "/proceedings-article/vrw/2021/405700a380/1tnXc1raaxq", "parentPublication": { "id": "proceedings/vrw/2021/4057/0", "title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "09786815", "articleId": "1DSumaVNxG8", "__typename": "AdjacentArticleType" }, "next": { "fno": "09792437", "articleId": "1E5LEFpMA48", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "1Eb19m8rRTO", "name": "ttg555501-09793626s1-supp1-3181262.mp4", "location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09793626s1-supp1-3181262.mp4", "extension": "mp4", "size": "67.4 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "12OmNvqEvRo", "title": "PrePrints", "year": "5555", "issueNum": "01", "idPrefix": "tg", "pubType": "journal", "volume": null, "label": "PrePrints", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1K3XC5MZdGE", "doi": "10.1109/TVCG.2023.3238309", "abstract": "The use of Augmented Reality (AR) for navigation purposes has shown beneficial in assisting physicians during the performance of surgical procedures. These applications commonly require knowing the pose of surgical tools and patients to provide visual information that surgeons can use during the performance of the task. Existing medical-grade tracking systems use infrared cameras placed inside the Operating Room (OR) to identify retro-reflective markers attached to objects of interest and compute their pose. Some commercially available AR Head-Mounted Displays (HMDs) use similar cameras for self-localization, hand tracking, and estimating the objects&#x0027; depth. This work presents a framework that uses the built-in cameras of AR HMDs to enable accurate tracking of retro-reflective markers without the need to integrate any additional electronics into the HMD. The proposed framework can simultaneously track multiple tools without having previous knowledge of their geometry and only requires establishing a local network between the headset and a workstation. Our results show that the tracking and detection of the markers can be achieved with an accuracy of <inline-formula><tex-math notation=\"LaTeX\">Z_$0.09\\pm 0.06\\ mm$_Z</tex-math></inline-formula> on lateral translation, <inline-formula><tex-math notation=\"LaTeX\">Z_$0.42 \\pm 0.32\\ mm$_Z</tex-math></inline-formula> on longitudinal translation and <inline-formula><tex-math notation=\"LaTeX\">Z_$0.80 \\pm 0.39^\\circ$_Z</tex-math></inline-formula> for rotations around the vertical axis. 
Furthermore, to showcase the relevance of the proposed framework, we evaluate the system&#x0027;s performance in the context of surgical procedures. This use case was designed to replicate the scenarios of k-wire insertions in orthopedic procedures. For evaluation, seven surgeons were provided with visual navigation and asked to perform 24 injections using the proposed framework. A second study with ten participants served to investigate the capabilities of the framework in the context of more general scenarios. Results from these studies provided comparable accuracy to those reported in the literature for AR-based navigation procedures.", "abstracts": [ { "abstractType": "Regular", "content": "The use of Augmented Reality (AR) for navigation purposes has shown beneficial in assisting physicians during the performance of surgical procedures. These applications commonly require knowing the pose of surgical tools and patients to provide visual information that surgeons can use during the performance of the task. Existing medical-grade tracking systems use infrared cameras placed inside the Operating Room (OR) to identify retro-reflective markers attached to objects of interest and compute their pose. Some commercially available AR Head-Mounted Displays (HMDs) use similar cameras for self-localization, hand tracking, and estimating the objects&#x0027; depth. This work presents a framework that uses the built-in cameras of AR HMDs to enable accurate tracking of retro-reflective markers without the need to integrate any additional electronics into the HMD. The proposed framework can simultaneously track multiple tools without having previous knowledge of their geometry and only requires establishing a local network between the headset and a workstation. 
Our results show that the tracking and detection of the markers can be achieved with an accuracy of <inline-formula><tex-math notation=\"LaTeX\">$0.09\\pm 0.06\\ mm$</tex-math></inline-formula> on lateral translation, <inline-formula><tex-math notation=\"LaTeX\">$0.42 \\pm 0.32\\ mm$</tex-math></inline-formula> on longitudinal translation and <inline-formula><tex-math notation=\"LaTeX\">$0.80 \\pm 0.39^\\circ$</tex-math></inline-formula> for rotations around the vertical axis. Furthermore, to showcase the relevance of the proposed framework, we evaluate the system&#x0027;s performance in the context of surgical procedures. This use case was designed to replicate the scenarios of k-wire insertions in orthopedic procedures. For evaluation, seven surgeons were provided with visual navigation and asked to perform 24 injections using the proposed framework. A second study with ten participants served to investigate the capabilities of the framework in the context of more general scenarios. Results from these studies provided comparable accuracy to those reported in the literature for AR-based navigation procedures.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "The use of Augmented Reality (AR) for navigation purposes has shown beneficial in assisting physicians during the performance of surgical procedures. These applications commonly require knowing the pose of surgical tools and patients to provide visual information that surgeons can use during the performance of the task. Existing medical-grade tracking systems use infrared cameras placed inside the Operating Room (OR) to identify retro-reflective markers attached to objects of interest and compute their pose. Some commercially available AR Head-Mounted Displays (HMDs) use similar cameras for self-localization, hand tracking, and estimating the objects' depth. 
This work presents a framework that uses the built-in cameras of AR HMDs to enable accurate tracking of retro-reflective markers without the need to integrate any additional electronics into the HMD. The proposed framework can simultaneously track multiple tools without having previous knowledge of their geometry and only requires establishing a local network between the headset and a workstation. Our results show that the tracking and detection of the markers can be achieved with an accuracy of - on lateral translation, - on longitudinal translation and - for rotations around the vertical axis. Furthermore, to showcase the relevance of the proposed framework, we evaluate the system's performance in the context of surgical procedures. This use case was designed to replicate the scenarios of k-wire insertions in orthopedic procedures. For evaluation, seven surgeons were provided with visual navigation and asked to perform 24 injections using the proposed framework. A second study with ten participants served to investigate the capabilities of the framework in the context of more general scenarios. 
Results from these studies provided comparable accuracy to those reported in the literature for AR-based navigation procedures.", "title": "STTAR: Surgical Tool Tracking using Off-the-Shelf Augmented Reality Head-Mounted Displays", "normalizedTitle": "STTAR: Surgical Tool Tracking using Off-the-Shelf Augmented Reality Head-Mounted Displays", "fno": "10021890", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Cameras", "Surgery", "Navigation", "Biomedical Imaging", "Visualization", "Resists", "Task Analysis", "Augmented Reality", "Computer Assisted Medical Procedures", "Navigation", "Tracking" ], "authors": [ { "givenName": "Alejandro", "surname": "Martin-Gomez", "fullName": "Alejandro Martin-Gomez", "affiliation": "Laboratory for Computational Sensing and Robotics, Whiting School of Engineering, Johns Hopkins University, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Haowei", "surname": "Li", "fullName": "Haowei Li", "affiliation": "Department of Biomedical Engineering, Tsinghua University, China", "__typename": "ArticleAuthorType" }, { "givenName": "Tianyu", "surname": "Song", "fullName": "Tianyu Song", "affiliation": "Chair for Computer Aided Medical Procedures and Augmented Reality, Department of Informatics, Technical University of Munich, Germany", "__typename": "ArticleAuthorType" }, { "givenName": "Sheng", "surname": "Yang", "fullName": "Sheng Yang", "affiliation": "Department of Biomedical Engineering, Tsinghua University, China", "__typename": "ArticleAuthorType" }, { "givenName": "Guangzhi", "surname": "Wang", "fullName": "Guangzhi Wang", "affiliation": "Department of Biomedical Engineering, Tsinghua University, China", "__typename": "ArticleAuthorType" }, { "givenName": "Hui", "surname": "Ding", "fullName": "Hui Ding", "affiliation": "Department of Biomedical Engineering, Tsinghua University, China", "__typename": "ArticleAuthorType" }, { "givenName": "Nassir", "surname": "Navab", "fullName": "Nassir Navab", "affiliation": "Laboratory for 
Computational Sensing and Robotics, Whiting School of Engineering, Johns Hopkins University, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Zhe", "surname": "Zhao", "fullName": "Zhe Zhao", "affiliation": "Department of Orthopaedics, Beijing Tsinghua Changgung Hospital. School of Clinical Medicine, Tsinghua University, China", "__typename": "ArticleAuthorType" }, { "givenName": "Mehran", "surname": "Armand", "fullName": "Mehran Armand", "affiliation": "Laboratory for Computational Sensing and Robotics, Whiting School of Engineering, Johns Hopkins University, USA", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": false, "showRecommendedArticles": true, "isOpenAccess": true, "issueNum": "01", "pubDate": "2023-01-01 00:00:00", "pubType": "trans", "pages": "1-16", "year": "5555", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/var4good/2018/5977/0/08576884", "title": "Augmented Visual Instruction for Surgical Practice and Training", "doi": null, "abstractUrl": "/proceedings-article/var4good/2018/08576884/17D45WODasn", "parentPublication": { "id": "proceedings/var4good/2018/5977/0", "title": "2018 IEEE Workshop on Augmented and Virtual Realities for Good (VAR4Good)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tq/5555/01/09933877", "title": "Matrix-Based Secret Sharing for Reversible Data Hiding in Encrypted Images", "doi": null, "abstractUrl": "/journal/tq/5555/01/09933877/1HWLN6aNgDS", "parentPublication": { "id": "trans/tq", "title": "IEEE Transactions on Dependable and Secure Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/5555/01/09965747", "title": "<inline-formula><tex-math notation=\"LaTeX\">Z_$\\mathcal {X}$_Z</tex-math></inline-formula>-Metric: An N-Dimensional Information-Theoretic Framework for Groupwise Registration 
and Deep Combined Computing", "doi": null, "abstractUrl": "/journal/tp/5555/01/09965747/1IHMPhf3uW4", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bibe/2022/8487/0/848700a164", "title": "A Practical AR-based Surgical Navigation System Using Optical See-through Head Mounted Display", "doi": null, "abstractUrl": "/proceedings-article/bibe/2022/848700a164/1J6hB8fdg1W", "parentPublication": { "id": "proceedings/bibe/2022/8487/0", "title": "2022 IEEE 22nd International Conference on Bioinformatics and Bioengineering (BIBE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/10077744", "title": "A Comparative Evaluation of Optical See-through Augmented Reality in Surgical Guidance", "doi": null, "abstractUrl": "/journal/tg/5555/01/10077744/1LH8EZ3NEGI", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tb/2020/02/08730423", "title": "Predicting Carbon Spectrum in Heteronuclear Single Quantum Coherence Spectroscopy for Online Feedback During Surgery", "doi": null, "abstractUrl": "/journal/tb/2020/02/08730423/1aAwyubtkha", "parentPublication": { "id": "trans/tb", "title": "IEEE/ACM Transactions on Computational Biology and Bioinformatics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2022/02/09122448", "title": "ManifoldNet: A Deep Neural Network for Manifold-Valued Data With Applications", "doi": null, "abstractUrl": "/journal/tp/2022/02/09122448/1kRRwHRZ1Li", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { 
"id": "trans/tc/2021/10/09194366", "title": "A Novel Measurement for Network Reliability", "doi": null, "abstractUrl": "/journal/tc/2021/10/09194366/1n0EqDZV3X2", "parentPublication": { "id": "trans/tc", "title": "IEEE Transactions on Computers", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/07/09253561", "title": "AR-Loupe: Magnified Augmented Reality by Combining an Optical See-Through Head-Mounted Display and a Loupe", "doi": null, "abstractUrl": "/journal/tg/2022/07/09253561/1oDXHeBJHNe", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tk/2023/01/09422157", "title": "On Efficient Large Maximal Biplex Discovery", "doi": null, "abstractUrl": "/journal/tk/2023/01/09422157/1tiTooWy0gg", "parentPublication": { "id": "trans/tk", "title": "IEEE Transactions on Knowledge & Data Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "10018537", "articleId": "1K0DFSXIg5W", "__typename": "AdjacentArticleType" }, "next": { "fno": "10021892", "articleId": "1K3XDAtRZ8Q", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "1LUpyYLBfeo", "title": "May", "year": "2023", "issueNum": "05", "idPrefix": "tg", "pubType": "journal", "volume": "29", "label": "May", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1KYooqYQbF6", "doi": "10.1109/TVCG.2023.3247098", "abstract": "This article compares two state-of-the-art text input techniques between non-stationary virtual reality (VR) and video see-through augmented reality (VST AR) use-cases as XR display condition. The developed contact-based mid-air virtual tap and word-gesture (swipe) keyboard provide established support functions for text correction, word suggestions, capitalization, and punctuation. A user evaluation with 64 participants revealed that XR displays and input techniques strongly affect text entry performance, while subjective measures are only influenced by the input techniques. We found significantly higher usability and user experience ratings for tap keyboards compared to swipe keyboards in both VR and VST AR. Task load was also lower for tap keyboards. In terms of performance, both input techniques were significantly faster in VR than in VST AR. Further, the tap keyboard was significantly faster than the swipe keyboard in VR. Participants showed a significant learning effect with only ten sentences typed per condition. Our results are consistent with previous work in VR and optical see-through (OST) AR, but additionally provide novel insights into usability and performance of the selected text input techniques for VST AR. The significant differences in subjective and objective measures emphasize the importance of specific evaluations for each possible combination of input techniques and XR displays to provide reusable, reliable, and high-quality text input solutions. With our work, we form a foundation for future research and XR workspaces. 
Our reference implementation is publicly available to encourage replicability and reuse in future XR workspaces.", "abstracts": [ { "abstractType": "Regular", "content": "This article compares two state-of-the-art text input techniques between non-stationary virtual reality (VR) and video see-through augmented reality (VST AR) use-cases as XR display condition. The developed contact-based mid-air virtual tap and word-gesture (swipe) keyboard provide established support functions for text correction, word suggestions, capitalization, and punctuation. A user evaluation with 64 participants revealed that XR displays and input techniques strongly affect text entry performance, while subjective measures are only influenced by the input techniques. We found significantly higher usability and user experience ratings for tap keyboards compared to swipe keyboards in both VR and VST AR. Task load was also lower for tap keyboards. In terms of performance, both input techniques were significantly faster in VR than in VST AR. Further, the tap keyboard was significantly faster than the swipe keyboard in VR. Participants showed a significant learning effect with only ten sentences typed per condition. Our results are consistent with previous work in VR and optical see-through (OST) AR, but additionally provide novel insights into usability and performance of the selected text input techniques for VST AR. The significant differences in subjective and objective measures emphasize the importance of specific evaluations for each possible combination of input techniques and XR displays to provide reusable, reliable, and high-quality text input solutions. With our work, we form a foundation for future research and XR workspaces. 
Our reference implementation is publicly available to encourage replicability and reuse in future XR workspaces.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "This article compares two state-of-the-art text input techniques between non-stationary virtual reality (VR) and video see-through augmented reality (VST AR) use-cases as XR display condition. The developed contact-based mid-air virtual tap and word-gesture (swipe) keyboard provide established support functions for text correction, word suggestions, capitalization, and punctuation. A user evaluation with 64 participants revealed that XR displays and input techniques strongly affect text entry performance, while subjective measures are only influenced by the input techniques. We found significantly higher usability and user experience ratings for tap keyboards compared to swipe keyboards in both VR and VST AR. Task load was also lower for tap keyboards. In terms of performance, both input techniques were significantly faster in VR than in VST AR. Further, the tap keyboard was significantly faster than the swipe keyboard in VR. Participants showed a significant learning effect with only ten sentences typed per condition. Our results are consistent with previous work in VR and optical see-through (OST) AR, but additionally provide novel insights into usability and performance of the selected text input techniques for VST AR. The significant differences in subjective and objective measures emphasize the importance of specific evaluations for each possible combination of input techniques and XR displays to provide reusable, reliable, and high-quality text input solutions. With our work, we form a foundation for future research and XR workspaces. 
Our reference implementation is publicly available to encourage replicability and reuse in future XR workspaces.", "title": "Text Input for Non-Stationary XR Workspaces: Investigating Tap and Word-Gesture Keyboards in Virtual and Augmented Reality", "normalizedTitle": "Text Input for Non-Stationary XR Workspaces: Investigating Tap and Word-Gesture Keyboards in Virtual and Augmented Reality", "fno": "10049665", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Augmented Reality", "Human Computer Interaction", "Human Factors", "Keyboards", "Text Analysis", "User Interfaces", "Virtual Reality", "Augmented Reality Use Cases", "Developed Contact Based Mid Air", "Future XR Workspaces", "High Quality Text Input Solutions", "Higher Usability", "Nonstationary XR Workspaces", "Selected Text Input Techniques", "State Of The Art Text Input Techniques", "Swipe Keyboard", "Tap Keyboard", "Text Correction", "Text Entry Performance", "User Experience Ratings", "Virtual Tap", "VR", "VST AR", "Word Suggestions", "Word Gesture Keyboards", "XR Displays", "Keyboards", "X Reality", "Usability", "Performance Evaluation", "User Experience", "Task Analysis", "Tracking", "Virtual Reality", "Augmented Reality", "Extended Reality", "Text Input", "Keyboard", "Tap", "Swipe", "Word Gesture", "Digital Twin" ], "authors": [ { "givenName": "Florian", "surname": "Kern", "fullName": "Florian Kern", "affiliation": "Human-Computer Interaction (HCI) Group from the University of Würzburg, Germany", "__typename": "ArticleAuthorType" }, { "givenName": "Florian", "surname": "Niebling", "fullName": "Florian Niebling", "affiliation": "Angewandte Informatik from the Hochschule Fulda, Germany", "__typename": "ArticleAuthorType" }, { "givenName": "Marc Erich", "surname": "Latoschik", "fullName": "Marc Erich Latoschik", "affiliation": "Human-Computer Interaction (HCI) Group from the University of Würzburg, Germany", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": false, 
"showRecommendedArticles": true, "isOpenAccess": true, "issueNum": "05", "pubDate": "2023-05-01 00:00:00", "pubType": "trans", "pages": "2658-2669", "year": "2023", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/aivr/2018/9269/0/926900a258", "title": "Omni-Learning XR Technologies and Visitor-Centered Experience in the Smart Art Museum", "doi": null, "abstractUrl": "/proceedings-article/aivr/2018/926900a258/17D45WODasM", "parentPublication": { "id": "proceedings/aivr/2018/9269/0", "title": "2018 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2022/8402/0/840200a972", "title": "Aroaro - A Tool for Distributed Immersive Mixed Reality Visualization", "doi": null, "abstractUrl": "/proceedings-article/vrw/2022/840200a972/1CJefXNbhYs", "parentPublication": { "id": "proceedings/vrw/2022/8402/0", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2022/03/09790021", "title": "Situated VR: Toward a Congruent Hybrid Reality Without Experiential Artifacts", "doi": null, "abstractUrl": "/magazine/cg/2022/03/09790021/1E0Nh45Ca64", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2022/5365/0/536500a954", "title": "Context-Aware Support of Dexterity Skills in Cross-Reality Environments", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a954/1J7W7ejQ5m8", "parentPublication": { "id": "proceedings/ismar-adjunct/2022/5365/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct 
(ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2022/5365/0/536500a710", "title": "Ex-Cit XR: Expert-elicitation of XR Techniques for Disengaging from IVEs", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a710/1J7WgdWP768", "parentPublication": { "id": "proceedings/ismar-adjunct/2022/5365/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2022/5365/0/536500a167", "title": "Flexible XR Prototyping &#x2013; A Sports Spectating Example", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a167/1J7WuYXm6kg", "parentPublication": { "id": "proceedings/ismar-adjunct/2022/5365/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2022/5325/0/532500a489", "title": "Plausibility and Perception of Personalized Virtual Humans between Virtual and Augmented Reality", "doi": null, "abstractUrl": "/proceedings-article/ismar/2022/532500a489/1JrQTvCTbhK", "parentPublication": { "id": "proceedings/ismar/2022/5325/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aivr/2022/5725/0/572500a104", "title": "XR Management Training Simulator supported by Content-Based scenario recommendation", "doi": null, "abstractUrl": "/proceedings-article/aivr/2022/572500a104/1KmF8tEedk4", "parentPublication": { "id": "proceedings/aivr/2022/5725/0", "title": "2022 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "proceedings/vrw/2020/6532/0/09090571", "title": "XR Framework for Collaborating Remote Heterogeneous Devices", "doi": null, "abstractUrl": "/proceedings-article/vrw/2020/09090571/1jIxrXeGmdi", "parentPublication": { "id": "proceedings/vrw/2020/6532/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2021/1298/0/129800a439", "title": "XR Mobility Platform: Multi-Modal XR System Mounted on Autonomous Vehicle for Passenger&#x2019;s Comfort Improvement", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2021/129800a439/1yeQPu8aFlm", "parentPublication": { "id": "proceedings/ismar-adjunct/2021/1298/0", "title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "10049667", "articleId": "1KYoqrkz9zq", "__typename": "AdjacentArticleType" }, "next": { "fno": "10054238", "articleId": "1L6HOIvywcU", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNBCZnUt", "title": "Sept.", "year": "2020", "issueNum": "09", "idPrefix": "co", "pubType": "magazine", "volume": "53", "label": "Sept.", "downloadables": { "hasCover": true, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1mVFwqrpCy4", "doi": "10.1109/MC.2020.3004605", "abstract": "This installment of Computer’s series highlighting the work published in IEEE Computer Society journals comes from IEEE Transactions on Visualization and Computer Graphics. In “ReconViguRation: Reconfiguring Physical Keyboards in Virtual Reality,” Daniel Schneider and colleagues1 show that, in VR, physical keyboards can become much more than a device for entering characters. Their article, which also won the best paper award at IEEE ISMAR 2019, proposes that physical keyboards can be adapted for use in a variety of VR applications and interactions capable of taking advantage of their unique affordances (for example, dozens of unique keys, haptic feedback for both touching and depressing the keys, and broad familiarity to users). The key idea is that VR allows designers to magically change both the appearance and function of any physical object, including the keyboard.", "abstracts": [ { "abstractType": "Regular", "content": "This installment of Computer’s series highlighting the work published in IEEE Computer Society journals comes from IEEE Transactions on Visualization and Computer Graphics. In “ReconViguRation: Reconfiguring Physical Keyboards in Virtual Reality,” Daniel Schneider and colleagues1 show that, in VR, physical keyboards can become much more than a device for entering characters. 
Their article, which also won the best paper award at IEEE ISMAR 2019, proposes that physical keyboards can be adapted for use in a variety of VR applications and interactions capable of taking advantage of their unique affordances (for example, dozens of unique keys, haptic feedback for both touching and depressing the keys, and broad familiarity to users). The key idea is that VR allows designers to magically change both the appearance and function of any physical object, including the keyboard.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "This installment of Computer’s series highlighting the work published in IEEE Computer Society journals comes from IEEE Transactions on Visualization and Computer Graphics. In “ReconViguRation: Reconfiguring Physical Keyboards in Virtual Reality,” Daniel Schneider and colleagues1 show that, in VR, physical keyboards can become much more than a device for entering characters. Their article, which also won the best paper award at IEEE ISMAR 2019, proposes that physical keyboards can be adapted for use in a variety of VR applications and interactions capable of taking advantage of their unique affordances (for example, dozens of unique keys, haptic feedback for both touching and depressing the keys, and broad familiarity to users). The key idea is that VR allows designers to magically change both the appearance and function of any physical object, including the keyboard.", "title": "Embracing Physical Keyboards for Virtual Reality", "normalizedTitle": "Embracing Physical Keyboards for Virtual Reality", "fno": "09187469", "hasPdf": true, "idPrefix": "co", "keywords": [ "Data Visualisation", "Virtual Reality", "Computer Graphics", "Data Visualization", "IEEE Computer Society", "Virtual Reality", "Physical Keyboards" ], "authors": [ { "givenName": "Doug A.", "surname": "Bowman", "fullName": "Doug A. 
Bowman", "affiliation": "Human–Computer Interaction, Virginia Tech, Blacksburg, Virginia United States", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": false, "showRecommendedArticles": false, "isOpenAccess": true, "issueNum": "09", "pubDate": "2020-09-01 00:00:00", "pubType": "mags", "pages": "9-10", "year": "2020", "issn": "0018-9162", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [], "adjacentArticles": { "previous": { "fno": "09187453", "articleId": "1mVFxPcZrGM", "__typename": "AdjacentArticleType" }, "next": { "fno": "09187466", "articleId": "1mVFvcKL4bu", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNviZlCL", "title": "January", "year": "2011", "issueNum": "01", "idPrefix": "tg", "pubType": "journal", "volume": "17", "label": "January", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUwgQpqI", "doi": "10.1109/TVCG.2009.211", "abstract": "The goal of our work is to develop a programmatically controlled peer to bicycle with a human subject for the purpose of studying how social interactions influence road-crossing behavior. The peer is controlled through a combination of reactive controllers that determine the gross motion of the virtual bicycle, action-based controllers that animate the virtual bicyclist and generate verbal behaviors, and a keyboard interface that allows an experimenter to initiate the virtual bicyclist's actions during the course of an experiment. The virtual bicyclist's repertoire of behaviors includes road following, riding alongside the human rider, stopping at intersections, and crossing intersections through specified gaps in traffic. The virtual cyclist engages the human subject through gaze, gesture, and verbal interactions. We describe the structure of the behavior code and report the results of a study examining how 10- and 12-year-old children interact with a peer cyclist that makes either risky or safe choices in selecting gaps in traffic. Results of our study revealed that children who rode with a risky peer were more likely to cross intermediate-sized gaps than children who rode with a safe peer. In addition, children were significantly less likely to stop at the last six intersections after the experience of riding with the risky than the safe peer during the first six intersections. 
The results of the study and children's reactions to the virtual peer indicate that our virtual peer framework is a promising platform for future behavioral studies of peer influences on children's bicycle riding behavior.", "abstracts": [ { "abstractType": "Regular", "content": "The goal of our work is to develop a programmatically controlled peer to bicycle with a human subject for the purpose of studying how social interactions influence road-crossing behavior. The peer is controlled through a combination of reactive controllers that determine the gross motion of the virtual bicycle, action-based controllers that animate the virtual bicyclist and generate verbal behaviors, and a keyboard interface that allows an experimenter to initiate the virtual bicyclist's actions during the course of an experiment. The virtual bicyclist's repertoire of behaviors includes road following, riding alongside the human rider, stopping at intersections, and crossing intersections through specified gaps in traffic. The virtual cyclist engages the human subject through gaze, gesture, and verbal interactions. We describe the structure of the behavior code and report the results of a study examining how 10- and 12-year-old children interact with a peer cyclist that makes either risky or safe choices in selecting gaps in traffic. Results of our study revealed that children who rode with a risky peer were more likely to cross intermediate-sized gaps than children who rode with a safe peer. In addition, children were significantly less likely to stop at the last six intersections after the experience of riding with the risky than the safe peer during the first six intersections. 
The results of the study and children's reactions to the virtual peer indicate that our virtual peer framework is a promising platform for future behavioral studies of peer influences on children's bicycle riding behavior.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "The goal of our work is to develop a programmatically controlled peer to bicycle with a human subject for the purpose of studying how social interactions influence road-crossing behavior. The peer is controlled through a combination of reactive controllers that determine the gross motion of the virtual bicycle, action-based controllers that animate the virtual bicyclist and generate verbal behaviors, and a keyboard interface that allows an experimenter to initiate the virtual bicyclist's actions during the course of an experiment. The virtual bicyclist's repertoire of behaviors includes road following, riding alongside the human rider, stopping at intersections, and crossing intersections through specified gaps in traffic. The virtual cyclist engages the human subject through gaze, gesture, and verbal interactions. We describe the structure of the behavior code and report the results of a study examining how 10- and 12-year-old children interact with a peer cyclist that makes either risky or safe choices in selecting gaps in traffic. Results of our study revealed that children who rode with a risky peer were more likely to cross intermediate-sized gaps than children who rode with a safe peer. In addition, children were significantly less likely to stop at the last six intersections after the experience of riding with the risky than the safe peer during the first six intersections. 
The results of the study and children's reactions to the virtual peer indicate that our virtual peer framework is a promising platform for future behavioral studies of peer influences on children's bicycle riding behavior.", "title": "An Immersive Virtual Peer for Studying Social Influences on Child Cyclists' Road-Crossing Behavior", "normalizedTitle": "An Immersive Virtual Peer for Studying Social Influences on Child Cyclists' Road-Crossing Behavior", "fno": "ttg2011010014", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Virtual Humans", "Virtual Reality", "Applied Perception", "3 D Human Computer Interaction" ], "authors": [ { "givenName": "Sabarish V.", "surname": "Babu", "fullName": "Sabarish V. Babu", "affiliation": "Clemson University, Clemson", "__typename": "ArticleAuthorType" }, { "givenName": "Timofey Y.", "surname": "Grechkin", "fullName": "Timofey Y. Grechkin", "affiliation": "University of Iowa, Iowa City", "__typename": "ArticleAuthorType" }, { "givenName": "Benjamin", "surname": "Chihak", "fullName": "Benjamin Chihak", "affiliation": "University of Iowa, Iowa City", "__typename": "ArticleAuthorType" }, { "givenName": "Christine", "surname": "Ziemer", "fullName": "Christine Ziemer", "affiliation": "University of Iowa, Iowa City", "__typename": "ArticleAuthorType" }, { "givenName": "Joseph K.", "surname": "Kearney", "fullName": "Joseph K. Kearney", "affiliation": "University of Iowa, Iowa City", "__typename": "ArticleAuthorType" }, { "givenName": "James F.", "surname": "Cremer", "fullName": "James F. Cremer", "affiliation": "University of Iowa, Iowa City", "__typename": "ArticleAuthorType" }, { "givenName": "Jodie M.", "surname": "Plumert", "fullName": "Jodie M. 
Plumert", "affiliation": "University of Iowa, Iowa City", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "01", "pubDate": "2011-01-01 00:00:00", "pubType": "trans", "pages": "14-25", "year": "2011", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/3dui/2010/6846/0/05444705", "title": "Immersive virtual studio for architectural exploration", "doi": null, "abstractUrl": "/proceedings-article/3dui/2010/05444705/12OmNAGNCeR", "parentPublication": { "id": "proceedings/3dui/2010/6846/0", "title": "2010 IEEE Symposium on 3D User Interfaces (3DUI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpads/2010/4307/0/4307a839", "title": "Load-Balancing Properties of 3D Voronoi Diagrams in Peer-to-Peer Virtual Environments", "doi": null, "abstractUrl": "/proceedings-article/icpads/2010/4307a839/12OmNAkWvrt", "parentPublication": { "id": "proceedings/icpads/2010/4307/0", "title": "Parallel and Distributed Systems, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/svr/2012/4725/0/4725a010", "title": "A VR Framework for Desktop Applications", "doi": null, "abstractUrl": "/proceedings-article/svr/2012/4725a010/12OmNBrlPwO", "parentPublication": { "id": "proceedings/svr/2012/4725/0", "title": "2012 14th Symposium on Virtual and Augmented Reality", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2009/3943/0/04811004", "title": "A Virtual Peer for Investigating Social Influences on Children's Bicycling", "doi": null, "abstractUrl": "/proceedings-article/vr/2009/04811004/12OmNs0C9Xm", "parentPublication": { "id": "proceedings/vr/2009/3943/0", "title": "2009 IEEE Virtual Reality Conference", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dui/2006/0225/0/02250103", "title": "Towards a General Model for Selection in Virtual Environments", "doi": null, "abstractUrl": "/proceedings-article/3dui/2006/02250103/12OmNwF0BWC", "parentPublication": { "id": "proceedings/3dui/2006/0225/0", "title": "3D User Interfaces (3DUI'06)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2000/0478/0/04780055", "title": "Interaction with Geoscience Data in an Immersive Environment", "doi": null, "abstractUrl": "/proceedings-article/vr/2000/04780055/12OmNxRF728", "parentPublication": { "id": "proceedings/vr/2000/0478/0", "title": "Virtual Reality Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2011/0039/0/05759472", "title": "3D Arrow: A virtual pointer for immersive sculpting", "doi": null, "abstractUrl": "/proceedings-article/vr/2011/05759472/12OmNy3iFhn", "parentPublication": { "id": "proceedings/vr/2011/0039/0", "title": "2011 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/isip/2010/4261/0/4261a495", "title": "A Preliminary Study on Nonverbal Emotion Interaction of Virtual Characters in Cartoon Games", "doi": null, "abstractUrl": "/proceedings-article/isip/2010/4261a495/12OmNyv7m1C", "parentPublication": { "id": "proceedings/isip/2010/4261/0", "title": "2010 Third International Symposium on Information Processing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icalt/2015/7334/0/7334a423", "title": "Effectiveness of an Immersive Virtual Environment (CAVE) for Teaching Pedestrian Crossing to Children with PDD-NOS", "doi": null, "abstractUrl": "/proceedings-article/icalt/2015/7334a423/12OmNznkK1X", "parentPublication": { "id": "proceedings/icalt/2015/7334/0", "title": "2015 
IEEE 15th International Conference on Advanced Learning Technologies (ICALT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2018/04/08267487", "title": "Locomotive Recalibration and Prism Adaptation of Children and Teens in Immersive Virtual Environments", "doi": null, "abstractUrl": "/journal/tg/2018/04/08267487/13rRUxYrbMo", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "ttg2011010003", "articleId": "13rRUwbs2aY", "__typename": "AdjacentArticleType" }, "next": { "fno": "ttg2011010001", "articleId": "13rRUwIF6l4", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "17ShDTXFgxU", "name": "ttg2011010014s.wmv", "location": "https://www.computer.org/csdl/api/v1/extra/ttg2011010014s.wmv", "extension": "wmv", "size": "19.3 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "12OmNyRxFiZ", "title": "July", "year": "2017", "issueNum": "07", "idPrefix": "tp", "pubType": "journal", "volume": "39", "label": "July", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUyYSWmg", "doi": "10.1109/TPAMI.2016.2596722", "abstract": "Complex geometric variations of 3D models usually pose great challenges in 3D shape matching and retrieval. In this paper, we propose a novel 3D shape feature learning method to extract high-level shape features that are insensitive to geometric deformations of shapes. Our method uses a discriminative deep auto-encoder to learn deformation-invariant shape features. First, a multiscale shape distribution is computed and used as input to the auto-encoder. We then impose the Fisher discrimination criterion on the neurons in the hidden layer to develop a deep discriminative auto-encoder. Finally, the outputs from the hidden layers of the discriminative auto-encoders at different scales are concatenated to form the shape descriptor. The proposed method is evaluated on four benchmark datasets that contain 3D models with large geometric variations: McGill, SHREC’10 ShapeGoogle, SHREC’14 Human and SHREC’14 Large Scale Comprehensive Retrieval Track Benchmark datasets. Experimental results on the benchmark datasets demonstrate the effectiveness of the proposed method for 3D shape retrieval.", "abstracts": [ { "abstractType": "Regular", "content": "Complex geometric variations of 3D models usually pose great challenges in 3D shape matching and retrieval. In this paper, we propose a novel 3D shape feature learning method to extract high-level shape features that are insensitive to geometric deformations of shapes. Our method uses a discriminative deep auto-encoder to learn deformation-invariant shape features. First, a multiscale shape distribution is computed and used as input to the auto-encoder. 
We then impose the Fisher discrimination criterion on the neurons in the hidden layer to develop a deep discriminative auto-encoder. Finally, the outputs from the hidden layers of the discriminative auto-encoders at different scales are concatenated to form the shape descriptor. The proposed method is evaluated on four benchmark datasets that contain 3D models with large geometric variations: McGill, SHREC’10 ShapeGoogle, SHREC’14 Human and SHREC’14 Large Scale Comprehensive Retrieval Track Benchmark datasets. Experimental results on the benchmark datasets demonstrate the effectiveness of the proposed method for 3D shape retrieval.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Complex geometric variations of 3D models usually pose great challenges in 3D shape matching and retrieval. In this paper, we propose a novel 3D shape feature learning method to extract high-level shape features that are insensitive to geometric deformations of shapes. Our method uses a discriminative deep auto-encoder to learn deformation-invariant shape features. First, a multiscale shape distribution is computed and used as input to the auto-encoder. We then impose the Fisher discrimination criterion on the neurons in the hidden layer to develop a deep discriminative auto-encoder. Finally, the outputs from the hidden layers of the discriminative auto-encoders at different scales are concatenated to form the shape descriptor. The proposed method is evaluated on four benchmark datasets that contain 3D models with large geometric variations: McGill, SHREC’10 ShapeGoogle, SHREC’14 Human and SHREC’14 Large Scale Comprehensive Retrieval Track Benchmark datasets. 
Experimental results on the benchmark datasets demonstrate the effectiveness of the proposed method for 3D shape retrieval.", "title": "DeepShape: Deep-Learned Shape Descriptor for 3D Shape Retrieval", "normalizedTitle": "DeepShape: Deep-Learned Shape Descriptor for 3D Shape Retrieval", "fno": "07526450", "hasPdf": true, "idPrefix": "tp", "keywords": [ "Shape", "Three Dimensional Displays", "Heating", "Kernel", "Feature Extraction", "Neurons", "Solid Modeling", "3 D Shape Retrieval", "Heat Kernel Signature", "Heat Diffusion", "Auto Encoder", "Fisher Discrimination Criterion" ], "authors": [ { "givenName": "Jin", "surname": "Xie", "fullName": "Jin Xie", "affiliation": "Department of Electrical and Computer Engineering, New York University Abu Dhabi, Abu Dhabi, UAE", "__typename": "ArticleAuthorType" }, { "givenName": "Guoxian", "surname": "Dai", "fullName": "Guoxian Dai", "affiliation": "Department of Electrical and Computer Engineering, New York University Abu Dhabi, Abu Dhabi, UAE", "__typename": "ArticleAuthorType" }, { "givenName": "Fan", "surname": "Zhu", "fullName": "Fan Zhu", "affiliation": "Department of Electrical and Computer Engineering, New York University Abu Dhabi, Abu Dhabi, UAE", "__typename": "ArticleAuthorType" }, { "givenName": "Edward K.", "surname": "Wong", "fullName": "Edward K. 
Wong", "affiliation": "Department of Computer Science and Engineering, Tandon School of Engineering, New York University, New York, NY", "__typename": "ArticleAuthorType" }, { "givenName": "Yi", "surname": "Fang", "fullName": "Yi Fang", "affiliation": "Department of Electrical and Computer Engineering, New York University Abu Dhabi, Abu Dhabi, UAE", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "07", "pubDate": "2017-07-01 00:00:00", "pubType": "trans", "pages": "1335-1345", "year": "2017", "issn": "0162-8828", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/cvpr/2016/8851/0/8851d309", "title": "Learned Binary Spectral Shape Descriptor for 3D Shape Correspondence", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2016/8851d309/12OmNBp52AI", "parentPublication": { "id": "proceedings/cvpr/2016/8851/0", "title": "2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2015/8332/0/8332a344", "title": "Non-parametric Spectral Model for Shape Retrieval", "doi": null, "abstractUrl": "/proceedings-article/3dv/2015/8332a344/12OmNrJiCVy", "parentPublication": { "id": "proceedings/3dv/2015/8332/0", "title": "2015 International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2015/6964/0/07298732", "title": "Deepshape: Deep learned shape descriptor for 3D shape matching and retrieval", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2015/07298732/12OmNwJPN2w", "parentPublication": { "id": "proceedings/cvpr/2015/6964/0", "title": "2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { 
"id": "proceedings/cvprw/2011/0529/0/05981684", "title": "Temperature distribution descriptor for robust 3D shape retrieval", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2011/05981684/12OmNx3Zjf0", "parentPublication": { "id": "proceedings/cvprw/2011/0529/0", "title": "CVPR 2011 WORKSHOPS", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2016/06/07254190", "title": "Scale Space Graph Representation and Kernel Matching for Non Rigid and Textured 3D Shape Retrieval", "doi": null, "abstractUrl": "/journal/tp/2016/06/07254190/13rRUwcS1Eh", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdh/2018/9497/0/949700a311", "title": "Sketch-Based Shape Retrieval via Multi-view Attention and Generalized Similarity", "doi": null, "abstractUrl": "/proceedings-article/icdh/2018/949700a311/17D45VObpQZ", "parentPublication": { "id": "proceedings/icdh/2018/9497/0", "title": "2018 7th International Conference on Digital Home (ICDH)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmew/2019/9214/0/921400a441", "title": "Non-Rigid 3D Shape Retrieval Based on Multi-view Metric Learning", "doi": null, "abstractUrl": "/proceedings-article/icmew/2019/921400a441/1cJ0zjakeqs", "parentPublication": { "id": "proceedings/icmew/2019/9214/0", "title": "2019 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/csci/2019/5584/0/558400a662", "title": "Nonrigid 3D Shape Retrieval with HAPPS: A Novel Hybrid Augmented Point Pair Signature", "doi": null, "abstractUrl": "/proceedings-article/csci/2019/558400a662/1jdE1j0yU5q", "parentPublication": { "id": "proceedings/csci/2019/5584/0", "title": "2019 International 
Conference on Computational Science and Computational Intelligence (CSCI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2020/7168/0/716800j350", "title": "Unsupervised Deep Shape Descriptor With Point Distribution Learning", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2020/716800j350/1m3nJW3p8Fa", "parentPublication": { "id": "proceedings/cvpr/2020/7168/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icicta/2020/8666/0/866600a223", "title": "Sketch-based 3D Shape Retrieval with Multi-Silhouette View Based on Convolutional Neural Networks", "doi": null, "abstractUrl": "/proceedings-article/icicta/2020/866600a223/1wRIvGNgH9m", "parentPublication": { "id": "proceedings/icicta/2020/8666/0", "title": "2020 13th International Conference on Intelligent Computation Technology and Automation (ICICTA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "07506134", "articleId": "13rRUxC0SPV", "__typename": "AdjacentArticleType" }, "next": { "fno": "07508476", "articleId": "13rRUwfZBWz", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNAHW0Jb", "title": "Jan.", "year": "2019", "issueNum": "01", "idPrefix": "tp", "pubType": "journal", "volume": "41", "label": "Jan.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "17D45WIXbRu", "doi": "10.1109/TPAMI.2017.2782810", "abstract": "Our goal in this paper is to investigate properties of 3D shape that can be determined from a single image. We define 3D shape attributes—generic properties of the shape that capture curvature, contact and occupied space. Our first objective is to infer these 3D shape attributes from a single image. A second objective is to infer a 3D shape embedding—a low dimensional vector representing the 3D shape. We study how the 3D shape attributes and embedding can be obtained from a single image by training a Convolutional Neural Network (CNN) for this task. We start with synthetic images so that the contribution of various cues and nuisance parameters can be controlled. We then turn to real images and introduce a large scale image dataset of sculptures containing 143K images covering 2197 works from 242 artists. For the CNN trained on the sculpture dataset we show the following: (i) which regions of the imaged sculpture are used by the CNN to infer the 3D shape attributes; (ii) that the shape embedding can be used to match previously unseen sculptures largely independent of viewpoint; and (iii) that the 3D attributes generalize to images of other (non-sculpture) object classes.", "abstracts": [ { "abstractType": "Regular", "content": "Our goal in this paper is to investigate properties of 3D shape that can be determined from a single image. We define 3D shape attributes—generic properties of the shape that capture curvature, contact and occupied space. Our first objective is to infer these 3D shape attributes from a single image. 
A second objective is to infer a 3D shape embedding—a low dimensional vector representing the 3D shape. We study how the 3D shape attributes and embedding can be obtained from a single image by training a Convolutional Neural Network (CNN) for this task. We start with synthetic images so that the contribution of various cues and nuisance parameters can be controlled. We then turn to real images and introduce a large scale image dataset of sculptures containing 143K images covering 2197 works from 242 artists. For the CNN trained on the sculpture dataset we show the following: (i) which regions of the imaged sculpture are used by the CNN to infer the 3D shape attributes; (ii) that the shape embedding can be used to match previously unseen sculptures largely independent of viewpoint; and (iii) that the 3D attributes generalize to images of other (non-sculpture) object classes.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Our goal in this paper is to investigate properties of 3D shape that can be determined from a single image. We define 3D shape attributes—generic properties of the shape that capture curvature, contact and occupied space. Our first objective is to infer these 3D shape attributes from a single image. A second objective is to infer a 3D shape embedding—a low dimensional vector representing the 3D shape. We study how the 3D shape attributes and embedding can be obtained from a single image by training a Convolutional Neural Network (CNN) for this task. We start with synthetic images so that the contribution of various cues and nuisance parameters can be controlled. We then turn to real images and introduce a large scale image dataset of sculptures containing 143K images covering 2197 works from 242 artists. 
For the CNN trained on the sculpture dataset we show the following: (i) which regions of the imaged sculpture are used by the CNN to infer the 3D shape attributes; (ii) that the shape embedding can be used to match previously unseen sculptures largely independent of viewpoint; and (iii) that the 3D attributes generalize to images of other (non-sculpture) object classes.", "title": "From Images to 3D Shape Attributes", "normalizedTitle": "From Images to 3D Shape Attributes", "fno": "08194914", "hasPdf": true, "idPrefix": "tp", "keywords": [ "Shape", "Three Dimensional Displays", "Computer Vision", "Measurement", "Solid Modeling", "Training", "3 D Understanding", "Shape Perception", "Attributes", "Convolutional Neural Networks" ], "authors": [ { "givenName": "David F.", "surname": "Fouhey", "fullName": "David F. Fouhey", "affiliation": "EECS Department, University of California Berkeley, Berkeley, CA", "__typename": "ArticleAuthorType" }, { "givenName": "Abhinav", "surname": "Gupta", "fullName": "Abhinav Gupta", "affiliation": "Robotics Institute, Carnegie Mellon University, Pittsburgh, PA", "__typename": "ArticleAuthorType" }, { "givenName": "Andrew", "surname": "Zisserman", "fullName": "Andrew Zisserman", "affiliation": "Department of Engineering Science, Visual Geometry Group, University of Oxford, Oxford, United Kingdom", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "01", "pubDate": "2019-01-01 00:00:00", "pubType": "trans", "pages": "93-106", "year": "2019", "issn": "0162-8828", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/cvpr/2016/8851/0/8851b516", "title": "3D Shape Attributes", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2016/8851b516/12OmNBBzoiv", "parentPublication": { "id": "proceedings/cvpr/2016/8851/0", "title": "2016 IEEE Conference on Computer Vision and 
Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2017/0457/0/0457d587", "title": "Learning Category-Specific 3D Shape Models from Weakly Labeled 2D Images", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2017/0457d587/12OmNwbLVoY", "parentPublication": { "id": "proceedings/cvpr/2017/0457/0", "title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2011/0529/0/05981821", "title": "3D Human pose and shape estimation from multi-view imagery", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2011/05981821/12OmNwtn3o5", "parentPublication": { "id": "proceedings/cvprw/2011/0529/0", "title": "CVPR 2011 WORKSHOPS", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2014/5209/0/5209a040", "title": "Facial 3D Shape Estimation from Images for Visual Speech Animation", "doi": null, "abstractUrl": "/proceedings-article/icpr/2014/5209a040/12OmNyYm2v8", "parentPublication": { "id": "proceedings/icpr/2014/5209/0", "title": "2014 22nd International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2018/6420/0/642000d955", "title": "Lions and Tigers and Bears: Capturing Non-rigid, 3D, Articulated Shape from Images", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2018/642000d955/17D45WHONjC", "parentPublication": { "id": "proceedings/cvpr/2018/6420/0", "title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2022/0915/0/091500a628", "title": "Shadow Art Revisited: A Differentiable Rendering Based Approach", "doi": null, "abstractUrl": 
"/proceedings-article/wacv/2022/091500a628/1B12RNuAqvS", "parentPublication": { "id": "proceedings/wacv/2022/0915/0", "title": "2022 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600c708", "title": "Accurate 3D Body Shape Regression using Metric and Semantic Attributes", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600c708/1H0LftmVn5S", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600d793", "title": "3D Shape Reconstruction from 2D Images with Disentangled Attribute Flow", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600d793/1H0LtLdB8T6", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2019/4803/0/480300c232", "title": "Moulding Humans: Non-Parametric 3D Human Shape Estimation From Single Images", "doi": null, "abstractUrl": "/proceedings-article/iccv/2019/480300c232/1hVl9ihABwY", "parentPublication": { "id": "proceedings/iccv/2019/4803/0", "title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2021/0477/0/047700c876", "title": "Shape from Caustics: Reconstruction of 3D-Printed Glass from Simulated Caustic Images", "doi": null, "abstractUrl": "/proceedings-article/wacv/2021/047700c876/1uqGeODWAHC", "parentPublication": { "id": "proceedings/wacv/2021/0477/0", "title": "2021 IEEE Winter Conference on Applications of Computer Vision (WACV)", 
"__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "08122025", "articleId": "17D45XDIXWd", "__typename": "AdjacentArticleType" }, "next": { "fno": "08219761", "articleId": "17D45WaTkdU", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNyPQ4Dx", "title": "Dec.", "year": "2012", "issueNum": "12", "idPrefix": "tg", "pubType": "journal", "volume": "18", "label": "Dec.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUNvyakM", "doi": "10.1109/TVCG.2012.208", "abstract": "We present a novel technique-Compressed Adjacency Matrices-for visualizing gene regulatory networks. These directed networks have strong structural characteristics: out-degrees with a scale-free distribution, in-degrees bound by a low maximum, and few and small cycles. Standard visualization techniques, such as node-link diagrams and adjacency matrices, are impeded by these network characteristics. The scale-free distribution of out-degrees causes a high number of intersecting edges in node-link diagrams. Adjacency matrices become space-inefficient due to the low in-degrees and the resulting sparse network. Compressed adjacency matrices, however, exploit these structural characteristics. By cutting open and rearranging an adjacency matrix, we achieve a compact and neatly-arranged visualization. Compressed adjacency matrices allow for easy detection of subnetworks with a specific structure, so-called motifs, which provide important knowledge about gene regulatory networks to domain experts. We summarize motifs commonly referred to in the literature, and relate them to network analysis tasks common to the visualization domain. We show that a user can easily find the important motifs in compressed adjacency matrices, and that this is hard in standard adjacency matrix and node-link diagrams. We also demonstrate that interaction techniques for standard adjacency matrices can be used for our compressed variant. 
These techniques include rearrangement clustering, highlighting, and filtering.", "abstracts": [ { "abstractType": "Regular", "content": "We present a novel technique-Compressed Adjacency Matrices-for visualizing gene regulatory networks. These directed networks have strong structural characteristics: out-degrees with a scale-free distribution, in-degrees bound by a low maximum, and few and small cycles. Standard visualization techniques, such as node-link diagrams and adjacency matrices, are impeded by these network characteristics. The scale-free distribution of out-degrees causes a high number of intersecting edges in node-link diagrams. Adjacency matrices become space-inefficient due to the low in-degrees and the resulting sparse network. Compressed adjacency matrices, however, exploit these structural characteristics. By cutting open and rearranging an adjacency matrix, we achieve a compact and neatly-arranged visualization. Compressed adjacency matrices allow for easy detection of subnetworks with a specific structure, so-called motifs, which provide important knowledge about gene regulatory networks to domain experts. We summarize motifs commonly referred to in the literature, and relate them to network analysis tasks common to the visualization domain. We show that a user can easily find the important motifs in compressed adjacency matrices, and that this is hard in standard adjacency matrix and node-link diagrams. We also demonstrate that interaction techniques for standard adjacency matrices can be used for our compressed variant. These techniques include rearrangement clustering, highlighting, and filtering.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We present a novel technique-Compressed Adjacency Matrices-for visualizing gene regulatory networks. These directed networks have strong structural characteristics: out-degrees with a scale-free distribution, in-degrees bound by a low maximum, and few and small cycles. 
Standard visualization techniques, such as node-link diagrams and adjacency matrices, are impeded by these network characteristics. The scale-free distribution of out-degrees causes a high number of intersecting edges in node-link diagrams. Adjacency matrices become space-inefficient due to the low in-degrees and the resulting sparse network. Compressed adjacency matrices, however, exploit these structural characteristics. By cutting open and rearranging an adjacency matrix, we achieve a compact and neatly-arranged visualization. Compressed adjacency matrices allow for easy detection of subnetworks with a specific structure, so-called motifs, which provide important knowledge about gene regulatory networks to domain experts. We summarize motifs commonly referred to in the literature, and relate them to network analysis tasks common to the visualization domain. We show that a user can easily find the important motifs in compressed adjacency matrices, and that this is hard in standard adjacency matrix and node-link diagrams. We also demonstrate that interaction techniques for standard adjacency matrices can be used for our compressed variant. 
These techniques include rearrangement clustering, highlighting, and filtering.", "title": "Compressed Adjacency Matrices: Untangling Gene Regulatory Networks", "normalizedTitle": "Compressed Adjacency Matrices: Untangling Gene Regulatory Networks", "fno": "ttg2012122457", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Network Theory Graphs", "Biology Computing", "Data Visualisation", "Genetics", "Matrix Algebra", "Rearrangement Clustering", "Compressed Adjacency Matrices", "Gene Regulatory Networks", "Directed Networks", "Structural Characteristics", "Scale Free Distribution", "Standard Visualization", "Node Link Diagrams", "Network Characteristics", "Sparse Network", "Neatly Arranged Visualization", "Motifs", "Visualization Domain", "Standard Adjacency Matrix", "Visualization", "Computer Aided Manufacturing", "Standards", "Sparse Matrices", "Layout", "Bismuth", "Proteins", "Adjacency Matrix", "Network", "Gene Regulation", "Scale Free" ], "authors": [ { "givenName": "K.", "surname": "Dinkla", "fullName": "K. Dinkla", "affiliation": null, "__typename": "ArticleAuthorType" }, { "givenName": "M. A.", "surname": "Westenberg", "fullName": "M. A. Westenberg", "affiliation": null, "__typename": "ArticleAuthorType" }, { "givenName": "J. J.", "surname": "van Wijk", "fullName": "J. J. 
van Wijk", "affiliation": null, "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "12", "pubDate": "2012-12-01 00:00:00", "pubType": "trans", "pages": "2457-2466", "year": "2012", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/alpit/2008/3273/0/3273a430", "title": "A Routing Protocol Based on Adjacency Matrix in Ad Hoc Mobile Networks", "doi": null, "abstractUrl": "/proceedings-article/alpit/2008/3273a430/12OmNC2fGuB", "parentPublication": { "id": "proceedings/alpit/2008/3273/0", "title": "Advanced Language Processing and Web Information Technology, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/reconfig/2010/4314/0/4314a256", "title": "Hardware Computation of the PageRank Eigenvector", "doi": null, "abstractUrl": "/proceedings-article/reconfig/2010/4314a256/12OmNC8MsM4", "parentPublication": { "id": "proceedings/reconfig/2010/4314/0", "title": "Reconfigurable Computing and FPGAs, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/isda/2009/3872/0/3872b388", "title": "Efficient Computation Methods for the Kleene Star in Max-Plus Linear Systems", "doi": null, "abstractUrl": "/proceedings-article/isda/2009/3872b388/12OmNvkYxcj", "parentPublication": { "id": "proceedings/isda/2009/3872/0", "title": "Intelligent Systems Design and Applications, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/mvhi/2010/4009/0/4009a422", "title": "Algorithms for Random Adjacency Matrixes Generation Used for Scheduling Algorithms Test", "doi": null, "abstractUrl": "/proceedings-article/mvhi/2010/4009a422/12OmNwE9OBG", "parentPublication": { "id": 
"proceedings/mvhi/2010/4009/0", "title": "Machine Vision and Human-machine Interface, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icicta/2012/4637/0/4637a352", "title": "The Minimax Method of Design of Measurement Matrices for Compressed Sensing Based on Incoherence Criterion", "doi": null, "abstractUrl": "/proceedings-article/icicta/2012/4637a352/12OmNwF0BQ9", "parentPublication": { "id": "proceedings/icicta/2012/4637/0", "title": "Intelligent Computation Technology and Automation, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/dcc/2017/6721/0/07921910", "title": "Compressed Sensing Performance of Binary Matrices with Binary Column Correlations", "doi": null, "abstractUrl": "/proceedings-article/dcc/2017/07921910/12OmNwF0BSn", "parentPublication": { "id": "proceedings/dcc/2017/6721/0", "title": "2017 Data Compression Conference (DCC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/acsac/2005/2461/0/24610160", "title": "Understanding Complex Network Attack Graphs through Clustered Adjacency Matrices", "doi": null, "abstractUrl": "/proceedings-article/acsac/2005/24610160/12OmNyKrH6V", "parentPublication": { "id": "proceedings/acsac/2005/2461/0", "title": "Computer Security Applications Conference, Annual", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/10/08438968", "title": "Node-Link or Adjacency Matrices: Old Question, New Insights", "doi": null, "abstractUrl": "/journal/tg/2019/10/08438968/13rRUwjoNx8", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cis/2021/9489/0/948900a290", "title": "Construction of Deterministic Compressed 
Sensing Matrices in Singular Linear Space", "doi": null, "abstractUrl": "/proceedings-article/cis/2021/948900a290/1AUpqG6PdhC", "parentPublication": { "id": "proceedings/cis/2021/9489/0", "title": "2021 17th International Conference on Computational Intelligence and Security (CIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/09721695", "title": "A Deep Generative Model for Reordering Adjacency Matrices", "doi": null, "abstractUrl": "/journal/tg/5555/01/09721695/1Bhzo1K76IU", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "ttg2012122449", "articleId": "13rRUxASuhx", "__typename": "AdjacentArticleType" }, "next": { "fno": "ttg2012122467", "articleId": "13rRUxC0SOW", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "17ShDTXFgP8", "name": "ttg2012122457s1.zip", "location": "https://www.computer.org/csdl/api/v1/extra/ttg2012122457s1.zip", "extension": "zip", "size": "28.3 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "12OmNzwpU9K", "title": "Nov.", "year": "2014", "issueNum": "11", "idPrefix": "tg", "pubType": "journal", "volume": "20", "label": "Nov.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUxcbnCs", "doi": "10.1109/TVCG.2014.2322594", "abstract": "We present a visual representation for dynamic, weighted graphs based on the concept of adjacency lists. Two orthogonal axes are used: one for all nodes of the displayed graph, the other for the corresponding links. Colors and labels are employed to identify the nodes. The usage of color allows us to scale the visualization to single pixel level for large graphs. In contrast to other techniques, we employ an asymmetric mapping that results in an aligned and compact representation of links. Our approach is independent of the specific properties of the graph to be visualized, but certain graphs and tasks benefit from the asymmetry. As we show in our results, the strength of our technique is the visualization of dynamic graphs. In particular, sparse graphs benefit from the compact representation. Furthermore, our approach uses visual encoding by size to represent weights and therefore allows easy quantification and comparison. We evaluate our approach in a quantitative user study that confirms the suitability for dynamic and weighted graphs. Finally, we demonstrate our approach for two examples of dynamic graphs.", "abstracts": [ { "abstractType": "Regular", "content": "We present a visual representation for dynamic, weighted graphs based on the concept of adjacency lists. Two orthogonal axes are used: one for all nodes of the displayed graph, the other for the corresponding links. Colors and labels are employed to identify the nodes. The usage of color allows us to scale the visualization to single pixel level for large graphs. 
In contrast to other techniques, we employ an asymmetric mapping that results in an aligned and compact representation of links. Our approach is independent of the specific properties of the graph to be visualized, but certain graphs and tasks benefit from the asymmetry. As we show in our results, the strength of our technique is the visualization of dynamic graphs. In particular, sparse graphs benefit from the compact representation. Furthermore, our approach uses visual encoding by size to represent weights and therefore allows easy quantification and comparison. We evaluate our approach in a quantitative user study that confirms the suitability for dynamic and weighted graphs. Finally, we demonstrate our approach for two examples of dynamic graphs.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We present a visual representation for dynamic, weighted graphs based on the concept of adjacency lists. Two orthogonal axes are used: one for all nodes of the displayed graph, the other for the corresponding links. Colors and labels are employed to identify the nodes. The usage of color allows us to scale the visualization to single pixel level for large graphs. In contrast to other techniques, we employ an asymmetric mapping that results in an aligned and compact representation of links. Our approach is independent of the specific properties of the graph to be visualized, but certain graphs and tasks benefit from the asymmetry. As we show in our results, the strength of our technique is the visualization of dynamic graphs. In particular, sparse graphs benefit from the compact representation. Furthermore, our approach uses visual encoding by size to represent weights and therefore allows easy quantification and comparison. We evaluate our approach in a quantitative user study that confirms the suitability for dynamic and weighted graphs. 
Finally, we demonstrate our approach for two examples of dynamic graphs.", "title": "Visual Adjacency Lists for Dynamic Graphs", "normalizedTitle": "Visual Adjacency Lists for Dynamic Graphs", "fno": "06812198", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Visualization", "Color", "Encoding", "Layout", "Data Visualization", "Scalability", "Image Color Analysis", "Adjacency Lists", "Graph Visualization", "Weighted Graphs", "Dynamic Graphs" ], "authors": [ { "givenName": "Marcel", "surname": "Hlawatsch", "fullName": "Marcel Hlawatsch", "affiliation": "Visualization Research Center (VISUS), University of Stuttgart, Germany", "__typename": "ArticleAuthorType" }, { "givenName": "Michael", "surname": "Burch", "fullName": "Michael Burch", "affiliation": "Visualization Research Center (VISUS), University of Stuttgart, Germany", "__typename": "ArticleAuthorType" }, { "givenName": "Daniel", "surname": "Weiskopf", "fullName": "Daniel Weiskopf", "affiliation": "Visualization Research Center (VISUS), University of Stuttgart, Germany", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "11", "pubDate": "2014-11-01 00:00:00", "pubType": "trans", "pages": "1590-1603", "year": "2014", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/icdmw/2015/8493/0/8493a675", "title": "OLAP Visual Analytics on Large Software Call Graphs with Hierarchical ChordMap", "doi": null, "abstractUrl": "/proceedings-article/icdmw/2015/8493a675/12OmNAWpyrN", "parentPublication": { "id": "proceedings/icdmw/2015/8493/0", "title": "2015 IEEE International Conference on Data Mining Workshop (ICDMW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/pacificvis/2013/4797/0/06596126", "title": "Smooth bundling of large streaming and sequence graphs", "doi": null, 
"abstractUrl": "/proceedings-article/pacificvis/2013/06596126/12OmNscfI0r", "parentPublication": { "id": "proceedings/pacificvis/2013/4797/0", "title": "2013 IEEE Pacific Visualization Symposium (PacificVis)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vlhcc/2016/0252/0/07739664", "title": "Visual analysis of compound graphs", "doi": null, "abstractUrl": "/proceedings-article/vlhcc/2016/07739664/12OmNx7G5RX", "parentPublication": { "id": "proceedings/vlhcc/2016/0252/0", "title": "2016 IEEE Symposium on Visual Languages and Human-Centric Computing (VL/HCC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vlhcc/2012/0852/0/06344514", "title": "Rapid Serial Visual Presentation in dynamic graph visualization", "doi": null, "abstractUrl": "/proceedings-article/vlhcc/2012/06344514/12OmNxYL5fe", "parentPublication": { "id": "proceedings/vlhcc/2012/0852/0", "title": "2012 IEEE Symposium on Visual Languages and Human-Centric Computing (VL/HCC 2012)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/pacificvis/2009/4404/0/04906842", "title": "A visual canonical adjacency matrix for graphs", "doi": null, "abstractUrl": "/proceedings-article/pacificvis/2009/04906842/12OmNy87QAS", "parentPublication": { "id": "proceedings/pacificvis/2009/4404/0", "title": "2009 IEEE Pacific Visualization Symposium", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-infovis/2004/8779/0/87790191", "title": "Dynamic Drawing of Clustered Graphs", "doi": null, "abstractUrl": "/proceedings-article/ieee-infovis/2004/87790191/12OmNyugyVo", "parentPublication": { "id": "proceedings/ieee-infovis/2004/8779/0", "title": "Information Visualization, IEEE Symposium on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2017/0831/0/0831a230", 
"title": "Dynamic Graph Visualization on Different Temporal Granularities", "doi": null, "abstractUrl": "/proceedings-article/iv/2017/0831a230/12OmNzxPTGk", "parentPublication": { "id": "proceedings/iv/2017/0831/0", "title": "2017 21st International Conference Information Visualisation (IV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/02/09222072", "title": "Multiscale Snapshots: Visual Analysis of Temporal Summaries in Dynamic Graphs", "doi": null, "abstractUrl": "/journal/tg/2021/02/09222072/1nTqwNTE1AQ", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vds/2020/9284/0/928400a032", "title": "dg2pix: Pixel-Based Visual Analysis of Dynamic Graphs", "doi": null, "abstractUrl": "/proceedings-article/vds/2020/928400a032/1rk0ciCrXQQ", "parentPublication": { "id": "proceedings/vds/2020/9284/0", "title": "2020 IEEE Visualization in Data Science (VDS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tk/5555/01/09599560", "title": "Anomaly Detection in Dynamic Graphs via Transformer", "doi": null, "abstractUrl": "/journal/tk/5555/01/09599560/1yeC6nu6NsA", "parentPublication": { "id": "trans/tk", "title": "IEEE Transactions on Knowledge & Data Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "06811174", "articleId": "13rRUxNmPDU", "__typename": "AdjacentArticleType" }, "next": null, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNCbCrUN", "title": "Dec.", "year": "2013", "issueNum": "12", "idPrefix": "tg", "pubType": "journal", "volume": "19", "label": "Dec.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUytWF9j", "doi": "10.1109/TVCG.2013.155", "abstract": "Having effective visualizations of filesystem provenance data is valuable for understanding its complex hierarchical structure. The most common visual representation of provenance data is the node-link diagram. While effective for understanding local activity, the node-link diagram fails to offer a high-level summary of activity and inter-relationships within the data. We present a new tool, InProv, which displays filesystem provenance with an interactive radial-based tree layout. The tool also utilizes a new time-based hierarchical node grouping method for filesystem provenance data we developed to match the user's mental model and make data exploration more intuitive. We compared InProv to a conventional node-link based tool, Orbiter, in a quantitative evaluation with real users of filesystem provenance data including provenance data experts, IT professionals, and computational scientists. We also compared in the evaluation our new node grouping method to a conventional method. The results demonstrate that InProv results in higher accuracy in identifying system activity than Orbiter with large complex data sets. The results also show that our new time-based hierarchical node grouping method improves performance in both tools, and participants found both tools significantly easier to use with the new time-based node grouping method. Subjective measures show that participants found InProv to require less mental activity, less physical activity, less work, and is less stressful to use. 
Our study also reveals one of the first cases of gender differences in visualization; both genders had comparable performance with InProv, but women had a significantly lower average accuracy (56%) compared to men (70%) with Orbiter.", "abstracts": [ { "abstractType": "Regular", "content": "Having effective visualizations of filesystem provenance data is valuable for understanding its complex hierarchical structure. The most common visual representation of provenance data is the node-link diagram. While effective for understanding local activity, the node-link diagram fails to offer a high-level summary of activity and inter-relationships within the data. We present a new tool, InProv, which displays filesystem provenance with an interactive radial-based tree layout. The tool also utilizes a new time-based hierarchical node grouping method for filesystem provenance data we developed to match the user's mental model and make data exploration more intuitive. We compared InProv to a conventional node-link based tool, Orbiter, in a quantitative evaluation with real users of filesystem provenance data including provenance data experts, IT professionals, and computational scientists. We also compared in the evaluation our new node grouping method to a conventional method. The results demonstrate that InProv results in higher accuracy in identifying system activity than Orbiter with large complex data sets. The results also show that our new time-based hierarchical node grouping method improves performance in both tools, and participants found both tools significantly easier to use with the new time-based node grouping method. Subjective measures show that participants found InProv to require less mental activity, less physical activity, less work, and is less stressful to use. 
Our study also reveals one of the first cases of gender differences in visualization; both genders had comparable performance with InProv, but women had a significantly lower average accuracy (56%) compared to men (70%) with Orbiter.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Having effective visualizations of filesystem provenance data is valuable for understanding its complex hierarchical structure. The most common visual representation of provenance data is the node-link diagram. While effective for understanding local activity, the node-link diagram fails to offer a high-level summary of activity and inter-relationships within the data. We present a new tool, InProv, which displays filesystem provenance with an interactive radial-based tree layout. The tool also utilizes a new time-based hierarchical node grouping method for filesystem provenance data we developed to match the user's mental model and make data exploration more intuitive. We compared InProv to a conventional node-link based tool, Orbiter, in a quantitative evaluation with real users of filesystem provenance data including provenance data experts, IT professionals, and computational scientists. We also compared in the evaluation our new node grouping method to a conventional method. The results demonstrate that InProv results in higher accuracy in identifying system activity than Orbiter with large complex data sets. The results also show that our new time-based hierarchical node grouping method improves performance in both tools, and participants found both tools significantly easier to use with the new time-based node grouping method. Subjective measures show that participants found InProv to require less mental activity, less physical activity, less work, and is less stressful to use. 
Our study also reveals one of the first cases of gender differences in visualization; both genders had comparable performance with InProv, but women had a significantly lower average accuracy (56%) compared to men (70%) with Orbiter.", "title": "Evaluation of Filesystem Provenance Visualization Tools", "normalizedTitle": "Evaluation of Filesystem Provenance Visualization Tools", "fno": "ttg2013122476", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Data Visualization", "Context Awareness", "Layout", "Encoding", "Quantitative Evaluation", "Data Visualization", "Context Awareness", "Layout", "Encoding", "Gender Differences", "Provenance Data", "Graph Network Data", "Hierarchy Data" ], "authors": [ { "givenName": "Michelle A.", "surname": "Borkin", "fullName": "Michelle A. Borkin", "affiliation": "Sch. of Eng. & Appl. Sci., Harvard Univ., Cambridge, MA, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Chelsea S.", "surname": "Yeh", "fullName": "Chelsea S. Yeh", "affiliation": "Sch. of Eng. & Appl. Sci., Harvard Univ., Cambridge, MA, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Madelaine", "surname": "Boyd", "fullName": "Madelaine Boyd", "affiliation": "Sch. of Eng. & Appl. Sci., Harvard Univ., Cambridge, MA, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Peter", "surname": "Macko", "fullName": "Peter Macko", "affiliation": "Sch. of Eng. & Appl. Sci., Harvard Univ., Cambridge, MA, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Krzysztof Z.", "surname": "Gajos", "fullName": "Krzysztof Z. Gajos", "affiliation": "Sch. of Eng. & Appl. Sci., Harvard Univ., Cambridge, MA, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Margo", "surname": "Seltzer", "fullName": "Margo Seltzer", "affiliation": "Sch. of Eng. & Appl. Sci., Harvard Univ., Cambridge, MA, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Hanspeter", "surname": "Pfister", "fullName": "Hanspeter Pfister", "affiliation": "Sch. of Eng. & Appl. 
Sci., Harvard Univ., Cambridge, MA, USA", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "12", "pubDate": "2013-12-01 00:00:00", "pubType": "trans", "pages": "2476-2485", "year": "2013", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/trustcom-bigdatase-i-spa/2016/3205/0/07847177", "title": "UVisP: User-centric Visualization of Data Provenance with Gestalt Principles", "doi": null, "abstractUrl": "/proceedings-article/trustcom-bigdatase-i-spa/2016/07847177/12OmNBJNL12", "parentPublication": { "id": "proceedings/trustcom-bigdatase-i-spa/2016/3205/0", "title": "2016 IEEE Trustcom/BigDataSE/I​SPA", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ccgrid/2015/8006/0/8006a797", "title": "Big Data Provenance Analysis and Visualization", "doi": null, "abstractUrl": "/proceedings-article/ccgrid/2015/8006a797/12OmNCm7BK6", "parentPublication": { "id": "proceedings/ccgrid/2015/8006/0", "title": "2015 15th IEEE/ACM International Symposium on Cluster, Cloud and Grid Computing (CCGrid)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sibgrapi-t/2010/4233/0/4233a001", "title": "Provenance-Enabled Data Exploration and Visualization with VisTrails", "doi": null, "abstractUrl": "/proceedings-article/sibgrapi-t/2010/4233a001/12OmNzlD9mU", "parentPublication": { "id": "proceedings/sibgrapi-t/2010/4233/0", "title": "2010 23RD SIBGRAPI - Conference on Graphics, Patterns and Images Tutorials", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2015/03/mcg2015030056", "title": "Analytic Provenance for Sensemaking: A Research Agenda", "doi": null, "abstractUrl": "/magazine/cg/2015/03/mcg2015030056/13rRUB7a13F", "parentPublication": { "id": 
"mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2016/01/07192714", "title": "Characterizing Provenance in Visualization and Data Analysis: An Organizational Framework of Provenance Types and Purposes", "doi": null, "abstractUrl": "/journal/tg/2016/01/07192714/13rRUxOdD2F", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/td/2016/02/07038199", "title": "Dictionary Based Secure Provenance Compression for Wireless Sensor Networks", "doi": null, "abstractUrl": "/journal/td/2016/02/07038199/13rRUyY294p", "parentPublication": { "id": "trans/td", "title": "IEEE Transactions on Parallel & Distributed Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2013/12/ttg2013122139", "title": "An Extensible Framework for Provenance in Human Terrain Visual Analytics", "doi": null, "abstractUrl": "/journal/tg/2013/12/ttg2013122139/13rRUyfbwqH", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/skg/2018/0441/0/08703970", "title": "A Data Provenance Visualization Approach", "doi": null, "abstractUrl": "/proceedings-article/skg/2018/08703970/19JEc4BmzCw", "parentPublication": { "id": "proceedings/skg/2018/0441/0", "title": "2018 14th International Conference on Semantics, Knowledge and Grids (SKG)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/msn/2018/0548/0/054800a013", "title": "A Blockchain-Based Scheme for Secure Data Provenance in Wireless Sensor Networks", "doi": null, "abstractUrl": "/proceedings-article/msn/2018/054800a013/19m3oindtGo", 
"parentPublication": { "id": "proceedings/msn/2018/0548/0", "title": "2018 14th International Conference on Mobile Ad-Hoc and Sensor Networks (MSN)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/09768153", "title": "Understanding How In-Visualization Provenance Can Support Trade-off Analysis", "doi": null, "abstractUrl": "/journal/tg/5555/01/09768153/1D6qPjvIP16", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "ttg2013122466", "articleId": "13rRUzphDxX", "__typename": "AdjacentArticleType" }, "next": { "fno": "ttg2013122486", "articleId": "13rRUyft7D3", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "17ShDTYet26", "name": "ttg2013122476s.mp4", "location": "https://www.computer.org/csdl/api/v1/extra/ttg2013122476s.mp4", "extension": "mp4", "size": "8.82 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "1HMOit1lSk8", "title": "Dec.", "year": "2022", "issueNum": "12", "idPrefix": "tg", "pubType": "journal", "volume": "28", "label": "Dec.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1uIReQZxty8", "doi": "10.1109/TVCG.2021.3092680", "abstract": "Past studies have shown that when a visualization uses pictographs to encode data, they have a positive effect on memory, engagement, and assessment of risk. However, little is known about how pictographs affect one&#x2019;s ability to understand a visualization, beyond memory for values and trends. We conducted two crowdsourced experiments to compare the effectiveness of using pictographs when showing part-to-whole relationships. In Experiment 1, we compared pictograph arrays to more traditional bar and pie charts. We tested participants&#x2019; ability to generate high-level insights following Bloom&#x2019;s taxonomy of educational objectives via 6 free-response questions. We found that accuracy for extracting information and generating insights did not differ overall between the two versions. To explore the motivating differences between the designs, we conducted a second experiment where participants compared charts containing pictograph arrays to more traditional charts on 5 metrics and explained their reasoning. We found that some participants preferred the way that pictographs allowed them to envision the topic more easily, while others preferred traditional bar and pie charts because they seem less cluttered and faster to read. These results suggest that, at least in simple visualizations depicting part-to-whole relationships, the choice of using pictographs has little influence on sensemaking and insight extraction. 
When deciding whether to use pictograph arrays, designers should consider visual appeal, perceived comprehension time, ease of envisioning the topic, and clutteredness.", "abstracts": [ { "abstractType": "Regular", "content": "Past studies have shown that when a visualization uses pictographs to encode data, they have a positive effect on memory, engagement, and assessment of risk. However, little is known about how pictographs affect one&#x2019;s ability to understand a visualization, beyond memory for values and trends. We conducted two crowdsourced experiments to compare the effectiveness of using pictographs when showing part-to-whole relationships. In Experiment 1, we compared pictograph arrays to more traditional bar and pie charts. We tested participants&#x2019; ability to generate high-level insights following Bloom&#x2019;s taxonomy of educational objectives via 6 free-response questions. We found that accuracy for extracting information and generating insights did not differ overall between the two versions. To explore the motivating differences between the designs, we conducted a second experiment where participants compared charts containing pictograph arrays to more traditional charts on 5 metrics and explained their reasoning. We found that some participants preferred the way that pictographs allowed them to envision the topic more easily, while others preferred traditional bar and pie charts because they seem less cluttered and faster to read. These results suggest that, at least in simple visualizations depicting part-to-whole relationships, the choice of using pictographs has little influence on sensemaking and insight extraction. 
When deciding whether to use pictograph arrays, designers should consider visual appeal, perceived comprehension time, ease of envisioning the topic, and clutteredness.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Past studies have shown that when a visualization uses pictographs to encode data, they have a positive effect on memory, engagement, and assessment of risk. However, little is known about how pictographs affect one’s ability to understand a visualization, beyond memory for values and trends. We conducted two crowdsourced experiments to compare the effectiveness of using pictographs when showing part-to-whole relationships. In Experiment 1, we compared pictograph arrays to more traditional bar and pie charts. We tested participants’ ability to generate high-level insights following Bloom’s taxonomy of educational objectives via 6 free-response questions. We found that accuracy for extracting information and generating insights did not differ overall between the two versions. To explore the motivating differences between the designs, we conducted a second experiment where participants compared charts containing pictograph arrays to more traditional charts on 5 metrics and explained their reasoning. We found that some participants preferred the way that pictographs allowed them to envision the topic more easily, while others preferred traditional bar and pie charts because they seem less cluttered and faster to read. These results suggest that, at least in simple visualizations depicting part-to-whole relationships, the choice of using pictographs has little influence on sensemaking and insight extraction. 
When deciding whether to use pictograph arrays, designers should consider visual appeal, perceived comprehension time, ease of envisioning the topic, and clutteredness.", "title": "Designing With Pictographs: Envision Topics Without Sacrificing Understanding", "normalizedTitle": "Designing With Pictographs: Envision Topics Without Sacrificing Understanding", "fno": "09465643", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Computer Aided Instruction", "Data Visualisation", "Educational Courses", "Human Factors", "User Interfaces", "Part To Whole Relationships", "Participants Compared Charts", "Pictograph Arrays", "Pictographs", "Pie Charts", "Traditional Bar", "Data Visualization", "Visualization", "Taxonomy", "Computer Graphics", "Encoding", "Infographics", "Pictographs", "Design", "Graph Comprehension", "Understanding", "Casual Sensemaking" ], "authors": [ { "givenName": "Alyxander", "surname": "Burns", "fullName": "Alyxander Burns", "affiliation": "College of Information and Computer Sciences, University of Massachusetts Amherst, Amherst, MA, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Cindy", "surname": "Xiong", "fullName": "Cindy Xiong", "affiliation": "Department of Psychology, Northwestern University, Evanston, IL, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Steven", "surname": "Franconeri", "fullName": "Steven Franconeri", "affiliation": "Department of Psychology, Northwestern University, Evanston, IL, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Alberto", "surname": "Cairo", "fullName": "Alberto Cairo", "affiliation": "School of Communication, University of Miami, Coral Gables, FL, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Narges", "surname": "Mahyar", "fullName": "Narges Mahyar", "affiliation": "College of Information and Computer Sciences, University of Massachusetts Amherst, Amherst, MA, USA", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, 
"showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "12", "pubDate": "2022-12-01 00:00:00", "pubType": "trans", "pages": "4515-4530", "year": "2022", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/ieee-infovis/2001/1342/0/13420113", "title": "Pixel Bar Charts: A New Technique for Visualizing Large Multi-Attribute Data Sets without Aggregation", "doi": null, "abstractUrl": "/proceedings-article/ieee-infovis/2001/13420113/12OmNwE9OuO", "parentPublication": { "id": "proceedings/ieee-infovis/2001/1342/0", "title": "Information Visualization, IEEE Symposium on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/fie/2014/3922/0/07043983", "title": "Your data deserve better than pies and bars: An R graphics workshop for the timid", "doi": null, "abstractUrl": "/proceedings-article/fie/2014/07043983/12OmNz3bdDC", "parentPublication": { "id": "proceedings/fie/2014/3922/0", "title": "2014 IEEE Frontiers in Education Conference (FIE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2015/7568/0/7568a148", "title": "Plot Balalaika: Simple Chart Designs for Long-Tail Distributed Data", "doi": null, "abstractUrl": "/proceedings-article/iv/2015/7568a148/12OmNzBOi4c", "parentPublication": { "id": "proceedings/iv/2015/7568/0", "title": "2015 19th International Conference on Information Visualisation (iV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2007/06/v1240", "title": "Animated Transitions in Statistical Data Graphics", "doi": null, "abstractUrl": "/journal/tg/2007/06/v1240/13rRUxNW1TO", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2007/04/v0822", 
"title": "Value-Cell Bar Charts for Visualizing Large Transaction Data Sets", "doi": null, "abstractUrl": "/journal/tg/2007/04/v0822/13rRUygBw71", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2019/2838/0/283800a151", "title": "The Cost of Pie Charts", "doi": null, "abstractUrl": "/proceedings-article/iv/2019/283800a151/1cMFcqwGM5q", "parentPublication": { "id": "proceedings/iv/2019/2838/0", "title": "2019 23rd International Conference Information Visualisation (IV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vis/2019/4941/0/08933547", "title": "Evidence for Area as the Primary Visual Cue in Pie Charts", "doi": null, "abstractUrl": "/proceedings-article/vis/2019/08933547/1fTgFhkepQk", "parentPublication": { "id": "proceedings/vis/2019/4941/0", "title": "2019 IEEE Visualization Conference (VIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vis/2019/4941/0/08933718", "title": "Visual Cues in Estimation of Part-To-Whole Comparisons", "doi": null, "abstractUrl": "/proceedings-article/vis/2019/08933718/1fTgJRMhWoM", "parentPublication": { "id": "proceedings/vis/2019/4941/0", "title": "2019 IEEE Visualization Conference (VIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/01/09552227", "title": "Rotate or Wrap? 
Interactive Visualisations of Cyclical Data on Cylindrical or Toroidal Topologies", "doi": null, "abstractUrl": "/journal/tg/2022/01/09552227/1xibX4wTR8Q", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/01/09552881", "title": "Modeling Just Noticeable Differences in Charts", "doi": null, "abstractUrl": "/journal/tg/2022/01/09552881/1xibXzMLm9i", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "09465688", "articleId": "1uIReC9hVQY", "__typename": "AdjacentArticleType" }, "next": { "fno": "09468958", "articleId": "1uR9IWtyEi4", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "1HMOz8zGswM", "name": "ttg202212-09465643s1-tvcg-3092680-mm.zip", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202212-09465643s1-tvcg-3092680-mm.zip", "extension": "zip", "size": "2.99 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "12OmNARRYtd", "title": "December", "year": "2011", "issueNum": "12", "idPrefix": "tp", "pubType": "journal", "volume": "33", "label": "December", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUwvBy9X", "doi": "10.1109/TPAMI.2011.86", "abstract": "Simultaneous estimation of radial distortion, epipolar geometry, and relative camera pose can be formulated as a minimal problem and solved from a minimal number of image points. Finding the solution to this problem leads to solving a system of algebraic equations. In this paper, we provide two different solutions to the problem of estimating radial distortion and epipolar geometry from eight point correspondences in two images. Unlike previous algorithms which were able to solve the problem from nine correspondences only, we enforce the determinant of the fundamental matrix be zero. This leads to a system of eight quadratic and one cubic equation in nine variables. We first simplify this system by eliminating six of these variables and then solve the system by two alternative techniques. The first one is based on the Gröbner basis method and the second one on the polynomial eigenvalue computation. We demonstrate that our solutions are efficient, robust, and practical by experiments on synthetic and real data.", "abstracts": [ { "abstractType": "Regular", "content": "Simultaneous estimation of radial distortion, epipolar geometry, and relative camera pose can be formulated as a minimal problem and solved from a minimal number of image points. Finding the solution to this problem leads to solving a system of algebraic equations. In this paper, we provide two different solutions to the problem of estimating radial distortion and epipolar geometry from eight point correspondences in two images. 
Unlike previous algorithms which were able to solve the problem from nine correspondences only, we enforce the determinant of the fundamental matrix be zero. This leads to a system of eight quadratic and one cubic equation in nine variables. We first simplify this system by eliminating six of these variables and then solve the system by two alternative techniques. The first one is based on the Gröbner basis method and the second one on the polynomial eigenvalue computation. We demonstrate that our solutions are efficient, robust, and practical by experiments on synthetic and real data.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Simultaneous estimation of radial distortion, epipolar geometry, and relative camera pose can be formulated as a minimal problem and solved from a minimal number of image points. Finding the solution to this problem leads to solving a system of algebraic equations. In this paper, we provide two different solutions to the problem of estimating radial distortion and epipolar geometry from eight point correspondences in two images. Unlike previous algorithms which were able to solve the problem from nine correspondences only, we enforce the determinant of the fundamental matrix be zero. This leads to a system of eight quadratic and one cubic equation in nine variables. We first simplify this system by eliminating six of these variables and then solve the system by two alternative techniques. The first one is based on the Gröbner basis method and the second one on the polynomial eigenvalue computation. 
We demonstrate that our solutions are efficient, robust, and practical by experiments on synthetic and real data.", "title": "A Minimal Solution to Radial Distortion Autocalibration", "normalizedTitle": "A Minimal Solution to Radial Distortion Autocalibration", "fno": "ttp2011122410", "hasPdf": true, "idPrefix": "tp", "keywords": [ "Minimal Problems", "Radial Distortion", "Grobner Bases", "Polynomial Eigenvalue Problems" ], "authors": [ { "givenName": "Zuzana", "surname": "Kukelova", "fullName": "Zuzana Kukelova", "affiliation": "Czech Technical University in Prague, Prague", "__typename": "ArticleAuthorType" }, { "givenName": "Tomas", "surname": "Pajdla", "fullName": "Tomas Pajdla", "affiliation": "Czech Technical University in Prague, Prague", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "12", "pubDate": "2011-12-01 00:00:00", "pubType": "trans", "pages": "2410-2422", "year": "2011", "issn": "0162-8828", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/iccv/2007/1630/0/04409190", "title": "Two Minimal Problems for Cameras with Radial Distortion", "doi": null, "abstractUrl": "/proceedings-article/iccv/2007/04409190/12OmNBscD31", "parentPublication": { "id": "proceedings/iccv/2007/1630/0", "title": "2007 11th IEEE International Conference on Computer Vision", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2007/1179/0/04270088", "title": "A minimal solution to the autocalibration of radial distortion", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2007/04270088/12OmNrYCXOy", "parentPublication": { "id": "proceedings/cvpr/2007/1179/0", "title": "2007 IEEE Conference on Computer Vision and Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/iccv/2013/2840/0/2840c816", "title": "Real-Time Solution to the Absolute Pose Problem with Unknown Radial Distortion and Focal Length", "doi": null, "abstractUrl": "/proceedings-article/iccv/2013/2840c816/12OmNrkT7wu", "parentPublication": { "id": "proceedings/iccv/2013/2840/0", "title": "2013 IEEE International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2013/4989/0/4989b368", "title": "Radial Distortion Self-Calibration", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2013/4989b368/12OmNvqmUGe", "parentPublication": { "id": "proceedings/cvpr/2013/4989/0", "title": "2013 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2000/0750/1/07501423", "title": "Violating Rotating Camera Geometry: The Effect of Radial Distortion on Self-Calibration", "doi": null, "abstractUrl": "/proceedings-article/icpr/2000/07501423/12OmNyQYtkT", "parentPublication": { "id": "proceedings/icpr/2000/0750/1", "title": "Pattern Recognition, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2015/6964/0/07298663", "title": "Radial distortion homography", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2015/07298663/12OmNzBOic1", "parentPublication": { "id": "proceedings/cvpr/2015/6964/0", "title": "2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2008/2242/0/04587674", "title": "Fast and robust numerical solutions to minimal problems for cameras with radial distortion", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2008/04587674/12OmNzQzqh1", "parentPublication": { "id": "proceedings/cvpr/2008/2242/0", "title": "2008 IEEE 
Conference on Computer Vision and Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2019/3293/0/329300j673", "title": "Radial Distortion Triangulation", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2019/329300j673/1gyrQv2dZvO", "parentPublication": { "id": "proceedings/cvpr/2019/3293/0", "title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2021/4899/0/489900d663", "title": "Fast Solvers for Minimal Radial Distortion Relative Pose Problems", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2021/489900d663/1yJYuu3ZLpK", "parentPublication": { "id": "proceedings/cvprw/2021/4899/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "ttp2011122396", "articleId": "13rRUwcAqrl", "__typename": "AdjacentArticleType" }, "next": { "fno": "ttp2011122423", "articleId": "13rRUxC0Sx2", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "17ShDTXFgyA", "name": "ttp2011122410s.pdf", "location": "https://www.computer.org/csdl/api/v1/extra/ttp2011122410s.pdf", "extension": "pdf", "size": "89.2 kB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "12OmNBCZnU9", "title": "August", "year": "2007", "issueNum": "08", "idPrefix": "tp", "pubType": "journal", "volume": "29", "label": "August", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUxASuqt", "doi": "10.1109/TPAMI.2007.1147", "abstract": "We propose a method of simultaneously calibrating the radial distortion function of a camera and the other internal calibration parameters. The method relies on the use of a planar (or, alternatively, nonplanar) calibration grid which is captured in several images. In this way, the determination of the radial distortion is an easy add-on to the popular calibration method proposed by Zhang [24]. The method is entirely noniterative and, hence, is extremely rapid and immune to the problem of local minima. Our method determines the radial distortion in a parameter-free way, not relying on any particular radial distortion model. This makes it applicable to a large range of cameras from narrow-angle to fish-eye lenses. The method also computes the center of radial distortion, which, we argue, is important in obtaining optimal results. Experiments show that this point may be significantly displaced from the center of the image or the principal point of the camera.", "abstracts": [ { "abstractType": "Regular", "content": "We propose a method of simultaneously calibrating the radial distortion function of a camera and the other internal calibration parameters. The method relies on the use of a planar (or, alternatively, nonplanar) calibration grid which is captured in several images. In this way, the determination of the radial distortion is an easy add-on to the popular calibration method proposed by Zhang [24]. The method is entirely noniterative and, hence, is extremely rapid and immune to the problem of local minima. 
Our method determines the radial distortion in a parameter-free way, not relying on any particular radial distortion model. This makes it applicable to a large range of cameras from narrow-angle to fish-eye lenses. The method also computes the center of radial distortion, which, we argue, is important in obtaining optimal results. Experiments show that this point may be significantly displaced from the center of the image or the principal point of the camera.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We propose a method of simultaneously calibrating the radial distortion function of a camera and the other internal calibration parameters. The method relies on the use of a planar (or, alternatively, nonplanar) calibration grid which is captured in several images. In this way, the determination of the radial distortion is an easy add-on to the popular calibration method proposed by Zhang [24]. The method is entirely noniterative and, hence, is extremely rapid and immune to the problem of local minima. Our method determines the radial distortion in a parameter-free way, not relying on any particular radial distortion model. This makes it applicable to a large range of cameras from narrow-angle to fish-eye lenses. The method also computes the center of radial distortion, which, we argue, is important in obtaining optimal results. 
Experiments show that this point may be significantly displaced from the center of the image or the principal point of the camera.", "title": "Parameter-Free Radial Distortion Correction with Center of Distortion Estimation", "normalizedTitle": "Parameter-Free Radial Distortion Correction with Center of Distortion Estimation", "fno": "i1309", "hasPdf": true, "idPrefix": "tp", "keywords": [ "Cameras", "Lenses", "Calibration", "Polynomials", "Iterative Methods", "Convergence", "Cost Function", "Optical Distortion", "Transmission Line Matrix Methods", "Image Analysis", "Fundamental Matrix", "Radial Distortion", "Camera Calibration" ], "authors": [ { "givenName": "Richard", "surname": "Hartley", "fullName": "Richard Hartley", "affiliation": "Australian Nat. Univ., Canberra", "__typename": "ArticleAuthorType" }, { "givenName": "Sing Bing", "surname": "Kang", "fullName": "Sing Bing Kang", "affiliation": null, "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "08", "pubDate": "2007-08-01 00:00:00", "pubType": "trans", "pages": "1309-1321", "year": "2007", "issn": "0162-8828", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/cvpr/2009/3992/0/05206756", "title": "Pose estimation with radial distortion and unknown focal length", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2009/05206756/12OmNqG0SQX", "parentPublication": { "id": "proceedings/cvpr/2009/3992/0", "title": "2009 IEEE Conference on Computer Vision and Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2014/5118/0/5118a033", "title": "Minimal Solvers for Relative Pose with a Single Unknown Radial Distortion", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2014/5118a033/12OmNrFkeSu", "parentPublication": { "id": "proceedings/cvpr/2014/5118/0", 
"title": "2014 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/isme/2010/7669/2/05573869", "title": "A New Camera Calibration Method Based on Two Stages Distortion Model", "doi": null, "abstractUrl": "/proceedings-article/isme/2010/05573869/12OmNwBT1lH", "parentPublication": { "id": "proceedings/isme/2010/7669/2", "title": "2010 International Conference of Information Science and Management Engineering. ISME 2010", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/fg/2015/6026/1/07163149", "title": "Correcting radial and perspective distortion by using face shape information", "doi": null, "abstractUrl": "/proceedings-article/fg/2015/07163149/12OmNyKJilp", "parentPublication": { "id": "proceedings/fg/2015/6026/5", "title": "2015 11th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (FG)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2000/0750/1/07501423", "title": "Violating Rotating Camera Geometry: The Effect of Radial Distortion on Self-Calibration", "doi": null, "abstractUrl": "/proceedings-article/icpr/2000/07501423/12OmNyQYtkT", "parentPublication": { "id": "proceedings/icpr/2000/0750/1", "title": "Pattern Recognition, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2015/8391/0/8391c345", "title": "On the Equivalence of Moving Entrance Pupil and Radial Distortion for Camera Calibration", "doi": null, "abstractUrl": "/proceedings-article/iccv/2015/8391c345/12OmNyshmIc", "parentPublication": { "id": "proceedings/iccv/2015/8391/0", "title": "2015 IEEE International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/crv/2014/4337/0/4337a195", "title": "Camera Matrix Calibration Using Circular Control Points and Separate Correction of the Geometric Distortion Field", "doi": null, "abstractUrl": "/proceedings-article/crv/2014/4337a195/12OmNzWx0bU", "parentPublication": { "id": "proceedings/crv/2014/4337/0", "title": "2014 Canadian Conference on Computer and Robot Vision (CRV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2019/4803/0/480300b062", "title": "Revisiting Radial Distortion Absolute Pose", "doi": null, "abstractUrl": "/proceedings-article/iccv/2019/480300b062/1hVlRpT15wA", "parentPublication": { "id": "proceedings/iccv/2019/4803/0", "title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2020/7168/0/716800h718", "title": "RDCFace: Radial Distortion Correction for Face Recognition", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2020/716800h718/1m3n9WusSUo", "parentPublication": { "id": "proceedings/cvpr/2020/7168/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2021/0477/0/047700b750", "title": "Efficient Real-Time Radial Distortion Correction for UAVs", "doi": null, "abstractUrl": "/proceedings-article/wacv/2021/047700b750/1uqGOZv9gly", "parentPublication": { "id": "proceedings/wacv/2021/0477/0", "title": "2021 IEEE Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "04250458", "articleId": "13rRUyfbwrV", "__typename": "AdjacentArticleType" }, "next": { "fno": "i1322", "articleId": "13rRUEgs2D4", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": 
[], "articleVideos": [] }
{ "issue": { "id": "12OmNwc3wwx", "title": "PrePrints", "year": "5555", "issueNum": "01", "idPrefix": "tq", "pubType": "journal", "volume": null, "label": "PrePrints", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1Fu4UX6IYuI", "doi": "10.1109/TDSC.2022.3193423", "abstract": "Recently, almost all the proposed adaptive video steganographic schemes are based on minimizing an additive embedding distortion. However, they ignore the hard fact that the additive embedding distortion is not quite suitable for video steganography because of the interplay of cover elements in video steganography. In this paper, an adaptive intra prediction mode based (IPM-based) video steganography is proposed by minimizing the non-additive distortion in HEVC. To reduce the complexity of minimizing the non-additive distortion, a multi-layered embedding structure combined with a proposed embedding distortion updating strategy is adopted to approximate the non-additive distortion in an additive form. Firstly, all IPMs are decomposed into multiple layers based on the distortion drift graph to offer multi-layered embedding. Each IPM in the same layer is considered to be independent, and syndrome-trellis code (STC) can be applied to embed the message segment into each layer with an additive distortion function sequentially. Then, a distortion function composed of self-distortion and drift-distortion is proposed to initialize the distortion of modifying each IPM. Finally, after embedding the first message segment into the IPMs in the first layer with the initialized distortions, an embedding distortion updating strategy is applied to update the distortions of the IPMs in the remaining layers dynamically. 
Experimental results demonstrate that the proposed adaptive IPM-based video steganography can achieve much better perceptual quality and security performance than the state-of-the-art.", "abstracts": [ { "abstractType": "Regular", "content": "Recently, almost all the proposed adaptive video steganographic schemes are based on minimizing an additive embedding distortion. However, they ignore the hard fact that the additive embedding distortion is not quite suitable for video steganography because of the interplay of cover elements in video steganography. In this paper, an adaptive intra prediction mode based (IPM-based) video steganography is proposed by minimizing the non-additive distortion in HEVC. To reduce the complexity of minimizing the non-additive distortion, a multi-layered embedding structure combined with a proposed embedding distortion updating strategy is adopted to approximate the non-additive distortion in an additive form. Firstly, all IPMs are decomposed into multiple layers based on the distortion drift graph to offer multi-layered embedding. Each IPM in the same layer is considered to be independent, and syndrome-trellis code (STC) can be applied to embed the message segment into each layer with an additive distortion function sequentially. Then, a distortion function composed of self-distortion and drift-distortion is proposed to initialize the distortion of modifying each IPM. Finally, after embedding the first message segment into the IPMs in the first layer with the initialized distortions, an embedding distortion updating strategy is applied to update the distortions of the IPMs in the remaining layers dynamically. 
Experimental results demonstrate that the proposed adaptive IPM-based video steganography can achieve much better perceptual quality and security performance than the state-of-the-art.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Recently, almost all the proposed adaptive video steganographic schemes are based on minimizing an additive embedding distortion. However, they ignore the hard fact that the additive embedding distortion is not quite suitable for video steganography because of the interplay of cover elements in video steganography. In this paper, an adaptive intra prediction mode based (IPM-based) video steganography is proposed by minimizing the non-additive distortion in HEVC. To reduce the complexity of minimizing the non-additive distortion, a multi-layered embedding structure combined with a proposed embedding distortion updating strategy is adopted to approximate the non-additive distortion in an additive form. Firstly, all IPMs are decomposed into multiple layers based on the distortion drift graph to offer multi-layered embedding. Each IPM in the same layer is considered to be independent, and syndrome-trellis code (STC) can be applied to embed the message segment into each layer with an additive distortion function sequentially. Then, a distortion function composed of self-distortion and drift-distortion is proposed to initialize the distortion of modifying each IPM. Finally, after embedding the first message segment into the IPMs in the first layer with the initialized distortions, an embedding distortion updating strategy is applied to update the distortions of the IPMs in the remaining layers dynamically. 
Experimental results demonstrate that the proposed adaptive IPM-based video steganography can achieve much better perceptual quality and security performance than the state-of-the-art.", "title": "An Adaptive IPM-based HEVC Video Steganography Via Minimizing Non-additive Distortion", "normalizedTitle": "An Adaptive IPM-based HEVC Video Steganography Via Minimizing Non-additive Distortion", "fno": "09847125", "hasPdf": true, "idPrefix": "tq", "keywords": [ "Distortion", "Steganography", "Encoding", "Additives", "Standards", "Visualization", "Video Coding", "HEVC", "Video Steganography", "Intra Prediction Mode", "Non Additive Distortion" ], "authors": [ { "givenName": "Jie", "surname": "Wang", "fullName": "Jie Wang", "affiliation": "Guangdong Key Laboratory of Information Security, School of Computer Science and Engineering, Sun Yat-Sen University, Guangzhou, China", "__typename": "ArticleAuthorType" }, { "givenName": "Xuemei", "surname": "Yin", "fullName": "Xuemei Yin", "affiliation": "Guangdong Key Laboratory of Information Security, School of Computer Science and Engineering, Sun Yat-Sen University, Guangzhou, China", "__typename": "ArticleAuthorType" }, { "givenName": "Yifang", "surname": "Chen", "fullName": "Yifang Chen", "affiliation": "Guangdong Key Laboratory of Information Security, School of Computer Science and Engineering, Sun Yat-Sen University, Guangzhou, China", "__typename": "ArticleAuthorType" }, { "givenName": "Jiwu", "surname": "Huang", "fullName": "Jiwu Huang", "affiliation": "Guangdong Key Laboratory of Information Security, School of Computer Science and Engineering, Sun Yat-Sen University, Guangzhou, China", "__typename": "ArticleAuthorType" }, { "givenName": "Xiangui", "surname": "Kang", "fullName": "Xiangui Kang", "affiliation": "Guangdong Key Laboratory of Information Security, School of Computer Science and Engineering, Sun Yat-Sen University, Guangzhou, China", "__typename": "ArticleAuthorType" } ], "replicability": null, 
"showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "01", "pubDate": "2022-08-01 00:00:00", "pubType": "trans", "pages": "1-18", "year": "5555", "issn": "1545-5971", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/mines/2011/4559/0/4559a489", "title": "A Blind Detection Method for Additive Noise Steganography in JPEG Decompressed Images", "doi": null, "abstractUrl": "/proceedings-article/mines/2011/4559a489/12OmNCesra2", "parentPublication": { "id": "proceedings/mines/2011/4559/0", "title": "Multimedia Information Networking and Security, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iih-msp/2010/4222/0/4222a212", "title": "Controlled Distortion for High Capacity Data-in-Speech Spectrum Steganography", "doi": null, "abstractUrl": "/proceedings-article/iih-msp/2010/4222a212/12OmNx3q6YR", "parentPublication": { "id": "proceedings/iih-msp/2010/4222/0", "title": "Intelligent Information Hiding and Multimedia Signal Processing, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/dcc/2017/6721/0/07921914", "title": "Cluster Adapted Signalling for Intra Prediction in HEVC", "doi": null, "abstractUrl": "/proceedings-article/dcc/2017/07921914/12OmNy314iz", "parentPublication": { "id": "proceedings/dcc/2017/6721/0", "title": "2017 Data Compression Conference (DCC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tq/2023/01/09684978", "title": "Adaptive HEVC Steganography Based on Steganographic Compression Efficiency Degradation Model", "doi": null, "abstractUrl": "/journal/tq/2023/01/09684978/1Ai9zGHz6X6", "parentPublication": { "id": "trans/tq", "title": "IEEE Transactions on Dependable and Secure Computing", "__typename": "ParentPublication" }, 
"__typename": "RecommendedArticleType" }, { "id": "proceedings/trustcom/2021/1658/0/165800a967", "title": "A HEVC Video Steganography Algorithm Based on DCT/DST Coefficients with Improved VRCNN", "doi": null, "abstractUrl": "/proceedings-article/trustcom/2021/165800a967/1BBzyBcgInu", "parentPublication": { "id": "proceedings/trustcom/2021/1658/0", "title": "2021 IEEE 20th International Conference on Trust, Security and Privacy in Computing and Communications (TrustCom)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tq/5555/01/09792434", "title": "Cover Selection for Steganography Using Image Similarity", "doi": null, "abstractUrl": "/journal/tq/5555/01/09792434/1E5LHzF4b28", "parentPublication": { "id": "trans/tq", "title": "IEEE Transactions on Dependable and Secure Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tq/5555/01/09795134", "title": "NACA: A Joint Distortion-Based Non-Additive Cost Assignment Method for Video Steganography", "doi": null, "abstractUrl": "/journal/tq/5555/01/09795134/1Ebf9FWHOlW", "parentPublication": { "id": "trans/tq", "title": "IEEE Transactions on Dependable and Secure Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tq/5555/01/09931443", "title": "Cover Reproducible Steganography via Deep Generative Models", "doi": null, "abstractUrl": "/journal/tq/5555/01/09931443/1HOtnz2xJKM", "parentPublication": { "id": "trans/tq", "title": "IEEE Transactions on Dependable and Secure Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tq/2022/04/09351637", "title": "DDCA: A Distortion Drift-Based Cost Assignment Method for Adaptive Video Steganography in the Transform Domain", "doi": null, "abstractUrl": "/journal/tq/2022/04/09351637/1r51eygqSSA", "parentPublication": { "id": "trans/tq", "title": "IEEE Transactions on Dependable and 
Secure Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tq/2023/01/09672694", "title": "An Anti-Steganalysis HEVC Video Steganography With High Performance Based on CNN and PU Partition Modes", "doi": null, "abstractUrl": "/journal/tq/2023/01/09672694/1zWzNpFRqW4", "parentPublication": { "id": "trans/tq", "title": "IEEE Transactions on Dependable and Secure Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "09847046", "articleId": "1Fu4UmoNdzq", "__typename": "AdjacentArticleType" }, "next": { "fno": "09846892", "articleId": "1Fu4V7Vt6Ra", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNAkEU38", "title": "Oct.", "year": "2013", "issueNum": "10", "idPrefix": "tk", "pubType": "journal", "volume": "25", "label": "Oct.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUxNmPEg", "doi": "10.1109/TKDE.2012.178", "abstract": "A graph is called hidden if the edges are not explicitly given and edge probe tests are required to detect the presence of edges. This paper studies the $(k)$ most connected vertices ($(k)$MCV) problem on hidden bipartite graphs, which has applications in spatial databases, graph databases, and bioinformatics. There is a prior work on the $(k)$MCV problem, which is based on the \"2-vertex testing\" model, i.e., an edge probe test can only reveal the existence of an edge between two individual vertices. We study the $(k)$MCV problem, in the context of a more general edge probe test model called \"group testing.\" A group test can reveal whether there exists some edge between a vertex and a group of vertices. If group testing is used properly, a single invocation of a group test can reveal as much information as multiple invocations of 2-vertex tests. We discuss the cases and applications where group testing could be used, and present an algorithm, namely, GMCV, that adaptively leverages group testing to solve the $(k)$MCV problem.", "abstracts": [ { "abstractType": "Regular", "content": "A graph is called hidden if the edges are not explicitly given and edge probe tests are required to detect the presence of edges. This paper studies the $(k)$ most connected vertices ($(k)$MCV) problem on hidden bipartite graphs, which has applications in spatial databases, graph databases, and bioinformatics. There is a prior work on the $(k)$MCV problem, which is based on the \"2-vertex testing\" model, i.e., an edge probe test can only reveal the existence of an edge between two individual vertices. 
We study the $(k)$MCV problem, in the context of a more general edge probe test model called \"group testing.\" A group test can reveal whether there exists some edge between a vertex and a group of vertices. If group testing is used properly, a single invocation of a group test can reveal as much information as multiple invocations of 2-vertex tests. We discuss the cases and applications where group testing could be used, and present an algorithm, namely, GMCV, that adaptively leverages group testing to solve the $(k)$MCV problem.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "A graph is called hidden if the edges are not explicitly given and edge probe tests are required to detect the presence of edges. This paper studies the - most connected vertices (-MCV) problem on hidden bipartite graphs, which has applications in spatial databases, graph databases, and bioinformatics. There is a prior work on the -MCV problem, which is based on the \"2-vertex testing\" model, i.e., an edge probe test can only reveal the existence of an edge between two individual vertices. We study the -MCV problem, in the context of a more general edge probe test model called \"group testing.\" A group test can reveal whether there exists some edge between a vertex and a group of vertices. If group testing is used properly, a single invocation of a group test can reveal as much information as multiple invocations of 2-vertex tests. 
We discuss the cases and applications where group testing could be used, and present an algorithm, namely, GMCV, that adaptively leverages group testing to solve the -MCV problem.", "title": "Identifying the Most Connected Vertices in Hidden Bipartite Graphs Using Group Testing", "normalizedTitle": "Identifying the Most Connected Vertices in Hidden Bipartite Graphs Using Group Testing", "fno": "ttk2013102245", "hasPdf": true, "idPrefix": "tk", "keywords": [ "Testing", "Probes", "Image Edge Detection", "Bipartite Graph", "Proteins", "Bioinformatics", "Switches", "Query Processing", "Testing", "Probes", "Image Edge Detection", "Bipartite Graph", "Proteins", "Bioinformatics", "Switches", "Graphs And Networks" ], "authors": [ { "givenName": "Jianguo", "surname": "Wang", "fullName": "Jianguo Wang", "affiliation": "The Hong Kong Polytechnic University, Hong Kong", "__typename": "ArticleAuthorType" }, { "givenName": "Eric", "surname": "Lo", "fullName": "Eric Lo", "affiliation": "The Hong Kong Polytechnic University, Hong Kong", "__typename": "ArticleAuthorType" }, { "givenName": "Man Lung", "surname": "Yiu", "fullName": "Man Lung Yiu", "affiliation": "The Hong Kong Polytechnic University, Hong Kong", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "10", "pubDate": "2013-10-01 00:00:00", "pubType": "trans", "pages": "2245-2256", "year": "2013", "issn": "1041-4347", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/bibm/2011/1799/0/06120450", "title": "Prediction and Evaluation of miRNA -- Target Gene Pairs Using K-means Clustering and Bipartite Graphs with Statistical Scoring", "doi": null, "abstractUrl": "/proceedings-article/bibm/2011/06120450/12OmNvF83m8", "parentPublication": { "id": "proceedings/bibm/2011/1799/0", "title": "2011 IEEE International Conference on Bioinformatics and Biomedicine", 
"__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ipdpsw/2016/3682/0/3682a480", "title": "Stable Matching Beyond Bipartite Graphs", "doi": null, "abstractUrl": "/proceedings-article/ipdpsw/2016/3682a480/12OmNzC5T5q", "parentPublication": { "id": "proceedings/ipdpsw/2016/3682/0", "title": "2016 IEEE International Parallel and Distributed Processing Symposium Workshops (IPDPSW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tb/2017/06/07513454", "title": "Multi-Block Bipartite Graph for Integrative Genomic Analysis", "doi": null, "abstractUrl": "/journal/tb/2017/06/07513454/13rRUxBJhtK", "parentPublication": { "id": "trans/tb", "title": "IEEE/ACM Transactions on Computational Biology and Bioinformatics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cse/2018/7649/0/764900a132", "title": "Finding Maximum Edge Biclique in Bipartite Networks by Integer Programming", "doi": null, "abstractUrl": "/proceedings-article/cse/2018/764900a132/17D45Wda7h6", "parentPublication": { "id": "proceedings/cse/2018/7649/0", "title": "2018 IEEE International Conference on Computational Science and Engineering (CSE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ipdpsw/2022/9747/0/974700a304", "title": "Families of Butterfly Counting Algorithms for Bipartite Graphs", "doi": null, "abstractUrl": "/proceedings-article/ipdpsw/2022/974700a304/1Fu9kUXc6S4", "parentPublication": { "id": "proceedings/ipdpsw/2022/9747/0", "title": "2022 IEEE International Parallel and Distributed Processing Symposium Workshops (IPDPSW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icde/2022/0883/0/088300a898", "title": "Maximum Biplex Search over Bipartite Graphs", "doi": null, "abstractUrl": "/proceedings-article/icde/2022/088300a898/1FwFgG7iniE", 
"parentPublication": { "id": "proceedings/icde/2022/0883/0", "title": "2022 IEEE 38th International Conference on Data Engineering (ICDE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/5555/01/10093949", "title": "Splitting Vertices in 2-Layer Graph Drawings", "doi": null, "abstractUrl": "/magazine/cg/5555/01/10093949/1M80M7nF2Mg", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/10/08423100", "title": "The Effect of Edge Bundling and Seriation on Sensemaking of Biclusters in Bipartite Graphs", "doi": null, "abstractUrl": "/journal/tg/2019/10/08423100/1d3e5UbWqis", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icde/2020/2903/0/09101358", "title": "Efficient Bitruss Decomposition for Large-scale Bipartite Graphs", "doi": null, "abstractUrl": "/proceedings-article/icde/2020/09101358/1kaMKNvItzy", "parentPublication": { "id": "proceedings/icde/2020/2903/0", "title": "2020 IEEE 36th International Conference on Data Engineering (ICDE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/focs/2020/9621/0/962100a412", "title": "Edge-Weighted Online Bipartite Matching", "doi": null, "abstractUrl": "/proceedings-article/focs/2020/962100a412/1qyxvL8VZcc", "parentPublication": { "id": "proceedings/focs/2020/9621/0", "title": "2020 IEEE 61st Annual Symposium on Foundations of Computer Science (FOCS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "ttk2013102231", "articleId": "13rRUxC0SEy", "__typename": "AdjacentArticleType" }, "next": { "fno": "ttk2013102257", "articleId": "13rRUxNW1Ue", 
"__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "17ShDTXWRMm", "name": "ttk2013102245s1.pdf", "location": "https://www.computer.org/csdl/api/v1/extra/ttk2013102245s1.pdf", "extension": "pdf", "size": "58.7 kB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "1Ax5KStiZmU", "title": "March", "year": "2022", "issueNum": "03", "idPrefix": "tg", "pubType": "journal", "volume": "28", "label": "March", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1mhPPdfu11u", "doi": "10.1109/TVCG.2020.3016645", "abstract": "Triangle meshes are used in many important shape-related applications including geometric modeling, animation production, system simulation, and visualization. However, these meshes are typically generated in raw form with several defects and poor-quality elements, obstructing them from practical application. Over the past decades, different surface remeshing techniques have been presented to improve these poor-quality meshes prior to the downstream utilization. A typical surface remeshing algorithm converts an input mesh into a higher quality mesh with consideration of given quality requirements as well as an acceptable approximation to the input mesh. In recent years, surface remeshing has gained significant attention from researchers and engineers, and several remeshing algorithms have been proposed. However, there has been no survey article on remeshing methods in general with a defined search strategy and article selection mechanism covering the recent approaches in surface remeshing domain with a good connection to classical approaches. In this article, we present a survey on surface remeshing techniques, classifying all collected articles in different categories and analyzing specific methods with their advantages, disadvantages, and possible future improvements. Following the systematic literature review methodology, we define step-by-step guidelines throughout the review process, including search strategy, literature inclusion/exclusion criteria, article quality assessment, and data extraction. 
With the aim of literature collection and classification based on data extraction, we summarized collected articles, considering the key remeshing objectives, the way the mesh quality is defined and improved, and the way their techniques are compared with other previous methods. Remeshing objectives are described by angle range control, feature preservation, error control, valence optimization, and remeshing compatibility. The metrics used in the literature for the evaluation of surface remeshing algorithms are discussed. Meshing techniques are compared with other related methods via a comprehensive table with indices of the method name, the remeshing challenge met and solved, the category the method belongs to, and the year of publication. We expect this survey to be a practical reference for surface remeshing in terms of literature classification, method analysis, and future prospects.", "abstracts": [ { "abstractType": "Regular", "content": "Triangle meshes are used in many important shape-related applications including geometric modeling, animation production, system simulation, and visualization. However, these meshes are typically generated in raw form with several defects and poor-quality elements, obstructing them from practical application. Over the past decades, different surface remeshing techniques have been presented to improve these poor-quality meshes prior to the downstream utilization. A typical surface remeshing algorithm converts an input mesh into a higher quality mesh with consideration of given quality requirements as well as an acceptable approximation to the input mesh. In recent years, surface remeshing has gained significant attention from researchers and engineers, and several remeshing algorithms have been proposed. 
However, there has been no survey article on remeshing methods in general with a defined search strategy and article selection mechanism covering the recent approaches in surface remeshing domain with a good connection to classical approaches. In this article, we present a survey on surface remeshing techniques, classifying all collected articles in different categories and analyzing specific methods with their advantages, disadvantages, and possible future improvements. Following the systematic literature review methodology, we define step-by-step guidelines throughout the review process, including search strategy, literature inclusion/exclusion criteria, article quality assessment, and data extraction. With the aim of literature collection and classification based on data extraction, we summarized collected articles, considering the key remeshing objectives, the way the mesh quality is defined and improved, and the way their techniques are compared with other previous methods. Remeshing objectives are described by angle range control, feature preservation, error control, valence optimization, and remeshing compatibility. The metrics used in the literature for the evaluation of surface remeshing algorithms are discussed. Meshing techniques are compared with other related methods via a comprehensive table with indices of the method name, the remeshing challenge met and solved, the category the method belongs to, and the year of publication. We expect this survey to be a practical reference for surface remeshing in terms of literature classification, method analysis, and future prospects.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Triangle meshes are used in many important shape-related applications including geometric modeling, animation production, system simulation, and visualization. However, these meshes are typically generated in raw form with several defects and poor-quality elements, obstructing them from practical application. 
Over the past decades, different surface remeshing techniques have been presented to improve these poor-quality meshes prior to the downstream utilization. A typical surface remeshing algorithm converts an input mesh into a higher quality mesh with consideration of given quality requirements as well as an acceptable approximation to the input mesh. In recent years, surface remeshing has gained significant attention from researchers and engineers, and several remeshing algorithms have been proposed. However, there has been no survey article on remeshing methods in general with a defined search strategy and article selection mechanism covering the recent approaches in surface remeshing domain with a good connection to classical approaches. In this article, we present a survey on surface remeshing techniques, classifying all collected articles in different categories and analyzing specific methods with their advantages, disadvantages, and possible future improvements. Following the systematic literature review methodology, we define step-by-step guidelines throughout the review process, including search strategy, literature inclusion/exclusion criteria, article quality assessment, and data extraction. With the aim of literature collection and classification based on data extraction, we summarized collected articles, considering the key remeshing objectives, the way the mesh quality is defined and improved, and the way their techniques are compared with other previous methods. Remeshing objectives are described by angle range control, feature preservation, error control, valence optimization, and remeshing compatibility. The metrics used in the literature for the evaluation of surface remeshing algorithms are discussed. Meshing techniques are compared with other related methods via a comprehensive table with indices of the method name, the remeshing challenge met and solved, the category the method belongs to, and the year of publication. 
We expect this survey to be a practical reference for surface remeshing in terms of literature classification, method analysis, and future prospects.", "title": "Surface Remeshing: A Systematic Literature Review of Methods and Research Directions", "normalizedTitle": "Surface Remeshing: A Systematic Literature Review of Methods and Research Directions", "fno": "09167456", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Computational Geometry", "Computer Animation", "Computer Graphics", "Mesh Generation", "Reviews", "Quality Requirements", "Survey Article", "Article Selection Mechanism", "Collected Articles", "Systematic Literature Review Methodology", "Article Quality Assessment", "Literature Collection", "Classification", "Mesh Quality", "Remeshing Compatibility", "Surface Remeshing Algorithms", "Meshing Techniques", "Remeshing Challenge", "Triangle Meshes", "Geometric Modeling Animation Production", "Poor Quality Elements", "Surface Remeshing Techniques", "Poor Quality Meshes", "Shape Related Applications", "Systematics", "Data Mining", "Quality Assessment", "Libraries", "Search Problems", "Three Dimensional Displays", "Solid Modeling", "Mesh Generation", "Surface Remeshing", "Meshing Quality", "Finite Element Method", "Systematic Literature Review" ], "authors": [ { "givenName": "Dawar", "surname": "Khan", "fullName": "Dawar Khan", "affiliation": "IMD Lab, Information Science, Nara Institute of Science and Technology, Nara, Japan", "__typename": "ArticleAuthorType" }, { "givenName": "Alexander", "surname": "Plopski", "fullName": "Alexander Plopski", "affiliation": "IMD Lab, Information Science, Nara Institute of Science and Technology, Nara, Japan", "__typename": "ArticleAuthorType" }, { "givenName": "Yuichiro", "surname": "Fujimoto", "fullName": "Yuichiro Fujimoto", "affiliation": "IMD Lab, Information Science, Nara Institute of Science and Technology, Nara, Japan", "__typename": "ArticleAuthorType" }, { "givenName": "Masayuki", "surname": "Kanbara", 
"fullName": "Masayuki Kanbara", "affiliation": "IMD Lab, Information Science, Nara Institute of Science and Technology, Nara, Japan", "__typename": "ArticleAuthorType" }, { "givenName": "Gul", "surname": "Jabeen", "fullName": "Gul Jabeen", "affiliation": "School of Software Engineering, Tsinghua University, Beijing, China", "__typename": "ArticleAuthorType" }, { "givenName": "Yongjie Jessica", "surname": "Zhang", "fullName": "Yongjie Jessica Zhang", "affiliation": "Department of Mechanical Engineering, Carnegie Mellon University (CMU), Pittsburgh, PA, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Xiaopeng", "surname": "Zhang", "fullName": "Xiaopeng Zhang", "affiliation": "NLPR, Institute of Automation, Chinese Academy of Sciences, Beijing, China", "__typename": "ArticleAuthorType" }, { "givenName": "Hirokazu", "surname": "Kato", "fullName": "Hirokazu Kato", "affiliation": "IMD Lab, Information Science, Nara Institute of Science and Technology, Nara, Japan", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "03", "pubDate": "2022-03-01 00:00:00", "pubType": "trans", "pages": "1680-1713", "year": "2022", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/apsec/2017/3681/0/3681a041", "title": "Text-Mining Techniques and Tools for Systematic Literature Reviews: A Systematic Literature Review", "doi": null, "abstractUrl": "/proceedings-article/apsec/2017/3681a041/12OmNBlofPT", "parentPublication": { "id": "proceedings/apsec/2017/3681/0", "title": "2017 24th Asia-Pacific Software Engineering Conference (APSEC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/gmp/2000/0562/0/05620220", "title": "Using Most Isometric Parametrizations for Remeshing Polygonal Surfaces", "doi": null, "abstractUrl": 
"/proceedings-article/gmp/2000/05620220/12OmNwl8GJx", "parentPublication": { "id": "proceedings/gmp/2000/0562/0", "title": "Geometric Modeling and Processing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/esem/2013/5056/0/5056a203", "title": "Identifying Barriers to the Systematic Literature Review Process", "doi": null, "abstractUrl": "/proceedings-article/esem/2013/5056a203/12OmNykCcch", "parentPublication": { "id": "proceedings/esem/2013/5056/0", "title": "2013 ACM/IEEE International Symposium on Empirical Software Engineering and Measurement (ESEM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/smi/2010/7259/0/05521462", "title": "Reversely Anisotropic Quad-dominant Remeshing", "doi": null, "abstractUrl": "/proceedings-article/smi/2010/05521462/12OmNywxlVm", "parentPublication": { "id": "proceedings/smi/2010/7259/0", "title": "Shape Modeling International (SMI 2010)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sibgrapi/2010/8420/0/05720352", "title": "Template-Based Remeshing for Image Decomposition", "doi": null, "abstractUrl": "/proceedings-article/sibgrapi/2010/05720352/12OmNzVXNNm", "parentPublication": { "id": "proceedings/sibgrapi/2010/8420/0", "title": "2010 23rd SIBGRAPI Conference on Graphics, Patterns and Images", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/smi/2003/1845/0/18450049", "title": "Isotropic Surface Remeshing", "doi": null, "abstractUrl": "/proceedings-article/smi/2003/18450049/12OmNzZmZvh", "parentPublication": { "id": "proceedings/smi/2003/1845/0", "title": "Shape Modeling and Applications, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/07/08361045", "title": "Isotropic Surface Remeshing without Large and Small Angles", "doi": null, 
"abstractUrl": "/journal/tg/2019/07/08361045/13rRUIM2VBN", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/09978684", "title": "Adaptively Isotropic Remeshing based on Curvature Smoothed Field", "doi": null, "abstractUrl": "/journal/tg/5555/01/09978684/1IXUnEM2oc8", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/seaa/2019/3421/0/342100a379", "title": "Non-functional Requirements Prioritization: A Systematic Literature Review", "doi": null, "abstractUrl": "/proceedings-article/seaa/2019/342100a379/1f8MK3GXuYo", "parentPublication": { "id": "proceedings/seaa/2019/3421/0", "title": "2019 45th Euromicro Conference on Software Engineering and Advanced Applications (SEAA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2005/2372/2/01467592", "title": "Geodesic computation for adaptive remeshing", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2005/01467592/1htC5ZzzhkI", "parentPublication": { "id": "proceedings/cvpr/2005/2372/2", "title": "2005 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'05)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "09189859", "articleId": "1mYZha0scxO", "__typename": "AdjacentArticleType" }, "next": null, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "1J9y2mtpt3a", "title": "Jan.", "year": "2023", "issueNum": "01", "idPrefix": "tg", "pubType": "journal", "volume": "29", "label": "Jan.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1H5F2wJXT4Q", "doi": "10.1109/TVCG.2022.3209360", "abstract": "The last decade has witnessed many visual analytics (VA) systems that make successful applications to wide-ranging domains like urban analytics and explainable AI. However, their research rigor and contributions have been extensively challenged within the visualization community. We come in defence of VA systems by contributing two interview studies for gathering critics and responses to those criticisms. First, we interview 24 researchers to collect criticisms the review comments on their VA work. Through an iterative coding and refinement process, the interview feedback is summarized into a list of 36 common criticisms. Second, we interview 17 researchers to validate our list and collect their responses, thereby discussing implications for defending and improving the scientific values and rigor of VA systems. We highlight that the presented knowledge is deep, extensive, but also imperfect, provocative, and controversial, and thus recommend reading with an inclusive and critical eye. We hope our work can provide thoughts and foundations for conducting VA research and spark discussions to promote the research field forward more rigorously and vibrantly.", "abstracts": [ { "abstractType": "Regular", "content": "The last decade has witnessed many visual analytics (VA) systems that make successful applications to wide-ranging domains like urban analytics and explainable AI. However, their research rigor and contributions have been extensively challenged within the visualization community. 
We come in defence of VA systems by contributing two interview studies for gathering critics and responses to those criticisms. First, we interview 24 researchers to collect criticisms the review comments on their VA work. Through an iterative coding and refinement process, the interview feedback is summarized into a list of 36 common criticisms. Second, we interview 17 researchers to validate our list and collect their responses, thereby discussing implications for defending and improving the scientific values and rigor of VA systems. We highlight that the presented knowledge is deep, extensive, but also imperfect, provocative, and controversial, and thus recommend reading with an inclusive and critical eye. We hope our work can provide thoughts and foundations for conducting VA research and spark discussions to promote the research field forward more rigorously and vibrantly.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "The last decade has witnessed many visual analytics (VA) systems that make successful applications to wide-ranging domains like urban analytics and explainable AI. However, their research rigor and contributions have been extensively challenged within the visualization community. We come in defence of VA systems by contributing two interview studies for gathering critics and responses to those criticisms. First, we interview 24 researchers to collect criticisms the review comments on their VA work. Through an iterative coding and refinement process, the interview feedback is summarized into a list of 36 common criticisms. Second, we interview 17 researchers to validate our list and collect their responses, thereby discussing implications for defending and improving the scientific values and rigor of VA systems. We highlight that the presented knowledge is deep, extensive, but also imperfect, provocative, and controversial, and thus recommend reading with an inclusive and critical eye. 
We hope our work can provide thoughts and foundations for conducting VA research and spark discussions to promote the research field forward more rigorously and vibrantly.", "title": "In Defence of Visual Analytics Systems: Replies to Critics", "normalizedTitle": "In Defence of Visual Analytics Systems: Replies to Critics", "fno": "09906559", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Data Analysis", "Data Visualisation", "Common Criticisms", "Critical Eye", "Explainable AI", "Inclusive Eye", "Interview Feedback", "Iterative Coding", "Refinement Process", "Urban Analytics", "VA Systems", "Visual Analytics Systems", "Visualization Community", "Interviews", "Data Visualization", "Visual Analytics", "Software", "Iterative Methods", "Encoding", "Design Methodology", "Visual Analytics", "Theory", "Qualitative Study", "Design Study", "Application", "Theoretical And Empirical Research" ], "authors": [ { "givenName": "Aoyu", "surname": "Wu", "fullName": "Aoyu Wu", "affiliation": "Hong Kong University of Science and Technology, China", "__typename": "ArticleAuthorType" }, { "givenName": "Dazhen", "surname": "Deng", "fullName": "Dazhen Deng", "affiliation": "State Key Lab of CAD&CG, Zhejiang University, China", "__typename": "ArticleAuthorType" }, { "givenName": "Furui", "surname": "Cheng", "fullName": "Furui Cheng", "affiliation": "Hong Kong University of Science and Technology, China", "__typename": "ArticleAuthorType" }, { "givenName": "Yingcai", "surname": "Wu", "fullName": "Yingcai Wu", "affiliation": "State Key Lab of CAD&CG, Zhejiang University, China", "__typename": "ArticleAuthorType" }, { "givenName": "Shixia", "surname": "Liu", "fullName": "Shixia Liu", "affiliation": "School of Software, Tsinghua University, China", "__typename": "ArticleAuthorType" }, { "givenName": "Huamin", "surname": "Qu", "fullName": "Huamin Qu", "affiliation": "Hong Kong University of Science and Technology, China", "__typename": "ArticleAuthorType" } ], "replicability": null, 
"showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "01", "pubDate": "2023-01-01 00:00:00", "pubType": "trans", "pages": "1026-1036", "year": "2023", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/vast/2006/0591/0/04035767", "title": "Visual Analytics Education", "doi": null, "abstractUrl": "/proceedings-article/vast/2006/04035767/12OmNA14Aii", "parentPublication": { "id": "proceedings/vast/2006/0591/0", "title": "2006 IEEE Symposium On Visual Analytics Science And Technology", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/hicss/2013/4892/0/4892c416", "title": "Visual Analytics for Public Health: Supporting Knowledge Construction and Decision-Making", "doi": null, "abstractUrl": "/proceedings-article/hicss/2013/4892c416/12OmNrJiCNq", "parentPublication": { "id": "proceedings/hicss/2013/4892/0", "title": "2013 46th Hawaii International Conference on System Sciences", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2012/04/mcg2012040026", "title": "A Graph Algebra for Scalable Visual Analytics", "doi": null, "abstractUrl": "/magazine/cg/2012/04/mcg2012040026/13rRUILLkpN", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2017/01/07534883", "title": "Characterizing Guidance in Visual Analytics", "doi": null, "abstractUrl": "/journal/tg/2017/01/07534883/13rRUxBa568", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2012/04/mcg2012040063", "title": "The Top 10 Challenges in Extreme-Scale Visual Analytics", "doi": null, "abstractUrl": 
"/magazine/cg/2012/04/mcg2012040063/13rRUxC0SGA", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2015/02/mcg2015020016", "title": "Preparing Undergraduates for Visual Analytics", "doi": null, "abstractUrl": "/magazine/cg/2015/02/mcg2015020016/13rRUxjQyjN", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2012/04/mcg2012040023", "title": "Extreme-Scale Visual Analytics", "doi": null, "abstractUrl": "/magazine/cg/2012/04/mcg2012040023/13rRUxjQyxF", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/trex/2020/8514/0/851400a009", "title": "Beyond Trust Building &#x2014; Calibrating Trust in Visual Analytics", "doi": null, "abstractUrl": "/proceedings-article/trex/2020/851400a009/1pXm2QUw2ek", "parentPublication": { "id": "proceedings/trex/2020/8514/0", "title": "2020 IEEE Workshop on TRust and EXpertise in Visual Analytics (TREX)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2020/9134/0/913400a368", "title": "A Characterization of Data Exchange between Visual Analytics Tools", "doi": null, "abstractUrl": "/proceedings-article/iv/2020/913400a368/1rSRaA2LJBK", "parentPublication": { "id": "proceedings/iv/2020/9134/0", "title": "2020 24th International Conference Information Visualisation (IV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/trex/2021/1817/0/181700a014", "title": "Making and Trusting Decisions in Visual Analytics", "doi": null, "abstractUrl": "/proceedings-article/trex/2021/181700a014/1yQB6h3HL6o", "parentPublication": { "id": 
"proceedings/trex/2021/1817/0", "title": "2021 IEEE Workshop on TRust and EXpertise in Visual Analytics (TREX)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "09903512", "articleId": "1GZol4dym8U", "__typename": "AdjacentArticleType" }, "next": { "fno": "09904866", "articleId": "1H2llxba9ws", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNvsDHDY", "title": "Jan.", "year": "2020", "issueNum": "01", "idPrefix": "tg", "pubType": "journal", "volume": "26", "label": "Jan.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1cG4DVd6FcQ", "doi": "10.1109/TVCG.2019.2934264", "abstract": "Many evaluation methods have been used to assess the usefulness of Visual Analytics (VA) solutions. These methods stem from a variety of origins with different assumptions and goals, which cause confusion about their proofing capabilities. Moreover, the lack of discussion about the evaluation processes may limit our potential to develop new evaluation methods specialized for VA. In this paper, we present an analysis of evaluation methods that have been used to summatively evaluate VA solutions. We provide a survey and taxonomy of the evaluation methods that have appeared in the VAST literature in the past two years. We then analyze these methods in terms of validity and generalizability of their findings, as well as the feasibility of using them. We propose a new metric called summative quality to compare evaluation methods according to their ability to prove usefulness, and make recommendations for selecting evaluation methods based on their summative quality in the VA domain.", "abstracts": [ { "abstractType": "Regular", "content": "Many evaluation methods have been used to assess the usefulness of Visual Analytics (VA) solutions. These methods stem from a variety of origins with different assumptions and goals, which cause confusion about their proofing capabilities. Moreover, the lack of discussion about the evaluation processes may limit our potential to develop new evaluation methods specialized for VA. In this paper, we present an analysis of evaluation methods that have been used to summatively evaluate VA solutions. 
We provide a survey and taxonomy of the evaluation methods that have appeared in the VAST literature in the past two years. We then analyze these methods in terms of validity and generalizability of their findings, as well as the feasibility of using them. We propose a new metric called summative quality to compare evaluation methods according to their ability to prove usefulness, and make recommendations for selecting evaluation methods based on their summative quality in the VA domain.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Many evaluation methods have been used to assess the usefulness of Visual Analytics (VA) solutions. These methods stem from a variety of origins with different assumptions and goals, which cause confusion about their proofing capabilities. Moreover, the lack of discussion about the evaluation processes may limit our potential to develop new evaluation methods specialized for VA. In this paper, we present an analysis of evaluation methods that have been used to summatively evaluate VA solutions. We provide a survey and taxonomy of the evaluation methods that have appeared in the VAST literature in the past two years. We then analyze these methods in terms of validity and generalizability of their findings, as well as the feasibility of using them. 
We propose a new metric called summative quality to compare evaluation methods according to their ability to prove usefulness, and make recommendations for selecting evaluation methods based on their summative quality in the VA domain.", "title": "The Validity, Generalizability and Feasibility of Summative Evaluation Methods in Visual Analytics", "normalizedTitle": "The Validity, Generalizability and Feasibility of Summative Evaluation Methods in Visual Analytics", "fno": "08805439", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Data Analysis", "Data Visualisation", "Summative Evaluation Methods", "Visual Analytics", "VAST Literature", "Validity", "Feasibility", "Generalizability", "Summative Quality", "Taxonomy", "Visual Analytics", "Measurement", "Usability", "Guidelines", "Focusing", "Summative Evaluation", "Usefulness", "Evaluation Process", "Taxonomy", "Visual Analytics" ], "authors": [ { "givenName": "Mosab", "surname": "Khayat", "fullName": "Mosab Khayat", "affiliation": "Purdue University", "__typename": "ArticleAuthorType" }, { "givenName": "Morteza", "surname": "Karimzadeh", "fullName": "Morteza Karimzadeh", "affiliation": "University of Colorado Boulder", "__typename": "ArticleAuthorType" }, { "givenName": "David S.", "surname": "Ebert", "fullName": "David S. 
Ebert", "affiliation": "Purdue University", "__typename": "ArticleAuthorType" }, { "givenName": "Arif", "surname": "Ghafoor", "fullName": "Arif Ghafoor", "affiliation": "Purdue University", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "01", "pubDate": "2020-01-01 00:00:00", "pubType": "trans", "pages": "353-363", "year": "2020", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/vast/2006/0591/0/04035767", "title": "Visual Analytics Education", "doi": null, "abstractUrl": "/proceedings-article/vast/2006/04035767/12OmNA14Aii", "parentPublication": { "id": "proceedings/vast/2006/0591/0", "title": "2006 IEEE Symposium On Visual Analytics Science And Technology", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/hicss/2013/4892/0/4892c416", "title": "Visual Analytics for Public Health: Supporting Knowledge Construction and Decision-Making", "doi": null, "abstractUrl": "/proceedings-article/hicss/2013/4892c416/12OmNrJiCNq", "parentPublication": { "id": "proceedings/hicss/2013/4892/0", "title": "2013 46th Hawaii International Conference on System Sciences", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2013/04/mcg2013040022", "title": "Customizing Computational Methods for Visual Analytics with Big Data", "doi": null, "abstractUrl": "/magazine/cg/2013/04/mcg2013040022/13rRUB7a1ij", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2012/04/mcg2012040026", "title": "A Graph Algebra for Scalable Visual Analytics", "doi": null, "abstractUrl": "/magazine/cg/2012/04/mcg2012040026/13rRUILLkpN", "parentPublication": { "id": "mags/cg", "title": "IEEE 
Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2014/04/mcg2014040008", "title": "Semantic Interaction for Visual Analytics: Toward Coupling Cognition and Computation", "doi": null, "abstractUrl": "/magazine/cg/2014/04/mcg2014040008/13rRUwwslv3", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2015/03/06908006", "title": "Personal Visualization and Personal Visual Analytics", "doi": null, "abstractUrl": "/journal/tg/2015/03/06908006/13rRUyYBlgA", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wi/2018/7325/0/732500a342", "title": "Visual Analytics Interface for Time Series Data Based on Trajectory Manipulation", "doi": null, "abstractUrl": "/proceedings-article/wi/2018/732500a342/17D45WODasq", "parentPublication": { "id": "proceedings/wi/2018/7325/0", "title": "2018 IEEE/WIC/ACM International Conference on Web Intelligence (WI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/10/08423105", "title": "Commercial Visual Analytics Systems&#x2013;Advances in the Big Data Analytics Field", "doi": null, "abstractUrl": "/journal/tg/2019/10/08423105/1cYd7bZMLp6", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2020/9134/0/913400a368", "title": "A Characterization of Data Exchange between Visual Analytics Tools", "doi": null, "abstractUrl": "/proceedings-article/iv/2020/913400a368/1rSRaA2LJBK", "parentPublication": { "id": "proceedings/iv/2020/9134/0", "title": "2020 24th 
International Conference Information Visualisation (IV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icise/2020/2261/0/226100a064", "title": "A general evaluation method of university curriculum summative text based on optimized BERT model", "doi": null, "abstractUrl": "/proceedings-article/icise/2020/226100a064/1tnYewq31gk", "parentPublication": { "id": "proceedings/icise/2020/2261/0", "title": "2020 International Conference on Information Science and Education (ICISE-IE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "08807301", "articleId": "1cG6uY7sFEs", "__typename": "AdjacentArticleType" }, "next": { "fno": "08805428", "articleId": "1cG4IjitDr2", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "1i4paPSIrks", "name": "ttg202001-08805439s1.mp4", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202001-08805439s1.mp4", "extension": "mp4", "size": "4.37 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "12OmNzA6GUv", "title": "May", "year": "2019", "issueNum": "05", "idPrefix": "tg", "pubType": "journal", "volume": "25", "label": "May", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "17PYEkASbnU", "doi": "10.1109/TVCG.2019.2898798", "abstract": "Spatial perception in virtual environments has been a topic of intense research. Arguably, the majority of this work has focused on distance perception. However, orientation perception is also an important factor. In this paper, we systematically investigate allocentric orientation judgments in both real and virtual contexts over the course of four experiments. A pattern of sinusoidal judgment errors known to exist in 2D perspective displays is found to persist in immersive virtual environments. This pattern also manifests itself in a real world setting using two differing judgment methods. The findings suggest the presence of a radial anisotropy that persists across viewing contexts. Additionally, there is some evidence to suggest that observers have multiple strategies for processing orientations but further investigation is needed to fully describe this phenomenon. We also offer design suggestions for 3D user interfaces where users may perform orientation judgments.", "abstracts": [ { "abstractType": "Regular", "content": "Spatial perception in virtual environments has been a topic of intense research. Arguably, the majority of this work has focused on distance perception. However, orientation perception is also an important factor. In this paper, we systematically investigate allocentric orientation judgments in both real and virtual contexts over the course of four experiments. A pattern of sinusoidal judgment errors known to exist in 2D perspective displays is found to persist in immersive virtual environments. 
This pattern also manifests itself in a real world setting using two differing judgment methods. The findings suggest the presence of a radial anisotropy that persists across viewing contexts. Additionally, there is some evidence to suggest that observers have multiple strategies for processing orientations but further investigation is needed to fully describe this phenomenon. We also offer design suggestions for 3D user interfaces where users may perform orientation judgments.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Spatial perception in virtual environments has been a topic of intense research. Arguably, the majority of this work has focused on distance perception. However, orientation perception is also an important factor. In this paper, we systematically investigate allocentric orientation judgments in both real and virtual contexts over the course of four experiments. A pattern of sinusoidal judgment errors known to exist in 2D perspective displays is found to persist in immersive virtual environments. This pattern also manifests itself in a real world setting using two differing judgment methods. The findings suggest the presence of a radial anisotropy that persists across viewing contexts. Additionally, there is some evidence to suggest that observers have multiple strategies for processing orientations but further investigation is needed to fully describe this phenomenon. 
We also offer design suggestions for 3D user interfaces where users may perform orientation judgments.", "title": "Orientation Perception in Real and Virtual Environments", "normalizedTitle": "Orientation Perception in Real and Virtual Environments", "fno": "08642384", "hasPdf": true, "idPrefix": "tg", "keywords": [ "User Interfaces", "Virtual Reality", "Visual Perception", "Distance Perception", "Orientation Perception", "Allocentric Orientation Judgments", "Sinusoidal Judgment Errors", "2 D Perspective Displays", "Immersive Virtual Environments", "Spatial Perception", "3 D User Interfaces", "Virtual Environments", "Task Analysis", "Observers", "Anisotropic Magnetoresistance", "Visualization", "Gravity", "Legged Locomotion", "Virtual Environments", "Perception", "Spatial Orientation", "Visual Orientation" ], "authors": [ { "givenName": "J. Adam", "surname": "Jones", "fullName": "J. Adam Jones", "affiliation": "Department of Computer & Information ScienceHigh Fidelity Virtual Environments Lab (Hi5 Lab)University of Mississippi", "__typename": "ArticleAuthorType" }, { "givenName": "Jonathan E.", "surname": "Hopper", "fullName": "Jonathan E. Hopper", "affiliation": "Department of Computer & Information ScienceHigh Fidelity Virtual Environments Lab (Hi5 Lab)University of Mississippi", "__typename": "ArticleAuthorType" }, { "givenName": "Mark T.", "surname": "Bolas", "fullName": "Mark T. Bolas", "affiliation": "Microsoft", "__typename": "ArticleAuthorType" }, { "givenName": "David M.", "surname": "Krum", "fullName": "David M. 
Krum", "affiliation": "Institute for Creative Technologies - Mixed Reality LabUniversity of Southern California", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "05", "pubDate": "2019-05-01 00:00:00", "pubType": "trans", "pages": "2050-2060", "year": "2019", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/haptics/2008/2005/0/04479927", "title": "Force Amplitude Perception in Six Orthogonal Directions", "doi": null, "abstractUrl": "/proceedings-article/haptics/2008/04479927/12OmNAndiu9", "parentPublication": { "id": "proceedings/haptics/2008/2005/0", "title": "IEEE Haptics Symposium 2008", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2014/2871/0/06802054", "title": "Time perception during walking in virtual environments", "doi": null, "abstractUrl": "/proceedings-article/vr/2014/06802054/12OmNBpmDG4", "parentPublication": { "id": "proceedings/vr/2014/2871/0", "title": "2014 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2002/1492/0/14920275", "title": "Perceived Egocentric Distances in Real, Image-Based, and Traditional Virtual Environments", "doi": null, "abstractUrl": "/proceedings-article/vr/2002/14920275/12OmNwHhoQ2", "parentPublication": { "id": "proceedings/vr/2002/1492/0", "title": "Proceedings IEEE Virtual Reality 2002", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dui/2015/6886/0/07131756", "title": "Distance perception during cooperative virtual locomotion", "doi": null, "abstractUrl": "/proceedings-article/3dui/2015/07131756/12OmNy49sEA", "parentPublication": { "id": "proceedings/3dui/2015/6886/0", "title": "2015 IEEE Symposium on 3D User Interfaces (3DUI)", 
"__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2009/3943/0/04811007", "title": "Measurement Protocols for Medium-Field Distance Perception in Large-Screen Immersive Displays", "doi": null, "abstractUrl": "/proceedings-article/vr/2009/04811007/12OmNyeWdKg", "parentPublication": { "id": "proceedings/vr/2009/3943/0", "title": "2009 IEEE Virtual Reality Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2010/6237/0/05444791", "title": "Influence of tactile feedback and presence on egocentric distance perception in virtual environments", "doi": null, "abstractUrl": "/proceedings-article/vr/2010/05444791/12OmNyoAA64", "parentPublication": { "id": "proceedings/vr/2010/6237/0", "title": "2010 IEEE Virtual Reality Conference (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2013/04/ttg2013040701", "title": "Peripheral Stimulation and its Effect on Perceived Spatial Scale in Virtual Environments", "doi": null, "abstractUrl": "/journal/tg/2013/04/ttg2013040701/13rRUx0xPmZ", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/th/2011/04/tth2011040263", "title": "Multiple Factors Underlying Haptic Perception of Length and Orientation", "doi": null, "abstractUrl": "/journal/th/2011/04/tth2011040263/13rRUyeTVib", "parentPublication": { "id": "trans/th", "title": "IEEE Transactions on Haptics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08797826", "title": "Virtual Objects Look Farther on the Sides: The Anisotropy of Distance Perception in Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08797826/1cJ18Y9D9Di", "parentPublication": { "id": 
"proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2021/1298/0/129800a201", "title": "Manipulating Rotational Perception in Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2021/129800a201/1yfxMXu7XhK", "parentPublication": { "id": "proceedings/ismar-adjunct/2021/1298/0", "title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "08642347", "articleId": "17PYEjbrJk7", "__typename": "AdjacentArticleType" }, "next": { "fno": "08643571", "articleId": "18LF8zpSgUM", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNwGqBqr", "title": "Jan.", "year": "2017", "issueNum": "01", "idPrefix": "tp", "pubType": "journal", "volume": "39", "label": "Jan.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUwj7cql", "doi": "10.1109/TPAMI.2016.2622398", "abstract": "Presents the editor's view of the current state of this journal publication.", "abstracts": [ { "abstractType": "Regular", "content": "Presents the editor's view of the current state of this journal publication.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Presents the editor's view of the current state of this journal publication.", "title": "State of the Journal", "normalizedTitle": "State of the Journal", "fno": "07765164", "hasPdf": true, "idPrefix": "tp", "keywords": [], "authors": [ { "givenName": "David A.", "surname": "Forsyth", "fullName": "David A. Forsyth", "affiliation": null, "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": false, "showRecommendedArticles": true, "isOpenAccess": true, "issueNum": "01", "pubDate": "2017-01-01 00:00:00", "pubType": "trans", "pages": "1-2", "year": "2017", "issn": "0162-8828", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "trans/ts/2018/01/08249614", "title": "State of the Journal", "doi": null, "abstractUrl": "/journal/ts/2018/01/08249614/13rRUxC0SFJ", "parentPublication": { "id": "trans/ts", "title": "IEEE Transactions on Software Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/bd/2017/01/07865885", "title": "State of the Journal Editorial", "doi": null, "abstractUrl": "/journal/bd/2017/01/07865885/13rRUxC0SxQ", "parentPublication": { "id": "trans/bd", "title": "IEEE Transactions on Big Data", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"trans/tp/2014/01/ttp2014010001", "title": "Editorial: State of the Journal", "doi": null, "abstractUrl": "/journal/tp/2014/01/ttp2014010001/13rRUxYIMWo", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ta/2013/01/tta2013010001", "title": "Editorial: State of the Journal", "doi": null, "abstractUrl": "/journal/ta/2013/01/tta2013010001/13rRUyY28WG", "parentPublication": { "id": "trans/ta", "title": "IEEE Transactions on Affective Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/dc/2018/02/08627912", "title": "IEEE Journal on Exploratory Solid-State Computational Devices and Circuits", "doi": null, "abstractUrl": "/journal/dc/2018/02/08627912/17D45WKWnIj", "parentPublication": { "id": "trans/dc", "title": "IEEE Journal on Exploratory Solid-State Computational Devices and Circuits", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/bd/2019/01/08654019", "title": "State of the Journal", "doi": null, "abstractUrl": "/journal/bd/2019/01/08654019/180h18pgnbG", "parentPublication": { "id": "trans/bd", "title": "IEEE Transactions on Big Data", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/03/08974588", "title": "State of the Journal", "doi": null, "abstractUrl": "/journal/tg/2020/03/08974588/1gZh3n61QTC", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2020/02/08952831", "title": "State of the Journal Editorial", "doi": null, "abstractUrl": "/journal/tp/2020/02/08952831/1gqpWPrYFsA", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2021/04/09370089", "title": "State of the Journal Editorial", "doi": null, "abstractUrl": "/journal/tp/2021/04/09370089/1rHavOpA8ow", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2022/01/09639889", "title": "State of the Journal Editorial", "doi": null, "abstractUrl": "/journal/tp/2022/01/09639889/1z98pqw5KXC", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "07765201", "articleId": "13rRUwInvgu", "__typename": "AdjacentArticleType" }, "next": { "fno": "07765166", "articleId": "13rRUyYjK3X", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNC36tSm", "title": "Jan.", "year": "2018", "issueNum": "01", "idPrefix": "ts", "pubType": "journal", "volume": "44", "label": "Jan.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUxC0SFJ", "doi": "10.1109/TSE.2017.2778898", "abstract": "Presents the state of the journal for this issue of the publication.", "abstracts": [ { "abstractType": "Regular", "content": "Presents the state of the journal for this issue of the publication.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Presents the state of the journal for this issue of the publication.", "title": "State of the Journal", "normalizedTitle": "State of the Journal", "fno": "08249614", "hasPdf": true, "idPrefix": "ts", "keywords": [], "authors": [ { "givenName": "Matthew", "surname": "Dwyer", "fullName": "Matthew Dwyer", "affiliation": null, "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": false, "showRecommendedArticles": true, "isOpenAccess": true, "issueNum": "01", "pubDate": "2018-01-01 00:00:00", "pubType": "trans", "pages": "1-2", "year": "2018", "issn": "0098-5589", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "trans/dc/2016/01/07829494", "title": "IEEE Journal on Exploratory Solid-State Computational Devices and Circuits information for authors", "doi": null, "abstractUrl": "/journal/dc/2016/01/07829494/13rRUwcAqvO", "parentPublication": { "id": "trans/dc", "title": "IEEE Journal on Exploratory Solid-State Computational Devices and Circuits", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2017/01/07765164", "title": "State of the Journal", "doi": null, "abstractUrl": "/journal/tp/2017/01/07765164/13rRUwj7cql", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine 
Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/bd/2017/01/07865885", "title": "State of the Journal Editorial", "doi": null, "abstractUrl": "/journal/bd/2017/01/07865885/13rRUxC0SxQ", "parentPublication": { "id": "trans/bd", "title": "IEEE Transactions on Big Data", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2014/01/ttp2014010001", "title": "Editorial: State of the Journal", "doi": null, "abstractUrl": "/journal/tp/2014/01/ttp2014010001/13rRUxYIMWo", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ta/2013/01/tta2013010001", "title": "Editorial: State of the Journal", "doi": null, "abstractUrl": "/journal/ta/2013/01/tta2013010001/13rRUyY28WG", "parentPublication": { "id": "trans/ta", "title": "IEEE Transactions on Affective Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/dc/2018/02/08627912", "title": "IEEE Journal on Exploratory Solid-State Computational Devices and Circuits", "doi": null, "abstractUrl": "/journal/dc/2018/02/08627912/17D45WKWnIj", "parentPublication": { "id": "trans/dc", "title": "IEEE Journal on Exploratory Solid-State Computational Devices and Circuits", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/bd/2019/01/08654019", "title": "State of the Journal", "doi": null, "abstractUrl": "/journal/bd/2019/01/08654019/180h18pgnbG", "parentPublication": { "id": "trans/bd", "title": "IEEE Transactions on Big Data", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/03/08974588", "title": "State of the Journal", "doi": null, "abstractUrl": "/journal/tg/2020/03/08974588/1gZh3n61QTC", "parentPublication": { "id": "trans/tg", "title": 
"IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tc/2020/04/09032252", "title": "State of the Journal", "doi": null, "abstractUrl": "/journal/tc/2020/04/09032252/1i6VsXPOgJq", "parentPublication": { "id": "trans/tc", "title": "IEEE Transactions on Computers", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2021/04/09370089", "title": "State of the Journal Editorial", "doi": null, "abstractUrl": "/journal/tp/2021/04/09370089/1rHavOpA8ow", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": null, "next": { "fno": "08249710", "articleId": "13rRUygT7uv", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNviZlCU", "title": "March", "year": "2017", "issueNum": "01", "idPrefix": "bd", "pubType": "journal", "volume": "3", "label": "March", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUxC0SxQ", "doi": "10.1109/TBDATA.2017.2660258", "abstract": "Presents an editorial on the current state of the journal, its scope, and future areas of diretion.", "abstracts": [ { "abstractType": "Regular", "content": "Presents an editorial on the current state of the journal, its scope, and future areas of diretion.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Presents an editorial on the current state of the journal, its scope, and future areas of diretion.", "title": "State of the Journal Editorial", "normalizedTitle": "State of the Journal Editorial", "fno": "07865885", "hasPdf": true, "idPrefix": "bd", "keywords": [], "authors": [ { "givenName": "Qiang", "surname": "Yang", "fullName": "Qiang Yang", "affiliation": null, "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": false, "showRecommendedArticles": true, "isOpenAccess": true, "issueNum": "01", "pubDate": "2017-01-01 00:00:00", "pubType": "trans", "pages": "1", "year": "2017", "issn": "2332-7790", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "trans/td/2018/01/08173510", "title": "State of the Journal", "doi": null, "abstractUrl": "/journal/td/2018/01/08173510/13rRUIJuxv3", "parentPublication": { "id": "trans/td", "title": "IEEE Transactions on Parallel & Distributed Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/td/2014/01/ttd2014010001", "title": "State of the Journal", "doi": null, "abstractUrl": "/journal/td/2014/01/ttd2014010001/13rRUwd9CFL", "parentPublication": { "id": "trans/td", "title": "IEEE Transactions on Parallel & Distributed 
Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tk/2014/01/ttk2014010001", "title": "Editorial [State of the Transactions]", "doi": null, "abstractUrl": "/journal/tk/2014/01/ttk2014010001/13rRUx0PqpT", "parentPublication": { "id": "trans/tk", "title": "IEEE Transactions on Knowledge & Data Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2014/01/ttp2014010001", "title": "Editorial: State of the Journal", "doi": null, "abstractUrl": "/journal/tp/2014/01/ttp2014010001/13rRUxYIMWo", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ta/2013/01/tta2013010001", "title": "Editorial: State of the Journal", "doi": null, "abstractUrl": "/journal/ta/2013/01/tta2013010001/13rRUyY28WG", "parentPublication": { "id": "trans/ta", "title": "IEEE Transactions on Affective Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/bd/2019/01/08654019", "title": "State of the Journal", "doi": null, "abstractUrl": "/journal/bd/2019/01/08654019/180h18pgnbG", "parentPublication": { "id": "trans/bd", "title": "IEEE Transactions on Big Data", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/03/08974588", "title": "State of the Journal", "doi": null, "abstractUrl": "/journal/tg/2020/03/08974588/1gZh3n61QTC", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/si/2020/01/08945475", "title": "Editorial on the Opening of the New Editorial Year&#x2014;The State of the IEEE Transactions on Very Large Scale Integration (VLSI) Systems", "doi": null, "abstractUrl": 
"/journal/si/2020/01/08945475/1gbtZxspGUM", "parentPublication": { "id": "trans/si", "title": "IEEE Transactions on Very Large Scale Integration (VLSI) Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2020/02/08952831", "title": "State of the Journal Editorial", "doi": null, "abstractUrl": "/journal/tp/2020/02/08952831/1gqpWPrYFsA", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2021/04/09370089", "title": "State of the Journal Editorial", "doi": null, "abstractUrl": "/journal/tp/2021/04/09370089/1rHavOpA8ow", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": null, "next": { "fno": "07865883", "articleId": "13rRUx0xPNI", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNAXPyfr", "title": "Jan.-March", "year": "2013", "issueNum": "01", "idPrefix": "ta", "pubType": "journal", "volume": "4", "label": "Jan.-March", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUyY28WG", "doi": "10.1109/T-AFFC.2013.9", "abstract": "With this fourth year of the IEEE Transactions on Affective Computing (TAC), the field of affective computing is strong and vibrant. In 2013 we will mark the fifth International Conferences on Affective Computing in Geneva, Switzerland, which will be cochaired by our associate editor, Catherine Pelachaud. Over the last year, TAC published 43 articles over four issues, up from 26 articles in 2011. We expect to maintain this publication rate over the next year and focus on attracting high-quality articles. The journal continues to encourage interdisciplinary research and we've attracted articles from recognized names in both the computational and social sciences of affect. After these four years of growth, as Editor-in-Chief (EiC) I feel confident in stating that TAC is the premier journal for research on the topic of affective computing. The editorial board has remained steady over the last year, but to handle our increasing paper load we have added one editor focusing on human-robot interaction. I welcome Bilge Mutlu from the University of Wisconsin, Madison, who's bio and photo are provided. In the coming year, my primary goal continues to be to increase the visibility of the journal and for this I need your help. Please help me in spreading awareness of the journal.", "abstracts": [ { "abstractType": "Regular", "content": "With this fourth year of the IEEE Transactions on Affective Computing (TAC), the field of affective computing is strong and vibrant. 
In 2013 we will mark the fifth International Conferences on Affective Computing in Geneva, Switzerland, which will be cochaired by our associate editor, Catherine Pelachaud. Over the last year, TAC published 43 articles over four issues, up from 26 articles in 2011. We expect to maintain this publication rate over the next year and focus on attracting high-quality articles. The journal continues to encourage interdisciplinary research and we've attracted articles from recognized names in both the computational and social sciences of affect. After these four years of growth, as Editor-in-Chief (EiC) I feel confident in stating that TAC is the premier journal for research on the topic of affective computing. The editorial board has remained steady over the last year, but to handle our increasing paper load we have added one editor focusing on human-robot interaction. I welcome Bilge Mutlu from the University of Wisconsin, Madison, who's bio and photo are provided. In the coming year, my primary goal continues to be to increase the visibility of the journal and for this I need your help. Please help me in spreading awareness of the journal.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "With this fourth year of the IEEE Transactions on Affective Computing (TAC), the field of affective computing is strong and vibrant. In 2013 we will mark the fifth International Conferences on Affective Computing in Geneva, Switzerland, which will be cochaired by our associate editor, Catherine Pelachaud. Over the last year, TAC published 43 articles over four issues, up from 26 articles in 2011. We expect to maintain this publication rate over the next year and focus on attracting high-quality articles. The journal continues to encourage interdisciplinary research and we've attracted articles from recognized names in both the computational and social sciences of affect. 
After these four years of growth, as Editor-in-Chief (EiC) I feel confident in stating that TAC is the premier journal for research on the topic of affective computing. The editorial board has remained steady over the last year, but to handle our increasing paper load we have added one editor focusing on human-robot interaction. I welcome Bilge Mutlu from the University of Wisconsin, Madison, who's bio and photo are provided. In the coming year, my primary goal continues to be to increase the visibility of the journal and for this I need your help. Please help me in spreading awareness of the journal.", "title": "Editorial: State of the Journal", "normalizedTitle": "Editorial: State of the Journal", "fno": "tta2013010001", "hasPdf": true, "idPrefix": "ta", "keywords": [], "authors": [ { "givenName": "Jonathan", "surname": "Gratch", "fullName": "Jonathan Gratch", "affiliation": null, "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": false, "showRecommendedArticles": false, "isOpenAccess": true, "issueNum": "01", "pubDate": "2013-01-01 00:00:00", "pubType": "trans", "pages": "1", "year": "2013", "issn": "1949-3045", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [], "adjacentArticles": { "previous": null, "next": { "fno": "tta2013010002", "articleId": "13rRUwvT9f5", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNqJZgIx", "title": "Dec.", "year": "2018", "issueNum": "02", "idPrefix": "dc", "pubType": "journal", "volume": "4", "label": "Dec.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "17D45WKWnIj", "doi": "10.1109/JXCDC.2018.2868537", "abstract": "These instructions give guidelines for preparing papers for this publication. Presents information for authors publishing in this journal.", "abstracts": [ { "abstractType": "Regular", "content": "These instructions give guidelines for preparing papers for this publication. Presents information for authors publishing in this journal.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "These instructions give guidelines for preparing papers for this publication. Presents information for authors publishing in this journal.", "title": "IEEE Journal on Exploratory Solid-State Computational Devices and Circuits", "normalizedTitle": "IEEE Journal on Exploratory Solid-State Computational Devices and Circuits", "fno": "08627912", "hasPdf": true, "idPrefix": "dc", "keywords": [], "authors": [], "replicability": null, "showBuyMe": false, "showRecommendedArticles": false, "isOpenAccess": true, "issueNum": "02", "pubDate": "2018-01-01 00:00:00", "pubType": "trans", "pages": "C3-C3", "year": "2018", "issn": "2329-­9231", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [], "adjacentArticles": { "previous": { "fno": "08730455", "articleId": "1aAxeRL6P4s", "__typename": "AdjacentArticleType" }, "next": null, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNAlvHDL", "title": "March", "year": "2019", "issueNum": "01", "idPrefix": "bd", "pubType": "journal", "volume": "5", "label": "March", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "180h18pgnbG", "doi": "10.1109/TBDATA.2019.2895402", "abstract": "Presents the editor's review on the current state of the journal.", "abstracts": [ { "abstractType": "Regular", "content": "Presents the editor's review on the current state of the journal.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Presents the editor's review on the current state of the journal.", "title": "State of the Journal", "normalizedTitle": "State of the Journal", "fno": "08654019", "hasPdf": true, "idPrefix": "bd", "keywords": [], "authors": [ { "givenName": "Qiang", "surname": "Yang", "fullName": "Qiang Yang", "affiliation": null, "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": false, "showRecommendedArticles": true, "isOpenAccess": true, "issueNum": "01", "pubDate": "2019-01-01 00:00:00", "pubType": "trans", "pages": "1-1", "year": "2019", "issn": "2332-7790", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "trans/tp/2017/01/07765164", "title": "State of the Journal", "doi": null, "abstractUrl": "/journal/tp/2017/01/07765164/13rRUwj7cql", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2013/01/ttp2013010001", "title": "Farewell state of the journal", "doi": null, "abstractUrl": "/journal/tp/2013/01/ttp2013010001/13rRUx0xPjl", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { 
"id": "trans/ts/2018/01/08249614", "title": "State of the Journal", "doi": null, "abstractUrl": "/journal/ts/2018/01/08249614/13rRUxC0SFJ", "parentPublication": { "id": "trans/ts", "title": "IEEE Transactions on Software Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/bd/2017/01/07865885", "title": "State of the Journal Editorial", "doi": null, "abstractUrl": "/journal/bd/2017/01/07865885/13rRUxC0SxQ", "parentPublication": { "id": "trans/bd", "title": "IEEE Transactions on Big Data", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2014/01/ttp2014010001", "title": "Editorial: State of the Journal", "doi": null, "abstractUrl": "/journal/tp/2014/01/ttp2014010001/13rRUxYIMWo", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ta/2013/01/tta2013010001", "title": "Editorial: State of the Journal", "doi": null, "abstractUrl": "/journal/ta/2013/01/tta2013010001/13rRUyY28WG", "parentPublication": { "id": "trans/ta", "title": "IEEE Transactions on Affective Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/dc/2018/02/08627912", "title": "IEEE Journal on Exploratory Solid-State Computational Devices and Circuits", "doi": null, "abstractUrl": "/journal/dc/2018/02/08627912/17D45WKWnIj", "parentPublication": { "id": "trans/dc", "title": "IEEE Journal on Exploratory Solid-State Computational Devices and Circuits", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/03/08974588", "title": "State of the Journal", "doi": null, "abstractUrl": "/journal/tg/2020/03/08974588/1gZh3n61QTC", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iiai-aai/2019/2627/0/262700a395", "title": "Possibility and Prevention of Inappropriate Data Manipulation in Polar Data Journal", "doi": null, "abstractUrl": "/proceedings-article/iiai-aai/2019/262700a395/1hrLHtI6fDy", "parentPublication": { "id": "proceedings/iiai-aai/2019/2627/0", "title": "2019 8th International Congress on Advanced Applied Informatics (IIAI-AAI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2021/04/09370089", "title": "State of the Journal Editorial", "doi": null, "abstractUrl": "/journal/tp/2021/04/09370089/1rHavOpA8ow", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": null, "next": { "fno": "08654018", "articleId": "180h18bDCWB", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "1rHavp5Sn5K", "title": "April", "year": "2021", "issueNum": "04", "idPrefix": "tp", "pubType": "journal", "volume": "43", "label": "April", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1rHavOpA8ow", "doi": "10.1109/TPAMI.2020.3047719", "abstract": "Presents the state of the journal review for this issue of the publication.", "abstracts": [ { "abstractType": "Regular", "content": "Presents the state of the journal review for this issue of the publication.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Presents the state of the journal review for this issue of the publication.", "title": "State of the Journal Editorial", "normalizedTitle": "State of the Journal Editorial", "fno": "09370089", "hasPdf": true, "idPrefix": "tp", "keywords": [], "authors": [ { "givenName": "Sven", "surname": "Dickinson", "fullName": "Sven Dickinson", "affiliation": null, "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": false, "showRecommendedArticles": true, "isOpenAccess": true, "issueNum": "04", "pubDate": "2021-04-01 00:00:00", "pubType": "trans", "pages": "1119-1128", "year": "2021", "issn": "0162-8828", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "trans/tp/2017/01/07765164", "title": "State of the Journal", "doi": null, "abstractUrl": "/journal/tp/2017/01/07765164/13rRUwj7cql", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ts/2018/01/08249614", "title": "State of the Journal", "doi": null, "abstractUrl": "/journal/ts/2018/01/08249614/13rRUxC0SFJ", "parentPublication": { "id": "trans/ts", "title": "IEEE Transactions on Software Engineering", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "trans/bd/2017/01/07865885", "title": "State of the Journal Editorial", "doi": null, "abstractUrl": "/journal/bd/2017/01/07865885/13rRUxC0SxQ", "parentPublication": { "id": "trans/bd", "title": "IEEE Transactions on Big Data", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2014/01/ttp2014010001", "title": "Editorial: State of the Journal", "doi": null, "abstractUrl": "/journal/tp/2014/01/ttp2014010001/13rRUxYIMWo", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ta/2013/01/tta2013010001", "title": "Editorial: State of the Journal", "doi": null, "abstractUrl": "/journal/ta/2013/01/tta2013010001/13rRUyY28WG", "parentPublication": { "id": "trans/ta", "title": "IEEE Transactions on Affective Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tc/2015/01/06980169", "title": "Farewell State of the Journal Editorial", "doi": null, "abstractUrl": "/journal/tc/2015/01/06980169/13rRUytF48K", "parentPublication": { "id": "trans/tc", "title": "IEEE Transactions on Computers", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/bd/2019/01/08654019", "title": "State of the Journal", "doi": null, "abstractUrl": "/journal/bd/2019/01/08654019/180h18pgnbG", "parentPublication": { "id": "trans/bd", "title": "IEEE Transactions on Big Data", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/03/08974588", "title": "State of the Journal", "doi": null, "abstractUrl": "/journal/tg/2020/03/08974588/1gZh3n61QTC", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"trans/tp/2020/02/08952831", "title": "State of the Journal Editorial", "doi": null, "abstractUrl": "/journal/tp/2020/02/08952831/1gqpWPrYFsA", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tc/2020/04/09032252", "title": "State of the Journal", "doi": null, "abstractUrl": "/journal/tc/2020/04/09032252/1i6VsXPOgJq", "parentPublication": { "id": "trans/tc", "title": "IEEE Transactions on Computers", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "09370018", "articleId": "1rHaEvbWBCU", "__typename": "AdjacentArticleType" }, "next": { "fno": "08877866", "articleId": "1epRQKKzh0Q", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "1pmLUKKHZAc", "title": "Jan.", "year": "2021", "issueNum": "01", "idPrefix": "tk", "pubType": "journal", "volume": "33", "label": "Jan.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1bemgLe3vDG", "doi": "10.1109/TKDE.2019.2924894", "abstract": "Matrix factorization (MF) has earned great success on recommender systems. However, the common-used regression-based MF not only is sensitive to outliers but also unable to guarantee that the predicted values are in line with the user preference orders, which is the basis of common measures of recommender systems, e.g., nDCG. To overcome the aforementioned drawbacks, we propose a framework for personalized ranking of Poisson factorization that utilizes learning-to-rank based posteriori instead of the classical regression-based ones. Owing to the combination, the proposed framework not only preserves user preference but also performs well on a sparse matrix. Since the posteriori that combines learning to rank and Poisson factorization does not follow the conjugate prior relationship, we estimate variational parameters approximately and propose two optimization approaches based on variational inference. As long as the used learning-to-rank model has the 1st and 2nd order partial derivatives, by exploiting our framework, the proposed optimizing algorithm can maximize the posteriori whichever the used learning-to-rank model is. In the experiment, we show that the proposed framework outperforms the state-of-the-art methods and achieves promising results on consuming log and rating datasets for multiple recommendation tasks.", "abstracts": [ { "abstractType": "Regular", "content": "Matrix factorization (MF) has earned great success on recommender systems. 
However, the common-used regression-based MF not only is sensitive to outliers but also unable to guarantee that the predicted values are in line with the user preference orders, which is the basis of common measures of recommender systems, e.g., nDCG. To overcome the aforementioned drawbacks, we propose a framework for personalized ranking of Poisson factorization that utilizes learning-to-rank based posteriori instead of the classical regression-based ones. Owing to the combination, the proposed framework not only preserves user preference but also performs well on a sparse matrix. Since the posteriori that combines learning to rank and Poisson factorization does not follow the conjugate prior relationship, we estimate variational parameters approximately and propose two optimization approaches based on variational inference. As long as the used learning-to-rank model has the 1st and 2nd order partial derivatives, by exploiting our framework, the proposed optimizing algorithm can maximize the posteriori whichever the used learning-to-rank model is. In the experiment, we show that the proposed framework outperforms the state-of-the-art methods and achieves promising results on consuming log and rating datasets for multiple recommendation tasks.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Matrix factorization (MF) has earned great success on recommender systems. However, the common-used regression-based MF not only is sensitive to outliers but also unable to guarantee that the predicted values are in line with the user preference orders, which is the basis of common measures of recommender systems, e.g., nDCG. To overcome the aforementioned drawbacks, we propose a framework for personalized ranking of Poisson factorization that utilizes learning-to-rank based posteriori instead of the classical regression-based ones. Owing to the combination, the proposed framework not only preserves user preference but also performs well on a sparse matrix. 
Since the posteriori that combines learning to rank and Poisson factorization does not follow the conjugate prior relationship, we estimate variational parameters approximately and propose two optimization approaches based on variational inference. As long as the used learning-to-rank model has the 1st and 2nd order partial derivatives, by exploiting our framework, the proposed optimizing algorithm can maximize the posteriori whichever the used learning-to-rank model is. In the experiment, we show that the proposed framework outperforms the state-of-the-art methods and achieves promising results on consuming log and rating datasets for multiple recommendation tasks.", "title": "The Framework of Personalized Ranking on Poisson Factorization", "normalizedTitle": "The Framework of Personalized Ranking on Poisson Factorization", "fno": "08745536", "hasPdf": true, "idPrefix": "tk", "keywords": [ "Learning Artificial Intelligence", "Mathematics Computing", "Matrix Decomposition", "Optimisation", "Recommender Systems", "Regression Analysis", "Search Engines", "Stochastic Processes", "Variational Techniques", "Recommender Systems", "User Preference Orders", "Personalized Ranking", "Poisson Factorization", "Sparse Matrix", "Learning To Rank Model", "Multiple Recommendation Tasks", "Matrix Factorization", "Regression Based MF", "Variational Parameter Estimation", "Recommender Systems", "Sparse Matrices", "Linear Programming", "Estimation", "Logistics", "Optimization", "Approximation Algorithms", "Poisson Factorization", "Learning To Rank", "Recommender Systems" ], "authors": [ { "givenName": "Li-Yen", "surname": "Kuo", "fullName": "Li-Yen Kuo", "affiliation": "Department of Electrical Engineering, National Taiwan University, Taipei, Taiwan", "__typename": "ArticleAuthorType" }, { "givenName": "Chung-Kuang", "surname": "Chou", "fullName": "Chung-Kuang Chou", "affiliation": "Department of Electrical Engineering, National Taiwan University, Taipei, Taiwan", "__typename": 
"ArticleAuthorType" }, { "givenName": "Ming-Syan", "surname": "Chen", "fullName": "Ming-Syan Chen", "affiliation": "Department of Electrical Engineering, National Taiwan University, Taipei, Taiwan", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "01", "pubDate": "2021-01-01 00:00:00", "pubType": "trans", "pages": "287-301", "year": "2021", "issn": "1041-4347", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/icmla/2016/6167/0/07838224", "title": "A New Approach of Matrix Factorization and Its Application in Recommender Systems", "doi": null, "abstractUrl": "/proceedings-article/icmla/2016/07838224/12OmNqH9hgR", "parentPublication": { "id": "proceedings/icmla/2016/6167/0", "title": "2016 15th IEEE International Conference on Machine Learning and Applications (ICMLA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/big-data/2016/9005/0/07840707", "title": "Incremental learning for matrix factorization in recommender systems", "doi": null, "abstractUrl": "/proceedings-article/big-data/2016/07840707/12OmNvwkulb", "parentPublication": { "id": "proceedings/big-data/2016/9005/0", "title": "2016 IEEE International Conference on Big Data (Big Data)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdm/2015/9504/0/9504a967", "title": "Nonparametric Poisson Factorization Machine", "doi": null, "abstractUrl": "/proceedings-article/icdm/2015/9504a967/12OmNwOnn1M", "parentPublication": { "id": "proceedings/icdm/2015/9504/0", "title": "2015 IEEE International Conference on Data Mining (ICDM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cis/2017/4822/0/482201a097", "title": "Local Probabilistic Matrix Factorization for Personal Recommendation", 
"doi": null, "abstractUrl": "/proceedings-article/cis/2017/482201a097/12OmNxWuis5", "parentPublication": { "id": "proceedings/cis/2017/4822/0", "title": "2017 13th International Conference on Computational Intelligence and Security (CIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iiai-aai/2016/8985/0/8985a103", "title": "An Improvement of Matrix Factorization with Bound Constraints for Recommender Systems", "doi": null, "abstractUrl": "/proceedings-article/iiai-aai/2016/8985a103/12OmNzayN0i", "parentPublication": { "id": "proceedings/iiai-aai/2016/8985/0", "title": "2016 5th IIAI International Congress on Advanced Applied Informatics (IIAI-AAI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/td/2018/07/07955082", "title": "MSGD: A Novel Matrix Factorization Approach for Large-Scale Collaborative Filtering Recommender Systems on GPUs", "doi": null, "abstractUrl": "/journal/td/2018/07/07955082/13rRUIIVlk6", "parentPublication": { "id": "trans/td", "title": "IEEE Transactions on Parallel & Distributed Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tk/2017/08/07888519", "title": "Semiring Rank Matrix Factorization", "doi": null, "abstractUrl": "/journal/tk/2017/08/07888519/13rRUxBrGhp", "parentPublication": { "id": "trans/tk", "title": "IEEE Transactions on Knowledge & Data Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdm/2018/9159/0/08595009", "title": "Variational Bayesian Inference for Robust Streaming Tensor Factorization and Completion", "doi": null, "abstractUrl": "/proceedings-article/icdm/2018/08595009/17D45VTRoEE", "parentPublication": { "id": "proceedings/icdm/2018/9159/0", "title": "2018 IEEE International Conference on Data Mining (ICDM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/ispa-iucc/2017/3790/0/379001b144", "title": "CuSNMF: A Sparse Non-Negative Matrix Factorization Approach for Large-Scale Collaborative Filtering Recommender Systems on Multi-GPU", "doi": null, "abstractUrl": "/proceedings-article/ispa-iucc/2017/379001b144/17D45VUZMZt", "parentPublication": { "id": "proceedings/ispa-iucc/2017/3790/0", "title": "2017 IEEE International Symposium on Parallel and Distributed Processing with Applications and 2017 IEEE International Conference on Ubiquitous Computing and Communications (ISPA/IUCC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ispa-iucc/2017/3790/0/379001a511", "title": "An Efficient Parallelization Approach for Large-Scale Sparse Non-Negative Matrix Factorization Using Kullback-Leibler Divergence on Multi-GPU", "doi": null, "abstractUrl": "/proceedings-article/ispa-iucc/2017/379001a511/17D45XH89od", "parentPublication": { "id": "proceedings/ispa-iucc/2017/3790/0", "title": "2017 IEEE International Symposium on Parallel and Distributed Processing with Applications and 2017 IEEE International Conference on Ubiquitous Computing and Communications (ISPA/IUCC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "08742624", "articleId": "1b154W6R2oM", "__typename": "AdjacentArticleType" }, "next": { "fno": "08742537", "articleId": "1b154sQKE6s", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNz5apxc", "title": "July", "year": "2017", "issueNum": "07", "idPrefix": "tg", "pubType": "journal", "volume": "23", "label": "July", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUxcbnCv", "doi": "10.1109/TVCG.2016.2574705", "abstract": "We introduce an interactive user-driven method to reconstruct high-relief 3D geometry from a single photo. Particularly, we consider two novel but challenging reconstruction issues: i) common non-rigid objects whose shapes are organic rather than polyhedral/symmetric, and ii) double-sided structures, where front and back sides of some curvy object parts are revealed simultaneously on image. To address these issues, we develop a three-stage computational pipeline. First, we construct a 2.5D model from the input image by user-driven segmentation, automatic layering, and region completion, handling three common types of occlusion. Second, users can interactively mark-up slope and curvature cues on the image to guide our constrained optimization model to inflate and lift up the image layers. We provide real-time preview of the inflated geometry to allow interactive editing. Third, we stitch and optimize the inflated layers to produce a high-relief 3D model. Compared to previous work, we can generate high-relief geometry with large viewing angles, handle complex organic objects with multiple occluded regions and varying shape profiles, and reconstruct objects with double-sided structures. Lastly, we demonstrate the applicability of our method on a wide variety of input images with human, animals, flowers, etc.", "abstracts": [ { "abstractType": "Regular", "content": "We introduce an interactive user-driven method to reconstruct high-relief 3D geometry from a single photo. 
Particularly, we consider two novel but challenging reconstruction issues: i) common non-rigid objects whose shapes are organic rather than polyhedral/symmetric, and ii) double-sided structures, where front and back sides of some curvy object parts are revealed simultaneously on image. To address these issues, we develop a three-stage computational pipeline. First, we construct a 2.5D model from the input image by user-driven segmentation, automatic layering, and region completion, handling three common types of occlusion. Second, users can interactively mark-up slope and curvature cues on the image to guide our constrained optimization model to inflate and lift up the image layers. We provide real-time preview of the inflated geometry to allow interactive editing. Third, we stitch and optimize the inflated layers to produce a high-relief 3D model. Compared to previous work, we can generate high-relief geometry with large viewing angles, handle complex organic objects with multiple occluded regions and varying shape profiles, and reconstruct objects with double-sided structures. Lastly, we demonstrate the applicability of our method on a wide variety of input images with human, animals, flowers, etc.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We introduce an interactive user-driven method to reconstruct high-relief 3D geometry from a single photo. Particularly, we consider two novel but challenging reconstruction issues: i) common non-rigid objects whose shapes are organic rather than polyhedral/symmetric, and ii) double-sided structures, where front and back sides of some curvy object parts are revealed simultaneously on image. To address these issues, we develop a three-stage computational pipeline. First, we construct a 2.5D model from the input image by user-driven segmentation, automatic layering, and region completion, handling three common types of occlusion. 
Second, users can interactively mark-up slope and curvature cues on the image to guide our constrained optimization model to inflate and lift up the image layers. We provide real-time preview of the inflated geometry to allow interactive editing. Third, we stitch and optimize the inflated layers to produce a high-relief 3D model. Compared to previous work, we can generate high-relief geometry with large viewing angles, handle complex organic objects with multiple occluded regions and varying shape profiles, and reconstruct objects with double-sided structures. Lastly, we demonstrate the applicability of our method on a wide variety of input images with human, animals, flowers, etc.", "title": "Interactive High-Relief Reconstruction for Organic and Double-Sided Objects from a Photo", "normalizedTitle": "Interactive High-Relief Reconstruction for Organic and Double-Sided Objects from a Photo", "fno": "07482721", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Three Dimensional Displays", "Image Reconstruction", "Geometry", "Shape", "Solid Modeling", "Image Segmentation", "Surface Reconstruction", "Reconstruction", "High Relief", "Lenticular Posters", "Single Image", "Folded", "Double Sided", "Object Modeling", "Depth Cues", "Completion", "Inflation" ], "authors": [ { "givenName": "Chih-Kuo", "surname": "Yeh", "fullName": "Chih-Kuo Yeh", "affiliation": "Department of Computer Science and Information Engineering, National Cheng-Kung University, Tainan City, Taiwan, R.O.C", "__typename": "ArticleAuthorType" }, { "givenName": "Shi-Yang", "surname": "Huang", "fullName": "Shi-Yang Huang", "affiliation": "Department of Computer Science and Information Engineering, National Cheng-Kung University, Tainan City, Taiwan, R.O.C", "__typename": "ArticleAuthorType" }, { "givenName": "Pradeep Kumar", "surname": "Jayaraman", "fullName": "Pradeep Kumar Jayaraman", "affiliation": "School of Computer Engineering, Nanyang Technological University, Singapore, 639798, Singapore", 
"__typename": "ArticleAuthorType" }, { "givenName": "Chi-Wing", "surname": "Fu", "fullName": "Chi-Wing Fu", "affiliation": "Department of Computer Science and Engineering, The Chinese University of Hong Kong, Hong Kong", "__typename": "ArticleAuthorType" }, { "givenName": "Tong-Yee", "surname": "Lee", "fullName": "Tong-Yee Lee", "affiliation": "Department of Computer Science and Information Engineering, National Cheng-Kung University, Tainan City, Taiwan, R.O.C", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "07", "pubDate": "2017-07-01 00:00:00", "pubType": "trans", "pages": "1796-1808", "year": "2017", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/pg/1997/8028/0/80280091", "title": "Modification of n-sided patches based on variation of blending functions", "doi": null, "abstractUrl": "/proceedings-article/pg/1997/80280091/12OmNAsTgQO", "parentPublication": { "id": "proceedings/pg/1997/8028/0", "title": "Computer Graphics and Applications, Pacific Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2014/7000/1/7000a440", "title": "Merge2-3D: Combining Multiple Normal Maps with 3D Surfaces", "doi": null, "abstractUrl": "/proceedings-article/3dv/2014/7000a440/12OmNx8Ouv7", "parentPublication": { "id": "proceedings/3dv/2014/7000/2", "title": "2014 2nd International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmtma/2016/2312/0/2312a022", "title": "A Point Cloud Model Based Image Relief Effect Design", "doi": null, "abstractUrl": "/proceedings-article/icmtma/2016/2312a022/12OmNyNQSQ4", "parentPublication": { "id": "proceedings/icmtma/2016/2312/0", "title": "2016 Eighth International Conference on Measuring 
Technology and Mechatronics Automation (ICMTMA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sibgrapi/2015/7962/0/7962a001", "title": "Meta-Relief Texture Mapping with Dynamic Texture-Space Ambient Occlusion", "doi": null, "abstractUrl": "/proceedings-article/sibgrapi/2015/7962a001/12OmNyp9MiX", "parentPublication": { "id": "proceedings/sibgrapi/2015/7962/0", "title": "2015 28th SIBGRAPI Conference on Graphics, Patterns and Images (SIBGRAPI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2013/02/ttg2013020225", "title": "Double-Sided 2.5D Graphics", "doi": null, "abstractUrl": "/journal/tg/2013/02/ttg2013020225/13rRUEgs2M0", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2003/03/mcg2003030038", "title": "Generating Organic Textures with Controlled Anisotropy and Directionality", "doi": null, "abstractUrl": "/magazine/cg/2003/03/mcg2003030038/13rRUxCitLE", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/08/08611145", "title": "Portrait Relief Modeling from a Single Image", "doi": null, "abstractUrl": "/journal/tg/2020/08/08611145/17D45XDIXSX", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/04/08322258", "title": "Bas-Relief Modeling from Normal Layers", "doi": null, "abstractUrl": "/journal/tg/2019/04/08322258/17YCN5E6cAE", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "trans/tg/5555/01/09852330", "title": "Neural Modeling of Portrait Bas-relief from a Single Photograph", "doi": null, "abstractUrl": "/journal/tg/5555/01/09852330/1FFHdt1RWHC", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/12/09468903", "title": "Human Bas-Relief Generation From a Single Photograph", "doi": null, "abstractUrl": "/journal/tg/2022/12/09468903/1uR9KNPeety", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "07471499", "articleId": "13rRUNvgz9V", "__typename": "AdjacentArticleType" }, "next": { "fno": "07451283", "articleId": "13rRUxC0Sw0", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "17ShDTXWRXt", "name": "ttg201707-07482721s1.zip", "location": "https://www.computer.org/csdl/api/v1/extra/ttg201707-07482721s1.zip", "extension": "zip", "size": "19.5 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "12OmNBhpS5I", "title": "Nov.", "year": "2013", "issueNum": "11", "idPrefix": "tp", "pubType": "journal", "volume": "35", "label": "Nov.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUy3xY3N", "doi": "10.1109/TPAMI.2013.87", "abstract": "Geometric 3D reasoning at the level of objects has received renewed attention recently in the context of visual scene understanding. The level of geometric detail, however, is typically limited to qualitative representations or coarse boxes. This is linked to the fact that today's object class detectors are tuned toward robust 2D matching rather than accurate 3D geometry, encouraged by bounding-box-based benchmarks such as Pascal VOC. In this paper, we revisit ideas from the early days of computer vision, namely, detailed, 3D geometric object class representations for recognition. These representations can recover geometrically far more accurate object hypotheses than just bounding boxes, including continuous estimates of object pose and 3D wireframes with relative 3D positions of object parts. In combination with robust techniques for shape description and inference, we outperform state-of-the-art results in monocular 3D pose estimation. In a series of experiments, we analyze our approach in detail and demonstrate novel applications enabled by such an object class representation, such as fine-grained categorization of cars and bicycles, according to their 3D geometry, and ultrawide baseline matching.", "abstracts": [ { "abstractType": "Regular", "content": "Geometric 3D reasoning at the level of objects has received renewed attention recently in the context of visual scene understanding. The level of geometric detail, however, is typically limited to qualitative representations or coarse boxes. 
This is linked to the fact that today's object class detectors are tuned toward robust 2D matching rather than accurate 3D geometry, encouraged by bounding-box-based benchmarks such as Pascal VOC. In this paper, we revisit ideas from the early days of computer vision, namely, detailed, 3D geometric object class representations for recognition. These representations can recover geometrically far more accurate object hypotheses than just bounding boxes, including continuous estimates of object pose and 3D wireframes with relative 3D positions of object parts. In combination with robust techniques for shape description and inference, we outperform state-of-the-art results in monocular 3D pose estimation. In a series of experiments, we analyze our approach in detail and demonstrate novel applications enabled by such an object class representation, such as fine-grained categorization of cars and bicycles, according to their 3D geometry, and ultrawide baseline matching.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Geometric 3D reasoning at the level of objects has received renewed attention recently in the context of visual scene understanding. The level of geometric detail, however, is typically limited to qualitative representations or coarse boxes. This is linked to the fact that today's object class detectors are tuned toward robust 2D matching rather than accurate 3D geometry, encouraged by bounding-box-based benchmarks such as Pascal VOC. In this paper, we revisit ideas from the early days of computer vision, namely, detailed, 3D geometric object class representations for recognition. These representations can recover geometrically far more accurate object hypotheses than just bounding boxes, including continuous estimates of object pose and 3D wireframes with relative 3D positions of object parts. In combination with robust techniques for shape description and inference, we outperform state-of-the-art results in monocular 3D pose estimation. 
In a series of experiments, we analyze our approach in detail and demonstrate novel applications enabled by such an object class representation, such as fine-grained categorization of cars and bicycles, according to their 3D geometry, and ultrawide baseline matching.", "title": "Detailed 3D Representations for Object Recognition and Modeling", "normalizedTitle": "Detailed 3D Representations for Object Recognition and Modeling", "fno": "ttp2013112608", "hasPdf": true, "idPrefix": "tp", "keywords": [ "Three Dimensional Displays", "Solid Modeling", "Geometry", "Shape", "Computational Modeling", "Detectors", "Design Automation", "Ultrawide Baseline Matching", "3 D Representation", "Recognition", "Single Image 3 D Reconstruction", "Scene Understanding" ], "authors": [ { "givenName": "M. Zeeshan", "surname": "Zia", "fullName": "M. Zeeshan Zia", "affiliation": "Photogrammetry & Remote Sensing Lab., ETH Zurich, Zurich, Switzerland", "__typename": "ArticleAuthorType" }, { "givenName": "M.", "surname": "Stark", "fullName": "M. Stark", "affiliation": "Dept. of Comput. Sci., Stanford Univ., Stanford, CA, USA", "__typename": "ArticleAuthorType" }, { "givenName": "B.", "surname": "Schiele", "fullName": "B. Schiele", "affiliation": "Comput. Vision & Multimodal Comput. Lab., Max-Planck-Inst. fur Inf., Saarbrucken, Germany", "__typename": "ArticleAuthorType" }, { "givenName": "K.", "surname": "Schindler", "fullName": "K. 
Schindler", "affiliation": "Photogrammetry & Remote Sensing Lab., ETH Zurich, Zurich, Switzerland", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "11", "pubDate": "2013-11-01 00:00:00", "pubType": "trans", "pages": "2608-2623", "year": "2013", "issn": "0162-8828", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/iccvw/2011/0063/0/06130294", "title": "Revisiting 3D geometric models for accurate object shape and pose", "doi": null, "abstractUrl": "/proceedings-article/iccvw/2011/06130294/12OmNAOsMKB", "parentPublication": { "id": "proceedings/iccvw/2011/0063/0", "title": "2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccvw/2013/3022/0/3022a554", "title": "3D Object Representations for Fine-Grained Categorization", "doi": null, "abstractUrl": "/proceedings-article/iccvw/2013/3022a554/12OmNAgGwgs", "parentPublication": { "id": "proceedings/iccvw/2013/3022/0", "title": "2013 IEEE International Conference on Computer Vision Workshops (ICCVW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2015/6759/0/07301358", "title": "3D object class detection in the wild", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2015/07301358/12OmNAndiqi", "parentPublication": { "id": "proceedings/cvprw/2015/6759/0", "title": "2015 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2013/4989/0/4989d326", "title": "Explicit Occlusion Modeling for 3D Object Class Representations", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2013/4989d326/12OmNBlofQz", "parentPublication": { 
"id": "proceedings/cvpr/2013/4989/0", "title": "2013 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2017/1032/0/1032e632", "title": "2D-Driven 3D Object Detection in RGB-D Images", "doi": null, "abstractUrl": "/proceedings-article/iccv/2017/1032e632/12OmNzZEAoV", "parentPublication": { "id": "proceedings/iccv/2017/1032/0", "title": "2017 IEEE International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2014/5118/0/5118d678", "title": "Are Cars Just 3D Boxes? Jointly Estimating the 3D Shape of Multiple Objects", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2014/5118d678/12OmNzlUKmH", "parentPublication": { "id": "proceedings/cvpr/2014/5118/0", "title": "2014 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2015/11/07053926", "title": "Multi-View and 3D Deformable Part Models", "doi": null, "abstractUrl": "/journal/tp/2015/11/07053926/13rRUxZ0o2N", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2021/2812/0/281200d133", "title": "LIGA-Stereo: Learning LiDAR Geometry Aware Representations for Stereo-based 3D Detector", "doi": null, "abstractUrl": "/proceedings-article/iccv/2021/281200d133/1BmFAZXbK0g", "parentPublication": { "id": "proceedings/iccv/2021/2812/0", "title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/irc/2022/7260/0/726000a190", "title": "PrimitivePose: 3D Bounding Box Prediction of Unseen Objects via Synthetic Geometric 
Primitives", "doi": null, "abstractUrl": "/proceedings-article/irc/2022/726000a190/1KckgZavSWk", "parentPublication": { "id": "proceedings/irc/2022/7260/0", "title": "2022 Sixth IEEE International Conference on Robotic Computing (IRC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2021/4899/0/489900c849", "title": "Accurate 3D Object Detection using Energy-Based Models", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2021/489900c849/1yJYf81F69O", "parentPublication": { "id": "proceedings/cvprw/2021/4899/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "ttp2013112592", "articleId": "13rRUNvgzjA", "__typename": "AdjacentArticleType" }, "next": { "fno": "ttp2013112624", "articleId": "13rRUILc8gk", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNANBZkf", "title": "January/February", "year": "2006", "issueNum": "01", "idPrefix": "cg", "pubType": "magazine", "volume": "26", "label": "January/February", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUyhaIj5", "doi": "10.1109/MCG.2006.13", "abstract": "This articles presents an interface for quick and intuitive development of arbitrary, but physically correct, bidirectional reflectance distribution functions, or BRDFs. The interface, referred to as BRDF-Shop, gives artists the ability to create a BRDF through positioning and manipulating highlights on a spherical canvas. The authors develop a novel mapping between painted highlights and specular lobes of an extended Ward BRDF model. The implementation of BRDF Shop uses programmable graphics hardware to provide a real-time visualization of the material on a complex object in environment lighting.", "abstracts": [ { "abstractType": "Regular", "content": "This articles presents an interface for quick and intuitive development of arbitrary, but physically correct, bidirectional reflectance distribution functions, or BRDFs. The interface, referred to as BRDF-Shop, gives artists the ability to create a BRDF through positioning and manipulating highlights on a spherical canvas. The authors develop a novel mapping between painted highlights and specular lobes of an extended Ward BRDF model. The implementation of BRDF Shop uses programmable graphics hardware to provide a real-time visualization of the material on a complex object in environment lighting.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "This articles presents an interface for quick and intuitive development of arbitrary, but physically correct, bidirectional reflectance distribution functions, or BRDFs. 
The interface, referred to as BRDF-Shop, gives artists the ability to create a BRDF through positioning and manipulating highlights on a spherical canvas. The authors develop a novel mapping between painted highlights and specular lobes of an extended Ward BRDF model. The implementation of BRDF Shop uses programmable graphics hardware to provide a real-time visualization of the material on a complex object in environment lighting.", "title": "BRDF-Shop: Creating Physically Correct Bidirectional Reflectance Distribution Functions", "normalizedTitle": "BRDF-Shop: Creating Physically Correct Bidirectional Reflectance Distribution Functions", "fno": "mcg2006010030", "hasPdf": true, "idPrefix": "cg", "keywords": [ "BRDF", "Materials Editing", "Human Computer Interfaces", "Painting", "Ward BRDF Model", "Modeling Interfaces", "Physically Based Reflection Models" ], "authors": [ { "givenName": "Mark", "surname": "Colbert", "fullName": "Mark Colbert", "affiliation": "University of Central Florida", "__typename": "ArticleAuthorType" }, { "givenName": "Sumanta", "surname": "Pattanaik", "fullName": "Sumanta Pattanaik", "affiliation": "University of Central Florida", "__typename": "ArticleAuthorType" }, { "givenName": "Jaroslav", "surname": "Křivánek", "fullName": "Jaroslav Křivánek", "affiliation": "University of Central Florida", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "01", "pubDate": "2006-01-01 00:00:00", "pubType": "mags", "pages": "30-36", "year": "2006", "issn": "0272-1716", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/pacific-graphics/2010/4205/0/4205a054", "title": "Thread-Based BRDF Rendering on GPU", "doi": null, "abstractUrl": "/proceedings-article/pacific-graphics/2010/4205a054/12OmNweBUCO", "parentPublication": { "id": "proceedings/pacific-graphics/2010/4205/0", "title": 
"Pacific Conference on Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2012/11/ttg2012111824", "title": "Rational BRDF", "doi": null, "abstractUrl": "/journal/tg/2012/11/ttg2012111824/13rRUwjGoFZ", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/12/09444888", "title": "Estimating Homogeneous Data-Driven BRDF Parameters From a Reflectance Map Under Known Natural Lighting", "doi": null, "abstractUrl": "/journal/tg/2022/12/09444888/1u51y8PQCMU", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "mcg2006010018", "articleId": "13rRUwjoNzC", "__typename": "AdjacentArticleType" }, "next": { "fno": "mcg2006010037", "articleId": "13rRUxASujU", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNzmclo6", "title": "June", "year": "2020", "issueNum": "06", "idPrefix": "tg", "pubType": "journal", "volume": "26", "label": "June", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "17D45XreC6e", "doi": "10.1109/TVCG.2018.2886877", "abstract": "Material appearance of rendered objects depends on the underlying BRDF implementation used by rendering software packages. A lack of standards to exchange material parameters and data (between tools) means that artists in digital 3D prototyping and design, manually match the appearance of materials to a reference image. Since their effect on rendered output is often non-uniform and counter intuitive, selecting appropriate parameterisations for BRDF models is far from straightforward. We present a novel BRDF remapping technique, that automatically computes a mapping (BRDF Difference Probe) to match the appearance of a source material model to a target one. Through quantitative analysis, four user studies and psychometric scaling experiments, we validate our remapping framework and demonstrate that it yields a visually faithful remapping among analytical BRDFs. Most notably, our results show that even when the characteristics of the models are substantially different, such as in the case of a phenomenological model and a physically-based one, our remapped renderings are indistinguishable from the original source model.", "abstracts": [ { "abstractType": "Regular", "content": "Material appearance of rendered objects depends on the underlying BRDF implementation used by rendering software packages. A lack of standards to exchange material parameters and data (between tools) means that artists in digital 3D prototyping and design, manually match the appearance of materials to a reference image. 
Since their effect on rendered output is often non-uniform and counter intuitive, selecting appropriate parameterisations for BRDF models is far from straightforward. We present a novel BRDF remapping technique, that automatically computes a mapping (BRDF Difference Probe) to match the appearance of a source material model to a target one. Through quantitative analysis, four user studies and psychometric scaling experiments, we validate our remapping framework and demonstrate that it yields a visually faithful remapping among analytical BRDFs. Most notably, our results show that even when the characteristics of the models are substantially different, such as in the case of a phenomenological model and a physically-based one, our remapped renderings are indistinguishable from the original source model.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Material appearance of rendered objects depends on the underlying BRDF implementation used by rendering software packages. A lack of standards to exchange material parameters and data (between tools) means that artists in digital 3D prototyping and design, manually match the appearance of materials to a reference image. Since their effect on rendered output is often non-uniform and counter intuitive, selecting appropriate parameterisations for BRDF models is far from straightforward. We present a novel BRDF remapping technique, that automatically computes a mapping (BRDF Difference Probe) to match the appearance of a source material model to a target one. Through quantitative analysis, four user studies and psychometric scaling experiments, we validate our remapping framework and demonstrate that it yields a visually faithful remapping among analytical BRDFs. 
Most notably, our results show that even when the characteristics of the models are substantially different, such as in the case of a phenomenological model and a physically-based one, our remapped renderings are indistinguishable from the original source model.", "title": "Perceptually Validated Cross-Renderer Analytical BRDF Parameter Remapping", "normalizedTitle": "Perceptually Validated Cross-Renderer Analytical BRDF Parameter Remapping", "fno": "08576679", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Image Processing", "Rendering Computer Graphics", "Solid Modelling", "Visually Faithful Remapping", "Remapped Renderings", "Software Packages", "Cross Renderer Analytical BRDF Parameter Remapping", "BRDF Remapping Technique", "BRDF Difference Probe", "Digital 3 D Prototyping", "Rendering Computer Graphics", "Computational Modeling", "Lighting", "Measurement", "Probes", "Visualization", "Optimization", "BRDF", "SVBRDF", "Perceptual Validation", "Virtual Materials", "Surface Perception", "Parameter Remapping" ], "authors": [ { "givenName": "Dar'ya", "surname": "Guarnera", "fullName": "Dar'ya Guarnera", "affiliation": "Computer Science, Norwegian University of Science and Technology, Gjøvik, Norway", "__typename": "ArticleAuthorType" }, { "givenName": "Giuseppe Claudio", "surname": "Guarnera", "fullName": "Giuseppe Claudio Guarnera", "affiliation": "Computer Science, Norwegian University of Science and Technology, Gjøvik, Norway", "__typename": "ArticleAuthorType" }, { "givenName": "Matteo", "surname": "Toscani", "fullName": "Matteo Toscani", "affiliation": "Psychology, Justus-Liebig-Universität Giessen, Giessen, Germany", "__typename": "ArticleAuthorType" }, { "givenName": "Mashhuda", "surname": "Glencross", "fullName": "Mashhuda Glencross", "affiliation": "Pismo Software Ltd., Oxford, United Kingdom", "__typename": "ArticleAuthorType" }, { "givenName": "Baihua", "surname": "Li", "fullName": "Baihua Li", "affiliation": "Computer Science, Loughborough University, 
Loughborough, United Kingdom", "__typename": "ArticleAuthorType" }, { "givenName": "Jon Yngve", "surname": "Hardeberg", "fullName": "Jon Yngve Hardeberg", "affiliation": "Computer Science, Norwegian University of Science and Technology, Gjøvik, Norway", "__typename": "ArticleAuthorType" }, { "givenName": "Karl R.", "surname": "Gegenfurtner", "fullName": "Karl R. Gegenfurtner", "affiliation": "Psychology, Justus-Liebig-Universität Giessen, Giessen, Germany", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "06", "pubDate": "2020-06-01 00:00:00", "pubType": "trans", "pages": "2258-2272", "year": "2020", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/wmsvm/2010/7077/0/05558360", "title": "Modeling and Editing Isotropic BRDF", "doi": null, "abstractUrl": "/proceedings-article/wmsvm/2010/05558360/12OmNARiM3T", "parentPublication": { "id": "proceedings/wmsvm/2010/7077/0", "title": "2010 Second International Conference on Modeling, Simulation and Visualization Methods (WMSVM 2010)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2014/5209/0/5209c047", "title": "Effective Acquisition of Dense Anisotropic BRDF", "doi": null, "abstractUrl": "/proceedings-article/icpr/2014/5209c047/12OmNqNXEsZ", "parentPublication": { "id": "proceedings/icpr/2014/5209/0", "title": "2014 22nd International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/nswctc/2010/4011/1/4011a332", "title": "The Analysis of Global Illumination Rendering Based on BRDF", "doi": null, "abstractUrl": "/proceedings-article/nswctc/2010/4011a332/12OmNyvGynS", "parentPublication": { "id": "proceedings/nswctc/2010/4011/1", "title": "Networks Security, Wireless 
Communications and Trusted Computing, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2015/8391/0/8391d559", "title": "A Gaussian Process Latent Variable Model for BRDF Inference", "doi": null, "abstractUrl": "/proceedings-article/iccv/2015/8391d559/12OmNzVoBvI", "parentPublication": { "id": "proceedings/iccv/2015/8391/0", "title": "2015 IEEE International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2012/11/ttg2012111824", "title": "Rational BRDF", "doi": null, "abstractUrl": "/journal/tg/2012/11/ttg2012111824/13rRUwjGoFZ", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2023/04/09678000", "title": "Real-Time Lighting Estimation for Augmented Reality via Differentiable Screen-Space Rendering", "doi": null, "abstractUrl": "/journal/tg/2023/04/09678000/1A4SuYWCI7K", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2020/7168/0/716800f416", "title": "Neural Voxel Renderer: Learning an Accurate and Controllable Rendering Tool", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2020/716800f416/1m3nYbnokEM", "parentPublication": { "id": "proceedings/cvpr/2020/7168/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/04/09203787", "title": "Learning-Based Inverse Bi-Scale Material Fitting From Tabular BRDFs", "doi": null, "abstractUrl": "/journal/tg/2022/04/09203787/1nkyY8W8j1m", "parentPublication": { "id": "trans/tg", "title": "IEEE 
Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/12/09444888", "title": "Estimating Homogeneous Data-Driven BRDF Parameters From a Reflectance Map Under Known Natural Lighting", "doi": null, "abstractUrl": "/journal/tg/2022/12/09444888/1u51y8PQCMU", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2022/12/09623493", "title": "Invertible Neural BRDF for Object Inverse Rendering", "doi": null, "abstractUrl": "/journal/tp/2022/12/09623493/1yJT7tLzbi0", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "08565948", "articleId": "17D45Wda7ec", "__typename": "AdjacentArticleType" }, "next": { "fno": "08554159", "articleId": "17D45WB0qbp", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "1js2FLKT2a4", "name": "ttg202006-08576679s1-supplemental_material.pdf", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202006-08576679s1-supplemental_material.pdf", "extension": "pdf", "size": "54.2 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "12OmNzICEFu", "title": "July-Sept.", "year": "2014", "issueNum": "03", "idPrefix": "ta", "pubType": "journal", "volume": "5", "label": "July-Sept.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUxE04s0", "doi": "10.1109/TAFFC.2014.2335740", "abstract": "This article analyses the issues pertaining to the simulation of joint attention with virtual humans. Gaze represents a powerful communication channel illustrated by the pivotal role of joint attention in social interactions. To our knowledge, there have been only few attempts to simulate gazing patterns associated with joint attention as a mean for developing empathic virtual agents. Eye-tracking technologies now enable creating non-invasive gaze-contingent systems that empower the user with the ability to lead a virtual human's focus of attention in real-time. Although gaze control can be deliberate, most of our visual behaviors in everyday life are not. This article reports empirical data suggesting that users only have partial awareness of controlling gaze-contingent displays. The technical challenges induced by detecting the user's focus of attention in virtual reality are reviewed and several solutions are compared. We designed and tested a platform for creating virtual humans endowed with the ability to follow the user's attention. The article discusses the advantages of simulating joint attention for improving interpersonal skills and user engagement. Joint attention plays a major role in the development of autism. The platform we designed is intended for research and treatment of autism and tests included participants with this disorder.", "abstracts": [ { "abstractType": "Regular", "content": "This article analyses the issues pertaining to the simulation of joint attention with virtual humans. 
Gaze represents a powerful communication channel illustrated by the pivotal role of joint attention in social interactions. To our knowledge, there have been only few attempts to simulate gazing patterns associated with joint attention as a mean for developing empathic virtual agents. Eye-tracking technologies now enable creating non-invasive gaze-contingent systems that empower the user with the ability to lead a virtual human's focus of attention in real-time. Although gaze control can be deliberate, most of our visual behaviors in everyday life are not. This article reports empirical data suggesting that users only have partial awareness of controlling gaze-contingent displays. The technical challenges induced by detecting the user's focus of attention in virtual reality are reviewed and several solutions are compared. We designed and tested a platform for creating virtual humans endowed with the ability to follow the user's attention. The article discusses the advantages of simulating joint attention for improving interpersonal skills and user engagement. Joint attention plays a major role in the development of autism. The platform we designed is intended for research and treatment of autism and tests included participants with this disorder.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "This article analyses the issues pertaining to the simulation of joint attention with virtual humans. Gaze represents a powerful communication channel illustrated by the pivotal role of joint attention in social interactions. To our knowledge, there have been only few attempts to simulate gazing patterns associated with joint attention as a mean for developing empathic virtual agents. Eye-tracking technologies now enable creating non-invasive gaze-contingent systems that empower the user with the ability to lead a virtual human's focus of attention in real-time. Although gaze control can be deliberate, most of our visual behaviors in everyday life are not. 
This article reports empirical data suggesting that users only have partial awareness of controlling gaze-contingent displays. The technical challenges induced by detecting the user's focus of attention in virtual reality are reviewed and several solutions are compared. We designed and tested a platform for creating virtual humans endowed with the ability to follow the user's attention. The article discusses the advantages of simulating joint attention for improving interpersonal skills and user engagement. Joint attention plays a major role in the development of autism. The platform we designed is intended for research and treatment of autism and tests included participants with this disorder.", "title": "Joint Attention Simulation Using Eye-Tracking and Virtual Humans", "normalizedTitle": "Joint Attention Simulation Using Eye-Tracking and Virtual Humans", "fno": "06851182", "hasPdf": true, "idPrefix": "ta", "keywords": [ "Joints", "Visualization", "Variable Speed Drives", "Context", "Shape", "Autism", "Real Time Systems", "Handicapped Persons Special Needs", "Interaction Techniques", "Virtual Reality", "Evaluation Methodology" ], "authors": [ { "givenName": "Matthieu", "surname": "Courgeon", "fullName": "Matthieu Courgeon", "affiliation": "Lab-STICC, Université de Bretagne-Sud, 29238 Brest Cedex 3, France", "__typename": "ArticleAuthorType" }, { "givenName": "Gilles", "surname": "Rautureau", "fullName": "Gilles Rautureau", "affiliation": "Emotion Center, CNRS USR 3246, Hôpital de La Salpêtrière, 75651 Paris Cedex 13, France", "__typename": "ArticleAuthorType" }, { "givenName": "Jean-Claude", "surname": "Martin", "fullName": "Jean-Claude Martin", "affiliation": "LIMSI-CNRS, Université Paris-Sud, 91403 Orsay Cedex, France", "__typename": "ArticleAuthorType" }, { "givenName": "Ouriel", "surname": "Grynszpan", "fullName": "Ouriel Grynszpan", "affiliation": "Emotion Center, CNRS USR 3246, Université Pierre et Marie Curie, Hôpital de La Saplêtrière, 75651 Paris Cedex 
13, France", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "03", "pubDate": "2014-07-01 00:00:00", "pubType": "trans", "pages": "238-250", "year": "2014", "issn": "1949-3045", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/ichi/2013/5089/0/5089a484", "title": "Can NAO Robot Improve Eye-Gaze Attention of Children with High Functioning Autism?", "doi": null, "abstractUrl": "/proceedings-article/ichi/2013/5089a484/12OmNAoDifg", "parentPublication": { "id": "proceedings/ichi/2013/5089/0", "title": "2013 IEEE International Conference on Healthcare Informatics (ICHI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2017/1032/0/1032d287", "title": "Learning Visual Attention to Identify People with Autism Spectrum Disorder", "doi": null, "abstractUrl": "/proceedings-article/iccv/2017/1032d287/12OmNyQ7Ga7", "parentPublication": { "id": "proceedings/iccv/2017/1032/0", "title": "2017 IEEE International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/acii/2015/9953/0/07344621", "title": "Cognitive state measurement from eye gaze analysis in an intelligent virtual reality driving system for autism intervention", "doi": null, "abstractUrl": "/proceedings-article/acii/2015/07344621/12OmNyfdOR4", "parentPublication": { "id": "proceedings/acii/2015/9953/0", "title": "2015 International Conference on Affective Computing and Intelligent Interaction (ACII)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2018/3365/0/08446242", "title": "Towards Joint Attention Training for Children with ASD - a VR Game Approach and Eye Gaze Exploration", "doi": null, "abstractUrl": 
"/proceedings-article/vr/2018/08446242/13bd1h03qOo", "parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2013/04/ttg2013040711", "title": "Understanding How Adolescents with Autism Respond to Facial Expressions in Virtual Reality Environments", "doi": null, "abstractUrl": "/journal/tg/2013/04/ttg2013040711/13rRUwcAqqf", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ta/2017/02/07495013", "title": "Cognitive Load Measurement in a Virtual Reality-Based Driving System for Autism Intervention", "doi": null, "abstractUrl": "/journal/ta/2017/02/07495013/13rRUwhpBMP", "parentPublication": { "id": "trans/ta", "title": "IEEE Transactions on Affective Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vs-games/2018/7123/0/08493421", "title": "Enable an Innovative Prolonged Exposure Therapy of Attention Deficits on Autism Spectrum through Adaptive Virtual Environments", "doi": null, "abstractUrl": "/proceedings-article/vs-games/2018/08493421/14tNJuA5rDa", "parentPublication": { "id": "proceedings/vs-games/2018/7123/0", "title": "2018 10th International Conference on Virtual Worlds and Games for Serious Applications (VS-Games)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmew/2019/9214/0/921400a625", "title": "Visual Attention Modeling for Autism Spectrum Disorder by Semantic Features", "doi": null, "abstractUrl": "/proceedings-article/icmew/2019/921400a625/1cJ0EqHvcT6", "parentPublication": { "id": "proceedings/icmew/2019/9214/0", "title": "2019 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)", 
"__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2020/6532/0/09090559", "title": "A Methodology of Eye Gazing Attention Determination for VR Training", "doi": null, "abstractUrl": "/proceedings-article/vrw/2020/09090559/1jIxoACmybu", "parentPublication": { "id": "proceedings/vrw/2020/6532/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2020/6553/0/09093515", "title": "Attention Flow: End-to-End Joint Attention Estimation", "doi": null, "abstractUrl": "/proceedings-article/wacv/2020/09093515/1jPbfQ6P3a0", "parentPublication": { "id": "proceedings/wacv/2020/6553/0", "title": "2020 IEEE Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "06778017", "articleId": "13rRUyY2937", "__typename": "AdjacentArticleType" }, "next": { "fno": "06840953", "articleId": "13rRUxBrGf4", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNqBKUfE", "title": "September-October", "year": "1998", "issueNum": "05", "idPrefix": "cg", "pubType": "magazine", "volume": "18", "label": "September-October", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUyoPSRo", "doi": "10.1109/38.708560", "abstract": "At present, few systems possess the multiple functions required to build believable and recognizable real-time deformable humans. We describe our interactive system for building a virtual human, fitting texture to the body and head, and controlling skeleton motion. We first detail the complete animation framework, integrating all the virtual human modules. We then present the first of our two case studies: CyberTennis, where two virtual humans play real-time tennis within our Virtual Life Network Environment system. One real player is in Geneva and her opponent is in Lausanne. An autonomous virtual judge referees the game. The second application combines high-tech artistic choreography in a CyberDance performance shown in a Geneva exhibition hall as part of Computer Animation 97. In this performance, the movements of the choreographer are captured and paralleled, in real time, by the virtual robot counterpart.", "abstracts": [ { "abstractType": "Regular", "content": "At present, few systems possess the multiple functions required to build believable and recognizable real-time deformable humans. We describe our interactive system for building a virtual human, fitting texture to the body and head, and controlling skeleton motion. We first detail the complete animation framework, integrating all the virtual human modules. We then present the first of our two case studies: CyberTennis, where two virtual humans play real-time tennis within our Virtual Life Network Environment system. One real player is in Geneva and her opponent is in Lausanne. An autonomous virtual judge referees the game. 
The second application combines high-tech artistic choreography in a CyberDance performance shown in a Geneva exhibition hall as part of Computer Animation 97. In this performance, the movements of the choreographer are captured and paralleled, in real time, by the virtual robot counterpart.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "At present, few systems possess the multiple functions required to build believable and recognizable real-time deformable humans. We describe our interactive system for building a virtual human, fitting texture to the body and head, and controlling skeleton motion. We first detail the complete animation framework, integrating all the virtual human modules. We then present the first of our two case studies: CyberTennis, where two virtual humans play real-time tennis within our Virtual Life Network Environment system. One real player is in Geneva and her opponent is in Lausanne. An autonomous virtual judge referees the game. The second application combines high-tech artistic choreography in a CyberDance performance shown in a Geneva exhibition hall as part of Computer Animation 97. 
In this performance, the movements of the choreographer are captured and paralleled, in real time, by the virtual robot counterpart.", "title": "Real-Time Animation of Realistic Virtual Humans", "normalizedTitle": "Real-Time Animation of Realistic Virtual Humans", "fno": "mcg1998050042", "hasPdf": true, "idPrefix": "cg", "keywords": [ "Realistic Virtual Humans", "Real Time Animation", "Virtual Human Collaborative Environment", "Deformations", "Autonomous Virtual Actors", "Cyber Tennis", "Cyber Dance" ], "authors": [ { "givenName": "Prem", "surname": "Kalra", "fullName": "Prem Kalra", "affiliation": null, "__typename": "ArticleAuthorType" }, { "givenName": "Nadia", "surname": "Magnenat-Thalmann", "fullName": "Nadia Magnenat-Thalmann", "affiliation": null, "__typename": "ArticleAuthorType" }, { "givenName": "Laurent", "surname": "Moccozet", "fullName": "Laurent Moccozet", "affiliation": null, "__typename": "ArticleAuthorType" }, { "givenName": "Gael", "surname": "Sannier", "fullName": "Gael Sannier", "affiliation": null, "__typename": "ArticleAuthorType" }, { "givenName": "Amaury", "surname": "Aubel", "fullName": "Amaury Aubel", "affiliation": null, "__typename": "ArticleAuthorType" }, { "givenName": "Daniel", "surname": "Thalmann", "fullName": "Daniel Thalmann", "affiliation": null, "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": false, "isOpenAccess": false, "issueNum": "05", "pubDate": "1998-09-01 00:00:00", "pubType": "mags", "pages": "42-56", "year": "1998", "issn": "0272-1716", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [], "adjacentArticles": { "previous": { "fno": "mcg1998050032", "articleId": "13rRUwhpBSu", "__typename": "AdjacentArticleType" }, "next": { "fno": "mcg1998050058", "articleId": "13rRUwInv6N", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNxvO04Q", "title": "Jan.", "year": "2017", "issueNum": "01", "idPrefix": "tg", "pubType": "journal", "volume": "23", "label": "Jan.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUwwJWFR", "doi": "10.1109/TVCG.2016.2598998", "abstract": "Glyphs are a powerful tool for visualizing second-order tensors in a variety of scientic data as they allow to encode physical behavior in geometric properties. Most existing techniques focus on symmetric tensors and exclude non-symmetric tensors where the eigenvectors can be non-orthogonal or complex. We present a new construction of 2d and 3d tensor glyphs based on piecewise rational curves and surfaces with the following properties: invariance to (a) isometries and (b) scaling, (c) direct encoding of all real eigenvalues and eigenvectors, (d) one-to-one relation between the tensors and glyphs, (e) glyph continuity under changing the tensor. We apply the glyphs to visualize the Jacobian matrix fields of a number of 2d and 3d vector fields.", "abstracts": [ { "abstractType": "Regular", "content": "Glyphs are a powerful tool for visualizing second-order tensors in a variety of scientic data as they allow to encode physical behavior in geometric properties. Most existing techniques focus on symmetric tensors and exclude non-symmetric tensors where the eigenvectors can be non-orthogonal or complex. We present a new construction of 2d and 3d tensor glyphs based on piecewise rational curves and surfaces with the following properties: invariance to (a) isometries and (b) scaling, (c) direct encoding of all real eigenvalues and eigenvectors, (d) one-to-one relation between the tensors and glyphs, (e) glyph continuity under changing the tensor. 
We apply the glyphs to visualize the Jacobian matrix fields of a number of 2d and 3d vector fields.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Glyphs are a powerful tool for visualizing second-order tensors in a variety of scientic data as they allow to encode physical behavior in geometric properties. Most existing techniques focus on symmetric tensors and exclude non-symmetric tensors where the eigenvectors can be non-orthogonal or complex. We present a new construction of 2d and 3d tensor glyphs based on piecewise rational curves and surfaces with the following properties: invariance to (a) isometries and (b) scaling, (c) direct encoding of all real eigenvalues and eigenvectors, (d) one-to-one relation between the tensors and glyphs, (e) glyph continuity under changing the tensor. We apply the glyphs to visualize the Jacobian matrix fields of a number of 2d and 3d vector fields.", "title": "Glyphs for General Second-Order 2D and 3D Tensors", "normalizedTitle": "Glyphs for General Second-Order 2D and 3D Tensors", "fno": "07539639", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Tensile Stress", "Eigenvalues And Eigenfunctions", "Two Dimensional Displays", "Jacobian Matrices", "Matrix Decomposition", "Symmetric Matrices", "Three Dimensional Displays", "Flow Visualization", "Glyph Based Techniques", "Tensor Field Data" ], "authors": [ { "givenName": "Tim", "surname": "Gerrits", "fullName": "Tim Gerrits", "affiliation": "Visual Computing group at the University of Magdeburg, Germany", "__typename": "ArticleAuthorType" }, { "givenName": "Christian", "surname": "Rössl", "fullName": "Christian Rössl", "affiliation": "Visual Computing group at the University of Magdeburg, Germany", "__typename": "ArticleAuthorType" }, { "givenName": "Holger", "surname": "Theisel", "fullName": "Holger Theisel", "affiliation": "Visual Computing group at the University of Magdeburg, Germany", "__typename": "ArticleAuthorType" } ], "replicability": null, 
"showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "01", "pubDate": "2017-01-01 00:00:00", "pubType": "trans", "pages": "980-989", "year": "2017", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/ieee-vis/2005/2766/0/27660001", "title": "2D Asymmetric Tensor Analysis", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2005/27660001/12OmNAY79q0", "parentPublication": { "id": "proceedings/ieee-vis/2005/2766/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ipdps/2017/3914/0/07967195", "title": "Model-Driven Sparse CP Decomposition for Higher-Order Tensors", "doi": null, "abstractUrl": "/proceedings-article/ipdps/2017/07967195/12OmNBeRtOY", "parentPublication": { "id": "proceedings/ipdps/2017/3914/0", "title": "2017 IEEE International Parallel and Distributed Processing Symposium (IPDPS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/2005/2766/0/01532770", "title": "2D asymmetric tensor analysis", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2005/01532770/12OmNCw3z9K", "parentPublication": { "id": "proceedings/ieee-vis/2005/2766/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/2005/2766/0/27660004", "title": "HOT- Lines: Tracking Lines in Higher Order Tensor Fields", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2005/27660004/12OmNwMXnqd", "parentPublication": { "id": "proceedings/ieee-vis/2005/2766/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/2005/2766/0/01532773", "title": "HOT-lines: tracking lines in higher order tensor 
fields", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2005/01532773/12OmNzWx07T", "parentPublication": { "id": "proceedings/ieee-vis/2005/2766/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpp/2014/5618/0/5618a261", "title": "CAST: Contraction Algorithm for Symmetric Tensors", "doi": null, "abstractUrl": "/proceedings-article/icpp/2014/5618a261/12OmNzzP5Be", "parentPublication": { "id": "proceedings/icpp/2014/5618/0", "title": "2014 43nd International Conference on Parallel Processing (ICPP)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2010/06/ttg2010061595", "title": "Superquadric Glyphs for Symmetric Second-Order Tensors", "doi": null, "abstractUrl": "/journal/tg/2010/06/ttg2010061595/13rRUxZzAhA", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/09737134", "title": "Visualizing Higher-Order 3D Tensors by Multipole Lines", "doi": null, "abstractUrl": "/journal/tg/5555/01/09737134/1BQidsAhMnS", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/07/08967163", "title": "Visualization of 3D Stress Tensor Fields Using Superquadric Glyphs on Displacement Streamlines", "doi": null, "abstractUrl": "/journal/tg/2021/07/08967163/1gPjyn904OA", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/01/09552927", "title": "Feature Curves and Surfaces of 3D Asymmetric Tensor Fields", "doi": null, "abstractUrl": 
"/journal/tg/2022/01/09552927/1xic6oeRxnO", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "07539598", "articleId": "13rRUxC0SOY", "__typename": "AdjacentArticleType" }, "next": { "fno": "07536103", "articleId": "13rRUNvyakR", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNwpGgK8", "title": "Dec.", "year": "2014", "issueNum": "12", "idPrefix": "tg", "pubType": "journal", "volume": "20", "label": "Dec.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUxZRbo1", "doi": "10.1109/TVCG.2014.2346319", "abstract": "We present a novel scheme for progressive rendering in interactive visualization. Static settings with respect to a certain image quality or frame rate are inherently incapable of delivering both high frame rates for rapid changes and high image quality for detailed investigation. Our novel technique flexibly adapts by steering the visualization process in three major degrees of freedom: when to terminate the refinement of a frame in the background and start a new one, when to display a frame currently computed, and how much resources to consume. We base these decisions on the correlation of the errors due to insufficient sampling and response delay, which we estimate separately using fast yet expressive heuristics. To automate the configuration of the steering behavior, we employ offline video quality analysis. We provide an efficient implementation of our scheme for the application of volume raycasting, featuring integrated GPU-accelerated image reconstruction and error estimation. Our implementation performs an integral handling of the changes due to camera transforms, transfer function adaptations, as well as the progression of the data to in time. Finally, the overall technique is evaluated with an expert study.", "abstracts": [ { "abstractType": "Regular", "content": "We present a novel scheme for progressive rendering in interactive visualization. Static settings with respect to a certain image quality or frame rate are inherently incapable of delivering both high frame rates for rapid changes and high image quality for detailed investigation. 
Our novel technique flexibly adapts by steering the visualization process in three major degrees of freedom: when to terminate the refinement of a frame in the background and start a new one, when to display a frame currently computed, and how much resources to consume. We base these decisions on the correlation of the errors due to insufficient sampling and response delay, which we estimate separately using fast yet expressive heuristics. To automate the configuration of the steering behavior, we employ offline video quality analysis. We provide an efficient implementation of our scheme for the application of volume raycasting, featuring integrated GPU-accelerated image reconstruction and error estimation. Our implementation performs an integral handling of the changes due to camera transforms, transfer function adaptations, as well as the progression of the data to in time. Finally, the overall technique is evaluated with an expert study.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We present a novel scheme for progressive rendering in interactive visualization. Static settings with respect to a certain image quality or frame rate are inherently incapable of delivering both high frame rates for rapid changes and high image quality for detailed investigation. Our novel technique flexibly adapts by steering the visualization process in three major degrees of freedom: when to terminate the refinement of a frame in the background and start a new one, when to display a frame currently computed, and how much resources to consume. We base these decisions on the correlation of the errors due to insufficient sampling and response delay, which we estimate separately using fast yet expressive heuristics. To automate the configuration of the steering behavior, we employ offline video quality analysis. 
We provide an efficient implementation of our scheme for the application of volume raycasting, featuring integrated GPU-accelerated image reconstruction and error estimation. Our implementation performs an integral handling of the changes due to camera transforms, transfer function adaptations, as well as the progression of the data to in time. Finally, the overall technique is evaluated with an expert study.", "title": "Interactive Progressive Visualization with Space-Time Error Control", "normalizedTitle": "Interactive Progressive Visualization with Space-Time Error Control", "fno": "06875936", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Rendering Computer Graphics", "Interactive Systems", "Image Resolution", "Quality Assessment", "Data Visualization", "Video Recording", "Optimization", "Interactive Volume Raycasting", "Progressive Visualization", "Error Based Frame Control" ], "authors": [ { "givenName": "Steffen", "surname": "Frey", "fullName": "Steffen Frey", "affiliation": ", University of Stuttgart", "__typename": "ArticleAuthorType" }, { "givenName": "Filip", "surname": "Sadlo", "fullName": "Filip Sadlo", "affiliation": ", University of Stuttgart", "__typename": "ArticleAuthorType" }, { "givenName": "Kwan-Liu", "surname": "Ma", "fullName": "Kwan-Liu Ma", "affiliation": ", UC Davis", "__typename": "ArticleAuthorType" }, { "givenName": "Thomas", "surname": "Ertl", "fullName": "Thomas Ertl", "affiliation": ", University of Stuttgart", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "12", "pubDate": "2014-12-01 00:00:00", "pubType": "trans", "pages": "2397-2406", "year": "2014", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/pg/2003/2028/0/20280059", "title": "Interactive Visualization of Complex Real-World Light Sources", "doi": null, "abstractUrl": 
"/proceedings-article/pg/2003/20280059/12OmNA1mbeZ", "parentPublication": { "id": "proceedings/pg/2003/2028/0", "title": "Computer Graphics and Applications, Pacific Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/1995/7187/0/71870176", "title": "High-Speed Volume Rendering Using Redundant Block Compression", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/1995/71870176/12OmNqBtiEl", "parentPublication": { "id": "proceedings/ieee-vis/1995/7187/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/2005/2766/0/01532799", "title": "Exploiting frame-to-frame coherence for accelerating high-quality volume raycasting on graphics hardware", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2005/01532799/12OmNrNh0LY", "parentPublication": { "id": "proceedings/ieee-vis/2005/2766/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sibgrapi/2010/8420/0/05720358", "title": "Interactive Simulation and Visualization of Fluids with Surface Raycasting", "doi": null, "abstractUrl": "/proceedings-article/sibgrapi/2010/05720358/12OmNwAKCLk", "parentPublication": { "id": "proceedings/sibgrapi/2010/8420/0", "title": "2010 23rd SIBGRAPI Conference on Graphics, Patterns and Images", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2014/4761/0/06890152", "title": "Minimisation of video downstream bit rate for large scale immersive video conferencing by utilising the perceptual variations of quality", "doi": null, "abstractUrl": "/proceedings-article/icme/2014/06890152/12OmNxGALaX", "parentPublication": { "id": "proceedings/icme/2014/4761/0", "title": "2014 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/2000/6478/0/64780029", "title": "Interactive Visualization of Particle-In-Cell Simulations", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2000/64780029/12OmNxHJ9sZ", "parentPublication": { "id": "proceedings/ieee-vis/2000/6478/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/dagstuhl/1997/0503/0/05030233", "title": "InVIS - Interactive Visualization of Medical Data Sets", "doi": null, "abstractUrl": "/proceedings-article/dagstuhl/1997/05030233/12OmNy7h3aK", "parentPublication": { "id": "proceedings/dagstuhl/1997/0503/0", "title": "Dagstuhl '97 - Scientific Visualization Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/2005/2766/0/27660029", "title": "Exploiting Frame-to-Frame Coherence for Accelerating High-Quality Volume Raycasting on Graphics Hardware", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2005/27660029/12OmNzt0IIb", "parentPublication": { "id": "proceedings/ieee-vis/2005/2766/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2007/01/v0122", "title": "Interactive Level-of-Detail Selection Using Image-Based Quality Metric for Large Volume Visualization", "doi": null, "abstractUrl": "/journal/tg/2007/01/v0122/13rRUILtJqL", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vis/2020/8014/0/801400a026", "title": "GPU-based Raycasting of Hermite Spline Tubes", "doi": null, "abstractUrl": "/proceedings-article/vis/2020/801400a026/1qRNBBpLeeI", "parentPublication": { "id": "proceedings/vis/2020/8014/0", "title": "2020 IEEE 
Visualization Conference (VIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "06876040", "articleId": "13rRUEgs2ts", "__typename": "AdjacentArticleType" }, "next": { "fno": "06875916", "articleId": "13rRUxlgxTl", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "17ShDTXFgR4", "name": "ttg201412-06875936s1.zip", "location": "https://www.computer.org/csdl/api/v1/extra/ttg201412-06875936s1.zip", "extension": "zip", "size": "41.8 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "12OmNzvQI13", "title": "Oct.", "year": "2020", "issueNum": "10", "idPrefix": "tg", "pubType": "journal", "volume": "26", "label": "Oct.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "19sOOqzUp7W", "doi": "10.1109/TVCG.2019.2912752", "abstract": "In a wide range of scientific fields, 3D datasets production capabilities have widely evolved in recent years, especially with the rapid increase in their sizes. As a result, many large-scale applications, including visualization or processing, have become challenging to address. A solution to this issue lies in providing out-of-core algorithms specifically designed to handle datasets significantly larger than memory. In this article, we present a new approach that extends the broad interactive addressing principles already established in the field of out-of-core volume rendering on GPUs to allow on-demand processing during the visualization stage. We propose a pipeline designed to manage data as regular 3D grids regardless of the underlying application. It relies on a caching approach with a virtual memory addressing system coupled to an efficient parallel management on GPU to provide efficient access to data in interactive time. It allows any visualization or processing application to leverage the flexibility of its structure by managing multi-modality datasets. Furthermore, we show that our system delivers good performance on a single standard PC with low memory budget on the GPU.", "abstracts": [ { "abstractType": "Regular", "content": "In a wide range of scientific fields, 3D datasets production capabilities have widely evolved in recent years, especially with the rapid increase in their sizes. As a result, many large-scale applications, including visualization or processing, have become challenging to address. 
A solution to this issue lies in providing out-of-core algorithms specifically designed to handle datasets significantly larger than memory. In this article, we present a new approach that extends the broad interactive addressing principles already established in the field of out-of-core volume rendering on GPUs to allow on-demand processing during the visualization stage. We propose a pipeline designed to manage data as regular 3D grids regardless of the underlying application. It relies on a caching approach with a virtual memory addressing system coupled to an efficient parallel management on GPU to provide efficient access to data in interactive time. It allows any visualization or processing application to leverage the flexibility of its structure by managing multi-modality datasets. Furthermore, we show that our system delivers good performance on a single standard PC with low memory budget on the GPU.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In a wide range of scientific fields, 3D datasets production capabilities have widely evolved in recent years, especially with the rapid increase in their sizes. As a result, many large-scale applications, including visualization or processing, have become challenging to address. A solution to this issue lies in providing out-of-core algorithms specifically designed to handle datasets significantly larger than memory. In this article, we present a new approach that extends the broad interactive addressing principles already established in the field of out-of-core volume rendering on GPUs to allow on-demand processing during the visualization stage. We propose a pipeline designed to manage data as regular 3D grids regardless of the underlying application. It relies on a caching approach with a virtual memory addressing system coupled to an efficient parallel management on GPU to provide efficient access to data in interactive time. 
It allows any visualization or processing application to leverage the flexibility of its structure by managing multi-modality datasets. Furthermore, we show that our system delivers good performance on a single standard PC with low memory budget on the GPU.", "title": "Interactive Visualization and On-Demand Processing of Large Volume Data: A Fully GPU-Based Out-of-Core Approach", "normalizedTitle": "Interactive Visualization and On-Demand Processing of Large Volume Data: A Fully GPU-Based Out-of-Core Approach", "fno": "08695851", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Cache Storage", "Data Visualisation", "Graphics Processing Units", "Interactive Systems", "Rendering Computer Graphics", "Solid Modelling", "Out Of Core Volume Rendering", "Single Standard PC", "Multimodality Datasets Management", "Fully GPU Based Out Of Core Approach", "Low Memory Budget", "Efficient Parallel Management", "Virtual Memory Addressing System", "Caching Approach", "Regular 3 D Grids", "Broad Interactive Addressing Principles", "Large Scale Applications", "Large Volume Data", "On Demand Processing", "Interactive Visualization", "Graphics Processing Units", "Data Visualization", "Pipelines", "Memory Management", "Three Dimensional Displays", "Rendering Computer Graphics", "Casting", "GPU", "Caching System", "Out Of Core Data Management", "Large Data", "Interactive Visualization", "On Demand Processing" ], "authors": [ { "givenName": "Jonathan", "surname": "Sarton", "fullName": "Jonathan Sarton", "affiliation": "Université de Reims Champagne-Ardenne, Reims, France", "__typename": "ArticleAuthorType" }, { "givenName": "Nicolas", "surname": "Courilleau", "fullName": "Nicolas Courilleau", "affiliation": "Université de Reims Champagne-Ardenne, Reims, France", "__typename": "ArticleAuthorType" }, { "givenName": "Yannick", "surname": "Remion", "fullName": "Yannick Remion", "affiliation": "Université de Reims Champagne-Ardenne, Reims, France", "__typename": "ArticleAuthorType" }, { 
"givenName": "Laurent", "surname": "Lucas", "fullName": "Laurent Lucas", "affiliation": "Université de Reims Champagne-Ardenne, Reims, France", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "10", "pubDate": "2020-10-01 00:00:00", "pubType": "trans", "pages": "3008-3021", "year": "2020", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/ieee-vis/2003/2030/0/20300038", "title": "Acceleration Techniques for GPU-based Volume Rendering", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2003/20300038/12OmNC2xhD8", "parentPublication": { "id": "proceedings/ieee-vis/2003/2030/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/unesst/2015/9852/0/9852a022", "title": "Increasing GPU-Speedup of Volume Rendering for Images with High Complexity", "doi": null, "abstractUrl": "/proceedings-article/unesst/2015/9852a022/12OmNCmpcES", "parentPublication": { "id": "proceedings/unesst/2015/9852/0", "title": "2015 8th International Conference on u- and e- Service, Science and Technology (UNESST)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cbms/2012/2049/0/06266300", "title": "A flexible Java GPU-enhanced visualization framework and its applications", "doi": null, "abstractUrl": "/proceedings-article/cbms/2012/06266300/12OmNwHQBby", "parentPublication": { "id": "proceedings/cbms/2012/2049/0", "title": "2012 25th IEEE International Symposium on Computer-Based Medical Systems (CBMS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/unesst/2015/9852/0/9852a018", "title": "Complexity Evaluation of CT-Images for GPU-Based Volume Rendering", "doi": null, "abstractUrl": 
"/proceedings-article/unesst/2015/9852a018/12OmNxeutee", "parentPublication": { "id": "proceedings/unesst/2015/9852/0", "title": "2015 8th International Conference on u- and e- Service, Science and Technology (UNESST)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ldav/2014/5215/0/07013209", "title": "Out-of-core visualization of time-varying hybrid-grid volume data", "doi": null, "abstractUrl": "/proceedings-article/ldav/2014/07013209/12OmNyoiZc2", "parentPublication": { "id": "proceedings/ldav/2014/5215/0", "title": "2014 IEEE 4th Symposium on Large Data Analysis and Visualization (LDAV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2013/10/ttg2013101732", "title": "Octree Rasterization: Accelerating High-Quality Out-of-Core GPU Volume Rendering", "doi": null, "abstractUrl": "/journal/tg/2013/10/ttg2013101732/13rRUwvBy8T", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2008/05/mcg2008050066", "title": "Dynamic Shader Generation for GPU-Based Multi-Volume Ray Casting", "doi": null, "abstractUrl": "/magazine/cg/2008/05/mcg2008050066/13rRUxN5evD", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2018/10/08100972", "title": "Multiresolution Volume Filtering in the Tensor Compressed Domain", "doi": null, "abstractUrl": "/journal/tg/2018/10/08100972/13rRUxjQypg", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/09/09079657", "title": "Distributed Interactive Visualization Using GPU-Optimized Spark", "doi": null, 
"abstractUrl": "/journal/tg/2021/09/09079657/1jmVbp8XqZa", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ldav/2021/3283/0/328300a043", "title": "GPU-based Image Compression for Efficient Compositing in Distributed Rendering Applications", "doi": null, "abstractUrl": "/proceedings-article/ldav/2021/328300a043/1zdPDTXc4hy", "parentPublication": { "id": "proceedings/ldav/2021/3283/0", "title": "2021 IEEE 11th Symposium on Large Data Analysis and Visualization (LDAV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "09184389", "articleId": "1mLIesC5z0Y", "__typename": "AdjacentArticleType" }, "next": { "fno": "08700299", "articleId": "19xNwtl1N4s", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNyQphgZ", "title": "Jan.-Feb.", "year": "2016", "issueNum": "01", "idPrefix": "cs", "pubType": "magazine", "volume": "18", "label": "Jan.-Feb.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUynZ5rP", "doi": "10.1109/MCSE.2016.7", "abstract": "The gap between large-scale data production rate and the rate of generation of data-driven scientific insights has led to an analytical bottleneck in scientific domains like climate, biology, and so on. This is primarily due to the lack of innovative analytical tools that can help scientists efficiently analyze and explore alternative hypotheses about the data and communicate their findings effectively to a broad audience. In this article, by reflecting on a set of successful collaborative research efforts between a group of climate scientists and visualization researchers, the authors introspect how interactive visualization can help reduce the analytical bottleneck for domain scientists.", "abstracts": [ { "abstractType": "Regular", "content": "The gap between large-scale data production rate and the rate of generation of data-driven scientific insights has led to an analytical bottleneck in scientific domains like climate, biology, and so on. This is primarily due to the lack of innovative analytical tools that can help scientists efficiently analyze and explore alternative hypotheses about the data and communicate their findings effectively to a broad audience. 
In this article, by reflecting on a set of successful collaborative research efforts between a group of climate scientists and visualization researchers, the authors introspect how interactive visualization can help reduce the analytical bottleneck for domain scientists.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "The gap between large-scale data production rate and the rate of generation of data-driven scientific insights has led to an analytical bottleneck in scientific domains like climate, biology, and so on. This is primarily due to the lack of innovative analytical tools that can help scientists efficiently analyze and explore alternative hypotheses about the data and communicate their findings effectively to a broad audience. In this article, by reflecting on a set of successful collaborative research efforts between a group of climate scientists and visualization researchers, the authors introspect how interactive visualization can help reduce the analytical bottleneck for domain scientists.", "title": "Reducing the Analytical Bottleneck for Domain Scientists: Lessons from a Climate Data Visualization Case Study", "normalizedTitle": "Reducing the Analytical Bottleneck for Domain Scientists: Lessons from a Climate Data Visualization Case Study", "fno": "mcs2016010092", "hasPdf": true, "idPrefix": "cs", "keywords": [ "Data Visualization", "Meteorology", "Analytical Models", "Data Models", "Biological System Modeling", "Computational Modeling", "Visualization", "Scientific Computing", "Data Visualization", "Big Data", "Simulation" ], "authors": [ { "givenName": "Aritra", "surname": "Dasgupta", "fullName": "Aritra Dasgupta", "affiliation": "Pacific Northwest National Laboratory", "__typename": "ArticleAuthorType" }, { "givenName": "Jorge", "surname": "Poco", "fullName": "Jorge Poco", "affiliation": "University of Washington, Seattle", "__typename": "ArticleAuthorType" }, { "givenName": "Enrico", "surname": "Bertini", "fullName": 
"Enrico Bertini", "affiliation": "New York University", "__typename": "ArticleAuthorType" }, { "givenName": "Claudio T.", "surname": "Silva", "fullName": "Claudio T. Silva", "affiliation": "New York University", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "01", "pubDate": "2016-01-01 00:00:00", "pubType": "mags", "pages": "92-100", "year": "2016", "issn": "1521-9615", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/ldav/2014/5215/0/07013208", "title": "Visual analytics of large-scale climate model data", "doi": null, "abstractUrl": "/proceedings-article/ldav/2014/07013208/12OmNA14Aga", "parentPublication": { "id": "proceedings/ldav/2014/5215/0", "title": "2014 IEEE 4th Symposium on Large Data Analysis and Visualization (LDAV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/hicss/2011/9618/0/05718551", "title": "A Decision Support System for Urban Climate Change Adaptation", "doi": null, "abstractUrl": "/proceedings-article/hicss/2011/05718551/12OmNvlg8hu", "parentPublication": { "id": "proceedings/hicss/2011/9618/0", "title": "2011 44th Hawaii International Conference on System Sciences", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/big-data/2015/9926/0/07363973", "title": "Climate model diagnostic analyzer", "doi": null, "abstractUrl": "/proceedings-article/big-data/2015/07363973/12OmNwcl7BL", "parentPublication": { "id": "proceedings/big-data/2015/9926/0", "title": "2015 IEEE International Conference on Big Data (Big Data)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2011/0868/0/06004058", "title": "Information Visualization in Climate Research", "doi": null, "abstractUrl": 
"/proceedings-article/iv/2011/06004058/12OmNyO8tVC", "parentPublication": { "id": "proceedings/iv/2011/0868/0", "title": "2011 15th International Conference on Information Visualisation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/mu/2015/01/mmu2015010041", "title": "Designing an Interactive Audio Interface for Climate Science", "doi": null, "abstractUrl": "/magazine/mu/2015/01/mmu2015010041/13rRUwdrdMQ", "parentPublication": { "id": "mags/mu", "title": "IEEE MultiMedia", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cs/2015/06/mcs2015060019", "title": "Scalable Multivariate Time-Series Models for Climate Informatics", "doi": null, "abstractUrl": "/magazine/cs/2015/06/mcs2015060019/13rRUxD9h19", "parentPublication": { "id": "mags/cs", "title": "Computing in Science & Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2017/01/07539323", "title": "Multi-Resolution Climate Ensemble Parameter Analysis with Nested Parallel Coordinates Plots", "doi": null, "abstractUrl": "/journal/tg/2017/01/07539323/13rRUxNEqPZ", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cs/2015/06/mcs2015060027", "title": "Identifying Physical Interactions from Climate Data: Challenges and Opportunities", "doi": null, "abstractUrl": "/magazine/cs/2015/06/mcs2015060027/13rRUxjyWZB", "parentPublication": { "id": "mags/cs", "title": "Computing in Science & Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cs/2015/06/mcs2015060043", "title": "Can Topic Modeling Shed Light on Climate Extremes?", "doi": null, "abstractUrl": "/magazine/cs/2015/06/mcs2015060043/13rRUyYBlcf", "parentPublication": { "id": "mags/cs", "title": "Computing in Science 
& Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2015/09/07061479", "title": "Bridging Theory with Practice: An Exploratory Study of Visualization Use and Design for Climate Model Comparison", "doi": null, "abstractUrl": "/journal/tg/2015/09/07061479/13rRUyfbwqL", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "mcs2016010088", "articleId": "13rRUEgs2GQ", "__typename": "AdjacentArticleType" }, "next": { "fno": "mcs2016010102", "articleId": "13rRUILLkzn", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNyGtjf5", "title": "April", "year": "2019", "issueNum": "04", "idPrefix": "tg", "pubType": "journal", "volume": "25", "label": "April", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "17YCN4oTjd6", "doi": "10.1109/TVCG.2018.2825424", "abstract": "This paper presents an approach for the interactive visualization, exploration and interpretation of large multivariate time series. Interesting patterns in such datasets usually appear as periodic or recurrent behavior often caused by the interaction between variables. To identify such patterns, we summarize the data as conceptual states, modeling temporal dynamics as transitions between the states. This representation can visualize large datasets with potentially billions of examples. We extend the representation to multiple spatial granularities allowing the user to find patterns on multiple scales. The result is an interactive web-based tool called StreamStory. StreamStory couples the abstraction with several tools that map the abstractions back to domain-specific concepts using techniques from statistics and machine learning. It is aimed at users who are not experts in data analytics, minimizing the number of parameters to configure out-of-the-box. We use three real-world datasets to demonstrate how StreamStory can be used to perform three main visual analytics tasks: identify the main states of a complex system and map them back to data-specific concepts, find high-level and long-term periodic behavior and traverse the scales to identify which scales exhibit interesting phenomena. We find and interpret several known, as well as previously unknown patterns in these datasets.", "abstracts": [ { "abstractType": "Regular", "content": "This paper presents an approach for the interactive visualization, exploration and interpretation of large multivariate time series. 
Interesting patterns in such datasets usually appear as periodic or recurrent behavior often caused by the interaction between variables. To identify such patterns, we summarize the data as conceptual states, modeling temporal dynamics as transitions between the states. This representation can visualize large datasets with potentially billions of examples. We extend the representation to multiple spatial granularities allowing the user to find patterns on multiple scales. The result is an interactive web-based tool called StreamStory. StreamStory couples the abstraction with several tools that map the abstractions back to domain-specific concepts using techniques from statistics and machine learning. It is aimed at users who are not experts in data analytics, minimizing the number of parameters to configure out-of-the-box. We use three real-world datasets to demonstrate how StreamStory can be used to perform three main visual analytics tasks: identify the main states of a complex system and map them back to data-specific concepts, find high-level and long-term periodic behavior and traverse the scales to identify which scales exhibit interesting phenomena. We find and interpret several known, as well as previously unknown patterns in these datasets.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "This paper presents an approach for the interactive visualization, exploration and interpretation of large multivariate time series. Interesting patterns in such datasets usually appear as periodic or recurrent behavior often caused by the interaction between variables. To identify such patterns, we summarize the data as conceptual states, modeling temporal dynamics as transitions between the states. This representation can visualize large datasets with potentially billions of examples. We extend the representation to multiple spatial granularities allowing the user to find patterns on multiple scales. 
The result is an interactive web-based tool called StreamStory. StreamStory couples the abstraction with several tools that map the abstractions back to domain-specific concepts using techniques from statistics and machine learning. It is aimed at users who are not experts in data analytics, minimizing the number of parameters to configure out-of-the-box. We use three real-world datasets to demonstrate how StreamStory can be used to perform three main visual analytics tasks: identify the main states of a complex system and map them back to data-specific concepts, find high-level and long-term periodic behavior and traverse the scales to identify which scales exhibit interesting phenomena. We find and interpret several known, as well as previously unknown patterns in these datasets.", "title": "StreamStory: Exploring Multivariate Time Series on Multiple Scales", "normalizedTitle": "StreamStory: Exploring Multivariate Time Series on Multiple Scales", "fno": "08340877", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Data Visualisation", "Learning Artificial Intelligence", "Pattern Classification", "Time Series", "Multivariate Time Series", "Recurrent Behavior", "Temporal Dynamics", "Multiple Spatial Granularities", "Stream Story Couples", "Domain Specific Concepts", "Data Analytics", "Real World Datasets", "Main Visual Analytics Tasks", "Data Specific Concepts", "Long Term Periodic Behavior", "Unknown Patterns", "Interactive Web Based Tool", "Data Visualization", "Time Series Analysis", "Tools", "Data Models", "Markov Processes", "Clutter", "Meteorology", "Time Series Analysis", "Visualization Systems And Software", "Data And Knowledge Visualization", "Markov Processes", "Multivariate Visualization", "Data Mining" ], "authors": [ { "givenName": "Luka", "surname": "Stopar", "fullName": "Luka Stopar", "affiliation": "Jozef Stefan Institute, Jozef Stefan International Postgraduate School, Ljubljana, Slovenia", "__typename": "ArticleAuthorType" }, { "givenName": 
"Primoz", "surname": "Skraba", "fullName": "Primoz Skraba", "affiliation": "Jozef Stefan Institute, Ljubljana, Slovenia", "__typename": "ArticleAuthorType" }, { "givenName": "Marko", "surname": "Grobelnik", "fullName": "Marko Grobelnik", "affiliation": "Jozef Stefan Institute, Ljubljana, Slovenia", "__typename": "ArticleAuthorType" }, { "givenName": "Dunja", "surname": "Mladenic", "fullName": "Dunja Mladenic", "affiliation": "Jozef Stefan Institute, Jozef Stefan International Postgraduate School, Ljubljana, Slovenia", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": false, "showRecommendedArticles": true, "isOpenAccess": true, "issueNum": "04", "pubDate": "2019-04-01 00:00:00", "pubType": "trans", "pages": "1788-1802", "year": "2019", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "trans/tk/2005/07/k0875", "title": "Periodicity Detection in Time Series Databases", "doi": null, "abstractUrl": "/journal/tk/2005/07/k0875/13rRUwdrdL0", "parentPublication": { "id": "trans/tk", "title": "IEEE Transactions on Knowledge & Data Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2012/12/ttg2012122899", "title": "A Visual Analytics Approach to Multiscale Exploration of Environmental Time Series", "doi": null, "abstractUrl": "/journal/tg/2012/12/ttg2012122899/13rRUxDqS8g", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/dsc/2021/1815/0/181500a083", "title": "Sequence Attention for Multivariate Time Series Forecasting", "doi": null, "abstractUrl": "/proceedings-article/dsc/2021/181500a083/1CuhWbfuEYU", "parentPublication": { "id": "proceedings/dsc/2021/1815/0", "title": "2021 IEEE Sixth International Conference on Data Science in Cyberspace (DSC)", 
"__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bigcomp/2023/7578/0/757800a269", "title": "Temporal Convolutional Network-Based Time-Series Segmentation", "doi": null, "abstractUrl": "/proceedings-article/bigcomp/2023/757800a269/1LFLAQcKyWs", "parentPublication": { "id": "proceedings/bigcomp/2023/7578/0", "title": "2023 IEEE International Conference on Big Data and Smart Computing (BigComp)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tk/5555/01/10105527", "title": "Multi-Scale Adaptive Graph Neural Network for Multivariate Time Series Forecasting", "doi": null, "abstractUrl": "/journal/tk/5555/01/10105527/1MtgpjufAOc", "parentPublication": { "id": "trans/tk", "title": "IEEE Transactions on Knowledge & Data Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdmw/2019/4896/0/489600b020", "title": "Discovering Periodic Patterns in Irregular Time Series", "doi": null, "abstractUrl": "/proceedings-article/icdmw/2019/489600b020/1gAwZcMpVle", "parentPublication": { "id": "proceedings/icdmw/2019/4896/0", "title": "2019 International Conference on Data Mining Workshops (ICDMW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/02/09224197", "title": "MultiSegVA: Using Visual Analytics to Segment Biologging Time Series on Multiple Scales", "doi": null, "abstractUrl": "/journal/tg/2021/02/09224197/1nV6Z3fZjUY", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdm/2020/8316/0/831600b394", "title": "Learning Periods from Incomplete Multivariate Time Series", "doi": null, "abstractUrl": "/proceedings-article/icdm/2020/831600b394/1r54HRKRr56", "parentPublication": { "id": "proceedings/icdm/2020/8316/0", 
"title": "2020 IEEE International Conference on Data Mining (ICDM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2020/9134/0/913400a328", "title": "Exploring Time-Series Through Force-Directed Timelines", "doi": null, "abstractUrl": "/proceedings-article/iv/2020/913400a328/1rSR9aLY29W", "parentPublication": { "id": "proceedings/iv/2020/9134/0", "title": "2020 24th International Conference Information Visualisation (IV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ta/5555/01/09477164", "title": "Modeling Multiple Temporal Scales of Full-body Movements for Emotion Classification", "doi": null, "abstractUrl": "/journal/ta/5555/01/09477164/1v2LXMHQUAo", "parentPublication": { "id": "trans/ta", "title": "IEEE Transactions on Affective Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "08327892", "articleId": "17YCN5yqdZn", "__typename": "AdjacentArticleType" }, "next": { "fno": "08334579", "articleId": "17YCN2ZsR9e", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "1DGRZtSiOdy", "title": "July", "year": "2022", "issueNum": "07", "idPrefix": "tg", "pubType": "journal", "volume": "28", "label": "July", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1ovEVPWDI4g", "doi": "10.1109/TVCG.2020.3035823", "abstract": "After demonstrating that rainbow colors are still commonly used in scientific publications, we comparatively evaluate the rainbow and sequential color schemes on choropleth and isarithmic maps in an empirical user study with 544 participants to examine if a) people intuitively associate <italic>order</italic> for the colors in these schemes, b) they can successfully conduct perceptual and semantic map reading and recall tasks with quantitative data where order may have implicit or explicit importance. We find that there is little to no agreement in ordering of rainbow colors while sequential colors are indeed intuitively ordered by the participants with a strong <italic>dark is more bias</italic>. Sequential colors facilitate most quantitative map reading tasks better than the rainbow colors, whereas rainbow colors competitively facilitate extracting specific values from a map, and may support hue recall better than sequential. 
We thus contribute to <italic>dark- versus light is more bias</italic> debate, demonstrate why and when rainbow colors may impair performance, and add further nuance to our understanding of this highly popular, yet highly criticized color scheme.", "abstracts": [ { "abstractType": "Regular", "content": "After demonstrating that rainbow colors are still commonly used in scientific publications, we comparatively evaluate the rainbow and sequential color schemes on choropleth and isarithmic maps in an empirical user study with 544 participants to examine if a) people intuitively associate <italic>order</italic> for the colors in these schemes, b) they can successfully conduct perceptual and semantic map reading and recall tasks with quantitative data where order may have implicit or explicit importance. We find that there is little to no agreement in ordering of rainbow colors while sequential colors are indeed intuitively ordered by the participants with a strong <italic>dark is more bias</italic>. Sequential colors facilitate most quantitative map reading tasks better than the rainbow colors, whereas rainbow colors competitively facilitate extracting specific values from a map, and may support hue recall better than sequential. 
We thus contribute to <italic>dark- versus light is more bias</italic> debate, demonstrate why and when rainbow colors may impair performance, and add further nuance to our understanding of this highly popular, yet highly criticized color scheme.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "After demonstrating that rainbow colors are still commonly used in scientific publications, we comparatively evaluate the rainbow and sequential color schemes on choropleth and isarithmic maps in an empirical user study with 544 participants to examine if a) people intuitively associate order for the colors in these schemes, b) they can successfully conduct perceptual and semantic map reading and recall tasks with quantitative data where order may have implicit or explicit importance. We find that there is little to no agreement in ordering of rainbow colors while sequential colors are indeed intuitively ordered by the participants with a strong dark is more bias. Sequential colors facilitate most quantitative map reading tasks better than the rainbow colors, whereas rainbow colors competitively facilitate extracting specific values from a map, and may support hue recall better than sequential. 
We thus contribute to dark- versus light is more bias debate, demonstrate why and when rainbow colors may impair performance, and add further nuance to our understanding of this highly popular, yet highly criticized color scheme.", "title": "Rainbow Dash: Intuitiveness, Interpretability and Memorability of the Rainbow Color Scheme in Visualization", "normalizedTitle": "Rainbow Dash: Intuitiveness, Interpretability and Memorability of the Rainbow Color Scheme in Visualization", "fno": "09249052", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Cognition", "Colour Displays", "Data Visualisation", "Feature Extraction", "Human Computer Interaction", "Human Factors", "Image Colour Analysis", "Medical Information Systems", "Yet Highly Criticized Color Scheme", "Rainbow Dash", "Rainbow Color Scheme", "Rainbow Colors", "Sequential Color Schemes", "People Intuitively Associate Order", "Semantic Map Reading", "Recall Tasks", "Sequential Colors", "Highly Popular Criticized Color Scheme", "Image Color Analysis", "Task Analysis", "Data Visualization", "Remote Sensing", "Visualization", "Licenses", "Anomaly Detection", "Color", "Visualization", "Colormap", "Color Perception", "Visual Design" ], "authors": [ { "givenName": "Izabela M.", "surname": "Gołębiowska", "fullName": "Izabela M. 
Gołębiowska", "affiliation": "Faculty of Geography and Regional Studies, University of Warsaw, Warszawa, Poland", "__typename": "ArticleAuthorType" }, { "givenName": "Arzu", "surname": "Çöltekin", "fullName": "Arzu Çöltekin", "affiliation": "Institute of Interactive Technologies, University of Applied Sciences and Arts Northwestern Switzerland, Brugg-Windisch, Switzerland", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": false, "showRecommendedArticles": true, "isOpenAccess": true, "issueNum": "07", "pubDate": "2022-07-01 00:00:00", "pubType": "trans", "pages": "2722-2733", "year": "2022", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/avss/2013/0703/0/06636664", "title": "Tree-based vehicle color classification using spatial features on publicly available continuous data", "doi": null, "abstractUrl": "/proceedings-article/avss/2013/06636664/12OmNz5JBNp", "parentPublication": { "id": "proceedings/avss/2013/0703/0", "title": "2013 10th IEEE International Conference on Advanced Video and Signal Based Surveillance (AVSS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2007/02/mcg2007020014", "title": "Rainbow Color Map (Still) Considered Harmful", "doi": null, "abstractUrl": "/magazine/cg/2007/02/mcg2007020014/13rRUxYrbOE", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/03/08494817", "title": "The Effect of Color Scales on Climate Scientists&#x2019; Objective and Subjective Performance in Spatial Data Analysis Tasks", "doi": null, "abstractUrl": "/journal/tg/2020/03/08494817/14s8M4gkNi0", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "trans/tg/2019/01/08454346", "title": "Mapping Color to Meaning in Colormap Data Visualizations", "doi": null, "abstractUrl": "/journal/tg/2019/01/08454346/17D45VsBU7I", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2018/7202/0/720200a561", "title": "A New Diagram for Amino Acids: User Study Comparing Rainbow Boxes to Venn/Euler Diagram", "doi": null, "abstractUrl": "/proceedings-article/iv/2018/720200a561/17D45WaTkc1", "parentPublication": { "id": "proceedings/iv/2018/7202/0", "title": "2018 22nd International Conference Information Visualisation (IV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/09/08637778", "title": "Measuring the Effects of Scalar and Spherical Colormaps on Ensembles of DMRI Tubes", "doi": null, "abstractUrl": "/journal/tg/2020/09/08637778/17D45WrVgbO", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/01/08805429", "title": "Color Crafting: Automating the Construction of Designer Quality Color Ramps", "doi": null, "abstractUrl": "/journal/tg/2020/01/08805429/1cG4w5XPNUQ", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2020/9360/0/09150800", "title": "Hierarchical Color Learning in Convolutional Neural Networks", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2020/09150800/1lPHawaBFg4", "parentPublication": { "id": "proceedings/cvprw/2020/9360/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2007/02/04118486", "title": "Rainbow Color Map (Still) Considered Harmful", "doi": null, "abstractUrl": "/magazine/cg/2007/02/04118486/1oCjGn4Rpss", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/08/09293392", "title": "A Perceptual Color-Matching Method for Examining Color Blending in Augmented Reality Head-Up Display Graphics", "doi": null, "abstractUrl": "/journal/tg/2022/08/09293392/1pyomiXbJQs", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "09266764", "articleId": "1oZxEim72LK", "__typename": "AdjacentArticleType" }, "next": { "fno": "09258424", "articleId": "1oHhYFwKrM4", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "1DGScAbrTEs", "name": "ttg202207-09249052s1-supp1-3035823.pdf", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202207-09249052s1-supp1-3035823.pdf", "extension": "pdf", "size": "910 kB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "12OmNBTJIK6", "title": "June", "year": "2013", "issueNum": "02", "idPrefix": "ci", "pubType": "journal", "volume": "5", "label": "June", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUxk89gH", "doi": "10.1109/TCIAIG.2013.2253778", "abstract": "Brain-computer interfaces (BCIs) are not only being developed to aid disabled individuals with motor substitution, motor recovery, and novel communication possibilities, but also as a modality for healthy users in entertainment and gaming. This study investigates whether the incorporation of a BCI in the popular game World of Warcraft (WoW) has effects on the user experience. A BCI control channel based on parietal alpha band power is used to control the shape and function of the avatar in the game. In the experiment, participants (n=42) , a mix of experienced and inexperienced WoW players, played with and without the use of BCI in a within-subjects design. Participants themselves could indicate when they wanted to stop playing. Actual and estimated duration was recorded and questionnaires on presence and control were administered. Afterwards, oral interviews were taken. No difference in actual duration was found between conditions. Results indicate that the difference between estimated and actual duration was not related to user experience but was person specific. When using a BCI, control and involvement were rated lower. But BCI control did not significantly decrease fun. During interviews, experienced players stated that they saw potential in the application of BCIs in games with complex interfaces such as WoW. 
This study suggests that BCI as an additional control can be as much fun and natural to use as keyboard/mouse control, even if the amount of control is limited.", "abstracts": [ { "abstractType": "Regular", "content": "Brain-computer interfaces (BCIs) are not only being developed to aid disabled individuals with motor substitution, motor recovery, and novel communication possibilities, but also as a modality for healthy users in entertainment and gaming. This study investigates whether the incorporation of a BCI in the popular game World of Warcraft (WoW) has effects on the user experience. A BCI control channel based on parietal alpha band power is used to control the shape and function of the avatar in the game. In the experiment, participants (n=42) , a mix of experienced and inexperienced WoW players, played with and without the use of BCI in a within-subjects design. Participants themselves could indicate when they wanted to stop playing. Actual and estimated duration was recorded and questionnaires on presence and control were administered. Afterwards, oral interviews were taken. No difference in actual duration was found between conditions. Results indicate that the difference between estimated and actual duration was not related to user experience but was person specific. When using a BCI, control and involvement were rated lower. But BCI control did not significantly decrease fun. During interviews, experienced players stated that they saw potential in the application of BCIs in games with complex interfaces such as WoW. 
This study suggests that BCI as an additional control can be as much fun and natural to use as keyboard/mouse control, even if the amount of control is limited.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Brain-computer interfaces (BCIs) are not only being developed to aid disabled individuals with motor substitution, motor recovery, and novel communication possibilities, but also as a modality for healthy users in entertainment and gaming. This study investigates whether the incorporation of a BCI in the popular game World of Warcraft (WoW) has effects on the user experience. A BCI control channel based on parietal alpha band power is used to control the shape and function of the avatar in the game. In the experiment, participants (n=42) , a mix of experienced and inexperienced WoW players, played with and without the use of BCI in a within-subjects design. Participants themselves could indicate when they wanted to stop playing. Actual and estimated duration was recorded and questionnaires on presence and control were administered. Afterwards, oral interviews were taken. No difference in actual duration was found between conditions. Results indicate that the difference between estimated and actual duration was not related to user experience but was person specific. When using a BCI, control and involvement were rated lower. But BCI control did not significantly decrease fun. During interviews, experienced players stated that they saw potential in the application of BCIs in games with complex interfaces such as WoW. 
This study suggests that BCI as an additional control can be as much fun and natural to use as keyboard/mouse control, even if the amount of control is limited.", "title": "Experiencing BCI Control in a Popular Computer Game", "normalizedTitle": "Experiencing BCI Control in a Popular Computer Game", "fno": "06484110", "hasPdf": true, "idPrefix": "ci", "keywords": [ "Avatars", "Brain Computer Interfaces", "Computer Games", "Brain Computer Interfaces", "Disabled Persons", "Motor Substitution", "Motor Recovery", "Communication Possibilities", "Healthy User Modality", "World Of Warcraft Computer Game", "Wo W Computer Game Interfaces", "User Experience", "BCI Control Channel", "Parietal Alpha Band Power", "Avatar Function Control", "Avatar Shape Control", "Experienced Wo W Players", "Inexperienced Wo W Players", "Games", "Shape", "Electroencephalography", "Headphones", "Interviews", "Keyboards", "Mice", "Brain Computer Interface BCI", "Games", "Human Factors", "Presence", "User Experience" ], "authors": [ { "givenName": "Bram", "surname": "van de Laar", "fullName": "Bram van de Laar", "affiliation": "Human Media Interaction Group, Faculty of Electrical Engineering, Mathematics and Computer Science (EEMCS), University of Twente, Enschede, The Netherlands", "__typename": "ArticleAuthorType" }, { "givenName": "Hayrettin", "surname": "Gürkök", "fullName": "Hayrettin Gürkök", "affiliation": "Human Media Interaction Group, Faculty of Electrical Engineering, Mathematics and Computer Science (EEMCS), University of Twente, Enschede, The Netherlands", "__typename": "ArticleAuthorType" }, { "givenName": "Danny", "surname": "Plass-Oude Bos", "fullName": "Danny Plass-Oude Bos", "affiliation": "Human Media Interaction Group, Faculty of Electrical Engineering, Mathematics and Computer Science (EEMCS), University of Twente, Enschede, The Netherlands", "__typename": "ArticleAuthorType" }, { "givenName": "Mannes", "surname": "Poel", "fullName": "Mannes Poel", "affiliation": "Human Media 
Interaction Group, Faculty of Electrical Engineering, Mathematics and Computer Science (EEMCS), University of Twente, Enschede, The Netherlands", "__typename": "ArticleAuthorType" }, { "givenName": "Anton", "surname": "Nijholt", "fullName": "Anton Nijholt", "affiliation": "Human Media Interaction Group, Faculty of Electrical Engineering, Mathematics and Computer Science (EEMCS), University of Twente, Enschede, The Netherlands", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "02", "pubDate": "2013-04-01 00:00:00", "pubType": "trans", "pages": "176-184", "year": "2013", "issn": "1943-068X", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/cit-iucc-dasc-picom/2015/0154/0/07363081", "title": "Feature Selection in Brain Computer Interface Using Genetics Method", "doi": null, "abstractUrl": "/proceedings-article/cit-iucc-dasc-picom/2015/07363081/12OmNAkWvIK", "parentPublication": { "id": "proceedings/cit-iucc-dasc-picom/2015/0154/0", "title": "2015 IEEE International Conference on Computer and Information Technology; Ubiquitous Computing and Communications; Dependable, Autonomic and Secure Computing; Pervasive Intelligence and Computing (CIT/IUCC/DASC/PICOM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2015/7568/0/7568a488", "title": "Examining User Experiences through a Multimodal BCI Puzzle Game", "doi": null, "abstractUrl": "/proceedings-article/iv/2015/7568a488/12OmNB0X8xz", "parentPublication": { "id": "proceedings/iv/2015/7568/0", "title": "2015 19th International Conference on Information Visualisation (iV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/acii/2009/4800/0/05349478", "title": "Emotional brain-computer interfaces", "doi": null, "abstractUrl": 
"/proceedings-article/acii/2009/05349478/12OmNBKmXj3", "parentPublication": { "id": "proceedings/acii/2009/4800/0", "title": "2009 3rd International Conference on Affective Computing and Intelligent Interaction and Workshops (ACII 2009)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cbms/2017/1710/0/1710a440", "title": "Visual Versus Kinesthetic Motor Imagery for BCI Control of Robotic Arms (Mercury 2.0)", "doi": null, "abstractUrl": "/proceedings-article/cbms/2017/1710a440/12OmNBRsVuI", "parentPublication": { "id": "proceedings/cbms/2017/1710/0", "title": "2017 IEEE 30th International Symposium on Computer-Based Medical Systems (CBMS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2012/1247/0/06180932", "title": "New input modalities for modern game design and virtual embodiment", "doi": null, "abstractUrl": "/proceedings-article/vr/2012/06180932/12OmNCbU3bC", "parentPublication": { "id": "proceedings/vr/2012/1247/0", "title": "Virtual Reality Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cic/2016/4607/0/4607a102", "title": "Brain Computer Interface (BCI) Applications: Privacy Threats and Countermeasures", "doi": null, "abstractUrl": "/proceedings-article/cic/2016/4607a102/12OmNCcKQhF", "parentPublication": { "id": "proceedings/cic/2016/4607/0", "title": "2016 IEEE 2nd International Conference on Collaboration and Internet Computing (CIC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cbms/2017/1710/0/1710a262", "title": "Commercial BCI Control and Functional Brain Networks in Spinal Cord Injury: A Proof-of-Concept", "doi": null, "abstractUrl": "/proceedings-article/cbms/2017/1710a262/12OmNwCJOUC", "parentPublication": { "id": "proceedings/cbms/2017/1710/0", "title": "2017 IEEE 30th International Symposium on Computer-Based 
Medical Systems (CBMS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ci/2013/02/06518141", "title": "Games, gameplay, and BCI: The state of the art", "doi": null, "abstractUrl": "/journal/ci/2013/02/06518141/13rRUy2YLNu", "parentPublication": { "id": "trans/ci", "title": "IEEE Transactions on Computational Intelligence and AI in Games", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ci/2013/02/06509428", "title": "Toward Contextual SSVEP-Based BCI Controller: Smart Activation of Stimuli and Control Weighting", "doi": null, "abstractUrl": "/journal/ci/2013/02/06509428/13rRUyuegjt", "parentPublication": { "id": "trans/ci", "title": "IEEE Transactions on Computational Intelligence and AI in Games", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/03/08481564", "title": "Towards BCI-Based Interfaces for Augmented Reality: Feasibility, Design and Evaluation", "doi": null, "abstractUrl": "/journal/tg/2020/03/08481564/146z4OQdyi9", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "06509413", "articleId": "13rRUEgs2vK", "__typename": "AdjacentArticleType" }, "next": { "fno": "06400237", "articleId": "13rRUyuNszz", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNBTJIK6", "title": "June", "year": "2013", "issueNum": "02", "idPrefix": "ci", "pubType": "journal", "volume": "5", "label": "June", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUy2YLNu", "doi": "10.1109/TCIAIG.2013.2263555", "abstract": "Brain-computer interfaces (BCIs) and basic computer games have been interconnected since BCI development began, exploiting gameplay elements as a means of enhancing performance in BCI training protocols and entertaining and challenging participants while training to use a BCI. By providing the BCI user with an entertaining environment, researchers hope to assist users in becoming more proficient at controlling a BCI system. BCIs have been used to enrich the experience of abled-bodied and physically impaired users in various computer applications, in particular, computer games. BCI games have been reviewed previously, yet a critical evaluation of “gameplay” within BCI games has not been undertaken. Gameplay is a key aspect of any computer game and encompasses the challenges presented to the player, the actions made available to the player by the game designer to overcome the challenges and the interaction mechanism in the game. Here, the appropriateness of game genres (a category of games characterized by a particular set of gameplay challenges) and the associated gameplay challenges for different BCI paradigms is evaluated. The gameplay mechanics employed across a range of BCI games are reviewed and evaluated in terms of the BCI control strategy's suitability, considering the genre and gameplay mechanics employed. 
A number of recommendations for the field relating to genre-specific BCI-games development and assessing user performance are also provided for BCI game developers.", "abstracts": [ { "abstractType": "Regular", "content": "Brain-computer interfaces (BCIs) and basic computer games have been interconnected since BCI development began, exploiting gameplay elements as a means of enhancing performance in BCI training protocols and entertaining and challenging participants while training to use a BCI. By providing the BCI user with an entertaining environment, researchers hope to assist users in becoming more proficient at controlling a BCI system. BCIs have been used to enrich the experience of abled-bodied and physically impaired users in various computer applications, in particular, computer games. BCI games have been reviewed previously, yet a critical evaluation of “gameplay” within BCI games has not been undertaken. Gameplay is a key aspect of any computer game and encompasses the challenges presented to the player, the actions made available to the player by the game designer to overcome the challenges and the interaction mechanism in the game. Here, the appropriateness of game genres (a category of games characterized by a particular set of gameplay challenges) and the associated gameplay challenges for different BCI paradigms is evaluated. The gameplay mechanics employed across a range of BCI games are reviewed and evaluated in terms of the BCI control strategy's suitability, considering the genre and gameplay mechanics employed. 
A number of recommendations for the field relating to genre-specific BCI-games development and assessing user performance are also provided for BCI game developers.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Brain-computer interfaces (BCIs) and basic computer games have been interconnected since BCI development began, exploiting gameplay elements as a means of enhancing performance in BCI training protocols and entertaining and challenging participants while training to use a BCI. By providing the BCI user with an entertaining environment, researchers hope to assist users in becoming more proficient at controlling a BCI system. BCIs have been used to enrich the experience of abled-bodied and physically impaired users in various computer applications, in particular, computer games. BCI games have been reviewed previously, yet a critical evaluation of “gameplay” within BCI games has not been undertaken. Gameplay is a key aspect of any computer game and encompasses the challenges presented to the player, the actions made available to the player by the game designer to overcome the challenges and the interaction mechanism in the game. Here, the appropriateness of game genres (a category of games characterized by a particular set of gameplay challenges) and the associated gameplay challenges for different BCI paradigms is evaluated. The gameplay mechanics employed across a range of BCI games are reviewed and evaluated in terms of the BCI control strategy's suitability, considering the genre and gameplay mechanics employed. 
A number of recommendations for the field relating to genre-specific BCI-games development and assessing user performance are also provided for BCI game developers.", "title": "Games, gameplay, and BCI: The state of the art", "normalizedTitle": "Games, gameplay, and BCI: The state of the art", "fno": "06518141", "hasPdf": true, "idPrefix": "ci", "keywords": [ "Brain Computer Interfaces", "Computer Games", "Gameplay", "Computer Games", "Brain Computer Interfaces", "BCI Development", "BCI Training Protocols", "BCI System", "Computer Applications", "Game Designer", "Interaction Mechanism", "Games", "Computers", "Training", "Electroencephalography", "Visualization", "Control Systems", "Electric Potential", "Brain Computer Interfaces BC Is", "Games", "Game Design", "Gameplay", "Review" ], "authors": [ { "givenName": "David", "surname": "Marshall", "fullName": "David Marshall", "affiliation": "Faculty of Computing and Engineering, University of Ulster, Derry/Londonderry, U.K.", "__typename": "ArticleAuthorType" }, { "givenName": "Damien", "surname": "Coyle", "fullName": "Damien Coyle", "affiliation": "Faculty of Computing and Engineering, University of Ulster, Derry/Londonderry, U.K.", "__typename": "ArticleAuthorType" }, { "givenName": "Shane", "surname": "Wilson", "fullName": "Shane Wilson", "affiliation": "School of Computing and Intelligent Systems, University of Ulster, Derry/Londonderry, U.K.", "__typename": "ArticleAuthorType" }, { "givenName": "Michael", "surname": "Callaghan", "fullName": "Michael Callaghan", "affiliation": "School of Computing and Intelligent Systems, University of Ulster, Derry/Londonderry, U.K.", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "02", "pubDate": "2013-04-01 00:00:00", "pubType": "trans", "pages": "82-99", "year": "2013", "issn": "1943-068X", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, 
"recommendedArticles": [ { "id": "proceedings/re/2014/3031/0/06912242", "title": "How practitioners approach gameplay requirements? An exploration into the context of massive multiplayer online role-playing games", "doi": null, "abstractUrl": "/proceedings-article/re/2014/06912242/12OmNAoDhVI", "parentPublication": { "id": "proceedings/re/2014/3031/0", "title": "2014 IEEE 22nd International Requirements Engineering Conference (RE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cgames/2012/1120/0/S3005", "title": "A gameplay loops formal language", "doi": null, "abstractUrl": "/proceedings-article/cgames/2012/S3005/12OmNArth9x", "parentPublication": { "id": "proceedings/cgames/2012/1120/0", "title": "2012 17th International Conference on Computer Games (CGAMES)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2015/7568/0/7568a488", "title": "Examining User Experiences through a Multimodal BCI Puzzle Game", "doi": null, "abstractUrl": "/proceedings-article/iv/2015/7568a488/12OmNB0X8xz", "parentPublication": { "id": "proceedings/iv/2015/7568/0", "title": "2015 19th International Conference on Information Visualisation (iV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cgames/2015/7921/0/07272958", "title": "Classification effects on Motion-Onset Visual Evoked Potentials using commercially available video games", "doi": null, "abstractUrl": "/proceedings-article/cgames/2015/07272958/12OmNy5hRcO", "parentPublication": { "id": "proceedings/cgames/2015/7921/0", "title": "2015 Computer Games: AI, Animation, Mobile, Multimedia, Educational and Serious Games (CGAMES)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sbgames/2014/8065/0/8065a123", "title": "Mindninja: Concept, Development and Evaluation of a Mind Action Game Based on EEGs", "doi": null, 
"abstractUrl": "/proceedings-article/sbgames/2014/8065a123/12OmNz61cY4", "parentPublication": { "id": "proceedings/sbgames/2014/8065/0", "title": "2014 Brazilian Symposium on Computer Games and Digital Entertainment (SBGAMES)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vs-games/2017/5812/0/08056579", "title": "Investigating the effect of user profile during training for BCI-based games", "doi": null, "abstractUrl": "/proceedings-article/vs-games/2017/08056579/12OmNzZmZr7", "parentPublication": { "id": "proceedings/vs-games/2017/5812/0", "title": "2017 9th International Conference on Virtual Worlds and Games for Serious Applications (VS-Games)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ci/2013/02/06334432", "title": "Steady-state visual evoked potential-based computer gaming on a consumer-grade EEG device", "doi": null, "abstractUrl": "/journal/ci/2013/02/06334432/13rRUNvya3x", "parentPublication": { "id": "trans/ci", "title": "IEEE Transactions on Computational Intelligence and AI in Games", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ci/2013/02/06484110", "title": "Experiencing BCI Control in a Popular Computer Game", "doi": null, "abstractUrl": "/journal/ci/2013/02/06484110/13rRUxk89gH", "parentPublication": { "id": "trans/ci", "title": "IEEE Transactions on Computational Intelligence and AI in Games", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ci/2013/02/06509428", "title": "Toward Contextual SSVEP-Based BCI Controller: Smart Activation of Stimuli and Control Weighting", "doi": null, "abstractUrl": "/journal/ci/2013/02/06509428/13rRUyuegjt", "parentPublication": { "id": "trans/ci", "title": "IEEE Transactions on Computational Intelligence and AI in Games", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/bibe/2022/8487/0/848700a333", "title": "Comparison between dry and wet EEG electrodes in an SSVEP-based BCI for robot navigation", "doi": null, "abstractUrl": "/proceedings-article/bibe/2022/848700a333/1J6hHZY7bfa", "parentPublication": { "id": "proceedings/bibe/2022/8487/0", "title": "2022 IEEE 22nd International Conference on Bioinformatics and Bioengineering (BIBE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "06528021", "articleId": "13rRUy0qnJ6", "__typename": "AdjacentArticleType" }, "next": { "fno": "06334432", "articleId": "13rRUNvya3x", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNwMob9C", "title": "April", "year": "2018", "issueNum": "04", "idPrefix": "tg", "pubType": "journal", "volume": "24", "label": "April", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUwIF6lb", "doi": "10.1109/TVCG.2018.2805123", "abstract": "This special issue of IEEE Transactions on Visualization and Computer Graphics (TVCG) contains the 29 full papers selected for the IEEE Virtual Reality and 3D User Interfaces (IEEE VR 2018) Conference held in Reutlingen, Germany, March 18-22, 2017. Since its inception in 1993, IEEE VR has been the premier venue to present new research results in the field of Virtual Reality (VR). The strong current trends toward VR systems for consumer audiences heightens the importance of this event. This fact is reflected in the cooperation between TVCG and IEEE VR, which is in its seventh year and is one cornerstone of the strategy of TVCG to combine computer graphics and data visualization in its scope with virtual and augmented reality. The special issue format combines speed of publication with all the established advantages of an archival journal. To that end, a rigorous and competitive two-round review process was performed to ensure the highest quality.", "abstracts": [ { "abstractType": "Regular", "content": "This special issue of IEEE Transactions on Visualization and Computer Graphics (TVCG) contains the 29 full papers selected for the IEEE Virtual Reality and 3D User Interfaces (IEEE VR 2018) Conference held in Reutlingen, Germany, March 18-22, 2017. Since its inception in 1993, IEEE VR has been the premier venue to present new research results in the field of Virtual Reality (VR). The strong current trends toward VR systems for consumer audiences heightens the importance of this event. 
This fact is reflected in the cooperation between TVCG and IEEE VR, which is in its seventh year and is one cornerstone of the strategy of TVCG to combine computer graphics and data visualization in its scope with virtual and augmented reality. The special issue format combines speed of publication with all the established advantages of an archival journal. To that end, a rigorous and competitive two-round review process was performed to ensure the highest quality.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "This special issue of IEEE Transactions on Visualization and Computer Graphics (TVCG) contains the 29 full papers selected for the IEEE Virtual Reality and 3D User Interfaces (IEEE VR 2018) Conference held in Reutlingen, Germany, March 18-22, 2017. Since its inception in 1993, IEEE VR has been the premier venue to present new research results in the field of Virtual Reality (VR). The strong current trends toward VR systems for consumer audiences heightens the importance of this event. This fact is reflected in the cooperation between TVCG and IEEE VR, which is in its seventh year and is one cornerstone of the strategy of TVCG to combine computer graphics and data visualization in its scope with virtual and augmented reality. The special issue format combines speed of publication with all the established advantages of an archival journal. 
To that end, a rigorous and competitive two-round review process was performed to ensure the highest quality.", "title": "Introducing the IEEE Virtual Reality 2018 Special Issue", "normalizedTitle": "Introducing the IEEE Virtual Reality 2018 Special Issue", "fno": "08315163", "hasPdf": true, "idPrefix": "tg", "keywords": [], "authors": [ { "givenName": "Leila", "surname": "De Floriani", "fullName": "Leila De Floriani", "affiliation": "University of Maryland at College Park", "__typename": "ArticleAuthorType" }, { "givenName": "Dieter", "surname": "Schmalstieg", "fullName": "Dieter Schmalstieg", "affiliation": "Graz University of Technology", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": false, "showRecommendedArticles": true, "isOpenAccess": true, "issueNum": "04", "pubDate": "2018-04-01 00:00:00", "pubType": "trans", "pages": "v-v", "year": "2018", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/vr/2018/3365/0/08446264", "title": "The 2018 VGTC Virtual Reality Career Award", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08446264/13bd1sv5NyF", "parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2014/04/ttg2014040vi", "title": "Message from the Paper Chairs and Guest Editors", "doi": null, "abstractUrl": "/journal/tg/2014/04/ttg2014040vi/13rRUwI5Ug9", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2018/04/08315160", "title": "Preface", "doi": null, "abstractUrl": "/journal/tg/2018/04/08315160/13rRUxNW1TW", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & 
Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/05/09754285", "title": "IEEE VR 2022 Introducing the Special Issue", "doi": null, "abstractUrl": "/journal/tg/2022/05/09754285/1CpcIar9LS8", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/05/09754286", "title": "IEEE VR 2022 Message from the Journal Paper Chairs and Guest Editors", "doi": null, "abstractUrl": "/journal/tg/2022/05/09754286/1Cpd7Bwusk8", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2023/05/10086962", "title": "IEEE VR 2023 Message from the Program Chairs and Guest Editors", "doi": null, "abstractUrl": "/journal/tg/2023/05/10086962/1LUpENvdb3O", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2023/05/10086959", "title": "IEEE VR 2023 Introducing the Special Issue", "doi": null, "abstractUrl": "/journal/tg/2023/05/10086959/1LUpFscWKxq", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/05/09052628", "title": "Introducing the IEEE Virtual Reality 2020 Special Issue", "doi": null, "abstractUrl": "/journal/tg/2020/05/09052628/1iFLKo4ODvO", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/05/09052630", "title": "Preface", "doi": null, "abstractUrl": 
"/journal/tg/2020/05/09052630/1iFLLHpsBfW", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/05/09405571", "title": "Introducing the IEEE Virtual Reality 2021 Special Issue", "doi": null, "abstractUrl": "/journal/tg/2021/05/09405571/1sP18PmVuQU", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "08315162", "articleId": "13rRUwInvyH", "__typename": "AdjacentArticleType" }, "next": { "fno": "08315160", "articleId": "13rRUxNW1TW", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNwswg8v", "title": "April-June", "year": "2016", "issueNum": "02", "idPrefix": "th", "pubType": "journal", "volume": "9", "label": "April-June", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUwcAqqr", "doi": "10.1109/TOH.2016.2564965", "abstract": "Upper limb amputees lack the valuable tactile sensing that helps provide context about the surrounding environment. Here, we utilize tactile information to provide active touch feedback to a prosthetic hand. First, we developed fingertip tactile sensors for producing biomimetic spiking responses for monitoring contact, release, and slip of an object grasped by a prosthetic hand. We convert the sensor output into pulses, mimicking the rapid and slowly adapting spiking responses of receptor afferents found in the human body. Second, we designed and implemented two neuromimetic event-based algorithms, Compliant Grasping and Slip Prevention, on a prosthesis to create a local closed-loop tactile feedback control system (i.e., tactile information is sent to the prosthesis). Grasping experiments were designed to assess the benefit of this biologically inspired neuromimetic tactile feedback to a prosthesis. Results from able-bodied and amputee subjects show the average number of objects that broke or slipped during grasping decreased by over 50 percent and the average time to complete a grasping task decreased by at least 10 percent for most trials when comparing neuromimetic tactile feedback with no feedback on a prosthesis. Our neuromimetic method of closed-loop tactile sensing is a novel approach to improving the function of upper limb prostheses.", "abstracts": [ { "abstractType": "Regular", "content": "Upper limb amputees lack the valuable tactile sensing that helps provide context about the surrounding environment. 
Here, we utilize tactile information to provide active touch feedback to a prosthetic hand. First, we developed fingertip tactile sensors for producing biomimetic spiking responses for monitoring contact, release, and slip of an object grasped by a prosthetic hand. We convert the sensor output into pulses, mimicking the rapid and slowly adapting spiking responses of receptor afferents found in the human body. Second, we designed and implemented two neuromimetic event-based algorithms, Compliant Grasping and Slip Prevention, on a prosthesis to create a local closed-loop tactile feedback control system (i.e., tactile information is sent to the prosthesis). Grasping experiments were designed to assess the benefit of this biologically inspired neuromimetic tactile feedback to a prosthesis. Results from able-bodied and amputee subjects show the average number of objects that broke or slipped during grasping decreased by over 50 percent and the average time to complete a grasping task decreased by at least 10 percent for most trials when comparing neuromimetic tactile feedback with no feedback on a prosthesis. Our neuromimetic method of closed-loop tactile sensing is a novel approach to improving the function of upper limb prostheses.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Upper limb amputees lack the valuable tactile sensing that helps provide context about the surrounding environment. Here, we utilize tactile information to provide active touch feedback to a prosthetic hand. First, we developed fingertip tactile sensors for producing biomimetic spiking responses for monitoring contact, release, and slip of an object grasped by a prosthetic hand. We convert the sensor output into pulses, mimicking the rapid and slowly adapting spiking responses of receptor afferents found in the human body. 
Second, we designed and implemented two neuromimetic event-based algorithms, Compliant Grasping and Slip Prevention, on a prosthesis to create a local closed-loop tactile feedback control system (i.e., tactile information is sent to the prosthesis). Grasping experiments were designed to assess the benefit of this biologically inspired neuromimetic tactile feedback to a prosthesis. Results from able-bodied and amputee subjects show the average number of objects that broke or slipped during grasping decreased by over 50 percent and the average time to complete a grasping task decreased by at least 10 percent for most trials when comparing neuromimetic tactile feedback with no feedback on a prosthesis. Our neuromimetic method of closed-loop tactile sensing is a novel approach to improving the function of upper limb prostheses.", "title": "Neuromimetic Event-Based Detection for Closed-Loop Tactile Feedback Control of Upper Limb Prostheses", "normalizedTitle": "Neuromimetic Event-Based Detection for Closed-Loop Tactile Feedback Control of Upper Limb Prostheses", "fno": "07466833", "hasPdf": true, "idPrefix": "th", "keywords": [ "Grasping", "Force", "Tactile Sensors", "Electromyography", "Prosthetic Hand", "Real Time Control", "Prosthetics", "Neuromimetic", "Force Feedback", "Real Time Control", "Neuromimetic", "Prosthetic", "Force Feedback" ], "authors": [ { "givenName": "Luke", "surname": "Osborn", "fullName": "Luke Osborn", "affiliation": "Department of Biomedical Engineering, Johns Hopkins University, Baltimore, MD", "__typename": "ArticleAuthorType" }, { "givenName": "Rahul R.", "surname": "Kaliki", "fullName": "Rahul R. Kaliki", "affiliation": ", Infinite Biomedical Technologies, Baltimore, MD", "__typename": "ArticleAuthorType" }, { "givenName": "Alcimar B.", "surname": "Soares", "fullName": "Alcimar B. 
Soares", "affiliation": "Department of Electrical Engineering, Biomedical Engineering Lab, Federal University of Uberlândia, Uberlândia, Brazil", "__typename": "ArticleAuthorType" }, { "givenName": "Nitish V.", "surname": "Thakor", "fullName": "Nitish V. Thakor", "affiliation": "Department of Biomedical Engineering, Johns Hopkins University, Baltimore, MD", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "02", "pubDate": "2016-04-01 00:00:00", "pubType": "trans", "pages": "196-206", "year": "2016", "issn": "1939-1412", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/ictai/2014/6572/0/6572a620", "title": "Two-Stage Multiclassifier System with Correction of Competence of Base Classifiers Applied to the Control of Bioprosthetic Hand", "doi": null, "abstractUrl": "/proceedings-article/ictai/2014/6572a620/12OmNBKW9y4", "parentPublication": { "id": "proceedings/ictai/2014/6572/0", "title": "2014 IEEE 26th International Conference on Tools with Artificial Intelligence (ICTAI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ecbs/2012/4664/0/4664a188", "title": "Modeling and Visualization of Classification-Based Control Schemes for Upper Limb Prostheses", "doi": null, "abstractUrl": "/proceedings-article/ecbs/2012/4664a188/12OmNrH1PAt", "parentPublication": { "id": "proceedings/ecbs/2012/4664/0", "title": "Engineering of Computer-Based Systems, IEEE International Conference on the", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ams/2010/4062/0/4062a624", "title": "Development of Optical Three-Axis Tactile Sensor and its Application to Robotic Hand for Dexterous Manipulation Tasks", "doi": null, "abstractUrl": "/proceedings-article/ams/2010/4062a624/12OmNzX6cqW", "parentPublication": { "id": 
"proceedings/ams/2010/4062/0", "title": "Asia International Conference on Modelling &amp; Simulation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/haptics/2008/2005/0/04479933", "title": "Effects of Proprioceptive Motion Feedback on Sighted and Non-Sighted Control of a Virtual Hand Prosthesis", "doi": null, "abstractUrl": "/proceedings-article/haptics/2008/04479933/12OmNzdoMQ8", "parentPublication": { "id": "proceedings/haptics/2008/2005/0", "title": "IEEE Haptics Symposium 2008", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/th/2015/04/07080902", "title": "Tactile Feedback of Object Slip Facilitates Virtual Object Manipulation", "doi": null, "abstractUrl": "/journal/th/2015/04/07080902/13rRUNvyakX", "parentPublication": { "id": "trans/th", "title": "IEEE Transactions on Haptics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/irc/2019/9245/0/924500a344", "title": "Estimation of Lightweight Object's Mass by a Humanoid Robot During a Precision Grip with Soft Tactile Sensors", "doi": null, "abstractUrl": "/proceedings-article/irc/2019/924500a344/18M7dtkmfxm", "parentPublication": { "id": "proceedings/irc/2019/9245/0", "title": "2019 Third IEEE International Conference on Robotic Computing (IRC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/irc/2019/9245/0/924500a293", "title": "Human-in-the-Loop Prosthetic Robot Hand Control Using Particle Filters for Grasp Selection", "doi": null, "abstractUrl": "/proceedings-article/irc/2019/924500a293/18M7erodQXK", "parentPublication": { "id": "proceedings/irc/2019/9245/0", "title": "2019 Third IEEE International Conference on Robotic Computing (IRC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/crv/2022/9774/0/977400a098", "title": "Integrating High-Resolution 
Tactile Sensing into Grasp Stability Prediction", "doi": null, "abstractUrl": "/proceedings-article/crv/2022/977400a098/1GeCvgOw44o", "parentPublication": { "id": "proceedings/crv/2022/9774/0", "title": "2022 19th Conference on Robots and Vision (CRV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2021/4057/0/405700a770", "title": "The Importance of Sensory Feedback to Enhance Embodiment During Virtual Training of Myoelectric Prostheses Users", "doi": null, "abstractUrl": "/proceedings-article/vrw/2021/405700a770/1tnXNXoimGI", "parentPublication": { "id": "proceedings/vrw/2021/4057/0", "title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2021/4057/0/405700a558", "title": "The importance of sensory feedback to enhance embodiment during virtual training of myoelectric prostheses users", "doi": null, "abstractUrl": "/proceedings-article/vrw/2021/405700a558/1tnXbQv2sNi", "parentPublication": { "id": "proceedings/vrw/2021/4057/0", "title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "07286860", "articleId": "13rRUILLkE2", "__typename": "AdjacentArticleType" }, "next": { "fno": "07390277", "articleId": "13rRUxBrGhc", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNvDqsVL", "title": "October-December", "year": "2011", "issueNum": "04", "idPrefix": "th", "pubType": "journal", "volume": "4", "label": "October-December", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUxDqS8r", "doi": "10.1109/TOH.2011.2", "abstract": "This paper surveys the research literature on robust tactile and haptic illusions. The illusions are organized into two categories. The first category relates to objects and their properties, and is further differentiated in terms of haptic processing of material versus geometric object properties. The second category relates to haptic space, and is further differentiated in terms of the observer's own body versus external space. The illusions are initially described and where possible addressed in terms of their functional properties and/or underlying neural processes. The significance of these illusions for the design of tactile and haptic displays is also discussed. We conclude by briefly considering a number of important general themes that have emerged in the materials surveyed.", "abstracts": [ { "abstractType": "Regular", "content": "This paper surveys the research literature on robust tactile and haptic illusions. The illusions are organized into two categories. The first category relates to objects and their properties, and is further differentiated in terms of haptic processing of material versus geometric object properties. The second category relates to haptic space, and is further differentiated in terms of the observer's own body versus external space. The illusions are initially described and where possible addressed in terms of their functional properties and/or underlying neural processes. The significance of these illusions for the design of tactile and haptic displays is also discussed. 
We conclude by briefly considering a number of important general themes that have emerged in the materials surveyed.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "This paper surveys the research literature on robust tactile and haptic illusions. The illusions are organized into two categories. The first category relates to objects and their properties, and is further differentiated in terms of haptic processing of material versus geometric object properties. The second category relates to haptic space, and is further differentiated in terms of the observer's own body versus external space. The illusions are initially described and where possible addressed in terms of their functional properties and/or underlying neural processes. The significance of these illusions for the design of tactile and haptic displays is also discussed. We conclude by briefly considering a number of important general themes that have emerged in the materials surveyed.", "title": "Tactile and Haptic Illusions", "normalizedTitle": "Tactile and Haptic Illusions", "fno": "tth2011040273", "hasPdf": true, "idPrefix": "th", "keywords": [ "Haptic Interfaces", "Data Visualization", "Temperature Sensors", "Robustness", "Surface Roughness", "Haptic Communication", "Touch Based Properties And Capabilities Of The Human User", "Hardware And Software That Enable Touch Based Interactions With Real", "Remote", "And Virtual Environments", "Tactile And Haptic Illusions" ], "authors": [ { "givenName": "Susan J.", "surname": "Lederman", "fullName": "Susan J. Lederman", "affiliation": "Queen's University, Kingston", "__typename": "ArticleAuthorType" }, { "givenName": "Lynette A.", "surname": "Jones", "fullName": "Lynette A. 
Jones", "affiliation": "Massachusetts Institute for Technology, Cambridge", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "04", "pubDate": "2011-10-01 00:00:00", "pubType": "trans", "pages": "273-294", "year": "2011", "issn": "1939-1412", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/vr/2017/6647/0/07892341", "title": "Classification method of tactile feeling using stacked autoencoder based on haptic primary colors", "doi": null, "abstractUrl": "/proceedings-article/vr/2017/07892341/12OmNA14Ae6", "parentPublication": { "id": "proceedings/vr/2017/6647/0", "title": "2017 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2010/6237/0/05444754", "title": "Design and evaluation of a haptic tactile actuator to simulate rough textures", "doi": null, "abstractUrl": "/proceedings-article/vr/2010/05444754/12OmNAS9zy0", "parentPublication": { "id": "proceedings/vr/2010/6237/0", "title": "2010 IEEE Virtual Reality Conference (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/isvri/2011/0054/0/05759662", "title": "Pseudo-haptic feedback augmented with visual and tactile vibrations", "doi": null, "abstractUrl": "/proceedings-article/isvri/2011/05759662/12OmNzvz6OE", "parentPublication": { "id": "proceedings/isvri/2011/0054/0", "title": "2011 IEEE International Symposium on VR Innovation (ISVRI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/th/2013/01/tth2013010081", "title": "Psychophysical Dimensions of Tactile Perception of Textures", "doi": null, "abstractUrl": "/journal/th/2013/01/tth2013010081/13rRUx0xPTW", "parentPublication": { "id": "trans/th", "title": "IEEE Transactions on Haptics", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/th/2017/04/07876800", "title": "Intermanual Apparent Tactile Motion and Its Extension to 3D Interactions", "doi": null, "abstractUrl": "/journal/th/2017/04/07876800/13rRUx0xPTX", "parentPublication": { "id": "trans/th", "title": "IEEE Transactions on Haptics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/th/2017/01/07539397", "title": "Importance of Matching Physical Friction, Hardness, and Texture in Creating Realistic Haptic Virtual Surfaces", "doi": null, "abstractUrl": "/journal/th/2017/01/07539397/13rRUxAAT7O", "parentPublication": { "id": "trans/th", "title": "IEEE Transactions on Haptics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/th/2013/04/tth2013040453", "title": "Human Detection and Discrimination of Tactile Repeatability, Mechanical Backlash, and Temporal Delay in a Combined Tactile-Kinesthetic Haptic Display System", "doi": null, "abstractUrl": "/journal/th/2013/04/tth2013040453/13rRUyeCkau", "parentPublication": { "id": "trans/th", "title": "IEEE Transactions on Haptics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/05/08642446", "title": "Modulating Fine Roughness Perception of Vibrotactile Textured Surface using Pseudo-haptic Effect", "doi": null, "abstractUrl": "/journal/tg/2019/05/08642446/17PYEjfZjoZ", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/12/09117062", "title": "Augmenting Perceived Softness of Haptic Proxy Objects Through Transient Vibration and Visuo-Haptic Illusion in Virtual Reality", "doi": null, "abstractUrl": "/journal/tg/2021/12/09117062/1kGg69DDrFe", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization 
& Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2023/04/09665216", "title": "HaptoMapping: Visuo-Haptic Augmented Reality by Embedding User-Imperceptible Tactile Display Control Signals in a Projected Image", "doi": null, "abstractUrl": "/journal/tg/2023/04/09665216/1zJiKwg69PO", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "tth2011040263", "articleId": "13rRUyeTVib", "__typename": "AdjacentArticleType" }, "next": { "fno": "tth2011040295", "articleId": "13rRUxOve9U", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNAPjA9Q", "title": "Third Quarter", "year": "2012", "issueNum": "03", "idPrefix": "th", "pubType": "journal", "volume": "5", "label": "Third Quarter", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUyoPSPg", "doi": "10.1109/TOH.2012.33", "abstract": "Tactile motion guidance systems aim to direct the user's movement toward a target pose or trajectory by delivering tactile cues through lightweight wearable actuators. This study evaluates 10 forms of tactile feedback for guidance of wrist rotation to understand the traits that influence the effectiveness of such systems. We present five wearable actuators capable of tapping, dragging across, squeezing, twisting, or vibrating against the user's wrist; each actuator can be controlled via steady or pulsing drive algorithms. Ten subjects used each form of feedback to perform three unsighted movement tasks: directional response, position targeting, and trajectory following. The results show that directional responses are fastest when direction is conveyed through the location of the tactile stimulus or steady lateral skin stretch. Feedback that clearly conveys movement direction enables subjects to reach target positions most quickly, though tactile magnitude cues (steady intensity and especially pulsing frequency) can also be used when direction is difficult to discern. Subjects closely tracked arbitrary trajectories only when both movement direction and cue magnitude were subjectively rated as very easy to discern. The best overall performance was achieved by the actuator that repeatedly taps on the subject's wrist on the side toward which they should turn.", "abstracts": [ { "abstractType": "Regular", "content": "Tactile motion guidance systems aim to direct the user's movement toward a target pose or trajectory by delivering tactile cues through lightweight wearable actuators. 
This study evaluates 10 forms of tactile feedback for guidance of wrist rotation to understand the traits that influence the effectiveness of such systems. We present five wearable actuators capable of tapping, dragging across, squeezing, twisting, or vibrating against the user's wrist; each actuator can be controlled via steady or pulsing drive algorithms. Ten subjects used each form of feedback to perform three unsighted movement tasks: directional response, position targeting, and trajectory following. The results show that directional responses are fastest when direction is conveyed through the location of the tactile stimulus or steady lateral skin stretch. Feedback that clearly conveys movement direction enables subjects to reach target positions most quickly, though tactile magnitude cues (steady intensity and especially pulsing frequency) can also be used when direction is difficult to discern. Subjects closely tracked arbitrary trajectories only when both movement direction and cue magnitude were subjectively rated as very easy to discern. The best overall performance was achieved by the actuator that repeatedly taps on the subject's wrist on the side toward which they should turn.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Tactile motion guidance systems aim to direct the user's movement toward a target pose or trajectory by delivering tactile cues through lightweight wearable actuators. This study evaluates 10 forms of tactile feedback for guidance of wrist rotation to understand the traits that influence the effectiveness of such systems. We present five wearable actuators capable of tapping, dragging across, squeezing, twisting, or vibrating against the user's wrist; each actuator can be controlled via steady or pulsing drive algorithms. Ten subjects used each form of feedback to perform three unsighted movement tasks: directional response, position targeting, and trajectory following. 
The results show that directional responses are fastest when direction is conveyed through the location of the tactile stimulus or steady lateral skin stretch. Feedback that clearly conveys movement direction enables subjects to reach target positions most quickly, though tactile magnitude cues (steady intensity and especially pulsing frequency) can also be used when direction is difficult to discern. Subjects closely tracked arbitrary trajectories only when both movement direction and cue magnitude were subjectively rated as very easy to discern. The best overall performance was achieved by the actuator that repeatedly taps on the subject's wrist on the side toward which they should turn.", "title": "Evaluation of Tactile Feedback Methods for Wrist Rotation Guidance", "normalizedTitle": "Evaluation of Tactile Feedback Methods for Wrist Rotation Guidance", "fno": "tth2012030240", "hasPdf": true, "idPrefix": "th", "keywords": [ "Wrist", "Actuators", "Tactile Sensors", "Servomotors", "Vibrations", "Humans", "Trajectory", "Human Factors", "Wearable Tactile Devices", "Tactile Rendering", "Motion Guidance" ], "authors": [ { "givenName": "Andrew A.", "surname": "Stanley", "fullName": "Andrew A. Stanley", "affiliation": "University of Pennsylvania, Philadelphia and Stanford University, Stanford", "__typename": "ArticleAuthorType" }, { "givenName": "Katherine J.", "surname": "Kuchenbecker", "fullName": "Katherine J. 
Kuchenbecker", "affiliation": "University of Pennsylvania, Philadelphia", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "03", "pubDate": "2012-07-01 00:00:00", "pubType": "trans", "pages": "240-251", "year": "2012", "issn": "1939-1412", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/isatp/2003/7770/0/01217213", "title": "Rotational force-feedback wrist", "doi": null, "abstractUrl": "/proceedings-article/isatp/2003/01217213/12OmNAfPIOH", "parentPublication": { "id": "proceedings/isatp/2003/7770/0", "title": "ISATP'03: 5th IEEE International Symposium on Assembly and Task Planning", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iswc/2011/0774/0/05959583", "title": "AirTouch: Synchronizing In-air Hand Gesture and On-body Tactile Feedback to Augment Mobile Gesture Interaction", "doi": null, "abstractUrl": "/proceedings-article/iswc/2011/05959583/12OmNwkzumy", "parentPublication": { "id": "proceedings/iswc/2011/0774/0", "title": "2011 15th Annual International Symposium on Wearable Computers", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2013/4795/0/06549418", "title": "An actuated stage for a tablet computer: Generation of tactile feedback and communication using the motion of the whole tablet", "doi": null, "abstractUrl": "/proceedings-article/vr/2013/06549418/12OmNxFsmGl", "parentPublication": { "id": "proceedings/vr/2013/4795/0", "title": "2013 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/percomw/2015/8425/0/07134102", "title": "Gait, wrist, and sensors: Detecting freezing of gait in Parkinson's disease from wrist movement", "doi": null, "abstractUrl": 
"/proceedings-article/percomw/2015/07134102/12OmNyRxFuE", "parentPublication": { "id": "proceedings/percomw/2015/8425/0", "title": "2015 IEEE International Conference on Pervasive Computing and Communication Workshops (PerCom Workshops)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iswc/2010/9046/0/05665867", "title": "Tactor placement in wrist worn wearables", "doi": null, "abstractUrl": "/proceedings-article/iswc/2010/05665867/12OmNz2TCzq", "parentPublication": { "id": "proceedings/iswc/2010/9046/0", "title": "International Symposium on Wearable Computers (ISWC) 2010", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/isvri/2011/0054/0/05759662", "title": "Pseudo-haptic feedback augmented with visual and tactile vibrations", "doi": null, "abstractUrl": "/proceedings-article/isvri/2011/05759662/12OmNzvz6OE", "parentPublication": { "id": "proceedings/isvri/2011/0054/0", "title": "2011 IEEE International Symposium on VR Innovation (ISVRI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/pc/2010/04/mpc2010040033", "title": "LifeBelt: Crowd Evacuation Based on Vibro-Tactile Guidance", "doi": null, "abstractUrl": "/magazine/pc/2010/04/mpc2010040033/13rRUy2YLVt", "parentPublication": { "id": "mags/pc", "title": "IEEE Pervasive Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2020/8508/0/850800a350", "title": "Investigating Remote Tactile Feedback for Mid-Air Text-Entry in Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/ismar/2020/850800a350/1pysyvL4CwU", "parentPublication": { "id": "proceedings/ismar/2020/8508/0", "title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tm/2023/01/09400761", "title": "MagAuth: 
Secure and Usable Two-Factor Authentication With Magnetic Wrist Wearables", "doi": null, "abstractUrl": "/journal/tm/2023/01/09400761/1sK2h8w8fAc", "parentPublication": { "id": "trans/tm", "title": "IEEE Transactions on Mobile Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2021/1298/0/129800a328", "title": "Evaluating Wearable Tactile Feedback Patterns During a Virtual Reality Fighting Game", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2021/129800a328/1yeQPdMOGZ2", "parentPublication": { "id": "proceedings/ismar-adjunct/2021/1298/0", "title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "tth2012030231", "articleId": "13rRUwInvt2", "__typename": "AdjacentArticleType" }, "next": { "fno": "tth2012030252", "articleId": "13rRUwjoNx9", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "17ShDTYesYu", "name": "tth2012030240s.mov", "location": "https://www.computer.org/csdl/api/v1/extra/tth2012030240s.mov", "extension": "mov", "size": "43.8 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "1LUpyYLBfeo", "title": "May", "year": "2023", "issueNum": "05", "idPrefix": "tg", "pubType": "journal", "volume": "29", "label": "May", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1KYotugT0xW", "doi": "10.1109/TVCG.2023.3247114", "abstract": "Most prior teleportation techniques in virtual reality are bound to target positions in the vicinity of selectable scene objects. In this paper, we present three adaptations of the classic teleportation metaphor that enable the user to travel to mid-air targets as well. Inspired by related work on the combination of teleports with virtual rotations, our three techniques differ in the extent to which elevation changes are integrated into the conventional target selection process. Elevation can be specified either simultaneously, as a connected second step, or separately from horizontal movements. A user study with 30 participants indicated a trade-off between the simultaneous method leading to the highest accuracy and the two-step method inducing the lowest task load as well as receiving the highest usability ratings. The separate method was least suitable on its own but could serve as a complement to one of the other approaches. Based on these findings and previous research, we define initial design guidelines for mid-air navigation techniques.", "abstracts": [ { "abstractType": "Regular", "content": "Most prior teleportation techniques in virtual reality are bound to target positions in the vicinity of selectable scene objects. In this paper, we present three adaptations of the classic teleportation metaphor that enable the user to travel to mid-air targets as well. Inspired by related work on the combination of teleports with virtual rotations, our three techniques differ in the extent to which elevation changes are integrated into the conventional target selection process. 
Elevation can be specified either simultaneously, as a connected second step, or separately from horizontal movements. A user study with 30 participants indicated a trade-off between the simultaneous method leading to the highest accuracy and the two-step method inducing the lowest task load as well as receiving the highest usability ratings. The separate method was least suitable on its own but could serve as a complement to one of the other approaches. Based on these findings and previous research, we define initial design guidelines for mid-air navigation techniques.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Most prior teleportation techniques in virtual reality are bound to target positions in the vicinity of selectable scene objects. In this paper, we present three adaptations of the classic teleportation metaphor that enable the user to travel to mid-air targets as well. Inspired by related work on the combination of teleports with virtual rotations, our three techniques differ in the extent to which elevation changes are integrated into the conventional target selection process. Elevation can be specified either simultaneously, as a connected second step, or separately from horizontal movements. A user study with 30 participants indicated a trade-off between the simultaneous method leading to the highest accuracy and the two-step method inducing the lowest task load as well as receiving the highest usability ratings. The separate method was least suitable on its own but could serve as a complement to one of the other approaches. 
Based on these findings and previous research, we define initial design guidelines for mid-air navigation techniques.", "title": "Gaining the High Ground: Teleportation to Mid-Air Targets in Immersive Virtual Environments", "normalizedTitle": "Gaining the High Ground: Teleportation to Mid-Air Targets in Immersive Virtual Environments", "fno": "10049698", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Teleportation", "Virtual Reality", "Elevation Changes", "Immersive Virtual Environments", "Mid Air Navigation Techniques", "Mid Air Targets", "Scene Objects", "Simultaneous Method", "Target Selection Process", "Teleportation Metaphor", "Two Step Method", "Usability Ratings", "Virtual Reality", "Virtual Rotations", "Teleportation", "Navigation", "Avatars", "Visualization", "Task Analysis", "Floors", "Virtual Environments", "Virtual Reality", "3 D User Interfaces", "3 D Navigation", "Head Mounted Display", "Teleportation", "Flying", "Mid Air Navigation" ], "authors": [ { "givenName": "Tim", "surname": "Weissker", "fullName": "Tim Weissker", "affiliation": "Visual Computing Institute at RWTH Aachen University, Germany", "__typename": "ArticleAuthorType" }, { "givenName": "Pauline", "surname": "Bimberg", "fullName": "Pauline Bimberg", "affiliation": "Human-Computer Interaction Group, University of Trier, Germany", "__typename": "ArticleAuthorType" }, { "givenName": "Aalok Shashidhar", "surname": "Gokhale", "fullName": "Aalok Shashidhar Gokhale", "affiliation": "Virtual Reality and Visualization Research Group, Bauhaus-Universität Weimar, Germany", "__typename": "ArticleAuthorType" }, { "givenName": "Torsten", "surname": "Kuhlen", "fullName": "Torsten Kuhlen", "affiliation": "Visual Computing Institute at RWTH Aachen University, Germany", "__typename": "ArticleAuthorType" }, { "givenName": "Bernd", "surname": "Froehlich", "fullName": "Bernd Froehlich", "affiliation": "Virtual Reality and Visualization Research Group, Bauhaus-Universität Weimar, Germany", "__typename": 
"ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "05", "pubDate": "2023-05-01 00:00:00", "pubType": "trans", "pages": "2467-2477", "year": "2023", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/vr/2017/6647/0/07892386", "title": "Travel in large-scale head-worn VR: Pre-oriented teleportation with WIMs and previews", "doi": null, "abstractUrl": "/proceedings-article/vr/2017/07892386/12OmNzhELm6", "parentPublication": { "id": "proceedings/vr/2017/6647/0", "title": "2017 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2022/9617/0/961700a674", "title": "Virtual Workspace Positioning Techniques during Teleportation for Co-located Collaboration in Virtual Reality using HMDs", "doi": null, "abstractUrl": "/proceedings-article/vr/2022/961700a674/1CJbVNhPGSI", "parentPublication": { "id": "proceedings/vr/2022/9617/0", "title": "2022 IEEE on Conference Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2022/8402/0/840200a238", "title": "Comparing Teleportation Methods for Travel in Everyday Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/vrw/2022/840200a238/1CJdYyJV76E", "parentPublication": { "id": "proceedings/vrw/2022/8402/0", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2022/5325/0/532500a317", "title": "WriArm: Leveraging Wrist Movement to Design Wrist&#x002B;Arm Based Teleportation in VR", "doi": null, "abstractUrl": "/proceedings-article/ismar/2022/532500a317/1JrRkBbpP1K", "parentPublication": { "id": 
"proceedings/ismar/2022/5325/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08797777", "title": "Exploration of Large Omnidirectional Images in Immersive Environments", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08797777/1cJ0JISlXDG", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/05/08998353", "title": "Augmented Virtual Teleportation for High-Fidelity Telecollaboration", "doi": null, "abstractUrl": "/journal/tg/2020/05/08998353/1hpPDKs9c7C", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2020/8508/0/850800a608", "title": "Walking and Teleportation in Wide-area Virtual Reality Experiences", "doi": null, "abstractUrl": "/proceedings-article/ismar/2020/850800a608/1pysv8bIfrG", "parentPublication": { "id": "proceedings/ismar/2020/8508/0", "title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2021/4057/0/405700a377", "title": "Multisensory Teleportation in Virtual Reality Applications", "doi": null, "abstractUrl": "/proceedings-article/vrw/2021/405700a377/1tnXGQKSUPm", "parentPublication": { "id": "proceedings/vrw/2021/4057/0", "title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2021/4057/0/405700a480", "title": "Analysis of Positional Tracking Space Usage 
when using Teleportation", "doi": null, "abstractUrl": "/proceedings-article/vrw/2021/405700a480/1tnXfrT4ere", "parentPublication": { "id": "proceedings/vrw/2021/4057/0", "title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2021/4057/0/405700a278", "title": "In Touch with Everyday Objects: Teleportation Techniques in Virtual Environments Supporting Tangibility", "doi": null, "abstractUrl": "/proceedings-article/vrw/2021/405700a278/1tnXjaZXiw0", "parentPublication": { "id": "proceedings/vrw/2021/4057/0", "title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "10049714", "articleId": "1KYonwZBA08", "__typename": "AdjacentArticleType" }, "next": { "fno": "10049694", "articleId": "1KYopPcDKk8", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "1HMOit1lSk8", "title": "Dec.", "year": "2022", "issueNum": "12", "idPrefix": "tg", "pubType": "journal", "volume": "28", "label": "Dec.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1vyjtJyV16g", "doi": "10.1109/TVCG.2021.3099012", "abstract": "Exploring large virtual environments, such as cities, is a central task in several domains, such as gaming and urban planning. VR systems can greatly help this task by providing an immersive experience; however, a common issue with viewing and navigating a city in the traditional sense is that users can either obtain a local or a global view, but not both at the same time, requiring them to continuously switch between perspectives, losing context and distracting them from their analysis. In this article, our goal is to allow users to navigate to points of interest without changing perspectives. To accomplish this, we design an intuitive navigation interface that takes advantage of the strong sense of spatial presence provided by VR. We supplement this interface with a perspective that warps the environment, called UrbanRama, based on a cylindrical projection, providing a mix of local and global views. The design of this interface was performed as an iterative process in collaboration with architects and urban planners. We conducted a qualitative and a quantitative pilot user study to evaluate UrbanRama and the results indicate the effectiveness of our system in reducing perspective changes, while ensuring that the warping doesn&#x0027;t affect distance and orientation perception.", "abstracts": [ { "abstractType": "Regular", "content": "Exploring large virtual environments, such as cities, is a central task in several domains, such as gaming and urban planning. 
VR systems can greatly help this task by providing an immersive experience; however, a common issue with viewing and navigating a city in the traditional sense is that users can either obtain a local or a global view, but not both at the same time, requiring them to continuously switch between perspectives, losing context and distracting them from their analysis. In this article, our goal is to allow users to navigate to points of interest without changing perspectives. To accomplish this, we design an intuitive navigation interface that takes advantage of the strong sense of spatial presence provided by VR. We supplement this interface with a perspective that warps the environment, called UrbanRama, based on a cylindrical projection, providing a mix of local and global views. The design of this interface was performed as an iterative process in collaboration with architects and urban planners. We conducted a qualitative and a quantitative pilot user study to evaluate UrbanRama and the results indicate the effectiveness of our system in reducing perspective changes, while ensuring that the warping doesn&#x0027;t affect distance and orientation perception.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Exploring large virtual environments, such as cities, is a central task in several domains, such as gaming and urban planning. VR systems can greatly help this task by providing an immersive experience; however, a common issue with viewing and navigating a city in the traditional sense is that users can either obtain a local or a global view, but not both at the same time, requiring them to continuously switch between perspectives, losing context and distracting them from their analysis. In this article, our goal is to allow users to navigate to points of interest without changing perspectives. To accomplish this, we design an intuitive navigation interface that takes advantage of the strong sense of spatial presence provided by VR. 
We supplement this interface with a perspective that warps the environment, called UrbanRama, based on a cylindrical projection, providing a mix of local and global views. The design of this interface was performed as an iterative process in collaboration with architects and urban planners. We conducted a qualitative and a quantitative pilot user study to evaluate UrbanRama and the results indicate the effectiveness of our system in reducing perspective changes, while ensuring that the warping doesn't affect distance and orientation perception.", "title": "UrbanRama: Navigating Cities in Virtual Reality", "normalizedTitle": "UrbanRama: Navigating Cities in Virtual Reality", "fno": "09495135", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Town And Country Planning", "User Interfaces", "Virtual Reality", "Called Urban Rama", "Central Task", "Changing Perspectives", "Cylindrical Projection", "Global Views", "Immersive Experience", "Intuitive Navigation Interface", "Local Views", "Navigating Cities", "Perspective Changes", "Quantitative Pilot User Study", "Spatial Presence", "Strong Sense", "Traditional Sense", "Urban Planners", "Urban Planning", "Viewing Navigating", "Virtual Environments", "Virtual Reality", "VR Systems", "Navigation", "Virtual Reality", "Three Dimensional Displays", "Task Analysis", "Urban Planning", "Buildings", "Virtual Environments", "Virtual Reality", "VR Navigation", "Cylindrical Deformation" ], "authors": [ { "givenName": "Shaoyu", "surname": "Chen", "fullName": "Shaoyu Chen", "affiliation": "New York University, New York, NY, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Fabio", "surname": "Miranda", "fullName": "Fabio Miranda", "affiliation": "University of Illinois at Chicago, Chicago, IL, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Nivan", "surname": "Ferreira", "fullName": "Nivan Ferreira", "affiliation": "Universidade Federal de Pernambuco, Recife, Brazil", "__typename": "ArticleAuthorType" }, { 
"givenName": "Marcos", "surname": "Lage", "fullName": "Marcos Lage", "affiliation": "Universidade Federal Fluminense, Niteri, State of Rio de Janeiro, Brazil", "__typename": "ArticleAuthorType" }, { "givenName": "Harish", "surname": "Doraiswamy", "fullName": "Harish Doraiswamy", "affiliation": "New York University, New York, NY, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Corinne", "surname": "Brenner", "fullName": "Corinne Brenner", "affiliation": "New York University, New York, NY, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Connor", "surname": "Defanti", "fullName": "Connor Defanti", "affiliation": "New York University, New York, NY, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Michael", "surname": "Koutsoubis", "fullName": "Michael Koutsoubis", "affiliation": "Kohn Pedersen Fox Associates PC, New York, NY, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Luc", "surname": "Wilson", "fullName": "Luc Wilson", "affiliation": "Kohn Pedersen Fox Associates PC, New York, NY, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Ken", "surname": "Perlin", "fullName": "Ken Perlin", "affiliation": "New York University, New York, NY, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Claudio", "surname": "Silva", "fullName": "Claudio Silva", "affiliation": "New York University, New York, NY, USA", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "12", "pubDate": "2022-12-01 00:00:00", "pubType": "trans", "pages": "4685-4699", "year": "2022", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/bigcomp/2018/3649/0/364901a475", "title": "Artificial Landmarks to Facilitate Spatial Learning and Recalling for Curved Visual Wall Layout in Virtual Reality", "doi": null, "abstractUrl": 
"/proceedings-article/bigcomp/2018/364901a475/12OmNA14A36", "parentPublication": { "id": "proceedings/bigcomp/2018/3649/0", "title": "2018 IEEE International Conference on Big Data and Smart Computing (BigComp)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2017/6647/0/07892335", "title": "Designing intentional impossible spaces in virtual reality narratives: A case study", "doi": null, "abstractUrl": "/proceedings-article/vr/2017/07892335/12OmNApcu9b", "parentPublication": { "id": "proceedings/vr/2017/6647/0", "title": "2017 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2017/6647/0/07892331", "title": "Advertising perception with immersive virtual reality devices", "doi": null, "abstractUrl": "/proceedings-article/vr/2017/07892331/12OmNvk7JO0", "parentPublication": { "id": "proceedings/vr/2017/6647/0", "title": "2017 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2017/6647/0/07892253", "title": "Lean into it: Exploring leaning-based motion cueing interfaces for virtual reality movement", "doi": null, "abstractUrl": "/proceedings-article/vr/2017/07892253/12OmNxETane", "parentPublication": { "id": "proceedings/vr/2017/6647/0", "title": "2017 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2023/05/10049647", "title": "Evaluating the Effects of Virtual Reality Environment Learning on Subsequent Robot Teleoperation in an Unfamiliar Building", "doi": null, "abstractUrl": "/journal/tg/2023/05/10049647/1KYoqyI8xfq", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08798345", "title": "Investigation of 
Visual Self-Representation for a Walking-in-Place Navigation System in Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08798345/1cJ1hpkUgHS", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2019/4765/0/476500a465", "title": "Marking the City: Interactions in Multiple Space Scales in Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2019/476500a465/1gysjbi37mE", "parentPublication": { "id": "proceedings/ismar-adjunct/2019/4765/0", "title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2021/1298/0/129800a321", "title": "Simultaneous Real Walking and Asymmetric Input in Virtual Reality with a Smartphone-based Hybrid Interface", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2021/129800a321/1yeQEyk3fbO", "parentPublication": { "id": "proceedings/ismar-adjunct/2021/1298/0", "title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2021/1298/0/129800a491", "title": "Inter-Brain Synchronization during Collaboration in Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2021/129800a491/1yeQzTT2Hte", "parentPublication": { "id": "proceedings/ismar-adjunct/2021/1298/0", "title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vissoft/2021/3144/0/314400a012", "title": "CodeCity: On-Screen or in Virtual Reality?", 
"doi": null, "abstractUrl": "/proceedings-article/vissoft/2021/314400a012/1yrHskxcdl6", "parentPublication": { "id": "proceedings/vissoft/2021/3144/0", "title": "2021 Working Conference on Software Visualization (VISSOFT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "09496211", "articleId": "1vyjumhb4ZO", "__typename": "AdjacentArticleType" }, "next": { "fno": "09497687", "articleId": "1vzYfxgOoeI", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "1HMOJ2946D6", "name": "ttg202212-09495135s1-supp1-3099012.mp4", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202212-09495135s1-supp1-3099012.mp4", "extension": "mp4", "size": "43 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "12OmNxwENE7", "title": "May", "year": "2020", "issueNum": "05", "idPrefix": "tp", "pubType": "journal", "volume": "42", "label": "May", "downloadables": { "hasCover": true, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "17D45Wt3Exc", "doi": "10.1109/TPAMI.2019.2893666", "abstract": "Light field imaging has recently known a regain of interest due to the availability of practical light field capturing systems that offer a wide range of applications in the field of computer vision. However, capturing high-resolution light fields remains technologically challenging since the increase in angular resolution is often accompanied by a significant reduction in spatial resolution. This paper describes a learning-based spatial light field super-resolution method that allows the restoration of the entire light field with consistency across all angular views. The algorithm first uses optical flow to align the light field and then reduces its angular dimension using low-rank approximation. We then consider the linearly independent columns of the resulting low-rank model as an embedding, which is restored using a deep convolutional neural network (DCNN). The super-resolved embedding is then used to reconstruct the remaining views. The original disparities are restored using inverse warping where missing pixels are approximated using a novel light field inpainting algorithm. Experimental results show that the proposed method outperforms existing light field super-resolution algorithms, achieving PSNR gains of 0.23 dB over the second best performing method. 
The performance is shown to be further improved using iterative back-projection as a post-processing step.", "abstracts": [ { "abstractType": "Regular", "content": "Light field imaging has recently known a regain of interest due to the availability of practical light field capturing systems that offer a wide range of applications in the field of computer vision. However, capturing high-resolution light fields remains technologically challenging since the increase in angular resolution is often accompanied by a significant reduction in spatial resolution. This paper describes a learning-based spatial light field super-resolution method that allows the restoration of the entire light field with consistency across all angular views. The algorithm first uses optical flow to align the light field and then reduces its angular dimension using low-rank approximation. We then consider the linearly independent columns of the resulting low-rank model as an embedding, which is restored using a deep convolutional neural network (DCNN). The super-resolved embedding is then used to reconstruct the remaining views. The original disparities are restored using inverse warping where missing pixels are approximated using a novel light field inpainting algorithm. Experimental results show that the proposed method outperforms existing light field super-resolution algorithms, achieving PSNR gains of 0.23 dB over the second best performing method. The performance is shown to be further improved using iterative back-projection as a post-processing step.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Light field imaging has recently known a regain of interest due to the availability of practical light field capturing systems that offer a wide range of applications in the field of computer vision. 
However, capturing high-resolution light fields remains technologically challenging since the increase in angular resolution is often accompanied by a significant reduction in spatial resolution. This paper describes a learning-based spatial light field super-resolution method that allows the restoration of the entire light field with consistency across all angular views. The algorithm first uses optical flow to align the light field and then reduces its angular dimension using low-rank approximation. We then consider the linearly independent columns of the resulting low-rank model as an embedding, which is restored using a deep convolutional neural network (DCNN). The super-resolved embedding is then used to reconstruct the remaining views. The original disparities are restored using inverse warping where missing pixels are approximated using a novel light field inpainting algorithm. Experimental results show that the proposed method outperforms existing light field super-resolution algorithms, achieving PSNR gains of 0.23 dB over the second best performing method. 
The performance is shown to be further improved using iterative back-projection as a post-processing step.", "title": "Light Field Super-Resolution Using a Low-Rank Prior and Deep Convolutional Neural Networks", "normalizedTitle": "Light Field Super-Resolution Using a Low-Rank Prior and Deep Convolutional Neural Networks", "fno": "08620368", "hasPdf": true, "idPrefix": "tp", "keywords": [ "Computer Vision", "Convolutional Neural Nets", "Image Reconstruction", "Image Resolution", "Image Sampling", "Image Sensors", "Image Sequences", "Learning Artificial Intelligence", "Deep Convolutional Neural Network", "Light Field Imaging", "Practical Light Field", "High Resolution Light Fields", "Angular Resolution", "Spatial Resolution", "Learning Based Spatial Light Field Super Resolution Method", "Entire Light Field", "Angular Views", "Low Rank Approximation", "Resulting Low Rank Model", "Super Resolved Embedding", "Light Field Super Resolution Algorithms", "Spatial Resolution", "Cameras", "Image Restoration", "Matrix Decomposition", "Sparse Matrices", "Light Fields", "Deep Convolutional Neural Networks", "Light Field", "Low Rank Matrix Approximation", "Super Resolution" ], "authors": [ { "givenName": "Reuben A.", "surname": "Farrugia", "fullName": "Reuben A. 
Farrugia", "affiliation": "Department of Communications and Computer Engineering, University of Malta, Msida, MSD, Malta", "__typename": "ArticleAuthorType" }, { "givenName": "Christine", "surname": "Guillemot", "fullName": "Christine Guillemot", "affiliation": "Institut National de Recherche en Informatique et en Automatique, Rennes, France", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "05", "pubDate": "2020-05-01 00:00:00", "pubType": "trans", "pages": "1162-1175", "year": "2020", "issn": "0162-8828", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/iccvw/2015/9711/0/5720a057", "title": "Learning a Deep Convolutional Network for Light-Field Image Super-Resolution", "doi": null, "abstractUrl": "/proceedings-article/iccvw/2015/5720a057/12OmNyL0TyY", "parentPublication": { "id": "proceedings/iccvw/2015/9711/0", "title": "2015 IEEE International Conference on Computer Vision Workshop (ICCVW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/09798876", "title": "Deep Light Field Spatial Super-Resolution Using Heterogeneous Imaging", "doi": null, "abstractUrl": "/journal/tg/5555/01/09798876/1Eho8QXQucg", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmew/2022/7218/0/09859373", "title": "LFC-SASR: Light Field Coding Using Spatial and Angular Super-Resolution", "doi": null, "abstractUrl": "/proceedings-article/icmew/2022/09859373/1G4F0ndbVoQ", "parentPublication": { "id": "proceedings/icmew/2022/7218/0", "title": "2022 IEEE International Conference on Multimedia and Expo Workshops (ICMEW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"trans/tp/2021/03/08854138", "title": "High-Dimensional Dense Residual Convolutional Neural Network for Light Field Reconstruction", "doi": null, "abstractUrl": "/journal/tp/2021/03/08854138/1dM2dOAAMh2", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2019/3293/0/329300l1038", "title": "Residual Networks for Light Field Image Super-Resolution", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2019/329300l1038/1gyrMPr3gcw", "parentPublication": { "id": "proceedings/cvpr/2019/3293/0", "title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2019/2506/0/250600b853", "title": "An Epipolar Volume Autoencoder With Adversarial Loss for Deep Light Field Super-Resolution", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2019/250600b853/1iTvlh1qLGU", "parentPublication": { "id": "proceedings/cvprw/2019/2506/0", "title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2019/2506/0/250600b804", "title": "Light Field Super-Resolution: A Benchmark", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2019/250600b804/1iTvo7kjJFm", "parentPublication": { "id": "proceedings/cvprw/2019/2506/0", "title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/09/09043741", "title": "4D Light Field Segmentation From Light Field Super-Pixel Hypergraph Representation", "doi": null, "abstractUrl": "/journal/tg/2021/09/09043741/1ilQLDcivHa", "parentPublication": { "id": "trans/tg", "title": 
"IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/mu/2021/04/09392312", "title": "LFI-Augmenter: Intelligent Light Field Image Editing With Interleaved Spatial-Angular Convolution", "doi": null, "abstractUrl": "/magazine/mu/2021/04/09392312/1sq7wcFIASI", "parentPublication": { "id": "mags/mu", "title": "IEEE MultiMedia", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2021/4509/0/450900k0005", "title": "Light Field Super-Resolution with Zero-Shot Learning", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2021/450900k0005/1yeISN5Dx4c", "parentPublication": { "id": "proceedings/cvpr/2021/4509/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "08611390", "articleId": "17D45WK5Aot", "__typename": "AdjacentArticleType" }, "next": { "fno": "08611140", "articleId": "17D45XERmmH", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNvqEvRo", "title": "PrePrints", "year": "5555", "issueNum": "01", "idPrefix": "tg", "pubType": "journal", "volume": null, "label": "PrePrints", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1Eho8QXQucg", "doi": "10.1109/TVCG.2022.3184047", "abstract": "Light field (LF) imaging expands traditional imaging techniques by simultaneously capturing the intensity and direction information of light rays, and promotes many visual applications. However, owing to the inherent trade-off between the spatial and angular dimensions, LF images acquired by LF cameras usually suffer from low spatial resolution. Many current approaches increase the spatial resolution by exploring the four-dimensional (4D) structure of the LF images, but they have difficulties in recovering fine textures at a large upscaling factor. To address this challenge, this paper proposes a new deep learning-based LF spatial super-resolution method using heterogeneous imaging (LFSSR-HI). The designed heterogeneous imaging system uses an extra high-resolution (HR) traditional camera to capture the abundant spatial information in addition to the LF camera imaging, where the auxiliary information from the HR camera is utilized to super-resolve the LF image. Specifically, an LF feature alignment module is constructed to learn the correspondence between the 4D LF image and the 2D HR image to realize information alignment. Subsequently, a multi-level spatial-angular feature enhancement module is designed to gradually embed the aligned HR information into the rough LF features. Finally, the enhanced LF features are reconstructed into a super-resolved LF image using a simple feature decoder. To improve the flexibility of the proposed method, a pyramid reconstruction strategy is leveraged to generate multi-scale super-resolution results in one forward inference. 
The experimental results show that the proposed LFSSR-HI method achieves significant advantages over the state-of-the-art methods in both qualitative and quantitative comparisons. Furthermore, the proposed method preserves more accurate angular consistency.", "abstracts": [ { "abstractType": "Regular", "content": "Light field (LF) imaging expands traditional imaging techniques by simultaneously capturing the intensity and direction information of light rays, and promotes many visual applications. However, owing to the inherent trade-off between the spatial and angular dimensions, LF images acquired by LF cameras usually suffer from low spatial resolution. Many current approaches increase the spatial resolution by exploring the four-dimensional (4D) structure of the LF images, but they have difficulties in recovering fine textures at a large upscaling factor. To address this challenge, this paper proposes a new deep learning-based LF spatial super-resolution method using heterogeneous imaging (LFSSR-HI). The designed heterogeneous imaging system uses an extra high-resolution (HR) traditional camera to capture the abundant spatial information in addition to the LF camera imaging, where the auxiliary information from the HR camera is utilized to super-resolve the LF image. Specifically, an LF feature alignment module is constructed to learn the correspondence between the 4D LF image and the 2D HR image to realize information alignment. Subsequently, a multi-level spatial-angular feature enhancement module is designed to gradually embed the aligned HR information into the rough LF features. Finally, the enhanced LF features are reconstructed into a super-resolved LF image using a simple feature decoder. To improve the flexibility of the proposed method, a pyramid reconstruction strategy is leveraged to generate multi-scale super-resolution results in one forward inference. 
The experimental results show that the proposed LFSSR-HI method achieves significant advantages over the state-of-the-art methods in both qualitative and quantitative comparisons. Furthermore, the proposed method preserves more accurate angular consistency.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Light field (LF) imaging expands traditional imaging techniques by simultaneously capturing the intensity and direction information of light rays, and promotes many visual applications. However, owing to the inherent trade-off between the spatial and angular dimensions, LF images acquired by LF cameras usually suffer from low spatial resolution. Many current approaches increase the spatial resolution by exploring the four-dimensional (4D) structure of the LF images, but they have difficulties in recovering fine textures at a large upscaling factor. To address this challenge, this paper proposes a new deep learning-based LF spatial super-resolution method using heterogeneous imaging (LFSSR-HI). The designed heterogeneous imaging system uses an extra high-resolution (HR) traditional camera to capture the abundant spatial information in addition to the LF camera imaging, where the auxiliary information from the HR camera is utilized to super-resolve the LF image. Specifically, an LF feature alignment module is constructed to learn the correspondence between the 4D LF image and the 2D HR image to realize information alignment. Subsequently, a multi-level spatial-angular feature enhancement module is designed to gradually embed the aligned HR information into the rough LF features. Finally, the enhanced LF features are reconstructed into a super-resolved LF image using a simple feature decoder. To improve the flexibility of the proposed method, a pyramid reconstruction strategy is leveraged to generate multi-scale super-resolution results in one forward inference. 
The experimental results show that the proposed LFSSR-HI method achieves significant advantages over the state-of-the-art methods in both qualitative and quantitative comparisons. Furthermore, the proposed method preserves more accurate angular consistency.", "title": "Deep Light Field Spatial Super-Resolution Using Heterogeneous Imaging", "normalizedTitle": "Deep Light Field Spatial Super-Resolution Using Heterogeneous Imaging", "fno": "09798876", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Cameras", "Spatial Resolution", "Superresolution", "Visualization", "Image Reconstruction", "Light Fields", "Training", "Light Field", "Heterogeneous Imaging", "Spatial Super Resolution", "Pyramid Reconstruction" ], "authors": [ { "givenName": "Yeyao", "surname": "Chen", "fullName": "Yeyao Chen", "affiliation": "Faculty of Information Science and Engineering, Ningbo University, Ningbo, China", "__typename": "ArticleAuthorType" }, { "givenName": "Gangyi", "surname": "Jiang", "fullName": "Gangyi Jiang", "affiliation": "Faculty of Information Science and Engineering, Ningbo University, Ningbo, China", "__typename": "ArticleAuthorType" }, { "givenName": "Mei", "surname": "Yu", "fullName": "Mei Yu", "affiliation": "Faculty of Information Science and Engineering, Ningbo University, Ningbo, China", "__typename": "ArticleAuthorType" }, { "givenName": "Haiyong", "surname": "Xu", "fullName": "Haiyong Xu", "affiliation": "Faculty of Information Science and Engineering, Ningbo University, Ningbo, China", "__typename": "ArticleAuthorType" }, { "givenName": "Yo-Sung", "surname": "Ho", "fullName": "Yo-Sung Ho", "affiliation": "School of Electrical Engineering and ComputerScience, Gwangju Institute of Science and Technology, Gwangju, South Korea", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "01", "pubDate": "2022-06-01 00:00:00", "pubType": "trans", "pages": "1-16", "year": "5555", 
"issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/iccp/2014/5188/0/06831814", "title": "Improving resolution and depth-of-field of light field cameras using a hybrid imaging system", "doi": null, "abstractUrl": "/proceedings-article/iccp/2014/06831814/12OmNyaoDEw", "parentPublication": { "id": "proceedings/iccp/2014/5188/0", "title": "2014 IEEE International Conference on Computational Photography (ICCP)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2020/05/08620368", "title": "Light Field Super-Resolution Using a Low-Rank Prior and Deep Convolutional Neural Networks", "doi": null, "abstractUrl": "/journal/tp/2020/05/08620368/17D45Wt3Exc", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2023/01/09716806", "title": "Disentangling Light Fields for Super-Resolution and Disparity Estimation", "doi": null, "abstractUrl": "/journal/tp/2023/01/09716806/1B5WzcrxgIM", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmew/2022/7218/0/09859373", "title": "LFC-SASR: Light Field Coding Using Spatial and Angular Super-Resolution", "doi": null, "abstractUrl": "/proceedings-article/icmew/2022/09859373/1G4F0ndbVoQ", "parentPublication": { "id": "proceedings/icmew/2022/7218/0", "title": "2022 IEEE International Conference on Multimedia and Expo Workshops (ICMEW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccbd/2022/5716/0/10079964", "title": "Multiple Magnification Spatial Super-Resolution Network for Light Field Images Based on EPI Solid", "doi": null, 
"abstractUrl": "/proceedings-article/iccbd/2022/10079964/1LSP47RLwFq", "parentPublication": { "id": "proceedings/iccbd/2022/5716/0", "title": "2022 5th International Conference on Computing and Big Data (ICCBD)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2019/2506/0/250600b804", "title": "Light Field Super-Resolution: A Benchmark", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2019/250600b804/1iTvo7kjJFm", "parentPublication": { "id": "proceedings/cvprw/2019/2506/0", "title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2021/12/09099445", "title": "CrossNet++: Cross-Scale Large-Parallax Warping for Reference-Based Super-Resolution", "doi": null, "abstractUrl": "/journal/tp/2021/12/09099445/1k7oyvQ9LzO", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2020/7168/0/716800c257", "title": "Light Field Spatial Super-Resolution via Deep Combinatorial Geometry Embedding and Structural Consistency Regularization", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2020/716800c257/1m3npj9GAZa", "parentPublication": { "id": "proceedings/cvpr/2020/7168/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2022/10/09448470", "title": "Deep Spatial-Angular Regularization for Light Field Imaging, Denoising, and Super-Resolution", "doi": null, "abstractUrl": "/journal/tp/2022/10/09448470/1ugE5vtunqo", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "proceedings/cvpr/2021/4509/0/450900k0005", "title": "Light Field Super-Resolution with Zero-Shot Learning", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2021/450900k0005/1yeISN5Dx4c", "parentPublication": { "id": "proceedings/cvpr/2021/4509/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "09797843", "articleId": "1EfIX5LNd5e", "__typename": "AdjacentArticleType" }, "next": { "fno": "09801527", "articleId": "1EmmQ2RjHbO", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "1Eo1xZZmf6g", "name": "ttg555501-09798876s1-supp1-3184047.pdf", "location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09798876s1-supp1-3184047.pdf", "extension": "pdf", "size": "5.61 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "1zas0oVk4oM", "title": "Oct.-Dec.", "year": "2021", "issueNum": "04", "idPrefix": "mu", "pubType": "magazine", "volume": "28", "label": "Oct.-Dec.", "downloadables": { "hasCover": true, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1sq7wcFIASI", "doi": "10.1109/MMUL.2021.3069912", "abstract": "The emerging light field images (LFIs) support 6 degrees of freedom (6DoF) user interaction, which is the key feature for future virtual reality (VR) media experiences. Compared to regular 2-D images, LFIs are characterized by particular image structure with both spatial and angular information. In practice, it is infeasible for the user to manually edit each subaperture of the LFI, respectively, and the user cannot guarantee the parallax consistency between different subapertures. To address this problem, we propose a deep-learning-based LFI editing scheme named central view augmentation propagation (CVAP), which employs interleaved spatial-angular convolutional neural networks (4-D CNN) for effective learning of both spatial and angular features from the input LFI. Moreover, for comparison purposes, we also implemented a “direct editing” scheme based on the geometry correspondence between subviews, and another benchmark method based on light field super resolution (LFSR). The experimental results show that CVAP achieved higher PSNR and overall more pleasant visual quality than direct editing and LFSR.", "abstracts": [ { "abstractType": "Regular", "content": "The emerging light field images (LFIs) support 6 degrees of freedom (6DoF) user interaction, which is the key feature for future virtual reality (VR) media experiences. Compared to regular 2-D images, LFIs are characterized by particular image structure with both spatial and angular information. 
In practice, it is infeasible for the user to manually edit each subaperture of the LFI, respectively, and the user cannot guarantee the parallax consistency between different subapertures. To address this problem, we propose a deep-learning-based LFI editing scheme named central view augmentation propagation (CVAP), which employs interleaved spatial-angular convolutional neural networks (4-D CNN) for effective learning of both spatial and angular features from the input LFI. Moreover, for comparison purposes, we also implemented a “direct editing” scheme based on the geometry correspondence between subviews, and another benchmark method based on light field super resolution (LFSR). The experimental results show that CVAP achieved higher PSNR and overall more pleasant visual quality than direct editing and LFSR.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "The emerging light field images (LFIs) support 6 degrees of freedom (6DoF) user interaction, which is the key feature for future virtual reality (VR) media experiences. Compared to regular 2-D images, LFIs are characterized by particular image structure with both spatial and angular information. In practice, it is infeasible for the user to manually edit each subaperture of the LFI, respectively, and the user cannot guarantee the parallax consistency between different subapertures. To address this problem, we propose a deep-learning-based LFI editing scheme named central view augmentation propagation (CVAP), which employs interleaved spatial-angular convolutional neural networks (4-D CNN) for effective learning of both spatial and angular features from the input LFI. Moreover, for comparison purposes, we also implemented a “direct editing” scheme based on the geometry correspondence between subviews, and another benchmark method based on light field super resolution (LFSR). 
The experimental results show that CVAP achieved higher PSNR and overall more pleasant visual quality than direct editing and LFSR.", "title": "LFI-Augmenter: Intelligent Light Field Image Editing With Interleaved Spatial-Angular Convolution", "normalizedTitle": "LFI-Augmenter: Intelligent Light Field Image Editing With Interleaved Spatial-Angular Convolution", "fno": "09392312", "hasPdf": true, "idPrefix": "mu", "keywords": [ "Convolutional Neural Nets", "Data Visualisation", "Deep Learning Artificial Intelligence", "Feature Extraction", "Image Reconstruction", "Image Resolution", "Image Sampling", "Lighting", "Virtual Reality", "LFI Augmenter", "Intelligent Light Field Image Editing", "User Interaction", "Virtual Reality", "Media Experiences", "Spatial Information", "Angular Information", "Subapertures", "LFI Editing Scheme", "Central View Augmentation Propagation", "Spatial Angular Convolutional Neural Networks", "Light Field Super Resolution", "VR", "2 D Images", "Deep Learning", "CVAP", "4 D CNN", "LFSR", "PSNR", "Visual Quality", "Feature Extraction", "Cameras", "Two Dimensional Displays", "Light Fields", "Geometry", "Image Color Analysis", "Virtual Reality", "Light Field Image", "Image Editing", "Convolutional 20 Neural Networks 21" ], "authors": [ { "givenName": "Zhicheng", "surname": "Lu", "fullName": "Zhicheng Lu", "affiliation": "Beijing Technology and Business University, Beijing, China", "__typename": "ArticleAuthorType" }, { "givenName": "Xiaoming", "surname": "Chen", "fullName": "Xiaoming Chen", "affiliation": "Beijing Technology and Business University, Beijing, China", "__typename": "ArticleAuthorType" }, { "givenName": "Vera Yuk Ying", "surname": "Chung", "fullName": "Vera Yuk Ying Chung", "affiliation": "University of Sydney, Sydney, NSW, Australia", "__typename": "ArticleAuthorType" }, { "givenName": "Sen", "surname": "Liu", "fullName": "Sen Liu", "affiliation": "University of Science and Technology of China, Hefei, China", "__typename": 
"ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "04", "pubDate": "2021-10-01 00:00:00", "pubType": "mags", "pages": "84-95", "year": "2021", "issn": "1070-986X", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/cvpr/2015/6964/0/07298804", "title": "Depth from shading, defocus, and correspondence using light-field angular coherence", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2015/07298804/12OmNwDj0Zi", "parentPublication": { "id": "proceedings/cvpr/2015/6964/0", "title": "2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccp/2016/8623/0/07492872", "title": "4D light field segmentation with spatial and angular consistencies", "doi": null, "abstractUrl": "/proceedings-article/iccp/2016/07492872/12OmNy2ah2s", "parentPublication": { "id": "proceedings/iccp/2016/8623/0", "title": "2016 IEEE International Conference on Computational Photography (ICCP)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2020/05/08620368", "title": "Light Field Super-Resolution Using a Low-Rank Prior and Deep Convolutional Neural Networks", "doi": null, "abstractUrl": "/journal/tp/2020/05/08620368/17D45Wt3Exc", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/09798876", "title": "Deep Light Field Spatial Super-Resolution Using Heterogeneous Imaging", "doi": null, "abstractUrl": "/journal/tg/5555/01/09798876/1Eho8QXQucg", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "proceedings/icmew/2022/7218/0/09859373", "title": "LFC-SASR: Light Field Coding Using Spatial and Angular Super-Resolution", "doi": null, "abstractUrl": "/proceedings-article/icmew/2022/09859373/1G4F0ndbVoQ", "parentPublication": { "id": "proceedings/icmew/2022/7218/0", "title": "2022 IEEE International Conference on Multimedia and Expo Workshops (ICMEW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2022/9062/0/09956107", "title": "A Deep Retinex Framework for Light Field Restoration under Low-light Conditions", "doi": null, "abstractUrl": "/proceedings-article/icpr/2022/09956107/1IHqjKP3U8o", "parentPublication": { "id": "proceedings/icpr/2022/9062/0", "title": "2022 26th International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2023/05/10049721", "title": "LFACon: Introducing Anglewise Attention to No-Reference Quality Assessment in Light Field Space", "doi": null, "abstractUrl": "/journal/tg/2023/05/10049721/1KYopzRJwA0", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sitis/2022/6495/0/649500a600", "title": "The effect of angular resolution and 3D rendering on the perceived quality of the industrial use cases of light field visualization", "doi": null, "abstractUrl": "/proceedings-article/sitis/2022/649500a600/1MeoEsRvvI4", "parentPublication": { "id": "proceedings/sitis/2022/6495/0", "title": "2022 16th International Conference on Signal-Image Technology & Internet-Based Systems (SITIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/09/09043741", "title": "4D Light Field Segmentation From Light Field Super-Pixel Hypergraph Representation", "doi": null, 
"abstractUrl": "/journal/tg/2021/09/09043741/1ilQLDcivHa", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2020/6532/0/09090524", "title": "Light Field Editing Propagation using 4D Convolutional Neural Networks", "doi": null, "abstractUrl": "/proceedings-article/vrw/2020/09090524/1jIxjGuj4zK", "parentPublication": { "id": "proceedings/vrw/2020/6532/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "09326365", "articleId": "1quu0LgE3p6", "__typename": "AdjacentArticleType" }, "next": { "fno": "09431651", "articleId": "1tB9eXR3Xdm", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "1sP18ke9Y64", "title": "May", "year": "2021", "issueNum": "05", "idPrefix": "tg", "pubType": "journal", "volume": "27", "label": "May", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1scDuWhBPY4", "doi": "10.1109/TVCG.2021.3067758", "abstract": "We propose a new thin and flat virtual reality (VR) display design using a Fresnel lenslet array, a Fresnel lens, and a polarization-based optical folding technique. The proposed optical system has a wide field of view (FOV) of 102&#x00B0;x102&#x00B0;, a wide eye-box of 8.8 mm, and an ergonomic eye-relief of 20 mm. Simultaneously, only 3.3 mm of physical distance is required between the display panel and the lens, so that the integrated VR display can have a compact form factor like sunglasses. Moreover, since all lenslet of the lenslet array is designed to operate under on-axis condition with low aberration, the discontinuous pupil swim distortion between the lenslets is hardly observed. In addition, all on-axis lenslets can be designed identically, reducing production cost, and even off-the-shelf Fresnel optics can be used. In this paper, we introduce how we design system parameters and analyze system performance. Finally, we demonstrate two prototypes and experimentally verify that the proposed VR display system has the expected performance while having a glasses-like form factor.", "abstracts": [ { "abstractType": "Regular", "content": "We propose a new thin and flat virtual reality (VR) display design using a Fresnel lenslet array, a Fresnel lens, and a polarization-based optical folding technique. The proposed optical system has a wide field of view (FOV) of 102&#x00B0;x102&#x00B0;, a wide eye-box of 8.8 mm, and an ergonomic eye-relief of 20 mm. 
Simultaneously, only 3.3 mm of physical distance is required between the display panel and the lens, so that the integrated VR display can have a compact form factor like sunglasses. Moreover, since all lenslet of the lenslet array is designed to operate under on-axis condition with low aberration, the discontinuous pupil swim distortion between the lenslets is hardly observed. In addition, all on-axis lenslets can be designed identically, reducing production cost, and even off-the-shelf Fresnel optics can be used. In this paper, we introduce how we design system parameters and analyze system performance. Finally, we demonstrate two prototypes and experimentally verify that the proposed VR display system has the expected performance while having a glasses-like form factor.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We propose a new thin and flat virtual reality (VR) display design using a Fresnel lenslet array, a Fresnel lens, and a polarization-based optical folding technique. The proposed optical system has a wide field of view (FOV) of 102°x102°, a wide eye-box of 8.8 mm, and an ergonomic eye-relief of 20 mm. Simultaneously, only 3.3 mm of physical distance is required between the display panel and the lens, so that the integrated VR display can have a compact form factor like sunglasses. Moreover, since all lenslet of the lenslet array is designed to operate under on-axis condition with low aberration, the discontinuous pupil swim distortion between the lenslets is hardly observed. In addition, all on-axis lenslets can be designed identically, reducing production cost, and even off-the-shelf Fresnel optics can be used. In this paper, we introduce how we design system parameters and analyze system performance. 
Finally, we demonstrate two prototypes and experimentally verify that the proposed VR display system has the expected performance while having a glasses-like form factor.", "title": "Lenslet VR: Thin, Flat and Wide-FOV Virtual Reality Display Using Fresnel Lens and Lenslet Array", "normalizedTitle": "Lenslet VR: Thin, Flat and Wide-FOV Virtual Reality Display Using Fresnel Lens and Lenslet Array", "fno": "09384477", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Aberrations", "Ergonomics", "Flat Panel Displays", "Integrated Optics", "Lenses", "Optical Design Techniques", "Virtual Reality", "Flat Virtual Reality Display", "On Axis Lenslets", "Discontinuous Pupil Swim Distortion", "Compact Form Factor Like Sunglasses", "Integrated VR Display", "Display Panel", "Ergonomic Eye Relief", "Wide Eye Box", "Optical System", "Polarization Based Optical Folding Technique", "Fresnel Lenslet Array", "Wide FOV Virtual Reality Display", "Lens Iet VR", "Design System Parameters", "Off The Shelf Fresnel Optics", "Distance 3 3 Mm", "Lenses", "Prototypes", "Optical Imaging", "Optical Distortion", "Optics", "Optical Polarization", "Optical Design", "Virtual Reality", "Near Eye Display", "Lenslet Array", "Fresnel Lens" ], "authors": [ { "givenName": "Kiseung", "surname": "Bang", "fullName": "Kiseung Bang", "affiliation": "School of Electrical and Computer Engineering, Seoul National University, South Korea", "__typename": "ArticleAuthorType" }, { "givenName": "Youngjin", "surname": "Jo", "fullName": "Youngjin Jo", "affiliation": "School of Electrical and Computer Engineering, Seoul National University, South Korea", "__typename": "ArticleAuthorType" }, { "givenName": "Minseok", "surname": "Chae", "fullName": "Minseok Chae", "affiliation": "School of Electrical and Computer Engineering, Seoul National University, South Korea", "__typename": "ArticleAuthorType" }, { "givenName": "Byoungho", "surname": "Lee", "fullName": "Byoungho Lee", "affiliation": "School of Electrical and Computer 
Engineering, Seoul National University, South Korea", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": false, "showRecommendedArticles": true, "isOpenAccess": true, "issueNum": "05", "pubDate": "2021-05-01 00:00:00", "pubType": "trans", "pages": "2545-2554", "year": "2021", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/cvprw/2015/6759/0/07301373", "title": "Fresnel lens imaging with post-capture image processing", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2015/07301373/12OmNBLdKJ7", "parentPublication": { "id": "proceedings/cvprw/2015/6759/0", "title": "2015 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/09760161", "title": "Predicting Subjective Discomfort Associated with Lens Distortion in VR Headsets During Vestibulo-Ocular Response to VR Scenes", "doi": null, "abstractUrl": "/journal/tg/5555/01/09760161/1CHsCvUiJQA", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2022/5365/0/536500a628", "title": "A Binocular Model to Evaluate User Experience in Ophthalmic and AR Prescription Lens Designs", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a628/1J7WmUiV2la", "parentPublication": { "id": "proceedings/ismar-adjunct/2022/5365/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2023/05/10050791", "title": "Add-on Occlusion: Turning Off-the-Shelf Optical See-through Head-mounted Displays Occlusion-capable", "doi": null, "abstractUrl": 
"/journal/tg/2023/05/10050791/1L039oS5wDm", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2023/4815/0/481500a503", "title": "Virtual Optical Bench: Teaching Spherical Lens Layout in VR with Real-Time Ray Tracing", "doi": null, "abstractUrl": "/proceedings-article/vr/2023/481500a503/1MNgIE9xnBC", "parentPublication": { "id": "proceedings/vr/2023/4815/0", "title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08798364", "title": "Color Moir&#x00E9; Reduction Method for Thin Integral 3D Displays", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08798364/1cJ0XcgYa1W", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/11/08794584", "title": "Towards a Switchable AR/VR Near-eye Display with Accommodation-Vergence and Eyeglass Prescription Support", "doi": null, "abstractUrl": "/journal/tg/2019/11/08794584/1dNHlOrNW5W", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/05/08998293", "title": "ThinVR: Heterogeneous microlens arrays for compact, 180 degree FOV VR near-eye displays", "doi": null, "abstractUrl": "/journal/tg/2020/05/08998293/1hrXiCmKkak", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/12/09199567", "title": "StainedView: Variable-Intensity 
Light-Attenuation Display with Cascaded Spatial Color Filtering for Improved Color Fidelity", "doi": null, "abstractUrl": "/journal/tg/2020/12/09199567/1ncgpOWQBig", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2020/8508/0/850800a073", "title": "Optical distortions in VR bias the perceived slant of moving surfaces", "doi": null, "abstractUrl": "/proceedings-article/ismar/2020/850800a073/1pysw2tXYOY", "parentPublication": { "id": "proceedings/ismar/2020/8508/0", "title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "09382914", "articleId": "1saZw54tjDa", "__typename": "AdjacentArticleType" }, "next": { "fno": "09382892", "articleId": "1saZrRoiA3C", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNvqEvRo", "title": "PrePrints", "year": "5555", "issueNum": "01", "idPrefix": "tg", "pubType": "journal", "volume": null, "label": "PrePrints", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1LtR7JYxVEk", "doi": "10.1109/TVCG.2023.3255991", "abstract": "This study aims to allow users to perform dexterous hand manipulation of objects in virtual environments with hand-held VR controllers. To this end, the VR controller is mapped to the virtual hand and the hand motions are dynamically synthesized when the virtual hand approaches an object. At each frame, given the information about the virtual hand, VR controller input, and hand-object spatial relations, the deep neural network determines the desired joint orientations of the virtual hand model in the next frame. The desired orientations are then converted into a set of torques acting on hand joints and applied to a physics simulation to determine the hand pose at the next frame. The deep neural network, named VR-HandNet, is trained with a reinforcement learning-based approach. Therefore, it can produce physically plausible hand motion since the trial-and-error training process can learn how the interaction between hand and object is performed under the environment that is simulated by a physics engine. Furthermore, we adopted an imitation learning paradigm to increase visual plausibility by mimicking the reference motion datasets. Through the ablation studies, we validated the proposed method is effectively constructed and successfully serves our design goal. A live demo is demonstrated in the supplementary video.", "abstracts": [ { "abstractType": "Regular", "content": "This study aims to allow users to perform dexterous hand manipulation of objects in virtual environments with hand-held VR controllers. 
To this end, the VR controller is mapped to the virtual hand and the hand motions are dynamically synthesized when the virtual hand approaches an object. At each frame, given the information about the virtual hand, VR controller input, and hand-object spatial relations, the deep neural network determines the desired joint orientations of the virtual hand model in the next frame. The desired orientations are then converted into a set of torques acting on hand joints and applied to a physics simulation to determine the hand pose at the next frame. The deep neural network, named VR-HandNet, is trained with a reinforcement learning-based approach. Therefore, it can produce physically plausible hand motion since the trial-and-error training process can learn how the interaction between hand and object is performed under the environment that is simulated by a physics engine. Furthermore, we adopted an imitation learning paradigm to increase visual plausibility by mimicking the reference motion datasets. Through the ablation studies, we validated the proposed method is effectively constructed and successfully serves our design goal. A live demo is demonstrated in the supplementary video.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "This study aims to allow users to perform dexterous hand manipulation of objects in virtual environments with hand-held VR controllers. To this end, the VR controller is mapped to the virtual hand and the hand motions are dynamically synthesized when the virtual hand approaches an object. At each frame, given the information about the virtual hand, VR controller input, and hand-object spatial relations, the deep neural network determines the desired joint orientations of the virtual hand model in the next frame. The desired orientations are then converted into a set of torques acting on hand joints and applied to a physics simulation to determine the hand pose at the next frame. 
The deep neural network, named VR-HandNet, is trained with a reinforcement learning-based approach. Therefore, it can produce physically plausible hand motion since the trial-and-error training process can learn how the interaction between hand and object is performed under the environment that is simulated by a physics engine. Furthermore, we adopted an imitation learning paradigm to increase visual plausibility by mimicking the reference motion datasets. Through the ablation studies, we validated the proposed method is effectively constructed and successfully serves our design goal. A live demo is demonstrated in the supplementary video.", "title": "VR-HandNet: A Visually and Physically Plausible Hand Manipulation System in Virtual Reality", "normalizedTitle": "VR-HandNet: A Visually and Physically Plausible Hand Manipulation System in Virtual Reality", "fno": "10066837", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Tracking", "Three Dimensional Displays", "Shape", "Physics", "Grasping", "Deep Learning", "Visualization", "Hand Manipulation", "Physics Based Animation", "Reinforcement Learning", "Virtual Reality" ], "authors": [ { "givenName": "DongHeun", "surname": "Han", "fullName": "DongHeun Han", "affiliation": "IIIXR LAB at the Department of Software Convergence, Kyung Hee University, Yongin, South Korea", "__typename": "ArticleAuthorType" }, { "givenName": "RoUn", "surname": "Lee", "fullName": "RoUn Lee", "affiliation": "IIIXR LAB at the Department of Software Convergence, Kyung Hee University, Yongin, South Korea", "__typename": "ArticleAuthorType" }, { "givenName": "KyeongMin", "surname": "Kim", "fullName": "KyeongMin Kim", "affiliation": "IIIXR LAB at the Department of Software Convergence, Kyung Hee University, Yongin, South Korea", "__typename": "ArticleAuthorType" }, { "givenName": "HyeongYeop", "surname": "Kang", "fullName": "HyeongYeop Kang", "affiliation": "IIIXR LAB at the Department of Software Convergence, Kyung Hee University, Yongin, South 
Korea", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "01", "pubDate": "2023-03-01 00:00:00", "pubType": "trans", "pages": "1-12", "year": "5555", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/vr/2011/0039/0/05759430", "title": "A soft hand model for physically-based manipulation of virtual objects", "doi": null, "abstractUrl": "/proceedings-article/vr/2011/05759430/12OmNBpEeRU", "parentPublication": { "id": "proceedings/vr/2011/0039/0", "title": "2011 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2010/7846/0/05571178", "title": "EMG Biofeedback Based VR System for Hand Rotation and Grasping Rehabilitation", "doi": null, "abstractUrl": "/proceedings-article/iv/2010/05571178/12OmNxRnvUd", "parentPublication": { "id": "proceedings/iv/2010/7846/0", "title": "2010 14th International Conference Information Visualisation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2022/9617/0/961700a493", "title": "Eye Tracking-based LSTM for Locomotion Prediction in VR", "doi": null, "abstractUrl": "/proceedings-article/vr/2022/961700a493/1CJcrKWnUtO", "parentPublication": { "id": "proceedings/vr/2022/9617/0", "title": "2022 IEEE on Conference Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600u0545", "title": "D-Grasp: Physically Plausible Dynamic Grasp Synthesis for Hand-Object Interactions", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600u0545/1H0Nqno8Tw4", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", 
"__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2022/5365/0/536500a566", "title": "A cup of coffee in Mixed Reality: analysis of movements&#x0027; smoothness from real to virtual", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a566/1J7Waw7xSy4", "parentPublication": { "id": "proceedings/ismar-adjunct/2022/5365/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2022/5365/0/536500a905", "title": "Haptics in VR Using Origami-Augmented Drones", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a905/1J7WrPcWIVO", "parentPublication": { "id": "proceedings/ismar-adjunct/2022/5365/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/10023999", "title": "VR Blowing: A Physically Plausible Interaction Method for Blowing Air in Virtual Reality", "doi": null, "abstractUrl": "/journal/tg/5555/01/10023999/1K9ssyL8VvG", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2023/05/10049658", "title": "Comparing Different Grasping Visualizations for Object Manipulation in VR using Controllers", "doi": null, "abstractUrl": "/journal/tg/2023/05/10049658/1KYotjCVD7W", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2023/05/10049645", "title": "GestureSurface: VR Sketching through Assembling Scaffold Surface with Non-Dominant Hand", "doi": null, 
"abstractUrl": "/journal/tg/2023/05/10049645/1KYoyLX55fy", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2023/4815/0/481500a243", "title": "Continuous VR Weight Illusion by Combining Adaptive Trigger Resistance and Control-Display Ratio Manipulation", "doi": null, "abstractUrl": "/proceedings-article/vr/2023/481500a243/1MNgyZ3pLFe", "parentPublication": { "id": "proceedings/vr/2023/4815/0", "title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "10068257", "articleId": "1LtR7CeyeHe", "__typename": "AdjacentArticleType" }, "next": { "fno": "10070611", "articleId": "1LvvYkEy8XC", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "1LvvXRrzl60", "name": "ttg555501-010066837s1-supp1-3255991.mp4", "location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-010066837s1-supp1-3255991.mp4", "extension": "mp4", "size": "53.5 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "1Lk2nmMLR6M", "title": "April", "year": "2023", "issueNum": "04", "idPrefix": "tp", "pubType": "journal", "volume": "45", "label": "April", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1G2VVeZiu9q", "doi": "10.1109/TPAMI.2022.3200725", "abstract": "Correcting the optical aberrations and the manufacturing deviations of cameras is a challenging task. Due to the limitation on volume and the demand for mass production, existing mobile terminals cannot rectify optical degradation. In this work, we systematically construct the perturbed lens system model to illustrate the relationship between the deviated system parameters and the spatial frequency response (SFR) measured from photographs. To further address this issue, an optimization framework is proposed based on this model to build proxy cameras from the machining samples&#x2019; SFRs. Engaging with the proxy cameras, we synthetic data pairs, which encode the optical aberrations and the random manufacturing biases, for training the learning-based algorithms. In correcting aberration, although promising results have been shown recently with convolutional neural networks, they are hard to generalize to stochastic machining biases. Therefore, we propose a dilated Omni-dimensional dynamic convolution (DOConv) and implement it in post-processing to account for the manufacturing degradation. Extensive experiments which evaluate multiple samples of two representative devices demonstrate that the proposed optimization framework accurately constructs the proxy camera. And the dynamic processing model is well-adapted to manufacturing deviations of different cameras, realizing perfect computational photography. 
The evaluation shows that the proposed method bridges the gap between optical design, system machining, and post-processing pipeline, shedding light on the joint of image signal reception (lens and sensor) and image signal processing (ISP).", "abstracts": [ { "abstractType": "Regular", "content": "Correcting the optical aberrations and the manufacturing deviations of cameras is a challenging task. Due to the limitation on volume and the demand for mass production, existing mobile terminals cannot rectify optical degradation. In this work, we systematically construct the perturbed lens system model to illustrate the relationship between the deviated system parameters and the spatial frequency response (SFR) measured from photographs. To further address this issue, an optimization framework is proposed based on this model to build proxy cameras from the machining samples&#x2019; SFRs. Engaging with the proxy cameras, we synthetic data pairs, which encode the optical aberrations and the random manufacturing biases, for training the learning-based algorithms. In correcting aberration, although promising results have been shown recently with convolutional neural networks, they are hard to generalize to stochastic machining biases. Therefore, we propose a dilated Omni-dimensional dynamic convolution (DOConv) and implement it in post-processing to account for the manufacturing degradation. Extensive experiments which evaluate multiple samples of two representative devices demonstrate that the proposed optimization framework accurately constructs the proxy camera. And the dynamic processing model is well-adapted to manufacturing deviations of different cameras, realizing perfect computational photography. 
The evaluation shows that the proposed method bridges the gap between optical design, system machining, and post-processing pipeline, shedding light on the joint of image signal reception (lens and sensor) and image signal processing (ISP).", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Correcting the optical aberrations and the manufacturing deviations of cameras is a challenging task. Due to the limitation on volume and the demand for mass production, existing mobile terminals cannot rectify optical degradation. In this work, we systematically construct the perturbed lens system model to illustrate the relationship between the deviated system parameters and the spatial frequency response (SFR) measured from photographs. To further address this issue, an optimization framework is proposed based on this model to build proxy cameras from the machining samples’ SFRs. Engaging with the proxy cameras, we synthetic data pairs, which encode the optical aberrations and the random manufacturing biases, for training the learning-based algorithms. In correcting aberration, although promising results have been shown recently with convolutional neural networks, they are hard to generalize to stochastic machining biases. Therefore, we propose a dilated Omni-dimensional dynamic convolution (DOConv) and implement it in post-processing to account for the manufacturing degradation. Extensive experiments which evaluate multiple samples of two representative devices demonstrate that the proposed optimization framework accurately constructs the proxy camera. And the dynamic processing model is well-adapted to manufacturing deviations of different cameras, realizing perfect computational photography. 
The evaluation shows that the proposed method bridges the gap between optical design, system machining, and post-processing pipeline, shedding light on the joint of image signal reception (lens and sensor) and image signal processing (ISP).", "title": "Computational Optics for Mobile Terminals in Mass Production", "normalizedTitle": "Computational Optics for Mobile Terminals in Mass Production", "fno": "09864277", "hasPdf": true, "idPrefix": "tp", "keywords": [ "Aberrations", "Cameras", "Convolutional Neural Nets", "Frequency Response", "Interactive Terminals", "Machining", "Mass Production", "Mobile Computing", "Optimisation", "Photography", "Production Engineering Computing", "Signal Processing", "Computational Photography", "Convolutional Neural Networks", "Image Signal Processing", "ISP", "Learning Based Algorithms", "Machining Samples", "Manufacturing Degradation", "Mass Production", "Mobile Terminals", "Omni Dimensional Dynamic Convolution", "Optical Degradation", "Optimization Framework", "Post Processing Pipeline", "Proxy Camera", "Random Manufacturing Biases", "SFR", "Shedding Light", "Spatial Frequency Response", "Stochastic Machining Biases", "Cameras", "Optical Imaging", "Adaptive Optics", "Manufacturing", "Degradation", "Optical Sensors", "Machining", "Optical Tolerancing", "Imaging Simulation", "Computational Photography", "Dynamic Convolution", "Mobile ISP Systems" ], "authors": [ { "givenName": "Shiqi", "surname": "Chen", "fullName": "Shiqi Chen", "affiliation": "State Key Laboratory of Modern Optical Instrumentation, Zhejiang University, Hangzhou, China", "__typename": "ArticleAuthorType" }, { "givenName": "Ting", "surname": "Lin", "fullName": "Ting Lin", "affiliation": "State Key Laboratory of Modern Optical Instrumentation, Zhejiang University, Hangzhou, China", "__typename": "ArticleAuthorType" }, { "givenName": "Huajun", "surname": "Feng", "fullName": "Huajun Feng", "affiliation": "State Key Laboratory of Modern Optical Instrumentation, 
Zhejiang University, Hangzhou, China", "__typename": "ArticleAuthorType" }, { "givenName": "Zhihai", "surname": "Xu", "fullName": "Zhihai Xu", "affiliation": "State Key Laboratory of Modern Optical Instrumentation, Zhejiang University, Hangzhou, China", "__typename": "ArticleAuthorType" }, { "givenName": "Qi", "surname": "Li", "fullName": "Qi Li", "affiliation": "State Key Laboratory of Modern Optical Instrumentation, Zhejiang University, Hangzhou, China", "__typename": "ArticleAuthorType" }, { "givenName": "Yueting", "surname": "Chen", "fullName": "Yueting Chen", "affiliation": "State Key Laboratory of Modern Optical Instrumentation, Zhejiang University, Hangzhou, China", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "04", "pubDate": "2023-04-01 00:00:00", "pubType": "trans", "pages": "4245-4259", "year": "2023", "issn": "0162-8828", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/cmpeur/1992/2760/0/00218468", "title": "Fast switching using nonlinear optics", "doi": null, "abstractUrl": "/proceedings-article/cmpeur/1992/00218468/12OmNvjyy44", "parentPublication": { "id": "proceedings/cmpeur/1992/2760/0", "title": "1992 Proceedings Computer Systems and Software Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/isatp/2003/7770/0/01217184", "title": "Integrated process planning and scheduling in holonic manufacturing systems-optimization based on shop time and machining cost", "doi": null, "abstractUrl": "/proceedings-article/isatp/2003/01217184/12OmNvkYx9X", "parentPublication": { "id": "proceedings/isatp/2003/7770/0", "title": "ISATP'03: 5th IEEE International Symposium on Assembly and Task Planning", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iri/2014/5880/0/07051914", 
"title": "Towards ray optics formalization of optical imaging systems", "doi": null, "abstractUrl": "/proceedings-article/iri/2014/07051914/12OmNvq5jzp", "parentPublication": { "id": "proceedings/iri/2014/5880/0", "title": "2014 IEEE International Conference on Information Reuse and Integration (IRI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/slip/2013/6173/0/06681678", "title": "Channel routing for integrated optics", "doi": null, "abstractUrl": "/proceedings-article/slip/2013/06681678/12OmNyY4rxt", "parentPublication": { "id": "proceedings/slip/2013/6173/0", "title": "2013 ACM/IEEE International Workshop on System Level Interconnect Prediction (SLIP)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cad-graphics/2013/2576/0/06815045", "title": "Multi-level Structuralized MBD Model for Manufacturing Reuse of Mechanical Parts", "doi": null, "abstractUrl": "/proceedings-article/cad-graphics/2013/06815045/12OmNzYeASJ", "parentPublication": { "id": "proceedings/cad-graphics/2013/2576/0", "title": "2013 International Conference on Computer-Aided Design and Computer Graphics (CAD/Graphics)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tk/2009/12/ttk2009121803", "title": "Manufacturing-Oriented Discrete Process Modeling Approach Using the Predicate Logic", "doi": null, "abstractUrl": "/journal/tk/2009/12/ttk2009121803/13rRUxAAT7X", "parentPublication": { "id": "trans/tk", "title": "IEEE Transactions on Knowledge & Data Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/qe/2023/01/09964108", "title": "Fundamentals of Quantum Fourier Optics", "doi": null, "abstractUrl": "/journal/qe/2023/01/09964108/1IAFM4ExGVi", "parentPublication": { "id": "trans/qe", "title": "IEEE Transactions on Quantum Engineering", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "proceedings/3cbit/2022/9225/0/922500a311", "title": "Design and Research of Flexible Machining Cell(FMC)-Take Machining the Connecting Rod Parts as an Example", "doi": null, "abstractUrl": "/proceedings-article/3cbit/2022/922500a311/1La4LSJNW7u", "parentPublication": { "id": "proceedings/3cbit/2022/9225/0", "title": "2022 International Conference on Cloud Computing, Big Data and Internet of Things (3CBIT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmew/2020/1485/0/09106014", "title": "Computational Multifocal Near-Eye Display with Hybrid Refractive-Diffractive Optics", "doi": null, "abstractUrl": "/proceedings-article/icmew/2020/09106014/1kwqMlDE9KE", "parentPublication": { "id": "proceedings/icmew/2020/1485/0", "title": "2020 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ecie/2021/1869/0/186900a171", "title": "Mechanical and Thermal Performance Analysis of Optical Components with Integrated Function and Structure", "doi": null, "abstractUrl": "/proceedings-article/ecie/2021/186900a171/1sXnV64dTX2", "parentPublication": { "id": "proceedings/ecie/2021/1869/0", "title": "2021 International Conference on Electronics, Circuits and Information Engineering (ECIE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "09852288", "articleId": "1FFHaJppkDm", "__typename": "AdjacentArticleType" }, "next": { "fno": "09816025", "articleId": "1EMV4dNmV3y", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "1Lk2w12JlFC", "name": "ttp202304-09864277s1-supp1-3200725.pdf", "location": "https://www.computer.org/csdl/api/v1/extra/ttp202304-09864277s1-supp1-3200725.pdf", "extension": "pdf", "size": "10.6 MB", "__typename": "WebExtraType" } ], 
"articleVideos": [] }
{ "issue": { "id": "1HMOit1lSk8", "title": "Dec.", "year": "2022", "issueNum": "12", "idPrefix": "tg", "pubType": "journal", "volume": "28", "label": "Dec.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1tTpcuKN5jW", "doi": "10.1109/TVCG.2021.3083423", "abstract": "The availability of new and improved display, tracking and input devices for Virtual Reality experiences has facilitated the use of partial and full body self-avatars in interaction with virtual objects in the environment. However, scaling the avatar to match the user&#x0027;s body dimensions remains to be a cumbersome process. Moreover, the effect of body-scaled self-avatars on size perception of virtual handheld objects and related action capabilities has been relatively unexplored. To this end, we present an empirical evaluation investigating the effect of the presence or absence of body-scaled self-avatars and visuo-motor calibration on frontal passability affordance judgments when interacting with virtual handheld objects. The self-avatar&#x0027;s dimensions were scaled to match the participant&#x0027;s eyeheight, arms length, shoulder width and body depth along the mid section. The results indicate that the presence of body-scaled self-avatars produce more realistic judgments of passability and aid the calibration process when interacting with virtual objects. Also, participants rely on the visual size of virtual objects to make judgments even though the kinesthetic and proprioceptive feedback of the object is missing or mismatched.", "abstracts": [ { "abstractType": "Regular", "content": "The availability of new and improved display, tracking and input devices for Virtual Reality experiences has facilitated the use of partial and full body self-avatars in interaction with virtual objects in the environment. 
However, scaling the avatar to match the user&#x0027;s body dimensions remains to be a cumbersome process. Moreover, the effect of body-scaled self-avatars on size perception of virtual handheld objects and related action capabilities has been relatively unexplored. To this end, we present an empirical evaluation investigating the effect of the presence or absence of body-scaled self-avatars and visuo-motor calibration on frontal passability affordance judgments when interacting with virtual handheld objects. The self-avatar&#x0027;s dimensions were scaled to match the participant&#x0027;s eyeheight, arms length, shoulder width and body depth along the mid section. The results indicate that the presence of body-scaled self-avatars produce more realistic judgments of passability and aid the calibration process when interacting with virtual objects. Also, participants rely on the visual size of virtual objects to make judgments even though the kinesthetic and proprioceptive feedback of the object is missing or mismatched.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "The availability of new and improved display, tracking and input devices for Virtual Reality experiences has facilitated the use of partial and full body self-avatars in interaction with virtual objects in the environment. However, scaling the avatar to match the user's body dimensions remains to be a cumbersome process. Moreover, the effect of body-scaled self-avatars on size perception of virtual handheld objects and related action capabilities has been relatively unexplored. To this end, we present an empirical evaluation investigating the effect of the presence or absence of body-scaled self-avatars and visuo-motor calibration on frontal passability affordance judgments when interacting with virtual handheld objects. The self-avatar's dimensions were scaled to match the participant's eyeheight, arms length, shoulder width and body depth along the mid section. 
The results indicate that the presence of body-scaled self-avatars produce more realistic judgments of passability and aid the calibration process when interacting with virtual objects. Also, participants rely on the visual size of virtual objects to make judgments even though the kinesthetic and proprioceptive feedback of the object is missing or mismatched.", "title": "Did I Hit the Door? Effects of Self-Avatars and Calibration in a Person-Plus-Virtual-Object System on Perceived Frontal Passability in VR", "normalizedTitle": "Did I Hit the Door? Effects of Self-Avatars and Calibration in a Person-Plus-Virtual-Object System on Perceived Frontal Passability in VR", "fno": "09440766", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Avatars", "Calibration", "Mechanoception", "Virtual Reality", "Visual Perception", "Body Depth", "Body Scaled Self Avatars", "Full Body Self Avatars", "Partial Body Self Avatars", "Perceived Frontal Passability", "Person Plus Virtual Object System", "Self Avatar", "Shoulder", "Virtual Handheld Objects", "Virtual Objects", "Virtual Reality Experiences", "Input Devices", "Calibration", "Apertures", "Avatars", "Tracking", "Virtual Reality", "Self Avatars", "Virtual Objects", "Affordance Perception", "Passability" ], "authors": [ { "givenName": "Ayush", "surname": "Bhargava", "fullName": "Ayush Bhargava", "affiliation": "Key Lime Interactive, Brooklyn, NY, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Roshan", "surname": "Venkatakrishnan", "fullName": "Roshan Venkatakrishnan", "affiliation": "School of Computing, Clemson University, Clemson, SC, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Rohith", "surname": "Venkatakrishnan", "fullName": "Rohith Venkatakrishnan", "affiliation": "School of Computing, Clemson University, Clemson, SC, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Hannah", "surname": "Solini", "fullName": "Hannah Solini", "affiliation": "Department of Psychology, Clemson University, 
Clemson, SC, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Kathryn", "surname": "Lucaites", "fullName": "Kathryn Lucaites", "affiliation": "Department of Psychology, Clemson University, Clemson, SC, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Andrew C.", "surname": "Robb", "fullName": "Andrew C. Robb", "affiliation": "School of Computing, Clemson University, Clemson, SC, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Christopher C.", "surname": "Pagano", "fullName": "Christopher C. Pagano", "affiliation": "Department of Psychology, Clemson University, Clemson, SC, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Sabarish V.", "surname": "Babu", "fullName": "Sabarish V. Babu", "affiliation": "School of Computing, Clemson University, Clemson, SC, USA", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "12", "pubDate": "2022-12-01 00:00:00", "pubType": "trans", "pages": "4198-4210", "year": "2022", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/vr/2014/2871/0/06802113", "title": "Automatic acquisition and animation of virtual avatars", "doi": null, "abstractUrl": "/proceedings-article/vr/2014/06802113/12OmNCeaQ1Z", "parentPublication": { "id": "proceedings/vr/2014/2871/0", "title": "2014 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2018/3365/0/08446189", "title": "Towards Revisiting Passability Judgments in Real and Immersive Virtual Environments", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08446189/13bd1fdV4lC", "parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" 
}, { "id": "proceedings/vr/2018/3365/0/08446477", "title": "Auto-Scaled Full Body Avatars for Virtual Reality: Facilitating Interactive Virtual Body Modification", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08446477/13bd1ftOBDh", "parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aivr/2018/9269/0/926900a237", "title": "Integrating Biomechanical and Animation Motion Capture Methods in the Production of Participant Specific, Scaled Avatars", "doi": null, "abstractUrl": "/proceedings-article/aivr/2018/926900a237/17D45XeKgqk", "parentPublication": { "id": "proceedings/aivr/2018/9269/0", "title": "2018 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/05/08648222", "title": "The Virtual Caliper: Rapid Creation of Metrically Accurate Avatars from 3D Measurements", "doi": null, "abstractUrl": "/journal/tg/2019/05/08648222/17QjJf0qqr2", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2021/2812/0/281200o4508", "title": "EgoRenderer: Rendering Human Avatars from Egocentric Camera Images", "doi": null, "abstractUrl": "/proceedings-article/iccv/2021/281200o4508/1BmJxzOtk4w", "parentPublication": { "id": "proceedings/iccv/2021/2812/0", "title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2023/05/10049626", "title": "Can I Squeeze Through? 
Effects of Self-Avatars and Calibration in a Person-Plus-Virtual-Object System on Perceived Lateral Passability in VR", "doi": null, "abstractUrl": "/journal/tg/2023/05/10049626/1KYoySw7RM4", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2023/4815/0/481500a308", "title": "Empirically Evaluating the Effects of Eye Height and Self-Avatars on Dynamic Passability Affordances in Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/vr/2023/481500a308/1MNgWLowz1m", "parentPublication": { "id": "proceedings/vr/2023/4815/0", "title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2020/5608/0/09089645", "title": "Comparative Evaluation of Viewing and Self-Representation on Passability Affordances to a Realistic Sliding Doorway in Real and Immersive Virtual Environments", "doi": null, "abstractUrl": "/proceedings-article/vr/2020/09089645/1jIx9zwn7SE", "parentPublication": { "id": "proceedings/vr/2020/5608/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icssa/2019/5912/0/591200a070", "title": "Acceptance of Virtual Health Avatars", "doi": null, "abstractUrl": "/proceedings-article/icssa/2019/591200a070/1q0FToF6GrK", "parentPublication": { "id": "proceedings/icssa/2019/5912/0", "title": "2019 International Conference on Software Security and Assurance (ICSSA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "09440798", "articleId": "1tTpcF5rJTy", "__typename": "AdjacentArticleType" }, "next": { "fno": "09444657", "articleId": "1u3mEKfpEwU", "__typename": 
"AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNyRxFj0", "title": "March", "year": "2018", "issueNum": "03", "idPrefix": "tg", "pubType": "journal", "volume": "24", "label": "March", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUy2YLT6", "doi": "10.1109/TVCG.2017.2676777", "abstract": "Visual coherence between virtual and real objects is a major issue in creating convincing augmented reality (AR) applications. To achieve this seamless integration, actual light conditions must be determined in real time to ensure that virtual objects are correctly illuminated and cast consistent shadows. In this paper, we propose a novel method to estimate daylight illumination and use this information in outdoor AR applications to render virtual objects with coherent shadows. The illumination parameters are acquired in real time from context-aware live sensor data. The method works under unprepared natural conditions. We also present a novel and rapid implementation of a state-of-the-art skylight model, from which the illumination parameters are derived. The Sun's position is calculated based on the user location and time of day, with the relative rotational differences estimated from a gyroscope, compass and accelerometer. The results illustrated that our method can generate visually credible AR scenes with consistent shadows rendered from recovered illumination.", "abstracts": [ { "abstractType": "Regular", "content": "Visual coherence between virtual and real objects is a major issue in creating convincing augmented reality (AR) applications. To achieve this seamless integration, actual light conditions must be determined in real time to ensure that virtual objects are correctly illuminated and cast consistent shadows. In this paper, we propose a novel method to estimate daylight illumination and use this information in outdoor AR applications to render virtual objects with coherent shadows. 
The illumination parameters are acquired in real time from context-aware live sensor data. The method works under unprepared natural conditions. We also present a novel and rapid implementation of a state-of-the-art skylight model, from which the illumination parameters are derived. The Sun's position is calculated based on the user location and time of day, with the relative rotational differences estimated from a gyroscope, compass and accelerometer. The results illustrated that our method can generate visually credible AR scenes with consistent shadows rendered from recovered illumination.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Visual coherence between virtual and real objects is a major issue in creating convincing augmented reality (AR) applications. To achieve this seamless integration, actual light conditions must be determined in real time to ensure that virtual objects are correctly illuminated and cast consistent shadows. In this paper, we propose a novel method to estimate daylight illumination and use this information in outdoor AR applications to render virtual objects with coherent shadows. The illumination parameters are acquired in real time from context-aware live sensor data. The method works under unprepared natural conditions. We also present a novel and rapid implementation of a state-of-the-art skylight model, from which the illumination parameters are derived. The Sun's position is calculated based on the user location and time of day, with the relative rotational differences estimated from a gyroscope, compass and accelerometer. 
The results illustrated that our method can generate visually credible AR scenes with consistent shadows rendered from recovered illumination.", "title": "A Context-Aware Method for Authentically Simulating Outdoors Shadows for Mobile Augmented Reality", "normalizedTitle": "A Context-Aware Method for Authentically Simulating Outdoors Shadows for Mobile Augmented Reality", "fno": "07867820", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Augmented Reality", "Mobile Computing", "Rendering Computer Graphics", "Context Aware Live Sensor Data", "Natural Conditions", "Virtual Object Rendering", "Augmented Reality Applications", "Skylight Model", "Gyroscope", "Compass", "Accelerometer", "Coherent Shadows", "Daylight Illumination", "Actual Light Conditions", "Seamless Integration", "Visual Coherence", "Mobile Augmented Reality", "Outdoors Shadows", "Context Aware Method", "Recovered Illumination", "Visually Credible AR Scenes", "Illumination Parameters", "Lighting", "Sun", "Augmented Reality", "Rendering Computer Graphics", "Cameras", "Context Awareness", "Augmented Reality", "Context Awareness", "Shadows Coherence", "Photometric Registration" ], "authors": [ { "givenName": "João", "surname": "Barreira", "fullName": "João Barreira", "affiliation": "Universidade de Trás-os-Montes e Alto Douro, Vila Real, Portugal", "__typename": "ArticleAuthorType" }, { "givenName": "Maximino", "surname": "Bessa", "fullName": "Maximino Bessa", "affiliation": "Universidade de Trás-os-Montes e Alto Douro, Vila Real, Portugal", "__typename": "ArticleAuthorType" }, { "givenName": "Luís", "surname": "Barbosa", "fullName": "Luís Barbosa", "affiliation": "Universidade de Trás-os-Montes e Alto Douro, Vila Real, Portugal", "__typename": "ArticleAuthorType" }, { "givenName": "Luís", "surname": "Magalhães", "fullName": "Luís Magalhães", "affiliation": "Centro Algoritmi/Universidade do Minho, Braga, Portugal", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, 
"showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "03", "pubDate": "2018-03-01 00:00:00", "pubType": "trans", "pages": "1223-1231", "year": "2018", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/svr/2012/4725/0/4725a036", "title": "Realistic Shadows for Mobile Augmented Reality", "doi": null, "abstractUrl": "/proceedings-article/svr/2012/4725a036/12OmNASrawf", "parentPublication": { "id": "proceedings/svr/2012/4725/0", "title": "2012 14th Symposium on Virtual and Augmented Reality", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/gamepec/2015/7207/0/07331844", "title": "Illumination rendering in Game and Augmented Reality", "doi": null, "abstractUrl": "/proceedings-article/gamepec/2015/07331844/12OmNBEGYIm", "parentPublication": { "id": "proceedings/gamepec/2015/7207/0", "title": "2015 Game Physics and Mechanics International Conference (GAMEPEC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/isuvr/2011/4420/0/4420b017", "title": "Estimation of Illuminants for Plausible Lighting in Augmented Reality", "doi": null, "abstractUrl": "/proceedings-article/isuvr/2011/4420b017/12OmNvkplcm", "parentPublication": { "id": "proceedings/isuvr/2011/4420/0", "title": "International Symposium on Ubiquitous Virtual Reality", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icccnt/2013/3926/0/06726809", "title": "A review on illumination techniques in augmented reality", "doi": null, "abstractUrl": "/proceedings-article/icccnt/2013/06726809/12OmNwMFMfk", "parentPublication": { "id": "proceedings/icccnt/2013/3926/0", "title": "2013 Fourth International Conference on Computing, Communications and Networking Technologies (ICCCNT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { 
"id": "proceedings/ismar-adjunct/2017/6327/0/6327a192", "title": "[POSTER] Illumination Estimation Using Cast Shadows for Realistic Augmented Reality Applications", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2017/6327a192/12OmNxX3uLh", "parentPublication": { "id": "proceedings/ismar-adjunct/2017/6327/0", "title": "2017 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2010/9343/0/05643558", "title": "Foreground and shadow occlusion handling for outdoor augmented reality", "doi": null, "abstractUrl": "/proceedings-article/ismar/2010/05643558/12OmNyRPgDK", "parentPublication": { "id": "proceedings/ismar/2010/9343/0", "title": "2010 IEEE International Symposium on Mixed and Augmented Reality", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2020/6532/0/09090547", "title": "[DC] Resolving Cue Conflicts in Augmented Reality", "doi": null, "abstractUrl": "/proceedings-article/vrw/2020/09090547/1jIxw8zwtbO", "parentPublication": { "id": "proceedings/vrw/2020/6532/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2020/04/09084269", "title": "Evaluation of Drop Shadows for Virtual Object Grasping in Augmented Reality", "doi": null, "abstractUrl": "/magazine/cg/2020/04/09084269/1jtyNfWJwoo", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/12/09490310", "title": "Shedding Light on Cast Shadows: An Investigation of Perceived Ground Contact in AR and VR", "doi": null, "abstractUrl": "/journal/tg/2022/12/09490310/1vmGThNh9jq", "parentPublication": { "id": "trans/tg", "title": 
"IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2021/0158/0/015800a422", "title": "Blending Shadows: Casting Shadows in Virtual and Real using Occlusion-Capable Augmented Reality Near-Eye Displays", "doi": null, "abstractUrl": "/proceedings-article/ismar/2021/015800a422/1yeD2Kh0vxS", "parentPublication": { "id": "proceedings/ismar/2021/0158/0", "title": "2021 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": null, "next": { "fno": "07845707", "articleId": "13rRUILLkDY", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "1LUpyYLBfeo", "title": "May", "year": "2023", "issueNum": "05", "idPrefix": "tg", "pubType": "journal", "volume": "29", "label": "May", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1KYouFY43ks", "doi": "10.1109/TVCG.2023.3247094", "abstract": "As virtual reality (VR) is typically designed in terms of visual experience, it poses major challenges for blind people to understand and interact with the environment. To address this, we propose a design space to explore how to augment objects and their behaviours in VR with a nonvisual audio representation. It intends to support designers in creating accessible experiences by explicitly considering alternative representations to visual feedback. To demonstrate its potential, we recruited 16 blind users and explored the design space under two scenarios in the context of boxing: understanding the location of objects (the opponent's defensive stance) and their movement (opponent's punches). We found that the design space enables the exploration of multiple engaging approaches for the auditory representation of virtual objects. Our findings depicted shared preferences but no one-size-fits-all solution, suggesting the need to understand the consequences of each design choice and their impact on the individual user experience.", "abstracts": [ { "abstractType": "Regular", "content": "As virtual reality (VR) is typically designed in terms of visual experience, it poses major challenges for blind people to understand and interact with the environment. To address this, we propose a design space to explore how to augment objects and their behaviours in VR with a nonvisual audio representation. It intends to support designers in creating accessible experiences by explicitly considering alternative representations to visual feedback. 
To demonstrate its potential, we recruited 16 blind users and explored the design space under two scenarios in the context of boxing: understanding the location of objects (the opponent's defensive stance) and their movement (opponent's punches). We found that the design space enables the exploration of multiple engaging approaches for the auditory representation of virtual objects. Our findings depicted shared preferences but no one-size-fits-all solution, suggesting the need to understand the consequences of each design choice and their impact on the individual user experience.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "As virtual reality (VR) is typically designed in terms of visual experience, it poses major challenges for blind people to understand and interact with the environment. To address this, we propose a design space to explore how to augment objects and their behaviours in VR with a nonvisual audio representation. It intends to support designers in creating accessible experiences by explicitly considering alternative representations to visual feedback. To demonstrate its potential, we recruited 16 blind users and explored the design space under two scenarios in the context of boxing: understanding the location of objects (the opponent's defensive stance) and their movement (opponent's punches). We found that the design space enables the exploration of multiple engaging approaches for the auditory representation of virtual objects. 
Our findings depicted shared preferences but no one-size-fits-all solution, suggesting the need to understand the consequences of each design choice and their impact on the individual user experience.", "title": "The Design Space of the Auditory Representation of Objects and Their Behaviours in Virtual Reality for Blind People", "normalizedTitle": "The Design Space of the Auditory Representation of Objects and Their Behaviours in Virtual Reality for Blind People", "fno": "10049631", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Handicapped Aids", "User Experience", "Virtual Reality", "Auditory Representation", "Blind People", "Blind Users", "Design Space", "Nonvisual Audio Representation", "Opponent Defensive Stance", "Opponent Punches", "Virtual Objects", "Virtual Reality", "Visual Experience", "Visual Feedback", "VR", "Space Exploration", "Blindness", "Visualization", "Virtual Reality", "Virtual Environments", "Haptic Interfaces", "Games", "Inclusive Virtual Reality", "Nonvisual Interaction", "Blind", "Auditory Feedback", "Design Space" ], "authors": [ { "givenName": "João", "surname": "Guerreiro", "fullName": "João Guerreiro", "affiliation": "LASIGE, Faculdade de Ciências, Universidade de Lisboa, Portugal", "__typename": "ArticleAuthorType" }, { "givenName": "Yujin", "surname": "Kim", "fullName": "Yujin Kim", "affiliation": "Computer Science and Engineering, Ewha Womans University, South Korea", "__typename": "ArticleAuthorType" }, { "givenName": "Rodrigo", "surname": "Nogueira", "fullName": "Rodrigo Nogueira", "affiliation": "LASIGE, Faculdade de Ciências, Universidade de Lisboa, Portugal", "__typename": "ArticleAuthorType" }, { "givenName": "SeungA", "surname": "Chung", "fullName": "SeungA Chung", "affiliation": "Computer Science and Engineering, Ewha Womans University, South Korea", "__typename": "ArticleAuthorType" }, { "givenName": "André", "surname": "Rodrigues", "fullName": "André Rodrigues", "affiliation": "LASIGE, Faculdade de Ciências, Universidade 
de Lisboa, Portugal", "__typename": "ArticleAuthorType" }, { "givenName": "Uran", "surname": "Oh", "fullName": "Uran Oh", "affiliation": "Computer Science and Engineering, Ewha Womans University, South Korea", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "05", "pubDate": "2023-05-01 00:00:00", "pubType": "trans", "pages": "2763-2773", "year": "2023", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/vr/2018/3365/0/08446280", "title": "Enhancing the Stiffness Perception of Tangible Objects in Mixed Reality Using Wearable Haptics", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08446280/13bd1AIBM2a", "parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2017/11/08007246", "title": "AR Feels &#x201c;Softer&#x201d; than VR: Haptic Perception of Stiffness in Augmented versus Virtual Reality", "doi": null, "abstractUrl": "/journal/tg/2017/11/08007246/13rRUwh80Hj", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2018/6420/0/642000d608", "title": "VizWiz Grand Challenge: Answering Visual Questions from Blind People", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2018/642000d608/17D45WgziNH", "parentPublication": { "id": "proceedings/cvpr/2018/6420/0", "title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sitis/2019/5686/0/568600a454", "title": "Analyzing Stress Situations for Blind 
People", "doi": null, "abstractUrl": "/proceedings-article/sitis/2019/568600a454/1j9xCBI3gju", "parentPublication": { "id": "proceedings/sitis/2019/5686/0", "title": "2019 15th International Conference on Signal-Image Technology & Internet-Based Systems (SITIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icict/2020/7283/0/728300a205", "title": "Methodology to Build a Wearable System for Assisting Blind People in Purposeful Navigation", "doi": null, "abstractUrl": "/proceedings-article/icict/2020/728300a205/1jPb3sCAV8I", "parentPublication": { "id": "proceedings/icict/2020/7283/0", "title": "2020 3rd International Conference on Information and Computer Technologies (ICICT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2020/6553/0/09093462", "title": "Body Pose Sonification for a View-Independent Auditory Aid to Blind Rock Climbers", "doi": null, "abstractUrl": "/proceedings-article/wacv/2020/09093462/1jPbn5j2og0", "parentPublication": { "id": "proceedings/wacv/2020/6553/0", "title": "2020 IEEE Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmew/2020/1485/0/09105985", "title": "Calligraphy Navigation System for Blind People Based on Visual Prosthesis on Waist Belt", "doi": null, "abstractUrl": "/proceedings-article/icmew/2020/09105985/1kwqHH2q3lK", "parentPublication": { "id": "proceedings/icmew/2020/1485/0", "title": "2020 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2020/7675/0/767500a206", "title": "Investigating Three-dimensional Directional Guidance with Nonvisual Feedback for Target Pointing Task", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2020/767500a206/1pBMgY7fbGM", 
"parentPublication": { "id": "proceedings/ismar-adjunct/2020/7675/0", "title": "2020 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2020/7675/0/767500a051", "title": "Exploring Virtual Environments by Visually Impaired Using a Mixed Reality Cane Without Visual Feedback", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2020/767500a051/1pBMgh7AbaU", "parentPublication": { "id": "proceedings/ismar-adjunct/2020/7675/0", "title": "2020 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/contie/2020/8342/0/834200a123", "title": "Game-Based Literacy for Blind People", "doi": null, "abstractUrl": "/proceedings-article/contie/2020/834200a123/1sZ2W46DCKs", "parentPublication": { "id": "proceedings/contie/2020/8342/0", "title": "2020 3rd International Conference of Inclusive Technology and Education (CONTIE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "10049705", "articleId": "1KYovqncdKo", "__typename": "AdjacentArticleType" }, "next": { "fno": "10049660", "articleId": "1KYoqi0DQK4", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNyaoDzm", "title": "April-June", "year": "2015", "issueNum": "02", "idPrefix": "ta", "pubType": "journal", "volume": "6", "label": "April-June", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUxASuz0", "doi": "10.1109/TAFFC.2014.2332471", "abstract": "Recent research in perception and theory of mind reveals that people show different behavior and lower activation of brain regions associated with mentalizing (i.e., the inference of other's mental states) when engaged in decision making with computers, when compared to humans. These findings are important for affective computing because they suggest people's decisions might be influenced differently according to whether they believe emotional expressions shown in computers are being generated by algorithms or humans. To test this, we had people engage in a social dilemma (Experiment 1) or negotiation (Experiment 2) with virtual humans that were either perceived to be agents (i.e., controlled by computers) or avatars (i.e., controlled by humans). The results showed that such perceptions have a deep impact on people's decisions: in Experiment 1, people cooperated more with virtual humans that showed cooperative facial displays (e.g., joy after mutual cooperation) than competitive displays (e.g., joy when the participant was exploited) but, the effect was stronger with avatars (d = .601) than with agents (d = .360); in Experiment 2, people conceded more to angry than neutral virtual humans but, again, the effect was much stronger with avatars (d = 1.162) than with agents (d = .066). 
Participants also showed less anger towards avatars and formed more positive impressions of avatars when compared to agents.", "abstracts": [ { "abstractType": "Regular", "content": "Recent research in perception and theory of mind reveals that people show different behavior and lower activation of brain regions associated with mentalizing (i.e., the inference of other's mental states) when engaged in decision making with computers, when compared to humans. These findings are important for affective computing because they suggest people's decisions might be influenced differently according to whether they believe emotional expressions shown in computers are being generated by algorithms or humans. To test this, we had people engage in a social dilemma (Experiment 1) or negotiation (Experiment 2) with virtual humans that were either perceived to be agents (i.e., controlled by computers) or avatars (i.e., controlled by humans). The results showed that such perceptions have a deep impact on people's decisions: in Experiment 1, people cooperated more with virtual humans that showed cooperative facial displays (e.g., joy after mutual cooperation) than competitive displays (e.g., joy when the participant was exploited) but, the effect was stronger with avatars (d = .601) than with agents (d = .360); in Experiment 2, people conceded more to angry than neutral virtual humans but, again, the effect was much stronger with avatars (d = 1.162) than with agents (d = .066). Participants also showed less anger towards avatars and formed more positive impressions of avatars when compared to agents.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Recent research in perception and theory of mind reveals that people show different behavior and lower activation of brain regions associated with mentalizing (i.e., the inference of other's mental states) when engaged in decision making with computers, when compared to humans. 
These findings are important for affective computing because they suggest people's decisions might be influenced differently according to whether they believe emotional expressions shown in computers are being generated by algorithms or humans. To test this, we had people engage in a social dilemma (Experiment 1) or negotiation (Experiment 2) with virtual humans that were either perceived to be agents (i.e., controlled by computers) or avatars (i.e., controlled by humans). The results showed that such perceptions have a deep impact on people's decisions: in Experiment 1, people cooperated more with virtual humans that showed cooperative facial displays (e.g., joy after mutual cooperation) than competitive displays (e.g., joy when the participant was exploited) but, the effect was stronger with avatars (d = .601) than with agents (d = .360); in Experiment 2, people conceded more to angry than neutral virtual humans but, again, the effect was much stronger with avatars (d = 1.162) than with agents (d = .066). 
Participants also showed less anger towards avatars and formed more positive impressions of avatars when compared to agents.", "title": "Humans versus Computers: Impact of Emotion Expressions on People's Decision Making", "normalizedTitle": "Humans versus Computers: Impact of Emotion Expressions on People's Decision Making", "fno": "06853335", "hasPdf": true, "idPrefix": "ta", "keywords": [ "Avatars", "Decision Making", "Human Computer Interaction", "Humans Versus Computers", "Emotion Expressions", "People Decision Making", "People Behavior", "Brain Region Activation", "Social Dilemma", "Negotiation", "Virtual Humans", "Avatars", "Agents", "Cooperative Facial Displays", "Competitive Displays", "Computers", "Avatars", "Decision Making", "Games", "Atmospheric Measurements", "Particle Measurements", "Standards", "Human Versus Computers", "Emotion Expression", "Decision Making", "Human Versus Computers", "Emotion Expression", "Decision Making" ], "authors": [ { "givenName": "Celso M.", "surname": "de Melo", "fullName": "Celso M. de Melo", "affiliation": "USC Marshall School of Business, Los Angeles, CA", "__typename": "ArticleAuthorType" }, { "givenName": "Jonathan", "surname": "Gratch", "fullName": "Jonathan Gratch", "affiliation": "USC Institute for Creative Technologies, 12015 Waterfront Drive, Building #4, Playa Vista, CA", "__typename": "ArticleAuthorType" }, { "givenName": "Peter J.", "surname": "Carnevale", "fullName": "Peter J. 
Carnevale", "affiliation": "USC Marshall School of Business, Los Angeles, CA", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "02", "pubDate": "2015-04-01 00:00:00", "pubType": "trans", "pages": "127-136", "year": "2015", "issn": "1949-3045", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/acii/2013/5048/0/5048a546", "title": "The Effect of Agency on the Impact of Emotion Expressions on People's Decision Making", "doi": null, "abstractUrl": "/proceedings-article/acii/2013/5048a546/12OmNCesrdM", "parentPublication": { "id": "proceedings/acii/2013/5048/0", "title": "2013 Humaine Association Conference on Affective Computing and Intelligent Interaction (ACII)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/acii/2017/0563/0/08273657", "title": "Avatar and participant gender differences in the perception of uncanniness of virtual humans", "doi": null, "abstractUrl": "/proceedings-article/acii/2017/08273657/12OmNzZmZBE", "parentPublication": { "id": "proceedings/acii/2017/0563/0", "title": "2017 Seventh International Conference on Affective Computing and Intelligent Interaction (ACII)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vs-games/2018/7123/0/08493429", "title": "Improving Context Understanding Using Avatar's Affective Expressions Reflecting Operator's Mental States", "doi": null, "abstractUrl": "/proceedings-article/vs-games/2018/08493429/14tNJoD4Uxj", "parentPublication": { "id": "proceedings/vs-games/2018/7123/0", "title": "2018 10th International Conference on Virtual Worlds and Games for Serious Applications (VS-Games)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2022/5365/0/536500a772", "title": 
"Embodiment of an Avatar with Unnatural Arm Movements", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a772/1J7W9fEjd6g", "parentPublication": { "id": "proceedings/ismar-adjunct/2022/5365/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2023/05/10049655", "title": "Measuring Interpersonal Trust towards Virtual Humans with a Virtual Maze Paradigm", "doi": null, "abstractUrl": "/journal/tg/2023/05/10049655/1KYouwvCMBa", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08798122", "title": "An Initial Investigation into Stereotypical Influences on Implicit Racial Bias and Embodied Avatars", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08798122/1cJ0MR4xjWg", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08797926", "title": "Ethical Concerns of the Use of Virtual Avatars in Consumer Entertainment", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08797926/1cJ1gv7LjFK", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/acii/2019/3888/0/08925481", "title": "Can Social Agents elicit Shame as Humans do?", "doi": null, "abstractUrl": "/proceedings-article/acii/2019/08925481/1fHGAcmd6H6", "parentPublication": { "id": "proceedings/acii/2019/3888/0", "title": "2019 8th International Conference on Affective Computing and 
Intelligent Interaction (ACII)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cic/2019/6739/0/673900a298", "title": "Designing for Fallible Humans", "doi": null, "abstractUrl": "/proceedings-article/cic/2019/673900a298/1hrMg4asmCQ", "parentPublication": { "id": "proceedings/cic/2019/6739/0", "title": "2019 IEEE 5th International Conference on Collaboration and Internet Computing (CIC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2020/6532/0/09090457", "title": "Affective Embodiment: The effect of avatar appearance and posture representation on emotions in VR", "doi": null, "abstractUrl": "/proceedings-article/vrw/2020/09090457/1jIxjXwO4HS", "parentPublication": { "id": "proceedings/vrw/2020/6532/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "06975149", "articleId": "13rRUxcsYKa", "__typename": "AdjacentArticleType" }, "next": { "fno": "07042773", "articleId": "13rRUwh80Bg", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNCcKQnF", "title": "Oct.-Dec.", "year": "2016", "issueNum": "04", "idPrefix": "ta", "pubType": "journal", "volume": "7", "label": "Oct.-Dec.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUxYrbSX", "doi": "10.1109/TAFFC.2015.2472013", "abstract": "This study presents an evaluation of a mobile game with physiologically aware virtual humans as an approach to modulate the participant's affective and physiological state. We developed a mobile version of a virtual reality scenario where the participants were able to interact with virtual human characters through their psychophysiological activity. Music was played in the background of the scenario and, depending on the experimental condition, the virtual humans were initially either barely dancing or dancing very euphorically. The task of the participants was to encourage the apathetic virtual humans to dance or to calm down the frenetically dancing characters, through the modulation of their own mood and physiological activity. Results from our study show that by using this mobile game with the physiologically aware and affective virtual humans the participants were able to emotionally arouse themselves in the Activation condition and were able to relax themselves in the Relaxation condition, during the same session with only a brief break between conditions. The self-reported affective data was also corroborated by the physiological data (heart rate, respiration and skin conductance) which significantly differed between the Activation and Relaxation conditions.", "abstracts": [ { "abstractType": "Regular", "content": "This study presents an evaluation of a mobile game with physiologically aware virtual humans as an approach to modulate the participant's affective and physiological state. 
We developed a mobile version of a virtual reality scenario where the participants were able to interact with virtual human characters through their psychophysiological activity. Music was played in the background of the scenario and, depending on the experimental condition, the virtual humans were initially either barely dancing or dancing very euphorically. The task of the participants was to encourage the apathetic virtual humans to dance or to calm down the frenetically dancing characters, through the modulation of their own mood and physiological activity. Results from our study show that by using this mobile game with the physiologically aware and affective virtual humans the participants were able to emotionally arouse themselves in the Activation condition and were able to relax themselves in the Relaxation condition, during the same session with only a brief break between conditions. The self-reported affective data was also corroborated by the physiological data (heart rate, respiration and skin conductance) which significantly differed between the Activation and Relaxation conditions.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "This study presents an evaluation of a mobile game with physiologically aware virtual humans as an approach to modulate the participant's affective and physiological state. We developed a mobile version of a virtual reality scenario where the participants were able to interact with virtual human characters through their psychophysiological activity. Music was played in the background of the scenario and, depending on the experimental condition, the virtual humans were initially either barely dancing or dancing very euphorically. The task of the participants was to encourage the apathetic virtual humans to dance or to calm down the frenetically dancing characters, through the modulation of their own mood and physiological activity. 
Results from our study show that by using this mobile game with the physiologically aware and affective virtual humans the participants were able to emotionally arouse themselves in the Activation condition and were able to relax themselves in the Relaxation condition, during the same session with only a brief break between conditions. The self-reported affective data was also corroborated by the physiological data (heart rate, respiration and skin conductance) which significantly differed between the Activation and Relaxation conditions.", "title": "Dancing with Physio: A Mobile Game with Physiologically Aware Virtual Humans", "normalizedTitle": "Dancing with Physio: A Mobile Game with Physiologically Aware Virtual Humans", "fno": "07219412", "hasPdf": true, "idPrefix": "ta", "keywords": [ "Games", "Physiology", "Mood", "Biomedical Monitoring", "Biological Control Systems", "Atmospheric Measurements", "Particle Measurements", "Mobile Game", "Biofeedback", "Physiological Computing", "Virtual Human Characters", "Mood Modulation" ], "authors": [ { "givenName": "Jorge", "surname": "Arroyo-Palacios", "fullName": "Jorge Arroyo-Palacios", "affiliation": "Psychology, EVENTLAB, University of Barcelona, Barcelona, Spain", "__typename": "ArticleAuthorType" }, { "givenName": "Mel", "surname": "Slater", "fullName": "Mel Slater", "affiliation": "EVENTLAB, University of Barcelona, the Department of Computer Science, University College London, Barcelona, SpainUnited Kingdom", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "04", "pubDate": "2016-10-01 00:00:00", "pubType": "trans", "pages": "326-336", "year": "2016", "issn": "1949-3045", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/hicss/2016/5670/0/5670a396", "title": "Selecting Physiological Features for Predicting Bidding Behavior in Electronic 
Auctions", "doi": null, "abstractUrl": "/proceedings-article/hicss/2016/5670a396/12OmNAkEU6t", "parentPublication": { "id": "proceedings/hicss/2016/5670/0", "title": "2016 49th Hawaii International Conference on System Sciences (HICSS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/acii/2013/5048/0/5048a289", "title": "Heart Rate Variability and Skin Conductance Biofeedback: A Triple-Blind Randomized Controlled Study", "doi": null, "abstractUrl": "/proceedings-article/acii/2013/5048a289/12OmNAtK4n3", "parentPublication": { "id": "proceedings/acii/2013/5048/0", "title": "2013 Humaine Association Conference on Affective Computing and Intelligent Interaction (ACII)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2017/6647/0/07892315", "title": "The effect of geometric realism on presence in a virtual reality game", "doi": null, "abstractUrl": "/proceedings-article/vr/2017/07892315/12OmNBTawwY", "parentPublication": { "id": "proceedings/vr/2017/6647/0", "title": "2017 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vs-games/2017/5812/0/08056587", "title": "The effect of cognitive load on physiological arousal in a decision-making serious game", "doi": null, "abstractUrl": "/proceedings-article/vs-games/2017/08056587/12OmNqJHFDH", "parentPublication": { "id": "proceedings/vs-games/2017/5812/0", "title": "2017 9th International Conference on Virtual Worlds and Games for Serious Applications (VS-Games)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/acii/2015/9953/0/07344646", "title": "The influence of subliminal visual primes on player affect in a horror computer game", "doi": null, "abstractUrl": "/proceedings-article/acii/2015/07344646/12OmNvjQ8Ow", "parentPublication": { "id": "proceedings/acii/2015/9953/0", "title": "2015 
International Conference on Affective Computing and Intelligent Interaction (ACII)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ta/2015/02/06853335", "title": "Humans versus Computers: Impact of Emotion Expressions on People's Decision Making", "doi": null, "abstractUrl": "/journal/ta/2015/02/06853335/13rRUxASuz0", "parentPublication": { "id": "trans/ta", "title": "IEEE Transactions on Affective Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/11/09873958", "title": "Characterizing Physiological Responses to Fear, Frustration, and Insight in Virtual Reality", "doi": null, "abstractUrl": "/journal/tg/2022/11/09873958/1GjwGGW9cSA", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2023/05/10049655", "title": "Measuring Interpersonal Trust towards Virtual Humans with a Virtual Maze Paradigm", "doi": null, "abstractUrl": "/journal/tg/2023/05/10049655/1KYouwvCMBa", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aivr/2022/5725/0/572500a193", "title": "Comparing Meditation and Immersive Virtual Environment for Relaxation", "doi": null, "abstractUrl": "/proceedings-article/aivr/2022/572500a193/1KmFfgROQxO", "parentPublication": { "id": "proceedings/aivr/2022/5725/0", "title": "2022 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ta/2023/01/09229513", "title": "Estimating Affective Taste Experience Using Combined Implicit Behavioral and Neurophysiological Measures", "doi": null, "abstractUrl": 
"/journal/ta/2023/01/09229513/1o3nfbzpzhe", "parentPublication": { "id": "trans/ta", "title": "IEEE Transactions on Affective Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "07181686", "articleId": "13rRUy2YLWD", "__typename": "AdjacentArticleType" }, "next": { "fno": "07265021", "articleId": "13rRUILc8dN", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "17ShDTXWRVo", "name": "tta201604-07219412s1.zip", "location": "https://www.computer.org/csdl/api/v1/extra/tta201604-07219412s1.zip", "extension": "zip", "size": "243 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "12OmNCau3c2", "title": "May/June", "year": "2006", "issueNum": "03", "idPrefix": "cg", "pubType": "magazine", "volume": "26", "label": "May/June", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUyuegjh", "doi": "10.1109/MCG.2006.68", "abstract": "At the University of Florida (UF), a research team has worked on applying virtual humans as partners in interpersonal communication scenarios. The goal: teach communication skills using virtual humans. The simulation of an interaction between people to facilitate teaching, training, and testing of communication skills would be a powerful new application of VR. The author's approach is to employ natural methods of interaction with the virtual human, striving to create an experience similar to two (initially) people talking. By using a natural interface, we aim to have the student interact with DIANA naturally, and thus allow the training of interviewing and communication skills.", "abstracts": [ { "abstractType": "Regular", "content": "At the University of Florida (UF), a research team has worked on applying virtual humans as partners in interpersonal communication scenarios. The goal: teach communication skills using virtual humans. The simulation of an interaction between people to facilitate teaching, training, and testing of communication skills would be a powerful new application of VR. The author's approach is to employ natural methods of interaction with the virtual human, striving to create an experience similar to two (initially) people talking. 
By using a natural interface, we aim to have the student interact with DIANA naturally, and thus allow the training of interviewing and communication skills.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "At the University of Florida (UF), a research team has worked on applying virtual humans as partners in interpersonal communication scenarios. The goal: teach communication skills using virtual humans. The simulation of an interaction between people to facilitate teaching, training, and testing of communication skills would be a powerful new application of VR. The author's approach is to employ natural methods of interaction with the virtual human, striving to create an experience similar to two (initially) people talking. By using a natural interface, we aim to have the student interact with DIANA naturally, and thus allow the training of interviewing and communication skills.", "title": "Teaching Communication Skills with Virtual Humans", "normalizedTitle": "Teaching Communication Skills with Virtual Humans", "fno": "mcg2006030010", "hasPdf": false, "idPrefix": "cg", "keywords": [ "Biomedical Communication", "Biomedical Education", "Computer Aided Instruction", "Medical Computing", "Teaching", "Virtual Reality", "Virtual Human", "Interpersonal Communication Scenario", "Teaching", "Virtual Reality", "Computer Aided Instruction", "Biomedical Education", "Medical Computing", "Education", "Humans", "Microphones", "Educational Institutions", "Speech Recognition", "Communication Standards", "Cameras", "Hardware", "Software Algorithms", "Abdomen", "Virtual Humans", "Teaching", "Communication Skills", "Digital Animated Avatar" ], "authors": [ { "givenName": "Benjamin", "surname": "Lok", "fullName": "Benjamin Lok", "affiliation": "University of Florida", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "03", "pubDate": "2006-05-01 00:00:00", "pubType": 
"mags", "pages": "10-13", "year": "2006", "issn": "0272-1716", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/cerma/2010/4204/0/4204a225", "title": "A P2P Architecture to Perform Actions of Virtual Humans in Distributed Virtual Environments", "doi": null, "abstractUrl": "/proceedings-article/cerma/2010/4204a225/12OmNAlNiOt", "parentPublication": { "id": "proceedings/cerma/2010/4204/0", "title": "Electronics, Robotics and Automotive Mechanics Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2005/8929/0/01492772", "title": "Experiences in using immersive virtual characters to educate medical communication skills", "doi": null, "abstractUrl": "/proceedings-article/vr/2005/01492772/12OmNvT2p3E", "parentPublication": { "id": "proceedings/vr/2005/8929/0", "title": "IEEE Virtual Reality 2005", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2009/3943/0/04811019", "title": "Virtual Humans That Touch Back: Enhancing Nonverbal Communication with Virtual Humans through Bidirectional Touch", "doi": null, "abstractUrl": "/proceedings-article/vr/2009/04811019/12OmNwMXnsz", "parentPublication": { "id": "proceedings/vr/2009/3943/0", "title": "2009 IEEE Virtual Reality Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cw/2009/3791/0/3791a071", "title": "Virtual Humans in Serious Games", "doi": null, "abstractUrl": "/proceedings-article/cw/2009/3791a071/12OmNxxNbPm", "parentPublication": { "id": "proceedings/cw/2009/3791/0", "title": "2009 International Conference on CyberWorlds", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/fie/2011/468/0/06142859", "title": "Acquiring professional skills: Virtual facilitator as model for team communication", "doi": null, "abstractUrl": 
"/proceedings-article/fie/2011/06142859/12OmNzTH0Jo", "parentPublication": { "id": "proceedings/fie/2011/468/0", "title": "2011 Frontiers in Education Conference (FIE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2017/06/mcg2017060026", "title": "Modeling Virtual Humans", "doi": null, "abstractUrl": "/magazine/cg/2017/06/mcg2017060026/13rRUx0xPpu", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/1998/05/mcg1998050042", "title": "Real-Time Animation of Realistic Virtual Humans", "doi": null, "abstractUrl": "/magazine/cg/1998/05/mcg1998050042/13rRUyoPSRo", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/fie/2018/1174/0/08659258", "title": "Virtual Reality Activities for Teaching Engineering Students Professional Development Skills", "doi": null, "abstractUrl": "/proceedings-article/fie/2018/08659258/18j8YduwYo0", "parentPublication": { "id": "proceedings/fie/2018/1174/0", "title": "2018 IEEE Frontiers in Education Conference (FIE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2022/8402/0/840200a954", "title": "[DC] Improving Presence of Virtual Humans through Paralinguistics", "doi": null, "abstractUrl": "/proceedings-article/vrw/2022/840200a954/1CJfdg0rEyY", "parentPublication": { "id": "proceedings/vrw/2022/8402/0", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sbgames/2021/0189/0/018900a117", "title": "Perception of Personality Traits in Crowds of Virtual Humans", "doi": null, "abstractUrl": 
"/proceedings-article/sbgames/2021/018900a117/1zusqJ6D3Ne", "parentPublication": { "id": "proceedings/sbgames/2021/0189/0", "title": "2021 20th Brazilian Symposium on Computer Games and Digital Entertainment (SBGames)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "mcg2006030006", "articleId": "13rRUxZRbrM", "__typename": "AdjacentArticleType" }, "next": { "fno": "mcg2006030014", "articleId": "13rRUwhpBSx", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNwMob9C", "title": "April", "year": "2018", "issueNum": "04", "idPrefix": "tg", "pubType": "journal", "volume": "24", "label": "April", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUwI5U2R", "doi": "10.1109/TVCG.2018.2794073", "abstract": "Today&#x0027;s virtual reality (VR) applications such as gaming, multisensory entertainment, remote dining, and online shopping are mainly based on audio, visual, and touch interactions between humans and virtual worlds. Integrating the sense of taste into VR is difficult since humans are dependent on chemical-based taste delivery systems. This paper presents the &#x2018;Thermal Taste Machine&#x2019;, a new digital taste actuation technology that can effectively produce and modify thermal taste sensations on the tongue. It modifies the temperature of the surface of the tongue within a short period of time (from 25&#x00B0;C to 40 &#x00B0;C while heating, and from 25&#x00B0;C to 10 &#x00B0;C while cooling). We tested this device on human subjects and described the experience of thermal taste using 20 known (taste and non-taste) sensations. Our results suggested that rapidly heating the tongue produces sweetness, fatty/oiliness, electric taste, warmness, and reduces the sensibility for metallic taste. Similarly, cooling the tongue produced mint taste, pleasantness, and coldness. By conducting another user study on the perceived sweetness of sucrose solutions after the thermal stimulation, we found that heating the tongue significantly enhances the intensity of sweetness for both thermal tasters and non-thermal tasters. Also, we found that faster temperature rises on the tongue produce more intense sweet sensations for thermal tasters. This technology will be useful in two ways: First, it can produce taste sensations without using chemicals for the individuals who are sensitive to thermal taste. 
Second, the temperature rise of the device can be used as a way to enhance the intensity of sweetness. We believe that this technology can be used to digitally produce and enhance taste sensations in future virtual reality applications. The key novelties of this paper are as follows: 1. Development of a thermal taste actuation technology for stimulating the human taste receptors, 2. Characterization of the thermal taste produced by the device using taste-related sensations and non-taste related sensations, 3. Research on enhancing the intensity for sucrose solutions using thermal stimulation, 4. Research on how different speeds of heating affect the intensity of sweetness produced by thermal stimulation.", "abstracts": [ { "abstractType": "Regular", "content": "Today&#x0027;s virtual reality (VR) applications such as gaming, multisensory entertainment, remote dining, and online shopping are mainly based on audio, visual, and touch interactions between humans and virtual worlds. Integrating the sense of taste into VR is difficult since humans are dependent on chemical-based taste delivery systems. This paper presents the &#x2018;Thermal Taste Machine&#x2019;, a new digital taste actuation technology that can effectively produce and modify thermal taste sensations on the tongue. It modifies the temperature of the surface of the tongue within a short period of time (from 25&#x00B0;C to 40 &#x00B0;C while heating, and from 25&#x00B0;C to 10 &#x00B0;C while cooling). We tested this device on human subjects and described the experience of thermal taste using 20 known (taste and non-taste) sensations. Our results suggested that rapidly heating the tongue produces sweetness, fatty/oiliness, electric taste, warmness, and reduces the sensibility for metallic taste. Similarly, cooling the tongue produced mint taste, pleasantness, and coldness. 
By conducting another user study on the perceived sweetness of sucrose solutions after the thermal stimulation, we found that heating the tongue significantly enhances the intensity of sweetness for both thermal tasters and non-thermal tasters. Also, we found that faster temperature rises on the tongue produce more intense sweet sensations for thermal tasters. This technology will be useful in two ways: First, it can produce taste sensations without using chemicals for the individuals who are sensitive to thermal taste. Second, the temperature rise of the device can be used as a way to enhance the intensity of sweetness. We believe that this technology can be used to digitally produce and enhance taste sensations in future virtual reality applications. The key novelties of this paper are as follows: 1. Development of a thermal taste actuation technology for stimulating the human taste receptors, 2. Characterization of the thermal taste produced by the device using taste-related sensations and non-taste related sensations, 3. Research on enhancing the intensity for sucrose solutions using thermal stimulation, 4. Research on how different speeds of heating affect the intensity of sweetness produced by thermal stimulation.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Today's virtual reality (VR) applications such as gaming, multisensory entertainment, remote dining, and online shopping are mainly based on audio, visual, and touch interactions between humans and virtual worlds. Integrating the sense of taste into VR is difficult since humans are dependent on chemical-based taste delivery systems. This paper presents the ‘Thermal Taste Machine’, a new digital taste actuation technology that can effectively produce and modify thermal taste sensations on the tongue. It modifies the temperature of the surface of the tongue within a short period of time (from 25°C to 40 °C while heating, and from 25°C to 10 °C while cooling). 
We tested this device on human subjects and described the experience of thermal taste using 20 known (taste and non-taste) sensations. Our results suggested that rapidly heating the tongue produces sweetness, fatty/oiliness, electric taste, warmness, and reduces the sensibility for metallic taste. Similarly, cooling the tongue produced mint taste, pleasantness, and coldness. By conducting another user study on the perceived sweetness of sucrose solutions after the thermal stimulation, we found that heating the tongue significantly enhances the intensity of sweetness for both thermal tasters and non-thermal tasters. Also, we found that faster temperature rises on the tongue produce more intense sweet sensations for thermal tasters. This technology will be useful in two ways: First, it can produce taste sensations without using chemicals for the individuals who are sensitive to thermal taste. Second, the temperature rise of the device can be used as a way to enhance the intensity of sweetness. We believe that this technology can be used to digitally produce and enhance taste sensations in future virtual reality applications. The key novelties of this paper are as follows: 1. Development of a thermal taste actuation technology for stimulating the human taste receptors, 2. Characterization of the thermal taste produced by the device using taste-related sensations and non-taste related sensations, 3. Research on enhancing the intensity for sucrose solutions using thermal stimulation, 4. 
Research on how different speeds of heating affect the intensity of sweetness produced by thermal stimulation.", "title": "New Thermal Taste Actuation Technology for Future Multisensory Virtual Reality and Internet", "normalizedTitle": "New Thermal Taste Actuation Technology for Future Multisensory Virtual Reality and Internet", "fno": "08260970", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Tongue", "Temperature Sensors", "Silver", "Heating Systems", "Chemicals", "Cooling", "Virtual Reality", "Thermal Taste", "Multisensory VR", "Digitizing Taste", "Characterization Of Thermal Taste", "TRPM 5" ], "authors": [ { "givenName": "Kasun", "surname": "Karunanayaka", "fullName": "Kasun Karunanayaka", "affiliation": "Imagineering Institute, Malaysia", "__typename": "ArticleAuthorType" }, { "givenName": "Nurafiqah", "surname": "Johari", "fullName": "Nurafiqah Johari", "affiliation": "Imagineering Institute, Malaysia", "__typename": "ArticleAuthorType" }, { "givenName": "Surina", "surname": "Hariri", "fullName": "Surina Hariri", "affiliation": "Imagineering Institute, Malaysia", "__typename": "ArticleAuthorType" }, { "givenName": "Hanis", "surname": "Camelia", "fullName": "Hanis Camelia", "affiliation": "Imagineering Institute, Malaysia", "__typename": "ArticleAuthorType" }, { "givenName": "Kevin Stanley", "surname": "Bielawski", "fullName": "Kevin Stanley Bielawski", "affiliation": "Imagineering Institute, Malaysia", "__typename": "ArticleAuthorType" }, { "givenName": "Adrian David", "surname": "Cheok", "fullName": "Adrian David Cheok", "affiliation": "Imagineering Institute, Malaysia", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "04", "pubDate": "2018-04-01 00:00:00", "pubType": "trans", "pages": "1496-1505", "year": "2018", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": 
"proceedings/bibmw/2010/8303/0/05703898", "title": "Study about the feature of some parts of the Traditional Chinese Medicine(TCM) physical examination correlate with the indexes of the renal function", "doi": null, "abstractUrl": "/proceedings-article/bibmw/2010/05703898/12OmNqFJhP4", "parentPublication": { "id": "proceedings/bibmw/2010/8303/0", "title": "2010 IEEE International Conference on Bioinformatics and Biomedicine Workshops (BIBMW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iswc/2012/4697/0/06246147", "title": "Tongue Mounted Interface for Digitally Actuating the Sense of Taste", "doi": null, "abstractUrl": "/proceedings-article/iswc/2012/06246147/12OmNrkT7KQ", "parentPublication": { "id": "proceedings/iswc/2012/4697/0", "title": "2012 16th International Symposium on Wearable Computers", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/dcoss/2013/5041/0/5041a191", "title": "Model-Based Thermal Anomaly Detection in Cloud Datacenters", "doi": null, "abstractUrl": "/proceedings-article/dcoss/2013/5041a191/12OmNwI8cew", "parentPublication": { "id": "proceedings/dcoss/2013/5041/0", "title": "2013 IEEE International Conference on Distributed Computing in Sensor Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icppw/2017/1044/0/1044a261", "title": "Thermal-Aware Job Scheduling of MapReduce Applications on High Performance Clusters", "doi": null, "abstractUrl": "/proceedings-article/icppw/2017/1044a261/12OmNyLiuqE", "parentPublication": { "id": "proceedings/icppw/2017/1044/0", "title": "2017 46th International Conference on Parallel Processing Workshops (ICPPW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vlsid/2016/8700/0/8700a475", "title": "Thermal-Safe Schedule Generation for System-on-Chip Testing", "doi": null, "abstractUrl": 
"/proceedings-article/vlsid/2016/8700a475/12OmNyNQSEu", "parentPublication": { "id": "proceedings/vlsid/2016/8700/0", "title": "2016 29th International Conference on VLSI Design and 2016 15th International Conference on Embedded Systems (VLSID)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2016/0836/0/07504762", "title": "Mechanism of inhibitory effect of cathodal current tongue stimulation on five basic tastes", "doi": null, "abstractUrl": "/proceedings-article/vr/2016/07504762/12OmNzsJ7xT", "parentPublication": { "id": "proceedings/vr/2016/0836/0", "title": "2016 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/cc/2018/02/07274707", "title": "Model-Based Thermal Anomaly Detection in Cloud Datacenters Using Thermal Imaging", "doi": null, "abstractUrl": "/journal/cc/2018/02/07274707/13rRUwhpBPG", "parentPublication": { "id": "trans/cc", "title": "IEEE Transactions on Cloud Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "letters/ca/2014/02/06544183", "title": "Architectural Thermal Energy Harvesting Opportunities for Sustainable Computing", "doi": null, "abstractUrl": "/journal/ca/2014/02/06544183/13rRUxjyXfC", "parentPublication": { "id": "letters/ca", "title": "IEEE Computer Architecture Letters", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2022/5365/0/536500a704", "title": "Dynamically Controlling Spatial Taste Location by Extraoral Galvanic Taste Stimulation", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a704/1J7W8wUdqTK", "parentPublication": { "id": "proceedings/ismar-adjunct/2022/5365/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"trans/tg/2023/05/10049717", "title": "Eating, Smelling, and Seeing: Investigating Multisensory Integration and (In)congruent Stimuli while Eating in VR", "doi": null, "abstractUrl": "/journal/tg/2023/05/10049717/1KYostbG9gY", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "08260944", "articleId": "13rRUxYIMV9", "__typename": "AdjacentArticleType" }, "next": { "fno": "08267106", "articleId": "13rRUwIF6dW", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNyUFfMP", "title": "July-September", "year": "2011", "issueNum": "03", "idPrefix": "th", "pubType": "journal", "volume": "4", "label": "July-September", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUxd2aZb", "doi": "10.1109/TOH.2011.32", "abstract": "This paper presents a virtual environment for training femoral palpation and needle insertion, the opening steps of many interventional radiology procedures. A novel augmented reality simulation called PalpSim has been developed that allows the trainees to feel a virtual patient using their own hands. The palpation step requires both force and tactile feedback. For the palpation haptics effect, two off-the-shelf force feedback devices have been linked together to provide a hybrid device that gives five degrees of force feedback. This is combined with a custom built hydraulic interface to provide a pulse like tactile effect. The needle interface is based on a modified PHANTOM Omni end effector that allows a real interventional radiology needle to be mounted and used during simulation. While using the virtual environment, the haptics hardware is masked from view using chroma-key techniques. The trainee sees a computer generated patient and needle, and interacts using their own hands. This simulation provides a high level of face validity and is one of the first medical simulation devices to integrate haptics with augmented reality.", "abstracts": [ { "abstractType": "Regular", "content": "This paper presents a virtual environment for training femoral palpation and needle insertion, the opening steps of many interventional radiology procedures. A novel augmented reality simulation called PalpSim has been developed that allows the trainees to feel a virtual patient using their own hands. The palpation step requires both force and tactile feedback. 
For the palpation haptics effect, two off-the-shelf force feedback devices have been linked together to provide a hybrid device that gives five degrees of force feedback. This is combined with a custom built hydraulic interface to provide a pulse like tactile effect. The needle interface is based on a modified PHANTOM Omni end effector that allows a real interventional radiology needle to be mounted and used during simulation. While using the virtual environment, the haptics hardware is masked from view using chroma-key techniques. The trainee sees a computer generated patient and needle, and interacts using their own hands. This simulation provides a high level of face validity and is one of the first medical simulation devices to integrate haptics with augmented reality.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "This paper presents a virtual environment for training femoral palpation and needle insertion, the opening steps of many interventional radiology procedures. A novel augmented reality simulation called PalpSim has been developed that allows the trainees to feel a virtual patient using their own hands. The palpation step requires both force and tactile feedback. For the palpation haptics effect, two off-the-shelf force feedback devices have been linked together to provide a hybrid device that gives five degrees of force feedback. This is combined with a custom built hydraulic interface to provide a pulse like tactile effect. The needle interface is based on a modified PHANTOM Omni end effector that allows a real interventional radiology needle to be mounted and used during simulation. While using the virtual environment, the haptics hardware is masked from view using chroma-key techniques. The trainee sees a computer generated patient and needle, and interacts using their own hands. 
This simulation provides a high level of face validity and is one of the first medical simulation devices to integrate haptics with augmented reality.", "title": "Integrating Haptics with Augmented Reality in a Femoral Palpation and Needle Insertion Training Simulation", "normalizedTitle": "Integrating Haptics with Augmented Reality in a Femoral Palpation and Needle Insertion Training Simulation", "fno": "tth2011030199", "hasPdf": true, "idPrefix": "th", "keywords": [ "Needles", "Solid Modeling", "Training", "Force Feedback", "Visualization", "Face", "Augmented Reality", "Haptics", "Force Feedback", "Tactile Display", "Medical Simulation", "Graphic Rendering", "Virtual Reality" ], "authors": [ { "givenName": "Timothy R.", "surname": "Coles", "fullName": "Timothy R. Coles", "affiliation": "Bangor University, Bangor and Istituto Italiano di Technologia, Genova", "__typename": "ArticleAuthorType" }, { "givenName": "Nigel W.", "surname": "John", "fullName": "Nigel W. John", "affiliation": "Bangor University, Bangor", "__typename": "ArticleAuthorType" }, { "givenName": "Derek", "surname": "Gould", "fullName": "Derek Gould", "affiliation": "Royal Liverpool University Hospital, Liverpool", "__typename": "ArticleAuthorType" }, { "givenName": "Darwin G.", "surname": "Caldwell", "fullName": "Darwin G. 
Caldwell", "affiliation": "Italiano di Technologia, Genova", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "03", "pubDate": "2011-07-01 00:00:00", "pubType": "trans", "pages": "199-209", "year": "2011", "issn": "1939-1412", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/achi/2009/3529/0/3529a193", "title": "Haptic Palpation for the Femoral Pulse in Virtual Interventional Radiology", "doi": null, "abstractUrl": "/proceedings-article/achi/2009/3529a193/12OmNAZOJU1", "parentPublication": { "id": "proceedings/achi/2009/3529/0", "title": "International Conference on Advances in Computer-Human Interaction", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bibe/2015/7983/0/07367697", "title": "A mathematical model of a novel automated medical device for needle insertions", "doi": null, "abstractUrl": "/proceedings-article/bibe/2015/07367697/12OmNBkP3zY", "parentPublication": { "id": "proceedings/bibe/2015/7983/0", "title": "2015 IEEE 15th International Conference on Bioinformatics and Bioengineering (BIBE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2015/1727/0/07223388", "title": "Preliminary evaluation of a virtual needle insertion training system", "doi": null, "abstractUrl": "/proceedings-article/vr/2015/07223388/12OmNCdk2Jm", "parentPublication": { "id": "proceedings/vr/2015/1727/0", "title": "2015 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/haptics/2002/1489/0/14890344", "title": "Simulated Interactive Needle Insertion", "doi": null, "abstractUrl": "/proceedings-article/haptics/2002/14890344/12OmNyKa5Y6", "parentPublication": { "id": "proceedings/haptics/2002/1489/0", "title": "Haptic Interfaces 
for Virtual Environment and Teleoperator Systems, International Symposium on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/haptics/2008/2005/0/04479920", "title": "Assessment of Vibrotactile Feedback in a Needle-Insertion Task using a Surgical Robot", "doi": null, "abstractUrl": "/proceedings-article/haptics/2008/04479920/12OmNyOq4T4", "parentPublication": { "id": "proceedings/haptics/2008/2005/0", "title": "IEEE Haptics Symposium 2008", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/th/2011/03/tth2011030188", "title": "Haptic Simulator for Prostate Brachytherapy with Simulated Needle and Probe Interaction", "doi": null, "abstractUrl": "/journal/th/2011/03/tth2011030188/13rRUILtJr3", "parentPublication": { "id": "trans/th", "title": "IEEE Transactions on Haptics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/th/2011/03/tth2011030153", "title": "Haptics in medicine and clinical skill acquisition [special section intro.]", "doi": null, "abstractUrl": "/journal/th/2011/03/tth2011030153/13rRUxOve9T", "parentPublication": { "id": "trans/th", "title": "IEEE Transactions on Haptics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2012/04/ttg2012040617", "title": "Haptic Palpation for Medical Simulation in Virtual Environments", "doi": null, "abstractUrl": "/journal/tg/2012/04/ttg2012040617/13rRUyfKIHI", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/th/2011/03/tth2011030155", "title": "Perception and Action in Teleoperated Needle Insertion", "doi": null, "abstractUrl": "/journal/th/2011/03/tth2011030155/13rRUyoPSPf", "parentPublication": { "id": "trans/th", "title": "IEEE Transactions on Haptics", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/06/08667734", "title": "Comparison of Projective Augmented Reality Concepts to Support Medical Needle Insertion", "doi": null, "abstractUrl": "/journal/tg/2019/06/08667734/18q6mxYAAik", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "tth2011030188", "articleId": "13rRUILtJr3", "__typename": "AdjacentArticleType" }, "next": { "fno": "tth2011030210", "articleId": "13rRUwcS1D6", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNqIhFTk", "title": "September/October", "year": "2005", "issueNum": "05", "idPrefix": "cg", "pubType": "magazine", "volume": "25", "label": "September/October", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUyYBlja", "doi": "10.1109/MCG.2005.94", "abstract": "Our interactive, 3D stereo display helps guide clinicians during endovascular procedures, such as intraoperative needle insertion and stent placement relative to the target organs. We describe a new method of guiding endovascular procedures using interactive 3D stereo visualizations. We use as an example the transjugular intrahepatic portosystemic shunt (TIPS) procedure. Our goal is to increase the speed and safety of endovascular procedures by providing the interventionalist with 3D information as the operation proceeds. Our goal is to provide 3D image guidance of the TIPS procedure so that the interventionalist can readily adjust the needle position and trajectory to reach the target on the first pass. We propose a 3D stereo display of the interventionalist's needle and target vessels. We also add interactivity via head tracking so that the interventionalist gains a better 3D sense of the relationship between the target vessels and the needle during needle advancement.", "abstracts": [ { "abstractType": "Regular", "content": "Our interactive, 3D stereo display helps guide clinicians during endovascular procedures, such as intraoperative needle insertion and stent placement relative to the target organs. We describe a new method of guiding endovascular procedures using interactive 3D stereo visualizations. We use as an example the transjugular intrahepatic portosystemic shunt (TIPS) procedure. Our goal is to increase the speed and safety of endovascular procedures by providing the interventionalist with 3D information as the operation proceeds. 
Our goal is to provide 3D image guidance of the TIPS procedure so that the interventionalist can readily adjust the needle position and trajectory to reach the target on the first pass. We propose a 3D stereo display of the interventionalist's needle and target vessels. We also add interactivity via head tracking so that the interventionalist gains a better 3D sense of the relationship between the target vessels and the needle during needle advancement.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Our interactive, 3D stereo display helps guide clinicians during endovascular procedures, such as intraoperative needle insertion and stent placement relative to the target organs. We describe a new method of guiding endovascular procedures using interactive 3D stereo visualizations. We use as an example the transjugular intrahepatic portosystemic shunt (TIPS) procedure. Our goal is to increase the speed and safety of endovascular procedures by providing the interventionalist with 3D information as the operation proceeds. Our goal is to provide 3D image guidance of the TIPS procedure so that the interventionalist can readily adjust the needle position and trajectory to reach the target on the first pass. We propose a 3D stereo display of the interventionalist's needle and target vessels. 
We also add interactivity via head tracking so that the interventionalist gains a better 3D sense of the relationship between the target vessels and the needle during needle advancement.", "title": "3D stereo interactive medical visualization", "normalizedTitle": "3D stereo interactive medical visualization", "fno": "01510543", "hasPdf": true, "idPrefix": "cg", "keywords": [ "Visualization", "Needles", "Veins", "Portals", "Liver", "Image Segmentation", "Biomedical Imaging", "Catheters", "Medical Treatment", "Minimally Invasive Surgery", "Medical Imaging", "Image Guided Surgery", "Minimally Invasive Surgery", "Augmented Reality", "Quaternion", "3 D And 2 D Registration" ], "authors": [], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "05", "pubDate": "2005-09-01 00:00:00", "pubType": "mags", "pages": "67,68,69,70,71", "year": "2005", "issn": "0272-1716", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/cadgraphics/2011/4497/0/4497a443", "title": "An Interactive 3D Preoperative Planning and Training System for Minimally Invasive Vascular Surgery", "doi": null, "abstractUrl": "/proceedings-article/cadgraphics/2011/4497a443/12OmNAfy7Id", "parentPublication": { "id": "proceedings/cadgraphics/2011/4497/0", "title": "Computer-Aided Design and Computer Graphics, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmtma/2015/7143/0/7143b022", "title": "Structure Optimization of a Bi-planar Parallel Mechanism for Spine Surgeries", "doi": null, "abstractUrl": "/proceedings-article/icmtma/2015/7143b022/12OmNAoDibT", "parentPublication": { "id": "proceedings/icmtma/2015/7143/0", "title": "2015 Seventh International Conference on Measuring Technology and Mechatronics Automation (ICMTMA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" 
}, { "id": "proceedings/perser/2004/8577/0/01356804", "title": "A distributed framework for relaying stereo vision for telerobotics", "doi": null, "abstractUrl": "/proceedings-article/perser/2004/01356804/12OmNCmpcIh", "parentPublication": { "id": "proceedings/perser/2004/8577/0", "title": "Proceedings. The IEEE/ACS International Conference on Pervasive Services", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ecbs/2009/3602/0/3602a287", "title": "Application of Structure-from-Motion 3D Reconstruction in Computer-Guided Surgical Training", "doi": null, "abstractUrl": "/proceedings-article/ecbs/2009/3602a287/12OmNqAU6Dk", "parentPublication": { "id": "proceedings/ecbs/2009/3602/0", "title": "Engineering of Computer-Based Systems, IEEE International Conference on the", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/haptic/2006/0226/0/01627076", "title": "A Study on Haptic Rendering in a Simulated Surgical Training Environment", "doi": null, "abstractUrl": "/proceedings-article/haptic/2006/01627076/12OmNzRqdJj", "parentPublication": { "id": "proceedings/haptic/2006/0226/0", "title": "Haptic Interfaces for Virtual Environment and Teleoperator Systems, International Symposium on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/crv/2006/2542/0/25420065", "title": "Extracting and tracking Colon?s \"Pattern\" from Colonoscopic Images", "doi": null, "abstractUrl": "/proceedings-article/crv/2006/25420065/12OmNzUPpzp", "parentPublication": { "id": "proceedings/crv/2006/2542/0", "title": "The 3rd Canadian Conference on Computer and Robot Vision (CRV'06)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icrss/2022/6403/0/640300a006", "title": "Design and Technical Research of the End-executive Agency of Medical Robots", "doi": null, "abstractUrl": 
"/proceedings-article/icrss/2022/640300a006/1M2Ml2828qA", "parentPublication": { "id": "proceedings/icrss/2022/6403/0", "title": "2022 International Conference on Computing, Robotics and System Sciences (ICRSS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/springsim/2019/8388/0/08732876", "title": "Enhancing A Laparoscopy Training System with Augmented Reality Visualization", "doi": null, "abstractUrl": "/proceedings-article/springsim/2019/08732876/1aIRUNRF5ao", "parentPublication": { "id": "proceedings/springsim/2019/8388/0", "title": "2019 Spring Simulation Conference (SpringSim)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/compsac/2020/7303/0/730300a517", "title": "A Systematic Literature Review of Research in the Surgical Field of Medical Robotics", "doi": null, "abstractUrl": "/proceedings-article/compsac/2020/730300a517/1nkDlZenrSU", "parentPublication": { "id": "proceedings/compsac/2020/7303/0", "title": "2020 IEEE 44th Annual Computers, Software, and Applications Conference (COMPSAC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bibe/2021/4261/0/09635512", "title": "Magnetic Model Calibration for Tetherless Surgical Needle Manipulation using Zernike Polynomial Fitting", "doi": null, "abstractUrl": "/proceedings-article/bibe/2021/09635512/1zmvmdco7ao", "parentPublication": { "id": "proceedings/bibe/2021/4261/0", "title": "2021 IEEE 21st International Conference on Bioinformatics and Bioengineering (BIBE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": null, "next": { "fno": "01510542", "articleId": "1htyf6Xehag", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNyUFfMP", "title": "July-September", "year": "2011", "issueNum": "03", "idPrefix": "th", "pubType": "journal", "volume": "4", "label": "July-September", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUyoPSPf", "doi": "10.1109/TOH.2011.30", "abstract": "We studied the effect of delay on perception and action in contact with a force field that emulates elastic soft tissue with a rigid nonlinear boundary. Such a field is similar to forces exerted on a needle during teleoperated needle insertion. We found that delay causes motor underestimation of the stiffness of this nonlinear soft tissue, without perceptual change. These experimental results are supported by simulation of a simplified mechanical model of the arm and neural controller, and a model for perception of stiffness, which is based on regression in the force-position space. In addition, we show that changing the gain of the teleoperation channel cancels the motor effect of delay without adding perceptual distortion. We conclude that it is possible to achieve perceptual and motor transparency in virtual one-dimensional remote needle insertion task.", "abstracts": [ { "abstractType": "Regular", "content": "We studied the effect of delay on perception and action in contact with a force field that emulates elastic soft tissue with a rigid nonlinear boundary. Such a field is similar to forces exerted on a needle during teleoperated needle insertion. We found that delay causes motor underestimation of the stiffness of this nonlinear soft tissue, without perceptual change. These experimental results are supported by simulation of a simplified mechanical model of the arm and neural controller, and a model for perception of stiffness, which is based on regression in the force-position space. 
In addition, we show that changing the gain of the teleoperation channel cancels the motor effect of delay without adding perceptual distortion. We conclude that it is possible to achieve perceptual and motor transparency in virtual one-dimensional remote needle insertion task.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We studied the effect of delay on perception and action in contact with a force field that emulates elastic soft tissue with a rigid nonlinear boundary. Such a field is similar to forces exerted on a needle during teleoperated needle insertion. We found that delay causes motor underestimation of the stiffness of this nonlinear soft tissue, without perceptual change. These experimental results are supported by simulation of a simplified mechanical model of the arm and neural controller, and a model for perception of stiffness, which is based on regression in the force-position space. In addition, we show that changing the gain of the teleoperation channel cancels the motor effect of delay without adding perceptual distortion. 
We conclude that it is possible to achieve perceptual and motor transparency in virtual one-dimensional remote needle insertion task.", "title": "Perception and Action in Teleoperated Needle Insertion", "normalizedTitle": "Perception and Action in Teleoperated Needle Insertion", "fno": "tth2011030155", "hasPdf": true, "idPrefix": "th", "keywords": [ "Force", "Delay", "Needles", "Training", "Humans", "Surgery", "Robots", "Transparency", "Medical Simulation", "Perception And Psychophysics", "Telemanipulation" ], "authors": [ { "givenName": "Ilana", "surname": "Nisky", "fullName": "Ilana Nisky", "affiliation": "Ben-Gurion University of the Negev, Beer-Sheva", "__typename": "ArticleAuthorType" }, { "givenName": "Assaf", "surname": "Pressman", "fullName": "Assaf Pressman", "affiliation": "Ben-Gurion University of the Negev, Beer-Sheva", "__typename": "ArticleAuthorType" }, { "givenName": "Carla M.", "surname": "Pugh", "fullName": "Carla M. Pugh", "affiliation": "Northwestern University, Chicago", "__typename": "ArticleAuthorType" }, { "givenName": "Ferdinando A.", "surname": "Mussa-Ivaldi", "fullName": "Ferdinando A. 
Mussa-Ivaldi", "affiliation": "Rehabilitation Institute of Chicago, Chicago", "__typename": "ArticleAuthorType" }, { "givenName": "Amir", "surname": "Karniel", "fullName": "Amir Karniel", "affiliation": "Ben-Gurion University of the Negev, Beer-Sheva", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "03", "pubDate": "2011-07-01 00:00:00", "pubType": "trans", "pages": "155-166", "year": "2011", "issn": "1939-1412", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/vr/2015/1727/0/07223388", "title": "Preliminary evaluation of a virtual needle insertion training system", "doi": null, "abstractUrl": "/proceedings-article/vr/2015/07223388/12OmNCdk2Jm", "parentPublication": { "id": "proceedings/vr/2015/1727/0", "title": "2015 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bibe/2007/1509/0/04375566", "title": "A Robotic System for Real-time Tumor Manipulation During Image guided Breast Biopsy", "doi": null, "abstractUrl": "/proceedings-article/bibe/2007/04375566/12OmNxdDFME", "parentPublication": { "id": "proceedings/bibe/2007/1509/0", "title": "7th IEEE International Conference on Bioinformatics and Bioengineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/haptics/2002/1489/0/14890344", "title": "Simulated Interactive Needle Insertion", "doi": null, "abstractUrl": "/proceedings-article/haptics/2002/14890344/12OmNyKa5Y6", "parentPublication": { "id": "proceedings/haptics/2002/1489/0", "title": "Haptic Interfaces for Virtual Environment and Teleoperator Systems, International Symposium on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/haptics/2008/2005/0/04479920", "title": "Assessment of Vibrotactile 
Feedback in a Needle-Insertion Task using a Surgical Robot", "doi": null, "abstractUrl": "/proceedings-article/haptics/2008/04479920/12OmNyOq4T4", "parentPublication": { "id": "proceedings/haptics/2008/2005/0", "title": "IEEE Haptics Symposium 2008", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/haptics/2010/6821/0/05444612", "title": "Haptic system design for MRI-guided needle based prostate brachytherapy", "doi": null, "abstractUrl": "/proceedings-article/haptics/2010/05444612/12OmNyQYttN", "parentPublication": { "id": "proceedings/haptics/2010/6821/0", "title": "2010 IEEE Haptics Symposium (Formerly known as Symposium on Haptic Interfaces for Virtual Environment and Teleoperator Systems)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/th/2011/03/tth2011030188", "title": "Haptic Simulator for Prostate Brachytherapy with Simulated Needle and Probe Interaction", "doi": null, "abstractUrl": "/journal/th/2011/03/tth2011030188/13rRUILtJr3", "parentPublication": { "id": "trans/th", "title": "IEEE Transactions on Haptics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/th/2017/02/07589098", "title": "The Role of Direct and Visual Force Feedback in Suturing Using a 7-DOF Dual-Arm Teleoperated System", "doi": null, "abstractUrl": "/journal/th/2017/02/07589098/13rRUNvgyWx", "parentPublication": { "id": "trans/th", "title": "IEEE Transactions on Haptics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/th/2011/03/tth2011030199", "title": "Integrating Haptics with Augmented Reality in a Femoral Palpation and Needle Insertion Training Simulation", "doi": null, "abstractUrl": "/journal/th/2011/03/tth2011030199/13rRUxd2aZb", "parentPublication": { "id": "trans/th", "title": "IEEE Transactions on Haptics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { 
"id": "proceedings/iceitsa/2021/1300/0/130000a166", "title": "Prediction Model of Target Movement in Soft Tissue Under Needle Puncture", "doi": null, "abstractUrl": "/proceedings-article/iceitsa/2021/130000a166/1B2HpcUZC6s", "parentPublication": { "id": "proceedings/iceitsa/2021/1300/0", "title": "2021 International Conference on Electronic Information Technology and Smart Agriculture (ICEITSA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bibm/2022/6819/0/09995143", "title": "OCT-guided Robotic Subretinal Needle Injections: A Deep Learning-Based Registration Approach", "doi": null, "abstractUrl": "/proceedings-article/bibm/2022/09995143/1JC3h3JCZ8s", "parentPublication": { "id": "proceedings/bibm/2022/6819/0", "title": "2022 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "tth2011030153", "articleId": "13rRUxOve9T", "__typename": "AdjacentArticleType" }, "next": { "fno": "tth2011030167", "articleId": "13rRUwcS1D5", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNAHW0Jc", "title": "June", "year": "2019", "issueNum": "06", "idPrefix": "tg", "pubType": "journal", "volume": "25", "label": "June", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "18q6mxYAAik", "doi": "10.1109/TVCG.2019.2903942", "abstract": "Augmented reality (AR) is a promising tool to improve instrument navigation in needle-based interventions. Limited research has been conducted regarding suitable navigation visualizations. In this work, three navigation concepts based on existing approaches were compared in a user study using a projective AR setup. Each concept was implemented with three different scales for accuracy-to-color mapping and two methods of navigation indicator scaling. Participants were asked to perform simulated needle insertion tasks with each of the resulting 18 prototypes. Insertion angle and insertion depth accuracies were measured and analyzed, as well as task completion time and participants’ subjectively perceived task difficulty. Results show a clear ranking of visualization concepts across variables. Less consistent results were obtained for the color and indicator scaling factors. Results suggest that logarithmic indicator scaling achieved better accuracy, but participants perceived it to be more difficult than linear scaling. With specific results for angle and depth accuracy, our study contributes to the future composition of improved navigation support and systems for precise needle insertion or similar applications.", "abstracts": [ { "abstractType": "Regular", "content": "Augmented reality (AR) is a promising tool to improve instrument navigation in needle-based interventions. Limited research has been conducted regarding suitable navigation visualizations. In this work, three navigation concepts based on existing approaches were compared in a user study using a projective AR setup. 
Each concept was implemented with three different scales for accuracy-to-color mapping and two methods of navigation indicator scaling. Participants were asked to perform simulated needle insertion tasks with each of the resulting 18 prototypes. Insertion angle and insertion depth accuracies were measured and analyzed, as well as task completion time and participants’ subjectively perceived task difficulty. Results show a clear ranking of visualization concepts across variables. Less consistent results were obtained for the color and indicator scaling factors. Results suggest that logarithmic indicator scaling achieved better accuracy, but participants perceived it to be more difficult than linear scaling. With specific results for angle and depth accuracy, our study contributes to the future composition of improved navigation support and systems for precise needle insertion or similar applications.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Augmented reality (AR) is a promising tool to improve instrument navigation in needle-based interventions. Limited research has been conducted regarding suitable navigation visualizations. In this work, three navigation concepts based on existing approaches were compared in a user study using a projective AR setup. Each concept was implemented with three different scales for accuracy-to-color mapping and two methods of navigation indicator scaling. Participants were asked to perform simulated needle insertion tasks with each of the resulting 18 prototypes. Insertion angle and insertion depth accuracies were measured and analyzed, as well as task completion time and participants’ subjectively perceived task difficulty. Results show a clear ranking of visualization concepts across variables. Less consistent results were obtained for the color and indicator scaling factors. 
Results suggest that logarithmic indicator scaling achieved better accuracy, but participants perceived it to be more difficult than linear scaling. With specific results for angle and depth accuracy, our study contributes to the future composition of improved navigation support and systems for precise needle insertion or similar applications.", "title": "Comparison of Projective Augmented Reality Concepts to Support Medical Needle Insertion", "normalizedTitle": "Comparison of Projective Augmented Reality Concepts to Support Medical Needle Insertion", "fno": "08667734", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Needles", "Visualization", "Navigation", "Instruments", "Task Analysis", "Monitoring", "Computed Tomography", "Visualization", "Augmented Reality", "Evaluation", "Medical Navigation Systems", "Instrument Guidance", "Needle Placement" ], "authors": [ { "givenName": "Florian", "surname": "Heinrich", "fullName": "Florian Heinrich", "affiliation": "University of Magdeburg, Magdeburg, Germany", "__typename": "ArticleAuthorType" }, { "givenName": "Fabian", "surname": "Joeres", "fullName": "Fabian Joeres", "affiliation": "University of Magdeburg, Magdeburg, Germany", "__typename": "ArticleAuthorType" }, { "givenName": "Kai", "surname": "Lawonn", "fullName": "Kai Lawonn", "affiliation": "University of Koblenz-Landau, Koblenz, Germany", "__typename": "ArticleAuthorType" }, { "givenName": "Christian", "surname": "Hansen", "fullName": "Christian Hansen", "affiliation": "University of Magdeburg, Magdeburg, Germany", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "06", "pubDate": "2019-06-01 00:00:00", "pubType": "trans", "pages": "2157-2167", "year": "2019", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/vr/2017/6647/0/07892259", "title": "Study of interaction 
fidelity for two viewpoint changing techniques in a virtual biopsy trainer", "doi": null, "abstractUrl": "/proceedings-article/vr/2017/07892259/12OmNAZOJZ9", "parentPublication": { "id": "proceedings/vr/2017/6647/0", "title": "2017 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2015/1727/0/07223388", "title": "Preliminary evaluation of a virtual needle insertion training system", "doi": null, "abstractUrl": "/proceedings-article/vr/2015/07223388/12OmNCdk2Jm", "parentPublication": { "id": "proceedings/vr/2015/1727/0", "title": "2015 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/haptics/2002/1489/0/14890344", "title": "Simulated Interactive Needle Insertion", "doi": null, "abstractUrl": "/proceedings-article/haptics/2002/14890344/12OmNyKa5Y6", "parentPublication": { "id": "proceedings/haptics/2002/1489/0", "title": "Haptic Interfaces for Virtual Environment and Teleoperator Systems, International Symposium on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/haptics/2008/2005/0/04479920", "title": "Assessment of Vibrotactile Feedback in a Needle-Insertion Task using a Surgical Robot", "doi": null, "abstractUrl": "/proceedings-article/haptics/2008/04479920/12OmNyOq4T4", "parentPublication": { "id": "proceedings/haptics/2008/2005/0", "title": "IEEE Haptics Symposium 2008", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/th/2011/03/tth2011030188", "title": "Haptic Simulator for Prostate Brachytherapy with Simulated Needle and Probe Interaction", "doi": null, "abstractUrl": "/journal/th/2011/03/tth2011030188/13rRUILtJr3", "parentPublication": { "id": "trans/th", "title": "IEEE Transactions on Haptics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"trans/th/2011/03/tth2011030199", "title": "Integrating Haptics with Augmented Reality in a Femoral Palpation and Needle Insertion Training Simulation", "doi": null, "abstractUrl": "/journal/th/2011/03/tth2011030199/13rRUxd2aZb", "parentPublication": { "id": "trans/th", "title": "IEEE Transactions on Haptics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/th/2011/03/tth2011030155", "title": "Perception and Action in Teleoperated Needle Insertion", "doi": null, "abstractUrl": "/journal/th/2011/03/tth2011030155/13rRUyoPSPf", "parentPublication": { "id": "trans/th", "title": "IEEE Transactions on Haptics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2022/9617/0/961700a260", "title": "2D versus 3D: A Comparison of Needle Navigation Concepts between Augmented Reality Display Devices", "doi": null, "abstractUrl": "/proceedings-article/vr/2022/961700a260/1CJcmBBWb1S", "parentPublication": { "id": "proceedings/vr/2022/9617/0", "title": "2022 IEEE on Conference Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2021/03/08948290", "title": "Surgical Navigation System for Low-Dose-Rate Brachytherapy Based on Mixed Reality", "doi": null, "abstractUrl": "/magazine/cg/2021/03/08948290/1geNLto4KGs", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/12/09211732", "title": "Comparison of Augmented Reality Display Techniques to Support Medical Needle Insertion", "doi": null, "abstractUrl": "/journal/tg/2020/12/09211732/1nB9X7YX7eU", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { 
"previous": { "fno": "08703194", "articleId": "19Er7j5Ad7a", "__typename": "AdjacentArticleType" }, "next": { "fno": "08667661", "articleId": "18q6nouFfmo", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNrFBPWx", "title": "September", "year": "2011", "issueNum": "09", "idPrefix": "tg", "pubType": "journal", "volume": "17", "label": "September", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUwwaKt4", "doi": "10.1109/TVCG.2011.41", "abstract": "In visual perception, change blindness describes the phenomenon that persons viewing a visual scene may apparently fail to detect significant changes in that scene. These phenomena have been observed in both computer-generated imagery and real-world scenes. Several studies have demonstrated that change blindness effects occur primarily during visual disruptions such as blinks or saccadic eye movements. However, until now the influence of stereoscopic vision on change blindness has not been studied thoroughly in the context of visual perception research. In this paper, we introduce change blindness techniques for stereoscopic virtual reality (VR) systems, providing the ability to substantially modify a virtual scene in a manner that is difficult for observers to perceive. We evaluate techniques for semiimmersive VR systems, i.e., a passive and active stereoscopic projection system as well as an immersive VR system, i.e., a head-mounted display, and compare the results to those of monoscopic viewing conditions. For stereoscopic viewing conditions, we found that change blindness phenomena occur with the same magnitude as in monoscopic viewing conditions. Furthermore, we have evaluated the potential of the presented techniques for allowing abrupt, and yet significant, changes of a stereoscopically displayed virtual reality environment.", "abstracts": [ { "abstractType": "Regular", "content": "In visual perception, change blindness describes the phenomenon that persons viewing a visual scene may apparently fail to detect significant changes in that scene. 
These phenomena have been observed in both computer-generated imagery and real-world scenes. Several studies have demonstrated that change blindness effects occur primarily during visual disruptions such as blinks or saccadic eye movements. However, until now the influence of stereoscopic vision on change blindness has not been studied thoroughly in the context of visual perception research. In this paper, we introduce change blindness techniques for stereoscopic virtual reality (VR) systems, providing the ability to substantially modify a virtual scene in a manner that is difficult for observers to perceive. We evaluate techniques for semiimmersive VR systems, i.e., a passive and active stereoscopic projection system as well as an immersive VR system, i.e., a head-mounted display, and compare the results to those of monoscopic viewing conditions. For stereoscopic viewing conditions, we found that change blindness phenomena occur with the same magnitude as in monoscopic viewing conditions. Furthermore, we have evaluated the potential of the presented techniques for allowing abrupt, and yet significant, changes of a stereoscopically displayed virtual reality environment.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In visual perception, change blindness describes the phenomenon that persons viewing a visual scene may apparently fail to detect significant changes in that scene. These phenomena have been observed in both computer-generated imagery and real-world scenes. Several studies have demonstrated that change blindness effects occur primarily during visual disruptions such as blinks or saccadic eye movements. However, until now the influence of stereoscopic vision on change blindness has not been studied thoroughly in the context of visual perception research. 
In this paper, we introduce change blindness techniques for stereoscopic virtual reality (VR) systems, providing the ability to substantially modify a virtual scene in a manner that is difficult for observers to perceive. We evaluate techniques for semiimmersive VR systems, i.e., a passive and active stereoscopic projection system as well as an immersive VR system, i.e., a head-mounted display, and compare the results to those of monoscopic viewing conditions. For stereoscopic viewing conditions, we found that change blindness phenomena occur with the same magnitude as in monoscopic viewing conditions. Furthermore, we have evaluated the potential of the presented techniques for allowing abrupt, and yet significant, changes of a stereoscopically displayed virtual reality environment.", "title": "Change Blindness Phenomena for Virtual Reality Display Systems", "normalizedTitle": "Change Blindness Phenomena for Virtual Reality Display Systems", "fno": "ttg2011091223", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Change Blindness", "Stereoscopic Display", "Virtual Reality" ], "authors": [ { "givenName": "Frank", "surname": "Steinicke", "fullName": "Frank Steinicke", "affiliation": "University of Münster, Münster", "__typename": "ArticleAuthorType" }, { "givenName": "Gerd", "surname": "Bruder", "fullName": "Gerd Bruder", "affiliation": "University of Münster, Münster", "__typename": "ArticleAuthorType" }, { "givenName": "Klaus", "surname": "Hinrichs", "fullName": "Klaus Hinrichs", "affiliation": "University of Münster, Münster", "__typename": "ArticleAuthorType" }, { "givenName": "Pete", "surname": "Willemsen", "fullName": "Pete Willemsen", "affiliation": "University of Minnesota Duluth, Duluth", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "09", "pubDate": "2011-09-01 00:00:00", "pubType": "trans", "pages": "1223-1233", "year": "2011", "issn": "1077-2626", 
"isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/sitis/2015/9721/0/9721a089", "title": "Exploiting Change Blindness for Image Compression", "doi": null, "abstractUrl": "/proceedings-article/sitis/2015/9721a089/12OmNAq3hFn", "parentPublication": { "id": "proceedings/sitis/2015/9721/0", "title": "2015 11th International Conference on Signal-Image Technology & Internet-Based Systems (SITIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2010/6237/0/05444790", "title": "Change blindness phenomena for stereoscopic projection systems", "doi": null, "abstractUrl": "/proceedings-article/vr/2010/05444790/12OmNBTs7wG", "parentPublication": { "id": "proceedings/vr/2010/6237/0", "title": "2010 IEEE Virtual Reality Conference (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2010/6237/0/05444752", "title": "Exploiting change blindness to expand walkable space in a virtual environment", "doi": null, "abstractUrl": "/proceedings-article/vr/2010/05444752/12OmNCwlahs", "parentPublication": { "id": "proceedings/vr/2010/6237/0", "title": "2010 IEEE Virtual Reality Conference (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2011/0039/0/05759455", "title": "Leveraging change blindness for redirection in virtual environments", "doi": null, "abstractUrl": "/proceedings-article/vr/2011/05759455/12OmNwFid4R", "parentPublication": { "id": "proceedings/vr/2011/0039/0", "title": "2011 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cgi/1999/0185/0/01850080", "title": "A Technique for Precise Depth Representation in Stereoscopic Display", "doi": null, "abstractUrl": "/proceedings-article/cgi/1999/01850080/12OmNx3q6XQ", "parentPublication": { "id": 
"proceedings/cgi/1999/0185/0", "title": "Computer Graphics International Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/eisic/2011/4406/0/4406a142", "title": "Change Blindness in Intelligence: Effects of Attention Guidance by Instructions", "doi": null, "abstractUrl": "/proceedings-article/eisic/2011/4406a142/12OmNyqiaTg", "parentPublication": { "id": "proceedings/eisic/2011/4406/0", "title": "European Intelligence and Security Informatics Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2013/11/ttg2013111808", "title": "Change Blindness Images", "doi": null, "abstractUrl": "/journal/tg/2013/11/ttg2013111808/13rRUwh80uz", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2022/9617/0/961700a205", "title": "Body Warping Versus Change Blindness Remapping: A Comparison of Two Approaches to Repurposing Haptic Proxies for Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/vr/2022/961700a205/1CJbOUWTweQ", "parentPublication": { "id": "proceedings/vr/2022/9617/0", "title": "2022 IEEE on Conference Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2023/05/10049723", "title": "A Study of Change Blindness in Immersive Environments", "doi": null, "abstractUrl": "/journal/tg/2023/05/10049723/1KYovPd66oo", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wevr/2019/4050/0/08809587", "title": "Leveraging Change Blindness for Haptic Remapping in Virtual Environments", "doi": null, "abstractUrl": 
"/proceedings-article/wevr/2019/08809587/1cI62p6yHYs", "parentPublication": { "id": "proceedings/wevr/2019/4050/0", "title": "2019 IEEE 5th Workshop on Everyday Virtual Reality (WEVR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "ttg2011091209", "articleId": "13rRUB7a10Y", "__typename": "AdjacentArticleType" }, "next": { "fno": "ttg2011091193", "articleId": "13rRUB6Sq0x", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNqyUUIb", "title": "October", "year": "2017", "issueNum": "10", "idPrefix": "co", "pubType": "magazine", "volume": "50", "label": "October", "downloadables": { "hasCover": true, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUILc8aJ", "doi": "10.1109/MC.2017.3641627", "abstract": "Electrical muscle stimulation (EMS) has been used since the 1960s in rehabilitative medicine to regenerate lost motor functions, but in recent years researchers have started to explore new EMS applications including guided training, muscle-propelled force feedback for more immersive virtual experiences, and novel forms of information access. The authors analyze the interactive potential of EMS and compare it to more traditional mechanical actuation. The web extra at https://youtu.be/YA0mv9X9Ncw demonstrates Muscle-plotter, an interactive system that combines EMS with a motion-tracking digital pen.", "abstracts": [ { "abstractType": "Regular", "content": "Electrical muscle stimulation (EMS) has been used since the 1960s in rehabilitative medicine to regenerate lost motor functions, but in recent years researchers have started to explore new EMS applications including guided training, muscle-propelled force feedback for more immersive virtual experiences, and novel forms of information access. The authors analyze the interactive potential of EMS and compare it to more traditional mechanical actuation. 
The web extra at https://youtu.be/YA0mv9X9Ncw demonstrates Muscle-plotter, an interactive system that combines EMS with a motion-tracking digital pen.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Electrical muscle stimulation (EMS) has been used since the 1960s in rehabilitative medicine to regenerate lost motor functions, but in recent years researchers have started to explore new EMS applications including guided training, muscle-propelled force feedback for more immersive virtual experiences, and novel forms of information access. The authors analyze the interactive potential of EMS and compare it to more traditional mechanical actuation. The web extra at https://youtu.be/YA0mv9X9Ncw demonstrates Muscle-plotter, an interactive system that combines EMS with a motion-tracking digital pen.", "title": "Interactive Systems Based on Electrical Muscle Stimulation", "normalizedTitle": "Interactive Systems Based on Electrical Muscle Stimulation", "fno": "mco2017100028", "hasPdf": true, "idPrefix": "co", "keywords": [ "Medical Services", "Muscles", "Skin", "Electrodes", "Environmental Management", "Accelerometers", "Interactive Systems", "Epidermis", "Patient Rehabilitation", "On Skin Interfaces", "Electrical Muscle Stimulation", "EMS", "Functional Electrical Stimulation", "FES", "Human Computer Interaction", "HCI", "Mobile", "Wearables", "I O Devices", "Healthcare", "Pervasive Computing", "Hardware", "Teleoperation", "Pose IO", "Implanted Devices", "Interaction", "Actuation", "Force Feedback", "Proprioception", "Proprioceptive", "Muscle Plotter" ], "authors": [ { "givenName": "Pedro", "surname": "Lopes", "fullName": "Pedro Lopes", "affiliation": "Hasso Plattner Institute", "__typename": "ArticleAuthorType" }, { "givenName": "Patrick", "surname": "Baudisch", "fullName": "Patrick Baudisch", "affiliation": "Hasso Plattner Institute", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, 
"isOpenAccess": false, "issueNum": "10", "pubDate": "2017-10-01 00:00:00", "pubType": "mags", "pages": "28-35", "year": "2017", "issn": "0018-9162", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/vbc/1990/2039/0/00109305", "title": "A physical model of facial tissue and muscle articulation", "doi": null, "abstractUrl": "/proceedings-article/vbc/1990/00109305/12OmNwEJ0PY", "parentPublication": { "id": "proceedings/vbc/1990/2039/0", "title": "[1990] Proceedings of the First Conference on Visualization in Biomedical Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/isms/2012/4668/0/4668a177", "title": "Development of Hamstrings Muscle Model for Paraplegic with Functional Electrical Stimulation", "doi": null, "abstractUrl": "/proceedings-article/isms/2012/4668a177/12OmNxFaLdS", "parentPublication": { "id": "proceedings/isms/2012/4668/0", "title": "Intelligent Systems, Modelling and Simulation, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/compeng/2010/3974/0/3974a132", "title": "Development of Dynamic Muscle Model with Functional Electrical Stimulation", "doi": null, "abstractUrl": "/proceedings-article/compeng/2010/3974a132/12OmNyFU7bN", "parentPublication": { "id": "proceedings/compeng/2010/3974/0", "title": "Engineering. 
Complexity in", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmew/2016/1552/0/07574698", "title": "A proposal of virtual food texture by electric muscle stimulation", "doi": null, "abstractUrl": "/proceedings-article/icmew/2016/07574698/12OmNzdoMhX", "parentPublication": { "id": "proceedings/icmew/2016/1552/0", "title": "2016 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/pc/2008/02/mpc2008020062", "title": "Rapid Prototyping for Functional Electrical Stimulation Control", "doi": null, "abstractUrl": "/magazine/pc/2008/02/mpc2008020062/13rRUwjXZPz", "parentPublication": { "id": "mags/pc", "title": "IEEE Pervasive Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/pc/2017/03/mpc2017030012", "title": "Immense Power in a Tiny Package: Wearables Based on Electrical Muscle Stimulation", "doi": null, "abstractUrl": "/magazine/pc/2017/03/mpc2017030012/13rRUxAAT4D", "parentPublication": { "id": "mags/pc", "title": "IEEE Pervasive Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iscer/2022/8478/0/847800a026", "title": "Multichannel asynchronous electrical stimulation device relieves muscle fatigue caused by stimulation therapy", "doi": null, "abstractUrl": "/proceedings-article/iscer/2022/847800a026/1HbbCwGuMHC", "parentPublication": { "id": "proceedings/iscer/2022/8478/0", "title": "2022 International Symposium on Control Engineering and Robotics (ISCER)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08798102", "title": "wavEMS: Improving Signal Variation Freedom of Electrical Muscle Stimulation", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08798102/1cJ0RCt09mU", "parentPublication": { "id": 
"proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icaice/2020/9146/0/914600a054", "title": "Control Strategy for Upper Limb Rehabilitation Robot Based on Muscle Strength Estimation", "doi": null, "abstractUrl": "/proceedings-article/icaice/2020/914600a054/1rCgcpdOyhW", "parentPublication": { "id": "proceedings/icaice/2020/9146/0", "title": "2020 International Conference on Artificial Intelligence and Computer Engineering (ICAICE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismii/2021/1290/0/129000a070", "title": "Research on Promotion of Lower Limb Movement Function Recovery after Stroke by Using Lower Limb Rehabilitation Robot in Combination with Constant Velocity Muscle Strength Training", "doi": null, "abstractUrl": "/proceedings-article/ismii/2021/129000a070/1sZ2L7YVSCs", "parentPublication": { "id": "proceedings/ismii/2021/1290/0", "title": "2021 7th International Symposium on Mechatronics and Industrial Informatics (ISMII)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "mco2017100019", "articleId": "13rRUwwJWCl", "__typename": "AdjacentArticleType" }, "next": { "fno": "mco2017100036", "articleId": "13rRUwcAqt6", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "17ShDTYet0L", "name": "mco2017100028s1.mp4", "location": "https://www.computer.org/csdl/api/v1/extra/mco2017100028s1.mp4", "extension": "mp4", "size": "15.8 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "12OmNzw8iT5", "title": "July-Sept.", "year": "2017", "issueNum": "03", "idPrefix": "pc", "pubType": "magazine", "volume": "16", "label": "July-Sept.", "downloadables": { "hasCover": true, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUIJcWu4", "doi": "10.1109/MPRV.2017.2940966", "abstract": "Mid-air gestures have been largely overlooked for transferring content between large displays and personal mobile devices. To fully utilize the ubiquitous nature of mid-air gestures for this purpose, the authors developed SimSense, a smart space system that automatically pairs users with their mobile devices based on location data. Users can then interact with a gesture-controlled large display and move content onto their handheld devices. In a user study, the authors investigated two mid-air gestures for content transfer: grab-and-pull and grab-and-drop. Their results show that mid-air gestures are well suited for content-retrieval scenarios and offer an impressive user experience; grab-and-pull is the preferred for scenarios when content is transferred to the user, whereas grab-and-drop is presumably ideal when the recipient is another person or a device; and distinct gestures can be successfully combined with common point-and-dwell mechanics prominent in many gesture-controlled applications.", "abstracts": [ { "abstractType": "Regular", "content": "Mid-air gestures have been largely overlooked for transferring content between large displays and personal mobile devices. To fully utilize the ubiquitous nature of mid-air gestures for this purpose, the authors developed SimSense, a smart space system that automatically pairs users with their mobile devices based on location data. Users can then interact with a gesture-controlled large display and move content onto their handheld devices. 
In a user study, the authors investigated two mid-air gestures for content transfer: grab-and-pull and grab-and-drop. Their results show that mid-air gestures are well suited for content-retrieval scenarios and offer an impressive user experience; grab-and-pull is the preferred for scenarios when content is transferred to the user, whereas grab-and-drop is presumably ideal when the recipient is another person or a device; and distinct gestures can be successfully combined with common point-and-dwell mechanics prominent in many gesture-controlled applications.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Mid-air gestures have been largely overlooked for transferring content between large displays and personal mobile devices. To fully utilize the ubiquitous nature of mid-air gestures for this purpose, the authors developed SimSense, a smart space system that automatically pairs users with their mobile devices based on location data. Users can then interact with a gesture-controlled large display and move content onto their handheld devices. In a user study, the authors investigated two mid-air gestures for content transfer: grab-and-pull and grab-and-drop. 
Their results show that mid-air gestures are well suited for content-retrieval scenarios and offer an impressive user experience; grab-and-pull is the preferred for scenarios when content is transferred to the user, whereas grab-and-drop is presumably ideal when the recipient is another person or a device; and distinct gestures can be successfully combined with common point-and-dwell mechanics prominent in many gesture-controlled applications.", "title": "\"It's Natural to Grab and Pull\": Retrieving Content from Large Displays Using Mid-Air Gestures", "normalizedTitle": "\"It's Natural to Grab and Pull\": Retrieving Content from Large Displays Using Mid-Air Gestures", "fno": "mpc2017030070", "hasPdf": true, "idPrefix": "pc", "keywords": [ "Mobile Handsets", "Mobile Applications", "Mobile Communication", "Visualization", "Navigation", "Content Management", "Avatars", "Ubiquitous Computing", "Internet Of Things", "Virtualization", "Content Transfer", "Large Displays", "Mid Air Gestures", "Cross Device Interaction", "Mobile Devices", "Smart Spaces", "Ubiquitous Computing", "Pervasive Computing", "Mobile", "Internet Of Things", "Virtualization" ], "authors": [ { "givenName": "Ville", "surname": "Mäkelä", "fullName": "Ville Mäkelä", "affiliation": "University of Tampere, Finland", "__typename": "ArticleAuthorType" }, { "givenName": "Jobin", "surname": "James", "fullName": "Jobin James", "affiliation": "University of Tampere, Finland", "__typename": "ArticleAuthorType" }, { "givenName": "Tuuli", "surname": "Keskinen", "fullName": "Tuuli Keskinen", "affiliation": "University of Tampere, Finland", "__typename": "ArticleAuthorType" }, { "givenName": "Jaakko", "surname": "Hakulinen", "fullName": "Jaakko Hakulinen", "affiliation": "University of Tampere, Finland", "__typename": "ArticleAuthorType" }, { "givenName": "Markku", "surname": "Turunen", "fullName": "Markku Turunen", "affiliation": "University of Tampere, Finland", "__typename": "ArticleAuthorType" } ], 
"replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "03", "pubDate": "2017-07-01 00:00:00", "pubType": "mags", "pages": "70-77", "year": "2017", "issn": "1536-1268", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/3dui/2013/6097/0/06550223", "title": "Poster: Gesture-based control of avatars for social TV", "doi": null, "abstractUrl": "/proceedings-article/3dui/2013/06550223/12OmNAGepYr", "parentPublication": { "id": "proceedings/3dui/2013/6097/0", "title": "2013 IEEE Symposium on 3D User Interfaces (3DUI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dui/2017/6716/0/07893331", "title": "Gesture elicitation for 3D travel via multi-touch and mid-Air systems for procedurally generated pseudo-universe", "doi": null, "abstractUrl": "/proceedings-article/3dui/2017/07893331/12OmNBpVQ5U", "parentPublication": { "id": "proceedings/3dui/2017/6716/0", "title": "2017 IEEE Symposium on 3D User Interfaces (3DUI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dui/2014/3624/0/06798833", "title": "Mid-air interactions above stereoscopic interactive tables", "doi": null, "abstractUrl": "/proceedings-article/3dui/2014/06798833/12OmNCzKlMB", "parentPublication": { "id": "proceedings/3dui/2014/3624/0", "title": "2014 IEEE Symposium on 3D User Interfaces (3DUI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dui/2017/6716/0/07893332", "title": "Mid-air modeling with Boolean operations in VR", "doi": null, "abstractUrl": "/proceedings-article/3dui/2017/07893332/12OmNyGbI5i", "parentPublication": { "id": "proceedings/3dui/2017/6716/0", "title": "2017 IEEE Symposium on 3D User Interfaces (3DUI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/iv/2016/8942/0/8942a242", "title": "Categorizing Issues in Mid-air InfoVis Interaction", "doi": null, "abstractUrl": "/proceedings-article/iv/2016/8942a242/12OmNyKrH2A", "parentPublication": { "id": "proceedings/iv/2016/8942/0", "title": "2016 20th International Conference Information Visualisation (IV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2022/5325/0/532500a637", "title": "Blending On-Body and Mid-Air Interaction in Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/ismar/2022/532500a637/1JrRmvhGko0", "parentPublication": { "id": "proceedings/ismar/2022/5325/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2023/05/10049698", "title": "Gaining the High Ground: Teleportation to Mid-Air Targets in Immersive Virtual Environments", "doi": null, "abstractUrl": "/journal/tg/2023/05/10049698/1KYotugT0xW", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2019/2506/0/250600a553", "title": "Mid-Air: A Multi-Modal Dataset for Extremely Low Altitude Drone Flights", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2019/250600a553/1iTvqVGhAek", "parentPublication": { "id": "proceedings/cvprw/2019/2506/0", "title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2021/4057/0/405700a492", "title": "Determining the Target Point of the Mid-Air Pinch Gesture", "doi": null, "abstractUrl": "/proceedings-article/vrw/2021/405700a492/1tnXsQx2NOw", "parentPublication": { "id": "proceedings/vrw/2021/4057/0", "title": "2021 IEEE Conference on 
Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/12/09507320", "title": "OctoPocus in VR: Using a Dynamic Guide for 3D Mid-Air Gestures in Virtual Reality", "doi": null, "abstractUrl": "/journal/tg/2021/12/09507320/1vNfMheqZ2w", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "mpc2017030062", "articleId": "13rRUyuNsCQ", "__typename": "AdjacentArticleType" }, "next": { "fno": "mpc2017030078", "articleId": "13rRUxOdD5o", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNyq0zFI", "title": "May", "year": "2020", "issueNum": "05", "idPrefix": "tg", "pubType": "journal", "volume": "26", "label": "May", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1hpPDKs9c7C", "doi": "10.1109/TVCG.2020.2973065", "abstract": "Telecollaboration involves the teleportation of a remote collaborator to another real-world environment where their partner is located. The fidelity of the environment plays an important role for allowing corresponding spatial references in remote collaboration. We present a novel asymmetric platform, Augmented Virtual Teleportation (AVT), which provides high-fidelity telepresence of a remote VR user (VR-Traveler) into a real-world collaboration space to interact with a local AR user (AR-Host). AVT uses a 360&#x00B0; video camera (360-camera) that captures and live-streams the omni-directional scenes over a network. The remote VR-Traveler watching the video in a VR headset experiences live presence and co-presence in the real-world collaboration space. The VR-Traveler's movements are captured and transmitted to a 3D avatar overlaid onto the 360-camera which can be seen in the AR-Host's display. The visual and audio cues for each collaborator are synchronized in the Mixed Reality Collaboration space (MRC-space), where they can interactively edit virtual objects and collaborate in the real environment using the real objects as a reference. High fidelity, real-time rendering of virtual objects and seamless blending into the real scene allows for unique mixed reality use-case scenarios. Our working prototype has been tested with a user study to evaluate spatial presence, co-presence, and user satisfaction during telecollaboration. 
Possible applications of AVT are identified and proposed to guide future usage.", "abstracts": [ { "abstractType": "Regular", "content": "Telecollaboration involves the teleportation of a remote collaborator to another real-world environment where their partner is located. The fidelity of the environment plays an important role for allowing corresponding spatial references in remote collaboration. We present a novel asymmetric platform, Augmented Virtual Teleportation (AVT), which provides high-fidelity telepresence of a remote VR user (VR-Traveler) into a real-world collaboration space to interact with a local AR user (AR-Host). AVT uses a 360&#x00B0; video camera (360-camera) that captures and live-streams the omni-directional scenes over a network. The remote VR-Traveler watching the video in a VR headset experiences live presence and co-presence in the real-world collaboration space. The VR-Traveler's movements are captured and transmitted to a 3D avatar overlaid onto the 360-camera which can be seen in the AR-Host's display. The visual and audio cues for each collaborator are synchronized in the Mixed Reality Collaboration space (MRC-space), where they can interactively edit virtual objects and collaborate in the real environment using the real objects as a reference. High fidelity, real-time rendering of virtual objects and seamless blending into the real scene allows for unique mixed reality use-case scenarios. Our working prototype has been tested with a user study to evaluate spatial presence, co-presence, and user satisfaction during telecollaboration. Possible applications of AVT are identified and proposed to guide future usage.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Telecollaboration involves the teleportation of a remote collaborator to another real-world environment where their partner is located. The fidelity of the environment plays an important role for allowing corresponding spatial references in remote collaboration. 
We present a novel asymmetric platform, Augmented Virtual Teleportation (AVT), which provides high-fidelity telepresence of a remote VR user (VR-Traveler) into a real-world collaboration space to interact with a local AR user (AR-Host). AVT uses a 360° video camera (360-camera) that captures and live-streams the omni-directional scenes over a network. The remote VR-Traveler watching the video in a VR headset experiences live presence and co-presence in the real-world collaboration space. The VR-Traveler's movements are captured and transmitted to a 3D avatar overlaid onto the 360-camera which can be seen in the AR-Host's display. The visual and audio cues for each collaborator are synchronized in the Mixed Reality Collaboration space (MRC-space), where they can interactively edit virtual objects and collaborate in the real environment using the real objects as a reference. High fidelity, real-time rendering of virtual objects and seamless blending into the real scene allows for unique mixed reality use-case scenarios. Our working prototype has been tested with a user study to evaluate spatial presence, co-presence, and user satisfaction during telecollaboration. 
Possible applications of AVT are identified and proposed to guide future usage.", "title": "Augmented Virtual Teleportation for High-Fidelity Telecollaboration", "normalizedTitle": "Augmented Virtual Teleportation for High-Fidelity Telecollaboration", "fno": "08998353", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Augmented Reality", "Avatars", "Groupware", "Rendering Computer Graphics", "High Fidelity Telecollaboration", "Spatial References", "Remote Collaboration", "AVT", "High Fidelity Telepresence", "Remote VR User", "Real World Collaboration Space", "Local AR User", "Omni Directional Scenes", "Remote VR Traveler", "VR Headset Experiences", "Real Time Rendering", "3 D Avatar", "Mixed Reality Use Case Scenarios", "Mixed Reality Collaboration Space", "Augmented Virtual Teleportation", "Spatial Presence", "Virtual Objects", "MRC Space", "Collaboration", "Visualization", "Avatars", "Telepresence", "Three Dimensional Displays", "Teleportation", "Telepresence", "Collaboration", "Real Time", "Mixed Reality", "360 Panoramic Video" ], "authors": [ { "givenName": "Taehyun", "surname": "Rhee", "fullName": "Taehyun Rhee", "affiliation": "Victoria University of Wellington", "__typename": "ArticleAuthorType" }, { "givenName": "Stephen", "surname": "Thompson", "fullName": "Stephen Thompson", "affiliation": "Victoria University of Wellington", "__typename": "ArticleAuthorType" }, { "givenName": "Daniel", "surname": "Medeiros", "fullName": "Daniel Medeiros", "affiliation": "Victoria University of Wellington", "__typename": "ArticleAuthorType" }, { "givenName": "Rafael", "surname": "dos Anjos", "fullName": "Rafael dos Anjos", "affiliation": "Victoria University of Wellington", "__typename": "ArticleAuthorType" }, { "givenName": "Andrew", "surname": "Chalmers", "fullName": "Andrew Chalmers", "affiliation": "Victoria University of Wellington", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": 
false, "issueNum": "05", "pubDate": "2020-05-01 00:00:00", "pubType": "trans", "pages": "1923-1933", "year": "2020", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/vr/2017/6647/0/07892290", "title": "Asymetric telecollaboration in virtual reality", "doi": null, "abstractUrl": "/proceedings-article/vr/2017/07892290/12OmNwDACwE", "parentPublication": { "id": "proceedings/vr/2017/6647/0", "title": "2017 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2017/6327/0/6327a218", "title": "[POSTER] CoVAR: Mixed-Platform Remote Collaborative Augmented and Virtual Realities System with Shared Collaboration Cues", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2017/6327a218/12OmNzV70Kh", "parentPublication": { "id": "proceedings/ismar-adjunct/2017/6327/0", "title": "2017 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2018/3365/0/08446561", "title": "Augmented VR", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08446561/13bd1eSlysy", "parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2018/7459/0/745900a165", "title": "Effects of Sharing Real-Time Multi-Sensory Heart Rate Feedback in Different Immersive Collaborative Virtual Environments", "doi": null, "abstractUrl": "/proceedings-article/ismar/2018/745900a165/17D45VTRov4", "parentPublication": { "id": "proceedings/ismar/2018/7459/0", "title": "2018 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "proceedings/vrw/2022/8402/0/840200a956", "title": "[DC]Using Multimodal Input in Augmented Virtual Teleportation", "doi": null, "abstractUrl": "/proceedings-article/vrw/2022/840200a956/1CJcYgs1MY0", "parentPublication": { "id": "proceedings/vrw/2022/8402/0", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2022/8402/0/840200a238", "title": "Comparing Teleportation Methods for Travel in Everyday Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/vrw/2022/840200a238/1CJdYyJV76E", "parentPublication": { "id": "proceedings/vrw/2022/8402/0", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08797719", "title": "The Effect of Avatar Appearance on Social Presence in an Augmented Reality Remote Collaboration", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08797719/1cJ1dVsXQDS", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2019/4765/0/476500a022", "title": "Merging Live and Static 360 Panoramas Inside a 3D Scene for Mixed Reality Remote Collaboration", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2019/476500a022/1gysn0YPLm8", "parentPublication": { "id": "proceedings/ismar-adjunct/2019/4765/0", "title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2020/8508/0/850800a608", "title": "Walking and Teleportation in 
Wide-area Virtual Reality Experiences", "doi": null, "abstractUrl": "/proceedings-article/ismar/2020/850800a608/1pysv8bIfrG", "parentPublication": { "id": "proceedings/ismar/2020/8508/0", "title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2021/4057/0/405700a377", "title": "Multisensory Teleportation in Virtual Reality Applications", "doi": null, "abstractUrl": "/proceedings-article/vrw/2021/405700a377/1tnXGQKSUPm", "parentPublication": { "id": "proceedings/vrw/2021/4057/0", "title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "08998401", "articleId": "1hrXgAAK6NW", "__typename": "AdjacentArticleType" }, "next": { "fno": "08998348", "articleId": "1hrXedrZXos", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "1iEfOM15BqU", "name": "ttg202005-08998353s1-supp1-2973065.mp4", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202005-08998353s1-supp1-2973065.mp4", "extension": "mp4", "size": "207 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "1JYZzPXxXr2", "title": "Nov.-Dec.", "year": "2022", "issueNum": "06", "idPrefix": "it", "pubType": "magazine", "volume": "24", "label": "Nov.-Dec.", "downloadables": { "hasCover": true, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1JYZF1FB6ww", "doi": "10.1109/MITP.2022.3203820", "abstract": "The metaverse is a concept of a persistent, online, 3-D World that combines multiple virtual spaces. With Metaverse, these independent, computer-generated environments developed by different organizations come together within a single, integrated network of 3-D worlds where users can hop from one universe to another. For achieving a near-perfect virtual world, Metaverse uses numerous advanced technologies including virtual reality, augmented reality, artificial intelligence, blockchain, etc. A novel technology like metaverse gives rise to various challenges like the standing of virtual avatars in legal systems in case of injury or harm, demand for expensive resources in its upscaling, privacy of user data, and increased risk of cybercrimes. This research presents a brief overview of technologies used in the development of metaverse, the challenges, and the potential aspects of the virtual world.", "abstracts": [ { "abstractType": "Regular", "content": "The metaverse is a concept of a persistent, online, 3-D World that combines multiple virtual spaces. With Metaverse, these independent, computer-generated environments developed by different organizations come together within a single, integrated network of 3-D worlds where users can hop from one universe to another. For achieving a near-perfect virtual world, Metaverse uses numerous advanced technologies including virtual reality, augmented reality, artificial intelligence, blockchain, etc. 
A novel technology like metaverse gives rise to various challenges like the standing of virtual avatars in legal systems in case of injury or harm, demand for expensive resources in its upscaling, privacy of user data, and increased risk of cybercrimes. This research presents a brief overview of technologies used in the development of metaverse, the challenges, and the potential aspects of the virtual world.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "The metaverse is a concept of a persistent, online, 3-D World that combines multiple virtual spaces. With Metaverse, these independent, computer-generated environments developed by different organizations come together within a single, integrated network of 3-D worlds where users can hop from one universe to another. For achieving a near-perfect virtual world, Metaverse uses numerous advanced technologies including virtual reality, augmented reality, artificial intelligence, blockchain, etc. A novel technology like metaverse gives rise to various challenges like the standing of virtual avatars in legal systems in case of injury or harm, demand for expensive resources in its upscaling, privacy of user data, and increased risk of cybercrimes. 
This research presents a brief overview of technologies used in the development of metaverse, the challenges, and the potential aspects of the virtual world.", "title": "Virtual Dimension&#x2014;A Primer to Metaverse", "normalizedTitle": "Virtual Dimension—A Primer to Metaverse", "fno": "10017431", "hasPdf": true, "idPrefix": "it", "keywords": [ "Augmented Reality", "Avatars", "3 D World", "Artificial Intelligence", "Cybercrimes", "Independent Computer Generated Environments", "Metaverse", "Multiple Virtual Spaces", "Near Perfect Virtual World", "Virtual Avatars", "Virtual Dimension", "Virtual Reality", "Data Privacy", "Metaverse", "Law", "Education", "Blockchains", "Cryptocurrency" ], "authors": [ { "givenName": "Hijab E.", "surname": "Zainab", "fullName": "Hijab E. Zainab", "affiliation": "Research Center for Computing, Jinnah University for Women, Karachi, Pakistan", "__typename": "ArticleAuthorType" }, { "givenName": "Narmeen Zakaria", "surname": "Bawany", "fullName": "Narmeen Zakaria Bawany", "affiliation": "Research Center for Computing, Jinnah University for Women, Karachi, Pakistan", "__typename": "ArticleAuthorType" }, { "givenName": "Jaweria", "surname": "Imran", "fullName": "Jaweria Imran", "affiliation": "Research Center for Computing, Jinnah University for Women, Karachi, Pakistan", "__typename": "ArticleAuthorType" }, { "givenName": "Wajiha", "surname": "Rehman", "fullName": "Wajiha Rehman", "affiliation": "Research Center for Computing, Jinnah University for Women, Karachi, Pakistan", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": false, "showRecommendedArticles": true, "isOpenAccess": true, "issueNum": "06", "pubDate": "2022-11-01 00:00:00", "pubType": "mags", "pages": "27-33", "year": "2022", "issn": "1520-9202", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "trans/oj/2022/01/09815155", "title": "Fusing Blockchain and AI With Metaverse: A Survey", "doi": 
null, "abstractUrl": "/journal/oj/2022/01/09815155/1EJBce8LdBe", "parentPublication": { "id": "trans/oj", "title": "IEEE Open Journal of the Computer Society", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/compsac/2022/8810/0/881000a401", "title": "Connecting Everyday Objects with the Metaverse: A Unified Recognition Framework", "doi": null, "abstractUrl": "/proceedings-article/compsac/2022/881000a401/1FJ5lIJDqYU", "parentPublication": { "id": "proceedings/compsac/2022/8810/0", "title": "2022 IEEE 46th Annual Computers, Software, and Applications Conference (COMPSAC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/oj/2022/01/09893188", "title": "Fusion of Building Information Modeling and Blockchain for Metaverse: A Survey", "doi": null, "abstractUrl": "/journal/oj/2022/01/09893188/1GGLcptbShO", "parentPublication": { "id": "trans/oj", "title": "IEEE Open Journal of the Computer Society", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2022/5365/0/536500a055", "title": "The Digital Big Bang in the Metaverse Era", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a055/1J7WdsYCPEQ", "parentPublication": { "id": "proceedings/ismar-adjunct/2022/5365/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2022/5365/0/536500a027", "title": "Towards a virtual business ecosystem in the Metaverse Era", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a027/1J7WujOWeCA", "parentPublication": { "id": "proceedings/ismar-adjunct/2022/5365/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "mags/mu/5555/01/10049293", "title": "Blockchain Empowered Privacy-Preserving Digital Objects Trading in Metaverse", "doi": null, "abstractUrl": "/magazine/mu/5555/01/10049293/1KYok94FH20", "parentPublication": { "id": "mags/mu", "title": "IEEE MultiMedia", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icaml/2022/6265/0/626500a444", "title": "The Current Situation and Prospect of the Development of Metaverse Technology", "doi": null, "abstractUrl": "/proceedings-article/icaml/2022/626500a444/1Lkfy6e2UW4", "parentPublication": { "id": "proceedings/icaml/2022/6265/0", "title": "2022 4th International Conference on Applied Machine Learning (ICAML)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/tps-isa/2022/7408/0/740800a039", "title": "Auditing Metaverse Requires Multimodal Deep Learning", "doi": null, "abstractUrl": "/proceedings-article/tps-isa/2022/740800a039/1Lxf665eb2U", "parentPublication": { "id": "proceedings/tps-isa/2022/7408/0", "title": "2022 IEEE 4th International Conference on Trust, Privacy and Security in Intelligent Systems, and Applications (TPS-ISA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icei/2022/9327/0/932700a007", "title": "Metaverse Applications in Energy Internet", "doi": null, "abstractUrl": "/proceedings-article/icei/2022/932700a007/1MhIoEOvaI8", "parentPublication": { "id": "proceedings/icei/2022/9327/0", "title": "2022 IEEE International Conference on Energy Internet (ICEI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/fcsit/2022/6353/0/635300a234", "title": "Flowing through Virtual Animated Worlds &#x2013; Perceptions of the Metaverse", "doi": null, "abstractUrl": "/proceedings-article/fcsit/2022/635300a234/1Ml2akY0wfu", "parentPublication": { "id": "proceedings/fcsit/2022/6353/0", 
"title": "2022 Euro-Asia Conference on Frontiers of Computer Science and Information Technology (FCSIT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "10017417", "articleId": "1JYZEh4gbvO", "__typename": "AdjacentArticleType" }, "next": { "fno": "10017413", "articleId": "1JYZFCMvyTu", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "1JYZzPXxXr2", "title": "Nov.-Dec.", "year": "2022", "issueNum": "06", "idPrefix": "it", "pubType": "magazine", "volume": "24", "label": "Nov.-Dec.", "downloadables": { "hasCover": true, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1JYZFCMvyTu", "doi": "10.1109/MITP.2022.3225064", "abstract": "The digital twin has recently emerged as a virtual representation, that enables a real-time digital counterpart of a process or a physical object. Further, as the investments in Industry 5.0 are growing rapidly, their primary focus is to enhance the interactions between cyber-physical systems (CPS) and humans, for which outstanding contribution is expected through the metaverse. It enables humans to immerse into a high-dimensional 3-D virtual world, tackles the interactions among the CPS, and explore their status, which is found to be promising through the digital clones of CPS. This work presents the service-oriented digital twin architecture in conjunction with metaverse-enabled platforms with recommendations for ambitious interactions with the CPS for Industry 5.0 scenarios and beyond. They account for revolutionary changes in modern industries, supported through the Internet of Everything (IoE), VR/AR gadgets, and extended reality (XR) as prominent technology enablers.", "abstracts": [ { "abstractType": "Regular", "content": "The digital twin has recently emerged as a virtual representation, that enables a real-time digital counterpart of a process or a physical object. Further, as the investments in Industry 5.0 are growing rapidly, their primary focus is to enhance the interactions between cyber-physical systems (CPS) and humans, for which outstanding contribution is expected through the metaverse. 
It enables humans to immerse into a high-dimensional 3-D virtual world, tackles the interactions among the CPS, and explore their status, which is found to be promising through the digital clones of CPS. This work presents the service-oriented digital twin architecture in conjunction with metaverse-enabled platforms with recommendations for ambitious interactions with the CPS for Industry 5.0 scenarios and beyond. They account for revolutionary changes in modern industries, supported through the Internet of Everything (IoE), VR/AR gadgets, and extended reality (XR) as prominent technology enablers.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "The digital twin has recently emerged as a virtual representation, that enables a real-time digital counterpart of a process or a physical object. Further, as the investments in Industry 5.0 are growing rapidly, their primary focus is to enhance the interactions between cyber-physical systems (CPS) and humans, for which outstanding contribution is expected through the metaverse. It enables humans to immerse into a high-dimensional 3-D virtual world, tackles the interactions among the CPS, and explore their status, which is found to be promising through the digital clones of CPS. This work presents the service-oriented digital twin architecture in conjunction with metaverse-enabled platforms with recommendations for ambitious interactions with the CPS for Industry 5.0 scenarios and beyond. 
They account for revolutionary changes in modern industries, supported through the Internet of Everything (IoE), VR/AR gadgets, and extended reality (XR) as prominent technology enablers.", "title": "Building Digital Twins of Cyber Physical Systems With Metaverse for Industry 5.0 and Beyond", "normalizedTitle": "Building Digital Twins of Cyber Physical Systems With Metaverse for Industry 5.0 and Beyond", "fno": "10017413", "hasPdf": true, "idPrefix": "it", "keywords": [ "Cyber Physical Systems", "Digital Twins", "Internet", "Production Engineering Computing", "Virtual Reality", "3 D Virtual World", "AR Gadgets", "CPS", "Cyber Physical Systems", "Digital Clones", "Industry 5 0", "Internet Of Everything", "Investments", "Metaverse Enabled Platforms", "Physical Object", "Real Time Digital Counterpart", "Service Oriented Digital Twin Architecture", "Virtual Representation", "VR Gadgets", "Industries", "Metaverse", "Supply Chains", "Cloning", "Virtual Groups", "Real Time Systems", "Product Development" ], "authors": [ { "givenName": "Senthil Kumar", "surname": "Jagatheesaperumal", "fullName": "Senthil Kumar Jagatheesaperumal", "affiliation": "Mepco Schlenk Engineering College, Sivakasi, India", "__typename": "ArticleAuthorType" }, { "givenName": "Mohamed", "surname": "Rahouti", "fullName": "Mohamed Rahouti", "affiliation": "Fordham University, Bronx, NY, USA", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "06", "pubDate": "2022-11-01 00:00:00", "pubType": "mags", "pages": "34-40", "year": "2022", "issn": "1520-9202", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "trans/oj/2022/01/09815155", "title": "Fusing Blockchain and AI With Metaverse: A Survey", "doi": null, "abstractUrl": "/journal/oj/2022/01/09815155/1EJBce8LdBe", "parentPublication": { "id": "trans/oj", "title": "IEEE Open Journal of the 
Computer Society", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cost/2022/6248/0/624800a090", "title": "Innovation and Prospect of Digital Scene Setting in the Context of Metaverse", "doi": null, "abstractUrl": "/proceedings-article/cost/2022/624800a090/1H2phdYgv4I", "parentPublication": { "id": "proceedings/cost/2022/6248/0", "title": "2022 International Conference on Culture-Oriented Science and Technology (CoST)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2022/5365/0/536500a055", "title": "The Digital Big Bang in the Metaverse Era", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a055/1J7WdsYCPEQ", "parentPublication": { "id": "proceedings/ismar-adjunct/2022/5365/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/mu/5555/01/10049293", "title": "Blockchain Empowered Privacy-Preserving Digital Objects Trading in Metaverse", "doi": null, "abstractUrl": "/magazine/mu/5555/01/10049293/1KYok94FH20", "parentPublication": { "id": "mags/mu", "title": "IEEE MultiMedia", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3cbit/2022/9225/0/922500a160", "title": "Metaverse for Public Welfare and the United Nations Sustainable Development Goals", "doi": null, "abstractUrl": "/proceedings-article/3cbit/2022/922500a160/1La4KMp1aEw", "parentPublication": { "id": "proceedings/3cbit/2022/9225/0", "title": "2022 International Conference on Cloud Computing, Big Data and Internet of Things (3CBIT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icaml/2022/6265/0/626500a444", "title": "The Current Situation and Prospect of the Development of Metaverse Technology", "doi": null, 
"abstractUrl": "/proceedings-article/icaml/2022/626500a444/1Lkfy6e2UW4", "parentPublication": { "id": "proceedings/icaml/2022/6265/0", "title": "2022 4th International Conference on Applied Machine Learning (ICAML)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/fnwf/2022/6250/0/625000a628", "title": "Semantic Information Market For The Metaverse: An Auction Based Approach", "doi": null, "abstractUrl": "/proceedings-article/fnwf/2022/625000a628/1LlH4d93Kc8", "parentPublication": { "id": "proceedings/fnwf/2022/6250/0", "title": "2022 IEEE Future Networks World Forum (FNWF)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/tps-isa/2022/7408/0/740800a039", "title": "Auditing Metaverse Requires Multimodal Deep Learning", "doi": null, "abstractUrl": "/proceedings-article/tps-isa/2022/740800a039/1Lxf665eb2U", "parentPublication": { "id": "proceedings/tps-isa/2022/7408/0", "title": "2022 IEEE 4th International Conference on Trust, Privacy and Security in Intelligent Systems, and Applications (TPS-ISA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icei/2022/9327/0/932700a007", "title": "Metaverse Applications in Energy Internet", "doi": null, "abstractUrl": "/proceedings-article/icei/2022/932700a007/1MhIoEOvaI8", "parentPublication": { "id": "proceedings/icei/2022/9327/0", "title": "2022 IEEE International Conference on Energy Internet (ICEI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sescps/2019/2282/0/228200a025", "title": "Reference Framework for Digital Twins within Cyber-Physical Systems", "doi": null, "abstractUrl": "/proceedings-article/sescps/2019/228200a025/1d5kAkmA5So", "parentPublication": { "id": "proceedings/sescps/2019/2282/0", "title": "2019 IEEE/ACM 5th International Workshop on Software Engineering for Smart Cyber-Physical Systems 
(SEsCPS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "10017431", "articleId": "1JYZF1FB6ww", "__typename": "AdjacentArticleType" }, "next": { "fno": "10017408", "articleId": "1JYZFOz93Hy", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }