data
dict
{ "issue": { "id": "12OmNAsk4xG", "title": "Oct.", "year": "2019", "issueNum": "10", "idPrefix": "tk", "pubType": "journal", "volume": "31", "label": "Oct.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13WBGMfNvFN", "doi": "10.1109/TKDE.2018.2872602", "abstract": "Given the edge list of a social network, the node embedding method learns the structural features for every node and embeds the features into a vector space. The current related work on node embedding exploits only a portion of existing networks, e.g., static networks. However, social networks are inherently hierarchical and dynamic systems in which the topology changes constantly and the strength of influence of information among neighbors varies with different numbers of hops. We propose a highly efficient node embedding method, DNPS, that is faster and more accurate than state-of-the-art methods and that can further boost the training progress, especially under dynamic conditions. In this paper, we attempt to model the hierarchical and dynamic features of social networks by designing a damping-based sampling algorithm corresponding to a local search-based incremental learning algorithm, which can easily be extended to large-scale scenarios. We conduct extensive experiments on six real-world social networks with three challenging tasks, including missing link prediction, dynamic link prediction, and multi-label classification. The results of the experiments on these tasks demonstrate that the proposed method significantly outperforms the existing methods with different settings.", "abstracts": [ { "abstractType": "Regular", "content": "Given the edge list of a social network, the node embedding method learns the structural features for every node and embeds the features into a vector space. The current related work on node embedding exploits only a portion of existing networks, e.g., static networks. 
However, social networks are inherently hierarchical and dynamic systems in which the topology changes constantly and the strength of influence of information among neighbors varies with different numbers of hops. We propose a highly efficient node embedding method, DNPS, that is faster and more accurate than state-of-the-art methods and that can further boost the training progress, especially under dynamic conditions. In this paper, we attempt to model the hierarchical and dynamic features of social networks by designing a damping-based sampling algorithm corresponding to a local search-based incremental learning algorithm, which can easily be extended to large-scale scenarios. We conduct extensive experiments on six real-world social networks with three challenging tasks, including missing link prediction, dynamic link prediction, and multi-label classification. The results of the experiments on these tasks demonstrate that the proposed method significantly outperforms the existing methods with different settings.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Given the edge list of a social network, the node embedding method learns the structural features for every node and embeds the features into a vector space. The current related work on node embedding exploits only a portion of existing networks, e.g., static networks. However, social networks are inherently hierarchical and dynamic systems in which the topology changes constantly and the strength of influence of information among neighbors varies with different numbers of hops. We propose a highly efficient node embedding method, DNPS, that is faster and more accurate than state-of-the-art methods and that can further boost the training progress, especially under dynamic conditions. 
In this paper, we attempt to model the hierarchical and dynamic features of social networks by designing a damping-based sampling algorithm corresponding to a local search-based incremental learning algorithm, which can easily be extended to large-scale scenarios. We conduct extensive experiments on six real-world social networks with three challenging tasks, including missing link prediction, dynamic link prediction, and multi-label classification. The results of the experiments on these tasks demonstrate that the proposed method significantly outperforms the existing methods with different settings.", "title": "Modeling Large-Scale Dynamic Social Networks via Node Embeddings", "normalizedTitle": "Modeling Large-Scale Dynamic Social Networks via Node Embeddings", "fno": "08476241", "hasPdf": true, "idPrefix": "tk", "keywords": [ "Learning Artificial Intelligence", "Pattern Classification", "Search Problems", "Social Networking Online", "Static Networks", "Dynamic Systems", "Local Search Based Incremental Learning Algorithm", "Real World Social Networks", "Dynamic Link Prediction", "Node Embeddings", "Large Scale Dynamic Social Networks", "Multilabel Classification", "Social Network Services", "Heuristic Algorithms", "Task Analysis", "Training", "Computational Modeling", "Network Topology", "Topology", "Node Embeddings", "Distributed Representation", "Dynamic Social Networks", "Link Analysis" ], "authors": [ { "givenName": "Aakas", "surname": "Zhiyuli", "fullName": "Aakas Zhiyuli", "affiliation": "Department of Computer Science, Renmin University of China, Beijing, China", "__typename": "ArticleAuthorType" }, { "givenName": "Xun", "surname": "Liang", "fullName": "Xun Liang", "affiliation": "Department of Computer Science, Renmin University of China, Beijing, China", "__typename": "ArticleAuthorType" }, { "givenName": "Yanfang", "surname": "Chen", "fullName": "Yanfang Chen", "affiliation": "School of Information Resource Management, Renmin University of China, 
Beijing, China", "__typename": "ArticleAuthorType" }, { "givenName": "Xiaoyong", "surname": "Du", "fullName": "Xiaoyong Du", "affiliation": "Key Laboratory of Data Engineering and Knowledge Engineering, Ministry of Education, Beijing, China", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "10", "pubDate": "2019-10-01 00:00:00", "pubType": "trans", "pages": "1994-2007", "year": "2019", "issn": "1041-4347", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/asonam/2015/3854/0/07403520", "title": "Node embeddings in social network analysis", "doi": null, "abstractUrl": "/proceedings-article/asonam/2015/07403520/12OmNBTawkM", "parentPublication": { "id": "proceedings/asonam/2015/3854/0", "title": "2015 IEEE/ACM International Conference on Advances in Social Networks Analysis and Mining (ASONAM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ispa/2014/4293/0/4293a093", "title": "Efficient Routing Algorithms Combining History and Social Predictors in Mobile Social Networks", "doi": null, "abstractUrl": "/proceedings-article/ispa/2014/4293a093/12OmNwF0C42", "parentPublication": { "id": "proceedings/ispa/2014/4293/0", "title": "2014 IEEE International Symposium on Parallel and Distributed Processing with Applications (ISPA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/big-data/2015/9926/0/07364029", "title": "Dynamic community detection based on game theory in social networks", "doi": null, "abstractUrl": "/proceedings-article/big-data/2015/07364029/12OmNyFU78a", "parentPublication": { "id": "proceedings/big-data/2015/9926/0", "title": "2015 IEEE International Conference on Big Data (Big Data)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/icicse/2015/0454/0/0454a223", "title": "A Method of Social Network Node Preference Evaluation Based on the Topology Potential", "doi": null, "abstractUrl": "/proceedings-article/icicse/2015/0454a223/12OmNyqzM5m", "parentPublication": { "id": "proceedings/icicse/2015/0454/0", "title": "2015 Eighth International Conference on Internet Computing for Science and Engineering (ICICSE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tk/2017/02/07605539", "title": "Influential Node Tracking on Dynamic Social Network: An Interchange Greedy Approach", "doi": null, "abstractUrl": "/journal/tk/2017/02/07605539/13rRUxAASTE", "parentPublication": { "id": "trans/tk", "title": "IEEE Transactions on Knowledge & Data Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/big-data/2017/2715/0/08258232", "title": "Evaluating the quality of graph embeddings via topological feature reconstruction", "doi": null, "abstractUrl": "/proceedings-article/big-data/2017/08258232/17D45XDIXOG", "parentPublication": { "id": "proceedings/big-data/2017/2715/0", "title": "2017 IEEE International Conference on Big Data (Big Data)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/07/08580402", "title": "Exploring Evolution of Dynamic Networks via Diachronic Node Embeddings", "doi": null, "abstractUrl": "/journal/tg/2020/07/08580402/17D45XacGi5", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tk/2023/05/09709650", "title": "Streaming Graph Embeddings via Incremental Neighborhood Sketching", "doi": null, "abstractUrl": "/journal/tk/2023/05/09709650/1ASFjMEiQZW", "parentPublication": { "id": "trans/tk", "title": "IEEE Transactions on Knowledge & Data Engineering", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wi/2019/6934/0/08909497", "title": "EpiRep: Learning Node Representations through Epidemic Dynamics on Networks", "doi": null, "abstractUrl": "/proceedings-article/wi/2019/08909497/1febnN5Txbq", "parentPublication": { "id": "proceedings/wi/2019/6934/0", "title": "2019 IEEE/WIC/ACM International Conference on Web Intelligence (WI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/big-data/2019/0858/0/09006430", "title": "DNA: Dynamic Social Network Alignment", "doi": null, "abstractUrl": "/proceedings-article/big-data/2019/09006430/1hJrWxJHhqo", "parentPublication": { "id": "proceedings/big-data/2019/0858/0", "title": "2019 IEEE International Conference on Big Data (Big Data)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "08471205", "articleId": "13WBGNdYtft", "__typename": "AdjacentArticleType" }, "next": { "fno": "08468044", "articleId": "13HFz28TNuR", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNvDqsVX", "title": "Sept.", "year": "2018", "issueNum": "09", "idPrefix": "tg", "pubType": "journal", "volume": "24", "label": "Sept.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUxlgxOq", "doi": "10.1109/TVCG.2017.2758362", "abstract": "Urban data is massive, heterogeneous, and spatio-temporal, posing a substantial challenge for visualization and analysis. In this paper, we design and implement a novel visual analytics approach, Visual Analyzer for Urban Data (VAUD), that supports the visualization, querying, and exploration of urban data. Our approach allows for cross-domain correlation from multiple data sources by leveraging spatial-temporal and social inter-connectedness features. Through our approach, the analyst is able to select, filter, aggregate across multiple data sources and extract information that would be hidden to a single data subset. To illustrate the effectiveness of our approach, we provide case studies on a real urban dataset that contains the cyber-, physical-, and social- information of 14 million citizens over 22 days.", "abstracts": [ { "abstractType": "Regular", "content": "Urban data is massive, heterogeneous, and spatio-temporal, posing a substantial challenge for visualization and analysis. In this paper, we design and implement a novel visual analytics approach, Visual Analyzer for Urban Data (VAUD), that supports the visualization, querying, and exploration of urban data. Our approach allows for cross-domain correlation from multiple data sources by leveraging spatial-temporal and social inter-connectedness features. Through our approach, the analyst is able to select, filter, aggregate across multiple data sources and extract information that would be hidden to a single data subset. 
To illustrate the effectiveness of our approach, we provide case studies on a real urban dataset that contains the cyber-, physical-, and social- information of 14 million citizens over 22 days.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Urban data is massive, heterogeneous, and spatio-temporal, posing a substantial challenge for visualization and analysis. In this paper, we design and implement a novel visual analytics approach, Visual Analyzer for Urban Data (VAUD), that supports the visualization, querying, and exploration of urban data. Our approach allows for cross-domain correlation from multiple data sources by leveraging spatial-temporal and social inter-connectedness features. Through our approach, the analyst is able to select, filter, aggregate across multiple data sources and extract information that would be hidden to a single data subset. To illustrate the effectiveness of our approach, we provide case studies on a real urban dataset that contains the cyber-, physical-, and social- information of 14 million citizens over 22 days.", "title": "VAUD: A Visual Analysis Approach for Exploring Spatio-Temporal Urban Data", "normalizedTitle": "VAUD: A Visual Analysis Approach for Exploring Spatio-Temporal Urban Data", "fno": "08054703", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Cognition", "Data Visualization", "Visual Analytics", "Public Transportation", "Trajectory", "Urban Areas", "Urban Data", "Visual Analysis", "Visual Reasoning", "Heterogeneous", "Spatio Temporal" ], "authors": [ { "givenName": "Wei", "surname": "Chen", "fullName": "Wei Chen", "affiliation": "The State Key Lab of CAD & CG, Zhejiang University, Hangzhou, Zhejiang", "__typename": "ArticleAuthorType" }, { "givenName": "Zhaosong", "surname": "Huang", "fullName": "Zhaosong Huang", "affiliation": "The State Key Lab of CAD & CG, Zhejiang University, Hangzhou, Zhejiang, China", "__typename": "ArticleAuthorType" }, { "givenName": "Feiran", "surname": "Wu", 
"fullName": "Feiran Wu", "affiliation": "The State Key Lab of CAD & CG, Zhejiang University, Hangzhou, Zhejiang, China", "__typename": "ArticleAuthorType" }, { "givenName": "Minfeng", "surname": "Zhu", "fullName": "Minfeng Zhu", "affiliation": "The State Key Lab of CAD & CG, Zhejiang University, Hangzhou, Zhejiang, China", "__typename": "ArticleAuthorType" }, { "givenName": "Huihua", "surname": "Guan", "fullName": "Huihua Guan", "affiliation": "The State Key Lab of CAD & CG, Zhejiang University, Hangzhou, Zhejiang, China", "__typename": "ArticleAuthorType" }, { "givenName": "Ross", "surname": "Maciejewski", "fullName": "Ross Maciejewski", "affiliation": "The School of Computing Informatics & Decision Systems Engineering, Arizona State University, Tempe, AZ", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "09", "pubDate": "2018-09-01 00:00:00", "pubType": "trans", "pages": "2636-2648", "year": "2018", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/uic-atc-scalcom-cbdcom-iop-smartworld/2016/2771/0/07816948", "title": "Urban Knowledge Extraction, Representation and Reasoning as a Bridge from Data City towards Smart City", "doi": null, "abstractUrl": "/proceedings-article/uic-atc-scalcom-cbdcom-iop-smartworld/2016/07816948/12OmNzahccM", "parentPublication": { "id": "proceedings/uic-atc-scalcom-cbdcom-iop-smartworld/2016/2771/0", "title": "2016 Intl IEEE Conferences on Ubiquitous Intelligence & Computing, Advanced and Trusted Computing, Scalable Computing and Communications, Cloud and Big Data Computing, Internet of People, and Smart World Congress (UIC/ATC/ScalCom/CBDCom/IoP/SmartWorld)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2018/05/mcg2018050038", "title": "VitalVizor: A Visual Analytics System for Studying Urban 
Vitality", "doi": null, "abstractUrl": "/magazine/cg/2018/05/mcg2018050038/13WBGNxhc5X", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2018/05/mcg2018050026", "title": "Spatio-Temporal Urban Data Analysis: A Visual Analytics Perspective", "doi": null, "abstractUrl": "/magazine/cg/2018/05/mcg2018050026/13WBGTItFGV", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/bd/2016/03/07506246", "title": "Visual Analytics in Urban Computing: An Overview", "doi": null, "abstractUrl": "/journal/bd/2016/03/07506246/13rRUB6SpUe", "parentPublication": { "id": "trans/bd", "title": "IEEE Transactions on Big Data", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2017/05/mcg2017050050", "title": "Urban Space Explorer: A Visual Analytics System for Urban Planning", "doi": null, "abstractUrl": "/magazine/cg/2017/05/mcg2017050050/13rRUEgarq3", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2016/01/07192687", "title": "TrajGraph: A Graph-Based Visual Analytics Approach to Studying Urban Network Centralities Using Taxi Trajectory Data", "doi": null, "abstractUrl": "/journal/tg/2016/01/07192687/13rRUwInvBa", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2018/01/08017655", "title": "StreetVizor: Visual Exploration of Human-Scale Urban Forms Based on Street Views", "doi": null, "abstractUrl": "/journal/tg/2018/01/08017655/13rRUwInvsW", "parentPublication": { "id": "trans/tg", 
"title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdmw/2018/9288/0/928800a572", "title": "A Comparative Study of Urban Mobility Patterns Using Large-Scale Spatio-Temporal Data", "doi": null, "abstractUrl": "/proceedings-article/icdmw/2018/928800a572/18jXF1iMjLi", "parentPublication": { "id": "proceedings/icdmw/2018/9288/0", "title": "2018 IEEE International Conference on Data Mining Workshops (ICDMW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bigcomp/2022/2197/0/219700a234", "title": "Urban Event Detection from Spatio-temporal IoT Sensor Data Using Graph-Based Machine Learning", "doi": null, "abstractUrl": "/proceedings-article/bigcomp/2022/219700a234/1BYICHFmoM0", "parentPublication": { "id": "proceedings/bigcomp/2022/2197/0", "title": "2022 IEEE International Conference on Big Data and Smart Computing (BigComp)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/big-data/2019/0858/0/09006289", "title": "Understanding Spatio-Temporal Urban Processes", "doi": null, "abstractUrl": "/proceedings-article/big-data/2019/09006289/1hJrXYjoEWA", "parentPublication": { "id": "proceedings/big-data/2019/0858/0", "title": "2019 IEEE International Conference on Big Data (Big Data)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "08038807", "articleId": "13rRUxBa5xp", "__typename": "AdjacentArticleType" }, "next": { "fno": "08052554", "articleId": "13rRUILtJqY", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "17ShDTXWRTI", "name": "ttg201809-08054703s1.zip", "location": "https://www.computer.org/csdl/api/v1/extra/ttg201809-08054703s1.zip", "extension": "zip", "size": "25.3 MB", "__typename": "WebExtraType" } ], 
"articleVideos": [] }
{ "issue": { "id": "1p1cntpQSWc", "title": "Jan.", "year": "2021", "issueNum": "01", "idPrefix": "tg", "pubType": "journal", "volume": "27", "label": "Jan.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1bLyqDlC7cY", "doi": "10.1109/TVCG.2019.2928794", "abstract": "Recently, effort has been made to apply deep learning to the detection of mesh saliency. However, one major barrier is to collect a large amount of vertex-level annotation as saliency ground truth for training the neural networks. Quite a few pilot studies showed that this task is difficult. In this work, we solve this problem by developing a novel network trained in a weakly supervised manner. The training is end-to-end and does not require any saliency ground truth but only the class membership of meshes. Our Classification-for-Saliency CNN (CfS-CNN) employs a multi-view setup and contains a newly designed two-channel structure which integrates view-based features of both classification and saliency. It essentially transfers knowledge from 3D object classification to mesh saliency. Our approach significantly outperforms the existing state-of-the-art methods according to extensive experimental results. Also, the CfS-CNN can be directly used for scene saliency. We showcase two novel applications based on scene saliency to demonstrate its utility.", "abstracts": [ { "abstractType": "Regular", "content": "Recently, effort has been made to apply deep learning to the detection of mesh saliency. However, one major barrier is to collect a large amount of vertex-level annotation as saliency ground truth for training the neural networks. Quite a few pilot studies showed that this task is difficult. In this work, we solve this problem by developing a novel network trained in a weakly supervised manner. The training is end-to-end and does not require any saliency ground truth but only the class membership of meshes. 
Our Classification-for-Saliency CNN (CfS-CNN) employs a multi-view setup and contains a newly designed two-channel structure which integrates view-based features of both classification and saliency. It essentially transfers knowledge from 3D object classification to mesh saliency. Our approach significantly outperforms the existing state-of-the-art methods according to extensive experimental results. Also, the CfS-CNN can be directly used for scene saliency. We showcase two novel applications based on scene saliency to demonstrate its utility.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Recently, effort has been made to apply deep learning to the detection of mesh saliency. However, one major barrier is to collect a large amount of vertex-level annotation as saliency ground truth for training the neural networks. Quite a few pilot studies showed that this task is difficult. In this work, we solve this problem by developing a novel network trained in a weakly supervised manner. The training is end-to-end and does not require any saliency ground truth but only the class membership of meshes. Our Classification-for-Saliency CNN (CfS-CNN) employs a multi-view setup and contains a newly designed two-channel structure which integrates view-based features of both classification and saliency. It essentially transfers knowledge from 3D object classification to mesh saliency. Our approach significantly outperforms the existing state-of-the-art methods according to extensive experimental results. Also, the CfS-CNN can be directly used for scene saliency. 
We showcase two novel applications based on scene saliency to demonstrate its utility.", "title": "Mesh Saliency via Weakly Supervised Classification-for-Saliency CNN", "normalizedTitle": "Mesh Saliency via Weakly Supervised Classification-for-Saliency CNN", "fno": "08765747", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Convolutional Neural Nets", "Feature Extraction", "Image Classification", "Mesh Generation", "Stereo Image Processing", "Supervised Learning", "Mesh Saliency Detection", "Cf S CNN", "3 D Object Classification", "Deep Learning", "Vertex Level Annotation", "Neural Network Training", "Weakly Supervised Classification For Saliency CNN", "Weakly Supervised Training", "End To End Training", "View Based Features", "Convolutional Neural Network", "Knowledge Transfer", "Three Dimensional Displays", "Training", "Two Dimensional Displays", "Neural Networks", "Deep Learning", "Task Analysis", "Solid Modeling", "Mesh Saliency", "Deep Learning", "Transfer Learning", "Weak Supervision" ], "authors": [ { "givenName": "Ran", "surname": "Song", "fullName": "Ran Song", "affiliation": "Centre for Secure, Intelligent and Usable Systems, School of Computing, Engineering and Mathematics, University of Brighton, Brighton, United Kingdom", "__typename": "ArticleAuthorType" }, { "givenName": "Yonghuai", "surname": "Liu", "fullName": "Yonghuai Liu", "affiliation": "Department of Computer Science, Edge Hill University, Ormskirk, United Kingdom", "__typename": "ArticleAuthorType" }, { "givenName": "Paul L.", "surname": "Rosin", "fullName": "Paul L. 
Rosin", "affiliation": "School of Computer Science and Informatics, Cardiff University, Cardiff, United Kingdom", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "01", "pubDate": "2021-01-01 00:00:00", "pubType": "trans", "pages": "151-164", "year": "2021", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/icme/2017/6067/0/08019343", "title": "Saliency-guided video classification via adaptively weighted learning", "doi": null, "abstractUrl": "/proceedings-article/icme/2017/08019343/12OmNvStcJC", "parentPublication": { "id": "proceedings/icme/2017/6067/0", "title": "2017 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2017/1032/0/1032b707", "title": "Learning Gaze Transitions from Depth to Improve Video Saliency Estimation", "doi": null, "abstractUrl": "/proceedings-article/iccv/2017/1032b707/12OmNwwMf0f", "parentPublication": { "id": "proceedings/iccv/2017/1032/0", "title": "2017 IEEE International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2018/6420/0/642000b420", "title": "Cube Padding for Weakly-Supervised Saliency Prediction in 360° Videos", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2018/642000b420/17D45WB0qcO", "parentPublication": { "id": "proceedings/cvpr/2018/6420/0", "title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iseeie/2022/6874/0/687400a097", "title": "Weakly-Supervised Audiovisual Network For Video Saliency Estimation", "doi": null, "abstractUrl": 
"/proceedings-article/iseeie/2022/687400a097/1FWmJtXCMpi", "parentPublication": { "id": "proceedings/iseeie/2022/6874/0", "title": "2022 International Symposium on Electrical, Electronics and Information Engineering (ISEEIE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2022/9062/0/09956579", "title": "Learning to Predict 3D Mesh Saliency", "doi": null, "abstractUrl": "/proceedings-article/icpr/2022/09956579/1IHpMRBlt04", "parentPublication": { "id": "proceedings/icpr/2022/9062/0", "title": "2022 26th International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2019/4803/0/480300i837", "title": "Deep Learning for Light Field Saliency Detection", "doi": null, "abstractUrl": "/proceedings-article/iccv/2019/480300i837/1hQqx7hSwEg", "parentPublication": { "id": "proceedings/iccv/2019/4803/0", "title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2019/4803/0/480300j784", "title": "Mesh R-CNN", "doi": null, "abstractUrl": "/proceedings-article/iccv/2019/480300j784/1hVlAhHbyIo", "parentPublication": { "id": "proceedings/iccv/2019/4803/0", "title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2020/1331/0/09102867", "title": "Ransp: Ranking Attention Network For Saliency Prediction On Omnidirectional Images", "doi": null, "abstractUrl": "/proceedings-article/icme/2020/09102867/1kwrafKJPYk", "parentPublication": { "id": "proceedings/icme/2020/1331/0", "title": "2020 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2020/1331/0/09102796", "title": 
"Mesh Saliency Detection Using Convolutional Neural Networks", "doi": null, "abstractUrl": "/proceedings-article/icme/2020/09102796/1kwrdkB8P3G", "parentPublication": { "id": "proceedings/icme/2020/1331/0", "title": "2020 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2021/4509/0/450900i849", "title": "Mesh Saliency: An Independent Perceptual Measure or A Derivative of Image Saliency?", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2021/450900i849/1yeK3a8my4M", "parentPublication": { "id": "proceedings/cvpr/2021/4509/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "08812922", "articleId": "1cPXcWXVlHq", "__typename": "AdjacentArticleType" }, "next": { "fno": "08809840", "articleId": "1cHE3iFCYpy", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "1p1d9FSt4R2", "name": "ttg202101-08765747s1-sup.pdf", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202101-08765747s1-sup.pdf", "extension": "pdf", "size": "16.9 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "12OmNvjgWIL", "title": "June", "year": "2014", "issueNum": "06", "idPrefix": "tg", "pubType": "journal", "volume": "20", "label": "June", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUyYSWsY", "doi": "10.1109/TVCG.2014.2298016", "abstract": "While image inpainting has recently become widely available in image manipulation tools, existing approaches to video inpainting typically do not even achieve interactive frame rates yet as they are highly computationally expensive. Further, they either apply severe restrictions on the movement of the camera or do not provide a high-quality coherent video stream. In this paper we will present our approach to high-quality real-time capable image and video inpainting. Our PixMix approach even allows for the manipulation of live video streams, providing the basis for real Diminished Reality (DR) applications. We will show how our approach generates coherent video streams dealing with quite heterogeneous background environments and non-trivial camera movements, even applying constraints in real-time.", "abstracts": [ { "abstractType": "Regular", "content": "While image inpainting has recently become widely available in image manipulation tools, existing approaches to video inpainting typically do not even achieve interactive frame rates yet as they are highly computationally expensive. Further, they either apply severe restrictions on the movement of the camera or do not provide a high-quality coherent video stream. In this paper we will present our approach to high-quality real-time capable image and video inpainting. Our PixMix approach even allows for the manipulation of live video streams, providing the basis for real Diminished Reality (DR) applications. 
We will show how our approach generates coherent video streams dealing with quite heterogeneous background environments and non-trivial camera movements, even applying constraints in real-time.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "While image inpainting has recently become widely available in image manipulation tools, existing approaches to video inpainting typically do not even achieve interactive frame rates yet as they are highly computationally expensive. Further, they either apply severe restrictions on the movement of the camera or do not provide a high-quality coherent video stream. In this paper we will present our approach to high-quality real-time capable image and video inpainting. Our PixMix approach even allows for the manipulation of live video streams, providing the basis for real Diminished Reality (DR) applications. We will show how our approach generates coherent video streams dealing with quite heterogeneous background environments and non-trivial camera movements, even applying constraints in real-time.", "title": "High-Quality Real-Time Video Inpaintingwith PixMix", "normalizedTitle": "High-Quality Real-Time Video Inpaintingwith PixMix", "fno": "06714519", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Streaming Media", "Real Time Systems", "Visualization", "Cameras", "Coherence", "Cost Function", "Image Resolution", "Object Removal", "Video Inpainting", "Diminished Reality", "Real Time", "Image Inpainting", "Image Completion" ], "authors": [ { "givenName": "Jan", "surname": "Herling", "fullName": "Jan Herling", "affiliation": "fayteq GmbH, Erfurt,", "__typename": "ArticleAuthorType" }, { "givenName": "Wolfgang", "surname": "Broll", "fullName": "Wolfgang Broll", "affiliation": "Virtual Worlds and Digtial Games group, Ilmenau University of Technology, Ilmenau, Germany", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, 
"issueNum": "06", "pubDate": "2014-06-01 00:00:00", "pubType": "trans", "pages": "866-879", "year": "2014", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/cvpr/2017/0457/0/08099917", "title": "High-Resolution Image Inpainting Using Multi-scale Neural Patch Synthesis", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2017/08099917/12OmNBqv2py", "parentPublication": { "id": "proceedings/cvpr/2017/0457/0", "title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccp/2011/707/0/05753126", "title": "Multiscale ultrawide foveated video extrapolation", "doi": null, "abstractUrl": "/proceedings-article/iccp/2011/05753126/12OmNwpGgHw", "parentPublication": { "id": "proceedings/iccp/2011/707/0", "title": "IEEE International Conference on Computational Photography (ICCP)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ism/2005/2489/0/24890802", "title": "A Global-Time-Based Approach for High-Quality Real-Time Video Streaming Services", "doi": null, "abstractUrl": "/proceedings-article/ism/2005/24890802/12OmNx9FhR0", "parentPublication": { "id": "proceedings/ism/2005/2489/0", "title": "Seventh IEEE International Symposium on Multimedia (ISM'05)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2012/4660/0/06402551", "title": "PixMix: A real-time approach to high-quality Diminished Reality", "doi": null, "abstractUrl": "/proceedings-article/ismar/2012/06402551/12OmNzvQI6W", "parentPublication": { "id": "proceedings/ismar/2012/4660/0", "title": "2012 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2018/06/07920337", "title": 
"Real-Time Video Stylization Using Object Flows", "doi": null, "abstractUrl": "/journal/tg/2018/06/07920337/13rRUxC0SEl", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cse-euc/2017/3220/1/08005816", "title": "Component-Based Distributed Framework for Coherent and Real-Time Video Dehazing", "doi": null, "abstractUrl": "/proceedings-article/cse-euc/2017/08005816/17D45WrVg4f", "parentPublication": { "id": "proceedings/cse-euc/2017/3220/1", "title": "2017 IEEE International Conference on Computational Science and Engineering (CSE) and IEEE International Conference on Embedded and Ubiquitous Computing (EUC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/07/08587214", "title": "Inpainting of Wide-Baseline Multiple Viewpoint Video", "doi": null, "abstractUrl": "/journal/tg/2020/07/08587214/17D45XwUAIi", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2021/2812/0/281200a782", "title": "Neural Video Portrait Relighting in Real-time via Consistency Modeling", "doi": null, "abstractUrl": "/proceedings-article/iccv/2021/281200a782/1BmEMHlM0Du", "parentPublication": { "id": "proceedings/iccv/2021/2812/0", "title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2021/2812/0/281200o4448", "title": "Occlusion-Aware Video Object Inpainting", "doi": null, "abstractUrl": "/proceedings-article/iccv/2021/281200o4448/1BmJrj3Gicg", "parentPublication": { "id": "proceedings/iccv/2021/2812/0", "title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600d501", "title": "DLFormer: Discrete Latent Transformer for Video Inpainting", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600d501/1H1i2uqCsXm", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "06671918", "articleId": "13rRUxOdD2E", "__typename": "AdjacentArticleType" }, "next": { "fno": "06767158", "articleId": "13rRUxYrbMh", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNxwENE7", "title": "May", "year": "2020", "issueNum": "05", "idPrefix": "tp", "pubType": "journal", "volume": "42", "label": "May", "downloadables": { "hasCover": true, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1fFWEDjNHPO", "doi": "10.1109/TPAMI.2019.2958083", "abstract": "Video inpainting aims to fill in spatio-temporal holes in videos with plausible content. Despite tremendous progress on deep learning-based inpainting of a single image, it is still challenging to extend these methods to video domain due to the additional time dimension. In this paper, we propose a recurrent temporal aggregation framework for fast deep video inpainting. In particular, we construct an encoder-decoder model, where the encoder takes multiple reference frames which can provide visible pixels revealed from the scene dynamics. These hints are aggregated and fed into the decoder. We apply a recurrent feedback in an auto-regressive manner to enforce temporal consistency in the video results. We propose two architectural designs based on this framework. Our first model is a blind video decaptioning network (BVDNet) that is designed to automatically remove and inpaint text overlays in videos without any mask information. Our BVDNet wins the first place in the ECCV Chalearn 2018 LAP Inpainting Competition Track 2: Video Decaptioning. Second, we propose a network for more general video inpainting (VINet) to deal with more arbitrary and larger holes. Video results demonstrate the advantage of our framework compared to state-of-the-art methods both qualitatively and quantitatively. The codes are available at https://github.com/mcahny/Deep-Video-Inpainting, and https://github.com/shwoo93/video_decaptioning.", "abstracts": [ { "abstractType": "Regular", "content": "Video inpainting aims to fill in spatio-temporal holes in videos with plausible content. 
Despite tremendous progress on deep learning-based inpainting of a single image, it is still challenging to extend these methods to video domain due to the additional time dimension. In this paper, we propose a recurrent temporal aggregation framework for fast deep video inpainting. In particular, we construct an encoder-decoder model, where the encoder takes multiple reference frames which can provide visible pixels revealed from the scene dynamics. These hints are aggregated and fed into the decoder. We apply a recurrent feedback in an auto-regressive manner to enforce temporal consistency in the video results. We propose two architectural designs based on this framework. Our first model is a blind video decaptioning network (BVDNet) that is designed to automatically remove and inpaint text overlays in videos without any mask information. Our BVDNet wins the first place in the ECCV Chalearn 2018 LAP Inpainting Competition Track 2: Video Decaptioning. Second, we propose a network for more general video inpainting (VINet) to deal with more arbitrary and larger holes. Video results demonstrate the advantage of our framework compared to state-of-the-art methods both qualitatively and quantitatively. The codes are available at https://github.com/mcahny/Deep-Video-Inpainting, and https://github.com/shwoo93/video_decaptioning.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Video inpainting aims to fill in spatio-temporal holes in videos with plausible content. Despite tremendous progress on deep learning-based inpainting of a single image, it is still challenging to extend these methods to video domain due to the additional time dimension. In this paper, we propose a recurrent temporal aggregation framework for fast deep video inpainting. In particular, we construct an encoder-decoder model, where the encoder takes multiple reference frames which can provide visible pixels revealed from the scene dynamics. 
These hints are aggregated and fed into the decoder. We apply a recurrent feedback in an auto-regressive manner to enforce temporal consistency in the video results. We propose two architectural designs based on this framework. Our first model is a blind video decaptioning network (BVDNet) that is designed to automatically remove and inpaint text overlays in videos without any mask information. Our BVDNet wins the first place in the ECCV Chalearn 2018 LAP Inpainting Competition Track 2: Video Decaptioning. Second, we propose a network for more general video inpainting (VINet) to deal with more arbitrary and larger holes. Video results demonstrate the advantage of our framework compared to state-of-the-art methods both qualitatively and quantitatively. The codes are available at https://github.com/mcahny/Deep-Video-Inpainting, and https://github.com/shwoo93/video_decaptioning.", "title": "Recurrent Temporal Aggregation Framework for Deep Video Inpainting", "normalizedTitle": "Recurrent Temporal Aggregation Framework for Deep Video Inpainting", "fno": "08931251", "hasPdf": true, "idPrefix": "tp", "keywords": [ "Autoregressive Processes", "Image Motion Analysis", "Image Sequences", "Learning Artificial Intelligence", "Video Signal Processing", "Video Results", "Recurrent Temporal Aggregation Framework", "Spatio Temporal Holes", "Deep Learning Based Inpainting", "Video Domain", "Additional Time Dimension", "Fast Deep Video Inpainting", "Encoder Decoder Model", "Recurrent Feedback", "Temporal Consistency", "Blind Video Decaptioning Network", "General Video Inpainting", "ECCV Chalearn 2018 LAP Inpainting Competition Track 2", "Streaming Media", "Task Analysis", "Cameras", "Three Dimensional Displays", "Semantics", "Decoding", "Image Restoration", "Video Inpainting", "Video Completion", "Video Object Removal", "Video Caption Removal", "Video Decaptioning", "Video Editing" ], "authors": [ { "givenName": "Dahun", "surname": "Kim", "fullName": "Dahun Kim", "affiliation": 
"Korea Advanced Institute of Science and Technology (KAIST), Yuseong-gu, Daejeon, South Korea", "__typename": "ArticleAuthorType" }, { "givenName": "Sanghyun", "surname": "Woo", "fullName": "Sanghyun Woo", "affiliation": "Korea Advanced Institute of Science and Technology (KAIST), Yuseong-gu, Daejeon, South Korea", "__typename": "ArticleAuthorType" }, { "givenName": "Joon-Young", "surname": "Lee", "fullName": "Joon-Young Lee", "affiliation": "Adobe Research, San Jose, CA, USA", "__typename": "ArticleAuthorType" }, { "givenName": "In So", "surname": "Kweon", "fullName": "In So Kweon", "affiliation": "Korea Advanced Institute of Science and Technology (KAIST), Yuseong-gu, Daejeon, South Korea", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "05", "pubDate": "2020-05-01 00:00:00", "pubType": "trans", "pages": "1038-1052", "year": "2020", "issn": "0162-8828", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/icme/2008/2570/0/04607559", "title": "Video coding with spatio-temporal texture synthesis and edge-based inpainting", "doi": null, "abstractUrl": "/proceedings-article/icme/2008/04607559/12OmNvT2p7M", "parentPublication": { "id": "proceedings/icme/2008/2570/0", "title": "2008 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/07/08587214", "title": "Inpainting of Wide-Baseline Multiple Viewpoint Video", "doi": null, "abstractUrl": "/journal/tg/2020/07/08587214/17D45XwUAIi", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2020/06/08642935", "title": "A Disocclusion Inpainting Framework for Depth-Based View Synthesis", "doi": 
null, "abstractUrl": "/journal/tp/2020/06/08642935/17PYElAbxtK", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2021/2812/0/281200i168", "title": "Frequency-Aware Spatiotemporal Transformers for Video Inpainting Detection", "doi": null, "abstractUrl": "/proceedings-article/iccv/2021/281200i168/1BmKJkcuULK", "parentPublication": { "id": "proceedings/iccv/2021/2812/0", "title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2019/3293/0/329300d718", "title": "Deep Flow-Guided Video Inpainting", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2019/329300d718/1gyrEiPH57y", "parentPublication": { "id": "proceedings/cvpr/2019/3293/0", "title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2019/3293/0/329300f785", "title": "Deep Video Inpainting", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2019/329300f785/1gyrROSF1Pa", "parentPublication": { "id": "proceedings/cvpr/2019/3293/0", "title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2019/3293/0/08954445", "title": "Deep Blind Video Decaptioning by Temporal Aggregation and Recurrence", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2019/08954445/1gyscPuax1e", "parentPublication": { "id": "proceedings/cvpr/2019/3293/0", "title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2019/4803/0/480300e412", 
"title": "Copy-and-Paste Networks for Deep Video Inpainting", "doi": null, "abstractUrl": "/proceedings-article/iccv/2019/480300e412/1hQqrjCYko0", "parentPublication": { "id": "proceedings/iccv/2019/4803/0", "title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2019/4803/0/480300j065", "title": "Free-Form Video Inpainting With 3D Gated Convolution and Temporal PatchGAN", "doi": null, "abstractUrl": "/proceedings-article/iccv/2019/480300j065/1hVlGyKqw2Q", "parentPublication": { "id": "proceedings/iccv/2019/4803/0", "title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2019/4803/0/480300a283", "title": "Vision-Infused Deep Audio Inpainting", "doi": null, "abstractUrl": "/proceedings-article/iccv/2019/480300a283/1hVlUf6UINO", "parentPublication": { "id": "proceedings/iccv/2019/4803/0", "title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "08941241", "articleId": "1g1FHj1eclW", "__typename": "AdjacentArticleType" }, "next": { "fno": "08892406", "articleId": "1eJR40lgVfG", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "1iHzKM5PHLW", "name": "ttp202005-08931251s1-supp1-2958083.mp4", "location": "https://www.computer.org/csdl/api/v1/extra/ttp202005-08931251s1-supp1-2958083.mp4", "extension": "mp4", "size": "34.8 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "12OmNqzu6X2", "title": "Apr.-Jun.", "year": "2018", "issueNum": "02", "idPrefix": "mu", "pubType": "magazine", "volume": "25", "label": "Apr.-Jun.", "downloadables": { "hasCover": true, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUxASumS", "doi": "10.1109/MMUL.2018.023121161", "abstract": "With the recent resurgence of neural networks and the proliferation of massive amounts of unlabeled multimodal data, recommendation systems and multimodal retrieval systems based on continuous representation spaces and deep learning methods are becoming of great interest. Multimodal representations are typically obtained with autoencoders that reconstruct multimodal data. In this article, we describe an alternative method to perform high-level multimodal fusion that leverages crossmodal translation by means of symmetrical encoders cast into a bidirectional deep neural network (BiDNN). Using the lessons learned from multimodal retrieval, we present a BiDNN-based system that performs video hyperlinking and recommends interesting video segments to a viewer. Results established using TRECVIDs 2016 video hyperlinking benchmarking initiative show that our method obtained the best score, thus defining the state of the art.", "abstracts": [ { "abstractType": "Regular", "content": "With the recent resurgence of neural networks and the proliferation of massive amounts of unlabeled multimodal data, recommendation systems and multimodal retrieval systems based on continuous representation spaces and deep learning methods are becoming of great interest. Multimodal representations are typically obtained with autoencoders that reconstruct multimodal data. In this article, we describe an alternative method to perform high-level multimodal fusion that leverages crossmodal translation by means of symmetrical encoders cast into a bidirectional deep neural network (BiDNN). 
Using the lessons learned from multimodal retrieval, we present a BiDNN-based system that performs video hyperlinking and recommends interesting video segments to a viewer. Results established using TRECVIDs 2016 video hyperlinking benchmarking initiative show that our method obtained the best score, thus defining the state of the art.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "With the recent resurgence of neural networks and the proliferation of massive amounts of unlabeled multimodal data, recommendation systems and multimodal retrieval systems based on continuous representation spaces and deep learning methods are becoming of great interest. Multimodal representations are typically obtained with autoencoders that reconstruct multimodal data. In this article, we describe an alternative method to perform high-level multimodal fusion that leverages crossmodal translation by means of symmetrical encoders cast into a bidirectional deep neural network (BiDNN). Using the lessons learned from multimodal retrieval, we present a BiDNN-based system that performs video hyperlinking and recommends interesting video segments to a viewer. 
Results established using TRECVIDs 2016 video hyperlinking benchmarking initiative show that our method obtained the best score, thus defining the state of the art.", "title": "A Crossmodal Approach to Multimodal Fusion in Video Hyperlinking", "normalizedTitle": "A Crossmodal Approach to Multimodal Fusion in Video Hyperlinking", "fno": "mmu2018020011", "hasPdf": true, "idPrefix": "mu", "keywords": [ "Image Retrieval", "Learning Artificial Intelligence", "Multimedia Computing", "Neural Nets", "Recommender Systems", "High Level Multimodal Fusion", "Leverages Crossmodal Translation", "Symmetrical Encoders", "Bidirectional Deep Neural Network", "Bi DNN Based System", "Interesting Video Segments", "TRECVI Ds 2016 Video Hyperlinking Benchmarking Initiative Show", "Crossmodal Approach", "Recent Resurgence", "Neural Networks", "Unlabeled Multimodal Data", "Recommendation Systems", "Multimodal Retrieval Systems", "Continuous Representation Spaces", "Deep Learning Methods", "Multimodal Representations", "Computer Architecture", "Task Analysis", "Neural Networks", "Visualization", "Streaming Media", "Hypertext Systems", "Training", "Crossmodal", "Multimodal Fusion", "Video Hyperlinking", "Multimodal Autoencoders", "Multimodal Retrieval", "Neural Networks", "Deep Learning", "Unsupervised Representation Learning", "Video Retrieval", "Multimedia" ], "authors": [ { "givenName": "Vedran", "surname": "Vukotić", "fullName": "Vedran Vukotić", "affiliation": "INRIA/IRISA Rennes and INSA Rennes", "__typename": "ArticleAuthorType" }, { "givenName": "Christian", "surname": "Raymond", "fullName": "Christian Raymond", "affiliation": "INRIA/IRISA Rennes and INSA Rennes", "__typename": "ArticleAuthorType" }, { "givenName": "Guillaume", "surname": "Gravier", "fullName": "Guillaume Gravier", "affiliation": "INRIA/IRISA Rennes and CNRS", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "02", 
"pubDate": "2018-04-01 00:00:00", "pubType": "mags", "pages": "11-23", "year": "2018", "issn": "1070-986X", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/icme/2007/1016/0/04284946", "title": "Dynamic Multimodal Fusion in Video Search", "doi": null, "abstractUrl": "/proceedings-article/icme/2007/04284946/12OmNCmpcS0", "parentPublication": { "id": "proceedings/icme/2007/1016/0", "title": "2007 International Conference on Multimedia & Expo", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2011/348/0/06012242", "title": "Just-in-time multimodal association and fusion from home entertainment", "doi": null, "abstractUrl": "/proceedings-article/icme/2011/06012242/12OmNqBKTMh", "parentPublication": { "id": "proceedings/icme/2011/348/0", "title": "2011 IEEE International Conference on Multimedia and Expo", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2017/1032/0/08237712", "title": "Attention-Based Multimodal Fusion for Video Description", "doi": null, "abstractUrl": "/proceedings-article/iccv/2017/08237712/12OmNvFHfIH", "parentPublication": { "id": "proceedings/iccv/2017/1032/0", "title": "2017 IEEE International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iscsct/2008/3498/2/3498b489", "title": "A Novel Video Searching Model Based on Ontology Inference and Multimodal Information Fusion", "doi": null, "abstractUrl": "/proceedings-article/iscsct/2008/3498b489/12OmNy2rS9g", "parentPublication": { "id": "proceedings/iscsct/2008/3498/1", "title": "2008 International Symposium on Computer Science and Computational Technology (ISCSCT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/07/08585103", "title": "Multimodal Analysis of Video 
Collections: Visual Exploration of Presentation Techniques in TED Talks", "doi": null, "abstractUrl": "/journal/tg/2020/07/08585103/17D45WUj91g", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdsba/2021/4590/0/459000a131", "title": "Design and Implementation of Multimodal Video Retrieval System", "doi": null, "abstractUrl": "/proceedings-article/icdsba/2021/459000a131/1AH7GyPxIPe", "parentPublication": { "id": "proceedings/icdsba/2021/4590/0", "title": "2021 5th Annual International Conference on Data Science and Business Analytics (ICDSBA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2021/2812/0/281200i128", "title": "Attention is not Enough: Mitigating the Distribution Discrepancy in Asynchronous Multimodal Sequence Fusion", "doi": null, "abstractUrl": "/proceedings-article/iccv/2021/281200i128/1BmEWHsMrC0", "parentPublication": { "id": "proceedings/iccv/2021/2812/0", "title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bigmm/2022/5963/0/596300a079", "title": "Multi-turn Query with Similarity Feedback Facilitates Multimodal Video Clip Retrieval", "doi": null, "abstractUrl": "/proceedings-article/bigmm/2022/596300a079/1JvaNKdifVS", "parentPublication": { "id": "proceedings/bigmm/2022/5963/0", "title": "2022 IEEE Eighth International Conference on Multimedia Big Data (BigMM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ta/2021/03/08762142", "title": "EmoBed: Strengthening Monomodal Emotion Recognition via Training with Crossmodal Emotion Embeddings", "doi": null, "abstractUrl": "/journal/ta/2021/03/08762142/1bIevePSL7i", "parentPublication": { "id": "trans/ta", "title": 
"IEEE Transactions on Affective Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccvw/2019/5023/0/502300b868", "title": "Fusion of Multimodal Embeddings for Ad-Hoc Video Search", "doi": null, "abstractUrl": "/proceedings-article/iccvw/2019/502300b868/1i5mBTIT5qU", "parentPublication": { "id": "proceedings/iccvw/2019/5023/0", "title": "2019 IEEE/CVF International Conference on Computer Vision Workshop (ICCVW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "mmu2018020007", "articleId": "13rRUwjGoIk", "__typename": "AdjacentArticleType" }, "next": { "fno": "mmu2018020024", "articleId": "13rRUy08MCa", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNxEjY42", "title": "Feb.", "year": "2019", "issueNum": "02", "idPrefix": "tp", "pubType": "journal", "volume": "41", "label": "Feb.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "17D45WZZ7CG", "doi": "10.1109/TPAMI.2018.2798607", "abstract": "Our experience of the world is multimodal - we see objects, hear sounds, feel texture, smell odors, and taste flavors. <italic>Modality</italic> refers to the way in which something happens or is experienced and a research problem is characterized as <italic>multimodal</italic> when it includes multiple such modalities. In order for Artificial Intelligence to make progress in understanding the world around us, it needs to be able to interpret such multimodal signals together. <italic>Multimodal machine learning</italic> aims to build models that can process and relate information from multiple modalities. It is a vibrant multi-disciplinary field of increasing importance and with extraordinary potential. Instead of focusing on specific multimodal applications, this paper surveys the recent advances in multimodal machine learning itself and presents them in a common taxonomy. We go beyond the typical early and late fusion categorization and identify broader challenges that are faced by multimodal machine learning, namely: representation, translation, alignment, fusion, and co-learning. This new taxonomy will enable researchers to better understand the state of the field and identify directions for future research.", "abstracts": [ { "abstractType": "Regular", "content": "Our experience of the world is multimodal - we see objects, hear sounds, feel texture, smell odors, and taste flavors. <italic>Modality</italic> refers to the way in which something happens or is experienced and a research problem is characterized as <italic>multimodal</italic> when it includes multiple such modalities. 
In order for Artificial Intelligence to make progress in understanding the world around us, it needs to be able to interpret such multimodal signals together. <italic>Multimodal machine learning</italic> aims to build models that can process and relate information from multiple modalities. It is a vibrant multi-disciplinary field of increasing importance and with extraordinary potential. Instead of focusing on specific multimodal applications, this paper surveys the recent advances in multimodal machine learning itself and presents them in a common taxonomy. We go beyond the typical early and late fusion categorization and identify broader challenges that are faced by multimodal machine learning, namely: representation, translation, alignment, fusion, and co-learning. This new taxonomy will enable researchers to better understand the state of the field and identify directions for future research.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Our experience of the world is multimodal - we see objects, hear sounds, feel texture, smell odors, and taste flavors. Modality refers to the way in which something happens or is experienced and a research problem is characterized as multimodal when it includes multiple such modalities. In order for Artificial Intelligence to make progress in understanding the world around us, it needs to be able to interpret such multimodal signals together. Multimodal machine learning aims to build models that can process and relate information from multiple modalities. It is a vibrant multi-disciplinary field of increasing importance and with extraordinary potential. Instead of focusing on specific multimodal applications, this paper surveys the recent advances in multimodal machine learning itself and presents them in a common taxonomy. 
We go beyond the typical early and late fusion categorization and identify broader challenges that are faced by multimodal machine learning, namely: representation, translation, alignment, fusion, and co-learning. This new taxonomy will enable researchers to better understand the state of the field and identify directions for future research.", "title": "Multimodal Machine Learning: A Survey and Taxonomy", "normalizedTitle": "Multimodal Machine Learning: A Survey and Taxonomy", "fno": "08269806", "hasPdf": true, "idPrefix": "tp", "keywords": [ "Speech Recognition", "Visualization", "Media", "Speech", "Multimedia Communication", "Streaming Media", "Hidden Markov Models", "Multimodal", "Machine Learning", "Introductory", "Survey" ], "authors": [ { "givenName": "Tadas", "surname": "Baltrušaitis", "fullName": "Tadas Baltrušaitis", "affiliation": "Microsoft Corporation, Cambridge, United Kingdom", "__typename": "ArticleAuthorType" }, { "givenName": "Chaitanya", "surname": "Ahuja", "fullName": "Chaitanya Ahuja", "affiliation": "Carnegie Mellon University, Pittsburgh, PA", "__typename": "ArticleAuthorType" }, { "givenName": "Louis-Philippe", "surname": "Morency", "fullName": "Louis-Philippe Morency", "affiliation": "Carnegie Mellon University, Pittsburgh, PA", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "02", "pubDate": "2019-02-01 00:00:00", "pubType": "trans", "pages": "423-443", "year": "2019", "issn": "0162-8828", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/ispaw/2011/4429/0/05951970", "title": "A System Proposal for Multimodal Retrieval of Multimedia Documents", "doi": null, "abstractUrl": "/proceedings-article/ispaw/2011/05951970/12OmNC3Xhy5", "parentPublication": { "id": "proceedings/ispaw/2011/4429/0", "title": "Parallel and Distributed Processing with Applications Workshops, IEEE 
International Symposium on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2016/8851/0/8851d574", "title": "Temporal Multimodal Learning in Audiovisual Speech Recognition", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2016/8851d574/12OmNqGiu9C", "parentPublication": { "id": "proceedings/cvpr/2016/8851/0", "title": "2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2017/4822/0/07926723", "title": "Deep Spatio-Temporal Features for Multimodal Emotion Recognition", "doi": null, "abstractUrl": "/proceedings-article/wacv/2017/07926723/12OmNrkT7Fk", "parentPublication": { "id": "proceedings/wacv/2017/4822/0", "title": "2017 IEEE Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ism/2015/0379/0/0379a172", "title": "Probabilistic Ensemble Fusion for Multimodal Word Sense Disambiguation", "doi": null, "abstractUrl": "/proceedings-article/ism/2015/0379a172/12OmNvA1h9c", "parentPublication": { "id": "proceedings/ism/2015/0379/0", "title": "2015 IEEE International Symposium on Multimedia (ISM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmip/2017/5954/0/5954a309", "title": "Framing Foundational Taxonomy of Multimedia Tangibility in Education Setting for Children", "doi": null, "abstractUrl": "/proceedings-article/icmip/2017/5954a309/12OmNyQ7FUY", "parentPublication": { "id": "proceedings/icmip/2017/5954/0", "title": "2017 2nd International Conference on Multimedia and Image Processing (ICMIP)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ftdcs/1993/4430/0/00344170", "title": "A taxonomy on multimedia synchronization", "doi": null, "abstractUrl": 
"/proceedings-article/ftdcs/1993/00344170/12OmNyTOslv", "parentPublication": { "id": "proceedings/ftdcs/1993/4430/0", "title": "1993 4th Workshop on Future Trends of Distributed Computing Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2017/0457/0/0457c925", "title": "Hierarchical Multimodal Metric Learning for Multimodal Classification", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2017/0457c925/12OmNyo1o3D", "parentPublication": { "id": "proceedings/cvpr/2017/0457/0", "title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iotdi/2022/9641/0/964100a043", "title": "Multimodal Federated Learning on IoT Data", "doi": null, "abstractUrl": "/proceedings-article/iotdi/2022/964100a043/1ErraMJTd2U", "parentPublication": { "id": "proceedings/iotdi/2022/9641/0", "title": "2022 IEEE/ACM Seventh International Conference on Internet-of-Things Design and Implementation (IoTDI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bigcomp/2023/7578/0/757800a235", "title": "MMTS: Multimodal Teacher-Student learning for One-Shot Human Action Recognition", "doi": null, "abstractUrl": "/proceedings-article/bigcomp/2023/757800a235/1LFLz8IdlNS", "parentPublication": { "id": "proceedings/bigcomp/2023/7578/0", "title": "2023 IEEE International Conference on Big Data and Smart Computing (BigComp)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tk/2023/01/09429985", "title": "Informed Machine Learning &#x2013; A Taxonomy and Survey of Integrating Prior Knowledge into Learning Systems", "doi": null, "abstractUrl": "/journal/tk/2023/01/09429985/1txPlDYaUwg", "parentPublication": { "id": "trans/tk", "title": "IEEE Transactions on Knowledge & Data Engineering", "__typename": "ParentPublication" }, 
"__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "08263148", "articleId": "17D45W1Oa1R", "__typename": "AdjacentArticleType" }, "next": { "fno": "08246552", "articleId": "17D45VUZMW8", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNzICEFz", "title": "July-Sept.", "year": "2017", "issueNum": "03", "idPrefix": "th", "pubType": "journal", "volume": "10", "label": "July-Sept.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUwcS1D9", "doi": "10.1109/TOH.2016.2604305", "abstract": "This paper presents a method to generate a haptic illusion of compliance using a vibrotactile actuator when a tangential force is applied to a rigid surface. The novel method builds on a conceptual compliance model where a physical object moves on a textured surface in response to a tangential force. The method plays vibration patterns simulating friction-induced vibrations as an applied tangential force changes. We built a prototype consisting of a two-dimensional tangential force sensor and a surface transducer to test the effectiveness of the model. Participants in user experiments with the prototype perceived the rigid surface of the prototype as a moving, rubber-like plate. The main findings of the experiments are: 1) the perceived stiffness of a simulated material can be controlled by controlling the force-playback transfer function, 2) its perceptual properties such as softness and pleasantness can be controlled by changing friction grain parameters, and 3) the use of the vibrotactile compliance feedback reduces participants’ workload including physical demand and frustration while performing a force repetition task.", "abstracts": [ { "abstractType": "Regular", "content": "This paper presents a method to generate a haptic illusion of compliance using a vibrotactile actuator when a tangential force is applied to a rigid surface. The novel method builds on a conceptual compliance model where a physical object moves on a textured surface in response to a tangential force. The method plays vibration patterns simulating friction-induced vibrations as an applied tangential force changes. 
We built a prototype consisting of a two-dimensional tangential force sensor and a surface transducer to test the effectiveness of the model. Participants in user experiments with the prototype perceived the rigid surface of the prototype as a moving, rubber-like plate. The main findings of the experiments are: 1) the perceived stiffness of a simulated material can be controlled by controlling the force-playback transfer function, 2) its perceptual properties such as softness and pleasantness can be controlled by changing friction grain parameters, and 3) the use of the vibrotactile compliance feedback reduces participants’ workload including physical demand and frustration while performing a force repetition task.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "This paper presents a method to generate a haptic illusion of compliance using a vibrotactile actuator when a tangential force is applied to a rigid surface. The novel method builds on a conceptual compliance model where a physical object moves on a textured surface in response to a tangential force. The method plays vibration patterns simulating friction-induced vibrations as an applied tangential force changes. We built a prototype consisting of a two-dimensional tangential force sensor and a surface transducer to test the effectiveness of the model. Participants in user experiments with the prototype perceived the rigid surface of the prototype as a moving, rubber-like plate. 
The main findings of the experiments are: 1) the perceived stiffness of a simulated material can be controlled by controlling the force-playback transfer function, 2) its perceptual properties such as softness and pleasantness can be controlled by changing friction grain parameters, and 3) the use of the vibrotactile compliance feedback reduces participants’ workload including physical demand and frustration while performing a force repetition task.", "title": "Vibrotactile Compliance Feedback for Tangential Force Interaction", "normalizedTitle": "Vibrotactile Compliance Feedback for Tangential Force Interaction", "fno": "07556272", "hasPdf": true, "idPrefix": "th", "keywords": [ "Force", "Vibrations", "Haptic Interfaces", "Prototypes", "Friction", "Springs", "Force Measurement", "Haptic I O", "Input Devices And Strategies", "Tactile Displays" ], "authors": [ { "givenName": "Seongkook", "surname": "Heo", "fullName": "Seongkook Heo", "affiliation": "School of Computing, KAIST, Daejeon, South Korea", "__typename": "ArticleAuthorType" }, { "givenName": "Geehyuk", "surname": "Lee", "fullName": "Geehyuk Lee", "affiliation": "School of Computing, KAIST, Daejeon, South Korea", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "03", "pubDate": "2017-07-01 00:00:00", "pubType": "trans", "pages": "444-455", "year": "2017", "issn": "1939-1412", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/haptics/2010/6821/0/05444652", "title": "Virtual object manipulation system with substitutive display of tangential force and slip by control of vibrotactile phantom sensation", "doi": null, "abstractUrl": "/proceedings-article/haptics/2010/05444652/12OmNAFWOO7", "parentPublication": { "id": "proceedings/haptics/2010/6821/0", "title": "2010 IEEE Haptics Symposium (Formerly known as Symposium on Haptic Interfaces for 
Virtual Environment and Teleoperator Systems)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2017/6647/0/07892274", "title": "Mechanism of integrating force and vibrotactile cues for 3D user interaction within virtual environments", "doi": null, "abstractUrl": "/proceedings-article/vr/2017/07892274/12OmNqH9hid", "parentPublication": { "id": "proceedings/vr/2017/6647/0", "title": "2017 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cw/2011/4467/0/4467a062", "title": "Comparison of Force and Vibrotactile Feedback with Direct Stimulation for Texture Recognition", "doi": null, "abstractUrl": "/proceedings-article/cw/2011/4467a062/12OmNy9PriF", "parentPublication": { "id": "proceedings/cw/2011/4467/0", "title": "2011 International Conference on Cyberworlds", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/haptics/2008/2005/0/04479920", "title": "Assessment of Vibrotactile Feedback in a Needle-Insertion Task using a Surgical Robot", "doi": null, "abstractUrl": "/proceedings-article/haptics/2008/04479920/12OmNyOq4T4", "parentPublication": { "id": "proceedings/haptics/2008/2005/0", "title": "IEEE Haptics Symposium 2008", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/th/2013/03/tth2013030376", "title": "Effect of Dynamic Vibrotactile Feedback on the Control of Isometric Finger Force", "doi": null, "abstractUrl": "/journal/th/2013/03/tth2013030376/13rRUNvyatr", "parentPublication": { "id": "trans/th", "title": "IEEE Transactions on Haptics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/th/2016/03/07452650", "title": "Non-Colocated Kinesthetic Display Limits Compliance Discrimination in the Absence of Terminal Force Cues", "doi": null, "abstractUrl": "/journal/th/2016/03/07452650/13rRUwjoNxb", 
"parentPublication": { "id": "trans/th", "title": "IEEE Transactions on Haptics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/th/2013/01/tth2013010013", "title": "Comparison of Visual and Vibrotactile Feedback Methods for Seated Posture Guidance", "doi": null, "abstractUrl": "/journal/th/2013/01/tth2013010013/13rRUxcKzVp", "parentPublication": { "id": "trans/th", "title": "IEEE Transactions on Haptics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/th/2017/01/07502124", "title": "Vibrotactile Sensitivity in Active Touch: Effect of Pressing Force", "doi": null, "abstractUrl": "/journal/th/2017/01/07502124/13rRUxjQyvy", "parentPublication": { "id": "trans/th", "title": "IEEE Transactions on Haptics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/th/2016/02/07401066", "title": "Anticipatory Vibrotactile Cueing Facilitates Grip Force Adjustment during Perturbative Loading", "doi": null, "abstractUrl": "/journal/th/2016/02/07401066/13rRUyueghh", "parentPublication": { "id": "trans/th", "title": "IEEE Transactions on Haptics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08798168", "title": "No Strings Attached: Force and Vibrotactile Feedback in a Virtual Guitar Simulation", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08798168/1cJ1h486id2", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "07784717", "articleId": "13rRUxASuAC", "__typename": "AdjacentArticleType" }, "next": null, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNqG0SRY", "title": "January-March", "year": "2011", "issueNum": "01", "idPrefix": "th", "pubType": "journal", "volume": "4", "label": "January-March", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUygBw7j", "doi": "10.1109/TOH.2010.42", "abstract": "This paper considers moments due to friction forces on the human fingertip. A computational technique called the friction moment arc method is presented. The method computes the static and/or dynamic friction moment independent of a friction force calculation. In addition, a new finger holder to display friction moment is presented. This device incorporates a small brushless motor and disk, and connects the human's finger to an interface finger of the five-fingered haptic interface robot HIRO II. Subjects' perception of friction moment while wearing the finger holder, as well as perceptions during object manipulation in a virtual reality environment, were evaluated experimentally.", "abstracts": [ { "abstractType": "Regular", "content": "This paper considers moments due to friction forces on the human fingertip. A computational technique called the friction moment arc method is presented. The method computes the static and/or dynamic friction moment independent of a friction force calculation. In addition, a new finger holder to display friction moment is presented. This device incorporates a small brushless motor and disk, and connects the human's finger to an interface finger of the five-fingered haptic interface robot HIRO II. Subjects' perception of friction moment while wearing the finger holder, as well as perceptions during object manipulation in a virtual reality environment, were evaluated experimentally.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "This paper considers moments due to friction forces on the human fingertip. 
A computational technique called the friction moment arc method is presented. The method computes the static and/or dynamic friction moment independent of a friction force calculation. In addition, a new finger holder to display friction moment is presented. This device incorporates a small brushless motor and disk, and connects the human's finger to an interface finger of the five-fingered haptic interface robot HIRO II. Subjects' perception of friction moment while wearing the finger holder, as well as perceptions during object manipulation in a virtual reality environment, were evaluated experimentally.", "title": "Perception and Haptic Rendering of Friction Moments", "normalizedTitle": "Perception and Haptic Rendering of Friction Moments", "fno": "tth2011010028", "hasPdf": true, "idPrefix": "th", "keywords": [ "Friction", "Haptic Interfaces", "Force", "Fingers", "Rendering Computer Graphics", "Humans", "Dynamics", "Virtual Reality", "Haptic Interface", "Rendering", "Friction" ], "authors": [ { "givenName": "Haruhisa", "surname": "Kawasaki", "fullName": "Haruhisa Kawasaki", "affiliation": "Gifu University, Gifu", "__typename": "ArticleAuthorType" }, { "givenName": "Yoshio", "surname": "Ohtuka", "fullName": "Yoshio Ohtuka", "affiliation": "Gifu University, Gifu", "__typename": "ArticleAuthorType" }, { "givenName": "Shinya", "surname": "Koide", "fullName": "Shinya Koide", "affiliation": "Gifu University, Gifu", "__typename": "ArticleAuthorType" }, { "givenName": "Tetsuya", "surname": "Mouri", "fullName": "Tetsuya Mouri", "affiliation": "Gifu University, Gifu", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "01", "pubDate": "2011-01-01 00:00:00", "pubType": "trans", "pages": "28-38", "year": "2011", "issn": "1939-1412", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": 
"proceedings/haptics/2010/6821/0/05444636", "title": "Friction measurements on a Large Area TPaD", "doi": null, "abstractUrl": "/proceedings-article/haptics/2010/05444636/12OmNyLiuuN", "parentPublication": { "id": "proceedings/haptics/2010/6821/0", "title": "2010 IEEE Haptics Symposium (Formerly known as Symposium on Haptic Interfaces for Virtual Environment and Teleoperator Systems)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/whc/2007/2738/0/04145163", "title": "Tactile Perception of Rotational Sliding", "doi": null, "abstractUrl": "/proceedings-article/whc/2007/04145163/12OmNzd7bxr", "parentPublication": { "id": "proceedings/whc/2007/2738/0", "title": "2007 2nd Joint EuroHaptics Conference and Symposium on Haptic Interfaces for Virtual Environments and Teleoperator Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/th/2017/02/07563434", "title": "Enhancing Variable Friction Tactile Display Using an Ultrasonic Travelling Wave", "doi": null, "abstractUrl": "/journal/th/2017/02/07563434/13rRUILtJr7", "parentPublication": { "id": "trans/th", "title": "IEEE Transactions on Haptics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/th/2011/01/tth2011010014", "title": "Five-Fingered Haptic Interface Robot: HIRO III", "doi": null, "abstractUrl": "/journal/th/2011/01/tth2011010014/13rRUwghd9f", "parentPublication": { "id": "trans/th", "title": "IEEE Transactions on Haptics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/th/2017/02/07858777", "title": "Friction Reduction through Ultrasonic Vibration Part 1: Modelling Intermittent Contact", "doi": null, "abstractUrl": "/journal/th/2017/02/07858777/13rRUxZRboa", "parentPublication": { "id": "trans/th", "title": "IEEE Transactions on Haptics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { 
"id": "trans/th/2009/04/tth2009040212", "title": "Fingerpad Skin Stretch Increases the Perception of Virtual Friction", "doi": null, "abstractUrl": "/journal/th/2009/04/tth2009040212/13rRUxly8T7", "parentPublication": { "id": "trans/th", "title": "IEEE Transactions on Haptics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/th/2015/02/07102756", "title": "Physical and Perceptual Independence of Ultrasonic Vibration and Electrovibration for Friction Modulation", "doi": null, "abstractUrl": "/journal/th/2015/02/07102756/13rRUy3gn7G", "parentPublication": { "id": "trans/th", "title": "IEEE Transactions on Haptics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/05/09714041", "title": "FrictShoes: Providing Multilevel Nonuniform Friction Feedback on Shoes in VR", "doi": null, "abstractUrl": "/journal/tg/2022/05/09714041/1B0XXsRVUIM", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2019/4765/0/476500a382", "title": "FrictionHaptics : Encountered-Type Haptic Device forTangential Friction Emulation", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2019/476500a382/1gysjOSOaeA", "parentPublication": { "id": "proceedings/ismar-adjunct/2019/4765/0", "title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aiea/2020/8288/0/828800a396", "title": "A Novel Friction Compensation Method Based on a Hybrid Model for Vision-Based Optoelectronic Platform", "doi": null, "abstractUrl": "/proceedings-article/aiea/2020/828800a396/1nTujnEcLOE", "parentPublication": { "id": "proceedings/aiea/2020/8288/0", "title": "2020 International Conference on Artificial 
Intelligence and Electromechanical Automation (AIEA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "tth2011010014", "articleId": "13rRUwghd9f", "__typename": "AdjacentArticleType" }, "next": { "fno": "tth2011010039", "articleId": "13rRUwIF6le", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "1MNboCLDDZC", "title": "June", "year": "2023", "issueNum": "06", "idPrefix": "tk", "pubType": "journal", "volume": "35", "label": "June", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1D4HcJ0VYNq", "doi": "10.1109/TKDE.2022.3172167", "abstract": "Traditional outlier detections are inadequate for high-dimensional data analysis due to the interference of distance tending to be concentrated (&#x201C;curse of dimensionality&#x201D;). Inspired by the Coulomb&#x2019;s law, we propose a new high-dimensional data similarity measure vector, which consists of outlier Coulomb force and outlier Coulomb resultant force. Outlier Coulomb force not only effectively gauges similarity measures among data objects, but also fully reflects differences among dimensions of data objects by vector projection in each dimension. More importantly, Coulomb resultant force can effectively measure deviations of data objects from a data center, making detection results interpretable. We introduce a new neighborhood outlier factor, which drives the development of a high-dimensional outlier detection algorithm. In our approach, attribute values with a high deviation degree is treated as interpretable information of outlier data. Finally, we implement and evaluate our algorithm using the UCI and synthetic datasets. Our experimental results show that the algorithm effectively alleviates the interference of &#x201C;Curse of Dimensionality&#x201D;. The findings confirm that high-dimensional outlier data originated by the algorithm are interpretable.", "abstracts": [ { "abstractType": "Regular", "content": "Traditional outlier detections are inadequate for high-dimensional data analysis due to the interference of distance tending to be concentrated (&#x201C;curse of dimensionality&#x201D;). 
Inspired by the Coulomb&#x2019;s law, we propose a new high-dimensional data similarity measure vector, which consists of outlier Coulomb force and outlier Coulomb resultant force. Outlier Coulomb force not only effectively gauges similarity measures among data objects, but also fully reflects differences among dimensions of data objects by vector projection in each dimension. More importantly, Coulomb resultant force can effectively measure deviations of data objects from a data center, making detection results interpretable. We introduce a new neighborhood outlier factor, which drives the development of a high-dimensional outlier detection algorithm. In our approach, attribute values with a high deviation degree is treated as interpretable information of outlier data. Finally, we implement and evaluate our algorithm using the UCI and synthetic datasets. Our experimental results show that the algorithm effectively alleviates the interference of &#x201C;Curse of Dimensionality&#x201D;. The findings confirm that high-dimensional outlier data originated by the algorithm are interpretable.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Traditional outlier detections are inadequate for high-dimensional data analysis due to the interference of distance tending to be concentrated (“curse of dimensionality”). Inspired by the Coulomb’s law, we propose a new high-dimensional data similarity measure vector, which consists of outlier Coulomb force and outlier Coulomb resultant force. Outlier Coulomb force not only effectively gauges similarity measures among data objects, but also fully reflects differences among dimensions of data objects by vector projection in each dimension. More importantly, Coulomb resultant force can effectively measure deviations of data objects from a data center, making detection results interpretable. We introduce a new neighborhood outlier factor, which drives the development of a high-dimensional outlier detection algorithm. 
In our approach, attribute values with a high deviation degree is treated as interpretable information of outlier data. Finally, we implement and evaluate our algorithm using the UCI and synthetic datasets. Our experimental results show that the algorithm effectively alleviates the interference of “Curse of Dimensionality”. The findings confirm that high-dimensional outlier data originated by the algorithm are interpretable.", "title": "A High-Dimensional Outlier Detection Approach Based on Local Coulomb Force", "normalizedTitle": "A High-Dimensional Outlier Detection Approach Based on Local Coulomb Force", "fno": "09766449", "hasPdf": true, "idPrefix": "tk", "keywords": [ "Force", "Anomaly Detection", "Task Analysis", "Interference", "Force Measurement", "Indexes", "Euclidean Distance", "High Dimensional Outlier Detection", "Similarity Metric", "Outlier Coulomb Resultant Force", "Local Outlier Coulomb Force", "Neighborhood Outlier Factor" ], "authors": [ { "givenName": "Pengyun", "surname": "Zhu", "fullName": "Pengyun Zhu", "affiliation": "School of Computer Science and Technology, Taiyuan University of Science and Technology (TYUST), Taiyuan, Shanxi, China", "__typename": "ArticleAuthorType" }, { "givenName": "Chaowei", "surname": "Zhang", "fullName": "Chaowei Zhang", "affiliation": "Department of Computer Science, Yangzhou University, Yangzhou, Jiangsu, China", "__typename": "ArticleAuthorType" }, { "givenName": "Xiaofeng", "surname": "Li", "fullName": "Xiaofeng Li", "affiliation": "School of Computer Science and Technology, Taiyuan University of Science and Technology (TYUST), Taiyuan, Shanxi, China", "__typename": "ArticleAuthorType" }, { "givenName": "Jifu", "surname": "Zhang", "fullName": "Jifu Zhang", "affiliation": "School of Computer Science and Technology, Taiyuan University of Science and Technology (TYUST), Taiyuan, Shanxi, China", "__typename": "ArticleAuthorType" }, { "givenName": "Xiao", "surname": "Qin", "fullName": "Xiao Qin", "affiliation": 
"Department of Computer Science and Software Engineering, Samuel Ginn College of Engineering, Auburn University, Auburn, AL, USA", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "06", "pubDate": "2023-06-01 00:00:00", "pubType": "trans", "pages": "5506-5520", "year": "2023", "issn": "1041-4347", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/big-data/2016/9005/0/07840642", "title": "In pursuit of outliers in multi-dimensional data streams", "doi": null, "abstractUrl": "/proceedings-article/big-data/2016/07840642/12OmNyqzM3K", "parentPublication": { "id": "proceedings/big-data/2016/9005/0", "title": "2016 IEEE International Conference on Big Data (Big Data)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdm/2012/4905/0/4905z030", "title": "Tutorial i: Outlier detection in high dimensional data", "doi": null, "abstractUrl": "/proceedings-article/icdm/2012/4905z030/12OmNyuPLek", "parentPublication": { "id": "proceedings/icdm/2012/4905/0", "title": "2012 IEEE 12th International Conference on Data Mining", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tk/2015/05/06948273", "title": "Reverse Nearest Neighbors in Unsupervised Distance-Based Outlier Detection", "doi": null, "abstractUrl": "/journal/tk/2015/05/06948273/13rRUwbs21m", "parentPublication": { "id": "trans/tk", "title": "IEEE Transactions on Knowledge & Data Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/big-data/2017/2715/0/08257998", "title": "Distributed Top-N local outlier detection in big data", "doi": null, "abstractUrl": "/proceedings-article/big-data/2017/08257998/17D45W9KVHV", "parentPublication": { "id": "proceedings/big-data/2017/2715/0", "title": "2017 
IEEE International Conference on Big Data (Big Data)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/07/08606273", "title": "Penalty Force for Coupling Materials with Coulomb Friction", "doi": null, "abstractUrl": "/journal/tg/2020/07/08606273/17D45WB0qbq", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/dasc-picom-datacom-cyberscitech/2017/1956/0/08328509", "title": "A High-Dimensional Outlier Detection Algorithm Base on Relevant Subspace", "doi": null, "abstractUrl": "/proceedings-article/dasc-picom-datacom-cyberscitech/2017/08328509/17D45WYQJ7Q", "parentPublication": { "id": "proceedings/dasc-picom-datacom-cyberscitech/2017/1956/0", "title": "2017 IEEE 15th Intl Conf on Dependable, Autonomic and Secure Computing, 15th Intl Conf on Pervasive Intelligence and Computing, 3rd Intl Conf on Big Data Intelligence and Computing and Cyber Science and Technology Congress(DASC/PiCom/DataCom/CyberSciTech)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/big-data/2021/3902/0/09671744", "title": "VAGA: Towards Accurate and Interpretable Outlier Detection Based on Variational Auto-Encoder and Genetic Algorithm for High-Dimensional Data", "doi": null, "abstractUrl": "/proceedings-article/big-data/2021/09671744/1A8gwiQi5ag", "parentPublication": { "id": "proceedings/big-data/2021/3902/0", "title": "2021 IEEE International Conference on Big Data (Big Data)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdm/2020/8316/0/831600b118", "title": "COPOD: Copula-Based Outlier Detection", "doi": null, "abstractUrl": "/proceedings-article/icdm/2020/831600b118/1r54CapFGjS", "parentPublication": { "id": "proceedings/icdm/2020/8316/0", "title": "2020 IEEE International 
Conference on Data Mining (ICDM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/big-data/2020/6251/0/09378325", "title": "Autoencoder-based outlier detection for sparse, high dimensional data", "doi": null, "abstractUrl": "/proceedings-article/big-data/2020/09378325/1s64on8cPZK", "parentPublication": { "id": "proceedings/big-data/2020/6251/0", "title": "2020 IEEE International Conference on Big Data (Big Data)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/nana/2021/4158/0/415800a236", "title": "A Method for Fast Outlier Detection in High Dimensional Database Log", "doi": null, "abstractUrl": "/proceedings-article/nana/2021/415800a236/1zdPHS0ifMQ", "parentPublication": { "id": "proceedings/nana/2021/4158/0", "title": "2021 International Conference on Networking and Network Applications (NaNA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "09740516", "articleId": "1BYEaPSh7sA", "__typename": "AdjacentArticleType" }, "next": { "fno": "09772394", "articleId": "1Dgjwh58Ksg", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNCwUmsX", "title": "May-June", "year": "2013", "issueNum": "03", "idPrefix": "cg", "pubType": "magazine", "volume": "33", "label": "May-June", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUygT7cK", "doi": "10.1109/MCG.2013.17", "abstract": "The proposed screen-space algorithm approximates light scattering in homogeneous participating environments, such as water. Instead of simulating full global illumination, this method models scattering by a physically based point spread function. A discrete hierarchical convolution in a texture MIP map makes the algorithm efficient, and a custom anisotropic incremental filter prevents illumination leaking.", "abstracts": [ { "abstractType": "Regular", "content": "The proposed screen-space algorithm approximates light scattering in homogeneous participating environments, such as water. Instead of simulating full global illumination, this method models scattering by a physically based point spread function. A discrete hierarchical convolution in a texture MIP map makes the algorithm efficient, and a custom anisotropic incremental filter prevents illumination leaking.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "The proposed screen-space algorithm approximates light scattering in homogeneous participating environments, such as water. Instead of simulating full global illumination, this method models scattering by a physically based point spread function. 
A discrete hierarchical convolution in a texture MIP map makes the algorithm efficient, and a custom anisotropic incremental filter prevents illumination leaking.", "title": "Real-Time Screen-Space Scattering in Homogeneous Environments", "normalizedTitle": "Real-Time Screen-Space Scattering in Homogeneous Environments", "fno": "mcg2013030053", "hasPdf": true, "idPrefix": "cg", "keywords": [ "Lighting", "Rendering Computer Graphics", "Media", "Approximation Methods", "Light Scattering", "Scattering Parameters", "Real Time Systems", "Computer Graphics", "Participating Media", "Light Scattering", "Screen Space Methods", "Real Time Rendering" ], "authors": [ { "givenName": "O.", "surname": "Elek", "fullName": "O. Elek", "affiliation": null, "__typename": "ArticleAuthorType" }, { "givenName": "T.", "surname": "Ritschel", "fullName": "T. Ritschel", "affiliation": null, "__typename": "ArticleAuthorType" }, { "givenName": "H.", "surname": "Seidel", "fullName": "H. Seidel", "affiliation": null, "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "03", "pubDate": "2013-05-01 00:00:00", "pubType": "mags", "pages": "53-65", "year": "2013", "issn": "0272-1716", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/pacificvis/2010/6685/0/05429594", "title": "Interactive volumetric lighting simulating scattering and shadowing", "doi": null, "abstractUrl": "/proceedings-article/pacificvis/2010/05429594/12OmNxecRYK", "parentPublication": { "id": "proceedings/pacificvis/2010/6685/0", "title": "2010 IEEE Pacific Visualization Symposium (PacificVis 2010)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/pacificvis/2015/6879/0/07156383", "title": "Advanced lighting for unstructured-grid data visualization", "doi": null, "abstractUrl": 
"/proceedings-article/pacificvis/2015/07156383/12OmNz2C1sz", "parentPublication": { "id": "proceedings/pacificvis/2015/6879/0", "title": "2015 IEEE Pacific Visualization Symposium (PacificVis)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2008/2174/0/04761892", "title": "Analysis of subsurface scattering under generic illumination", "doi": null, "abstractUrl": "/proceedings-article/icpr/2008/04761892/12OmNzd7bV9", "parentPublication": { "id": "proceedings/icpr/2008/2174/0", "title": "ICPR 2008 19th International Conference on Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2013/03/mcg2013030066", "title": "Double- and Multiple-Scattering Effects in Translucent Materials", "doi": null, "abstractUrl": "/magazine/cg/2013/03/mcg2013030066/13rRUIJcWfX", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2007/02/v0342", "title": "Light Scattering from Filaments", "doi": null, "abstractUrl": "/journal/tg/2007/02/v0342/13rRUwI5TXt", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2013/12/ttg2013122936", "title": "Ambient Volume Scattering", "doi": null, "abstractUrl": "/journal/tg/2013/12/ttg2013122936/13rRUwcAqqh", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2007/06/v1576", "title": "Lattice-Based Volumetric Global Illumination", "doi": null, "abstractUrl": "/journal/tg/2007/06/v1576/13rRUxBa55V", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer 
Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2018/10/08093692", "title": "Point-Based Rendering for Homogeneous Participating Media with Refractive Boundaries", "doi": null, "abstractUrl": "/journal/tg/2018/10/08093692/13rRUy0qnGq", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/07/08600345", "title": "Precomputed Multiple Scattering for Rapid Light Simulation in Participating Media", "doi": null, "abstractUrl": "/journal/tg/2020/07/08600345/17D45Xh13tH", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/09715049", "title": "Collimated Whole Volume Light Scattering in Homogeneous Finite Media", "doi": null, "abstractUrl": "/journal/tg/5555/01/09715049/1B2DbhImWwE", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "mcg2013030044", "articleId": "13rRUwkfB1X", "__typename": "AdjacentArticleType" }, "next": { "fno": "mcg2013030066", "articleId": "13rRUIJcWfX", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "1tWJ8EdItri", "title": "July", "year": "2021", "issueNum": "07", "idPrefix": "tg", "pubType": "journal", "volume": "27", "label": "July", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1gbtOfrk8Mg", "doi": "10.1109/TVCG.2019.2963015", "abstract": "Rendering participating media is important to the creation of photorealistic images. Participating media has a translucent aspect that comes from light being scattered inside the material. For materials with a small mean-free-path (mfp), multiple scattering effects dominate. Simulating these effects is computationally intensive, as it requires tracking a large number of scattering events inside the material. Existing approaches precompute multiple scattering events inside the material and store the results in a table. During rendering time, this table is used to compute the scattering effects. While these methods are faster than explicit scattering computation, they incur higher storage costs. In this paper, we present a new representation for double and multiple scattering effects that uses a neural network model. The scattering response from all homogeneous participating media is encoded into a neural network in a preprocessing step. At run time, the neural network is then used to predict the double and multiple scattering effects. We demonstrate the effects combined with Virtual Ray Lights (VRL), although our approach can be integrated with other rendering algorithms. Our algorithm is implemented on GPU. Double and multiple scattering effects for the entire participating media space are encoded using only 23.6 KB of memory. Our method achieves 50 ms per frame in typical scenes and provides results almost identical to the reference.", "abstracts": [ { "abstractType": "Regular", "content": "Rendering participating media is important to the creation of photorealistic images. 
Participating media has a translucent aspect that comes from light being scattered inside the material. For materials with a small mean-free-path (mfp), multiple scattering effects dominate. Simulating these effects is computationally intensive, as it requires tracking a large number of scattering events inside the material. Existing approaches precompute multiple scattering events inside the material and store the results in a table. During rendering time, this table is used to compute the scattering effects. While these methods are faster than explicit scattering computation, they incur higher storage costs. In this paper, we present a new representation for double and multiple scattering effects that uses a neural network model. The scattering response from all homogeneous participating media is encoded into a neural network in a preprocessing step. At run time, the neural network is then used to predict the double and multiple scattering effects. We demonstrate the effects combined with Virtual Ray Lights (VRL), although our approach can be integrated with other rendering algorithms. Our algorithm is implemented on GPU. Double and multiple scattering effects for the entire participating media space are encoded using only 23.6 KB of memory. Our method achieves 50 ms per frame in typical scenes and provides results almost identical to the reference.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Rendering participating media is important to the creation of photorealistic images. Participating media has a translucent aspect that comes from light being scattered inside the material. For materials with a small mean-free-path (mfp), multiple scattering effects dominate. Simulating these effects is computationally intensive, as it requires tracking a large number of scattering events inside the material. Existing approaches precompute multiple scattering events inside the material and store the results in a table. 
During rendering time, this table is used to compute the scattering effects. While these methods are faster than explicit scattering computation, they incur higher storage costs. In this paper, we present a new representation for double and multiple scattering effects that uses a neural network model. The scattering response from all homogeneous participating media is encoded into a neural network in a preprocessing step. At run time, the neural network is then used to predict the double and multiple scattering effects. We demonstrate the effects combined with Virtual Ray Lights (VRL), although our approach can be integrated with other rendering algorithms. Our algorithm is implemented on GPU. Double and multiple scattering effects for the entire participating media space are encoded using only 23.6 KB of memory. Our method achieves 50 ms per frame in typical scenes and provides results almost identical to the reference.", "title": "Interactive Simulation of Scattering Effects in Participating Media Using a Neural Network Model", "normalizedTitle": "Interactive Simulation of Scattering Effects in Participating Media Using a Neural Network Model", "fno": "08945399", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Computer Simulation", "Graphics Processing Units", "Interactive Systems", "Neural Nets", "Ray Tracing", "Rendering Computer Graphics", "Scattering Events", "Ray Tracing", "Participating Media Space", "GPU", "VRL", "Virtual Ray Lights", "Storage Costs", "Double Scattering Effects", "Mean Free Path", "Photorealistic Images", "Rendering", "Interactive Simulation", "Homogeneous Participating Media", "Scattering Response", "Neural Network", "Multiple Scattering Effects", "Scattering", "Neural Networks", "Media", "Rendering Computer Graphics", "Photonics", "Computational Modeling", "Graphics Processing Units", "Participating Media", "Multiple Scattering", "Real Time", "Neural Network" ], "authors": [ { "givenName": "Liangsheng", "surname": "Ge", "fullName": 
"Liangsheng Ge", "affiliation": "School of Software, Shandong University, Jinan, Shandong, China", "__typename": "ArticleAuthorType" }, { "givenName": "Beibei", "surname": "Wang", "fullName": "Beibei Wang", "affiliation": "School of Computer Science and Engineering, Nanjing University of Science and Technology, Nanjing, Jiangsu, China", "__typename": "ArticleAuthorType" }, { "givenName": "Lu", "surname": "Wang", "fullName": "Lu Wang", "affiliation": "School of Software, Shandong University, Jinan, Shandong, China", "__typename": "ArticleAuthorType" }, { "givenName": "Xiangxu", "surname": "Meng", "fullName": "Xiangxu Meng", "affiliation": "School of Software, Shandong University, Jinan, Shandong, China", "__typename": "ArticleAuthorType" }, { "givenName": "Nicolas", "surname": "Holzschuch", "fullName": "Nicolas Holzschuch", "affiliation": "Inria, CNRS, Grenoble INP LJK, University Grenoble Alpes, Grenoble, France", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "07", "pubDate": "2021-07-01 00:00:00", "pubType": "trans", "pages": "3123-3134", "year": "2021", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/icpr/2014/5209/0/5209c095", "title": "3D Acquisition of Occluded Surfaces from Scattering in Participating Media", "doi": null, "abstractUrl": "/proceedings-article/icpr/2014/5209c095/12OmNAlvHRN", "parentPublication": { "id": "proceedings/icpr/2014/5209/0", "title": "2014 22nd International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cis/2012/4896/0/4896a396", "title": "Fast Multiple Scattering in Participating Media with Beamlet Decomposition", "doi": null, "abstractUrl": "/proceedings-article/cis/2012/4896a396/12OmNwekjJa", "parentPublication": { "id": 
"proceedings/cis/2012/4896/0", "title": "2012 Eighth International Conference on Computational Intelligence and Security", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2013/5067/0/5067a183", "title": "Shape and Reflectance from Scattering in Participating Media", "doi": null, "abstractUrl": "/proceedings-article/3dv/2013/5067a183/12OmNxFsmJt", "parentPublication": { "id": "proceedings/3dv/2013/5067/0", "title": "2013 International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvaui/2016/5870/0/5870a049", "title": "Shape Reconstruction of Objects in Participating Media by Combining Photometric Stereo and Optical Thickness", "doi": null, "abstractUrl": "/proceedings-article/cvaui/2016/5870a049/12OmNyUWQXe", "parentPublication": { "id": "proceedings/cvaui/2016/5870/0", "title": "2016 ICPR 2nd Workshop on Computer Vision for Analysis of Underwater Imagery (CVAUI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/rt/2008/2741/0/04634627", "title": "Interactive volumetric shadows in participating media with single-scattering", "doi": null, "abstractUrl": "/proceedings-article/rt/2008/04634627/12OmNyjtNIF", "parentPublication": { "id": "proceedings/rt/2008/2741/0", "title": "Symposium on Interactive Ray Tracing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2013/03/mcg2013030066", "title": "Double- and Multiple-Scattering Effects in Translucent Materials", "doi": null, "abstractUrl": "/magazine/cg/2013/03/mcg2013030066/13rRUIJcWfX", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2012/02/mcg2012020034", "title": "A Parallel Architecture for Interactively Rendering Scattering and Refraction 
Effects", "doi": null, "abstractUrl": "/magazine/cg/2012/02/mcg2012020034/13rRUwgyOfn", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2018/10/08093692", "title": "Point-Based Rendering for Homogeneous Participating Media with Refractive Boundaries", "doi": null, "abstractUrl": "/journal/tg/2018/10/08093692/13rRUy0qnGq", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/07/08600345", "title": "Precomputed Multiple Scattering for Rapid Light Simulation in Participating Media", "doi": null, "abstractUrl": "/journal/tg/2020/07/08600345/17D45Xh13tH", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/10/08684333", "title": "Fast Computation of Single Scattering in Participating Media with Refractive Boundaries Using Frequency Analysis", "doi": null, "abstractUrl": "/journal/tg/2020/10/08684333/1keqXrXysr6", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "08943144", "articleId": "1g3bi26D34k", "__typename": "AdjacentArticleType" }, "next": { "fno": "08945380", "articleId": "1gbtN0iYlji", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "1tWJeCio6Ck", "name": "ttg202107-08945399s1-supp1-2963015.wmv", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202107-08945399s1-supp1-2963015.wmv", "extension": "wmv", "size": "14.3 MB", "__typename": "WebExtraType" } ], "articleVideos": 
[] }
{ "issue": { "id": "12OmNAZx8Oo", "title": "January", "year": "2010", "issueNum": "01", "idPrefix": "tp", "pubType": "journal", "volume": "32", "label": "January", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUB6Sq1w", "doi": "10.1109/TPAMI.2009.112", "abstract": "A spatiotemporal saliency algorithm based on a center-surround framework is proposed. The algorithm is inspired by biological mechanisms of motion-based perceptual grouping and extends a discriminant formulation of center-surround saliency previously proposed for static imagery. Under this formulation, the saliency of a location is equated to the power of a predefined set of features to discriminate between the visual stimuli in a center and a surround window, centered at that location. The features are spatiotemporal video patches and are modeled as dynamic textures, to achieve a principled joint characterization of the spatial and temporal components of saliency. The combination of discriminant center-surround saliency with the modeling power of dynamic textures yields a robust, versatile, and fully unsupervised spatiotemporal saliency algorithm, applicable to scenes with highly dynamic backgrounds and moving cameras. The related problem of background subtraction is treated as the complement of saliency detection, by classifying nonsalient (with respect to appearance and motion dynamics) points in the visual field as background. The algorithm is tested for background subtraction on challenging sequences, and shown to substantially outperform various state-of-the-art techniques. Quantitatively, its average error rate is almost half that of the closest competitor.", "abstracts": [ { "abstractType": "Regular", "content": "A spatiotemporal saliency algorithm based on a center-surround framework is proposed. 
The algorithm is inspired by biological mechanisms of motion-based perceptual grouping and extends a discriminant formulation of center-surround saliency previously proposed for static imagery. Under this formulation, the saliency of a location is equated to the power of a predefined set of features to discriminate between the visual stimuli in a center and a surround window, centered at that location. The features are spatiotemporal video patches and are modeled as dynamic textures, to achieve a principled joint characterization of the spatial and temporal components of saliency. The combination of discriminant center-surround saliency with the modeling power of dynamic textures yields a robust, versatile, and fully unsupervised spatiotemporal saliency algorithm, applicable to scenes with highly dynamic backgrounds and moving cameras. The related problem of background subtraction is treated as the complement of saliency detection, by classifying nonsalient (with respect to appearance and motion dynamics) points in the visual field as background. The algorithm is tested for background subtraction on challenging sequences, and shown to substantially outperform various state-of-the-art techniques. Quantitatively, its average error rate is almost half that of the closest competitor.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "A spatiotemporal saliency algorithm based on a center-surround framework is proposed. The algorithm is inspired by biological mechanisms of motion-based perceptual grouping and extends a discriminant formulation of center-surround saliency previously proposed for static imagery. Under this formulation, the saliency of a location is equated to the power of a predefined set of features to discriminate between the visual stimuli in a center and a surround window, centered at that location. 
The features are spatiotemporal video patches and are modeled as dynamic textures, to achieve a principled joint characterization of the spatial and temporal components of saliency. The combination of discriminant center-surround saliency with the modeling power of dynamic textures yields a robust, versatile, and fully unsupervised spatiotemporal saliency algorithm, applicable to scenes with highly dynamic backgrounds and moving cameras. The related problem of background subtraction is treated as the complement of saliency detection, by classifying nonsalient (with respect to appearance and motion dynamics) points in the visual field as background. The algorithm is tested for background subtraction on challenging sequences, and shown to substantially outperform various state-of-the-art techniques. Quantitatively, its average error rate is almost half that of the closest competitor.", "title": "Spatiotemporal Saliency in Dynamic Scenes", "normalizedTitle": "Spatiotemporal Saliency in Dynamic Scenes", "fno": "ttp2010010171", "hasPdf": true, "idPrefix": "tp", "keywords": [ "Spatiotemporal Saliency", "Background Subtraction", "Dynamic Backgrounds", "Motion Saliency", "Dynamic Texture", "Discriminant Center Surround Architecture", "Video Modeling" ], "authors": [ { "givenName": "Vijay", "surname": "Mahadevan", "fullName": "Vijay Mahadevan", "affiliation": "University of California, San Diego, La Jolla", "__typename": "ArticleAuthorType" }, { "givenName": "Nuno", "surname": "Vasconcelos", "fullName": "Nuno Vasconcelos", "affiliation": "University of California, San Diego, La Jolla", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "01", "pubDate": "2010-01-01 00:00:00", "pubType": "trans", "pages": "171-177", "year": "2010", "issn": "0162-8828", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": 
"proceedings/cvpr/2011/0394/0/05995506", "title": "Saliency estimation using a non-parametric low-level vision model", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2011/05995506/12OmNA0dMNp", "parentPublication": { "id": "proceedings/cvpr/2011/0394/0", "title": "CVPR 2011", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2010/4109/0/4109a013", "title": "Saliency Based on Multi-scale Ratio of Dissimilarity", "doi": null, "abstractUrl": "/proceedings-article/icpr/2010/4109a013/12OmNvDqsUe", "parentPublication": { "id": "proceedings/icpr/2010/4109/0", "title": "Pattern Recognition, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2008/2242/0/04587576", "title": "Background subtraction in highly dynamic scenes", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2008/04587576/12OmNwe2Ixg", "parentPublication": { "id": "proceedings/cvpr/2008/2242/0", "title": "2008 IEEE Conference on Computer Vision and Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ism/2011/4589/0/4589a249", "title": "Saliency Detection Using Region-Based Incremental Center-Surround Distance", "doi": null, "abstractUrl": "/proceedings-article/ism/2011/4589a249/12OmNwtEEGB", "parentPublication": { "id": "proceedings/ism/2011/4589/0", "title": "2011 IEEE International Symposium on Multimedia", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2012/2216/0/06460408", "title": "Corner-surround Contrast for saliency detection", "doi": null, "abstractUrl": "/proceedings-article/icpr/2012/06460408/12OmNyOHG2r", "parentPublication": { "id": "proceedings/icpr/2012/2216/0", "title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, 
{ "id": "proceedings/dcabes/2012/4818/0/4818a048", "title": "GPU Acceleration of Saliency Detection Algorithm", "doi": null, "abstractUrl": "/proceedings-article/dcabes/2012/4818a048/12OmNyQYt1D", "parentPublication": { "id": "proceedings/dcabes/2012/4818/0", "title": "2012 11th International Symposium on Distributed Computing and Applications to Business, Engineering & Science", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2012/2216/0/06460838", "title": "Background subtraction via early recurrence in dynamic scenes", "doi": null, "abstractUrl": "/proceedings-article/icpr/2012/06460838/12OmNyo1nXp", "parentPublication": { "id": "proceedings/icpr/2012/2216/0", "title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2012/4711/0/4711a158", "title": "Spatiotemporal Saliency Detection via Sparse Representation", "doi": null, "abstractUrl": "/proceedings-article/icme/2012/4711a158/12OmNzAoi3b", "parentPublication": { "id": "proceedings/icme/2012/4711/0", "title": "2012 IEEE International Conference on Multimedia and Expo", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2013/03/ttp2013030541", "title": "Biologically Inspired Object Tracking Using Center-Surround Saliency Mechanisms", "doi": null, "abstractUrl": "/journal/tp/2013/03/ttp2013030541/13rRUxZRbpj", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bigmm/2018/5321/0/08499257", "title": "Saliency-Based Spatiotemporal Attention for Video Captioning", "doi": null, "abstractUrl": "/proceedings-article/bigmm/2018/08499257/17D45WLdYRP", "parentPublication": { "id": "proceedings/bigmm/2018/5321/0", "title": "2018 IEEE 
Fourth International Conference on Multimedia Big Data (BigMM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "ttp2010010165", "articleId": "13rRUwInvgl", "__typename": "AdjacentArticleType" }, "next": { "fno": "ttp2010010178", "articleId": "13rRUwjXZT9", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNynsbwc", "title": "Oct.", "year": "2012", "issueNum": "10", "idPrefix": "tp", "pubType": "journal", "volume": "34", "label": "Oct.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUx0ger3", "doi": "10.1109/TPAMI.2011.272", "abstract": "We propose a new type of saliency—context-aware saliency—which aims at detecting the image regions that represent the scene. This definition differs from previous definitions whose goal is to either identify fixation points or detect the dominant object. In accordance with our saliency definition, we present a detection algorithm which is based on four principles observed in the psychological literature. The benefits of the proposed approach are evaluated in two applications where the context of the dominant objects is just as essential as the objects themselves. In image retargeting, we demonstrate that using our saliency prevents distortions in the important regions. In summarization, we show that our saliency helps to produce compact, appealing, and informative summaries.", "abstracts": [ { "abstractType": "Regular", "content": "We propose a new type of saliency—context-aware saliency—which aims at detecting the image regions that represent the scene. This definition differs from previous definitions whose goal is to either identify fixation points or detect the dominant object. In accordance with our saliency definition, we present a detection algorithm which is based on four principles observed in the psychological literature. The benefits of the proposed approach are evaluated in two applications where the context of the dominant objects is just as essential as the objects themselves. In image retargeting, we demonstrate that using our saliency prevents distortions in the important regions. 
In summarization, we show that our saliency helps to produce compact, appealing, and informative summaries.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We propose a new type of saliency—context-aware saliency—which aims at detecting the image regions that represent the scene. This definition differs from previous definitions whose goal is to either identify fixation points or detect the dominant object. In accordance with our saliency definition, we present a detection algorithm which is based on four principles observed in the psychological literature. The benefits of the proposed approach are evaluated in two applications where the context of the dominant objects is just as essential as the objects themselves. In image retargeting, we demonstrate that using our saliency prevents distortions in the important regions. In summarization, we show that our saliency helps to produce compact, appealing, and informative summaries.", "title": "Context-Aware Saliency Detection", "normalizedTitle": "Context-Aware Saliency Detection", "fno": "ttp2012101915", "hasPdf": true, "idPrefix": "tp", "keywords": [ "Feature Extraction", "Context Awareness", "Visualization", "Object Recognition", "Estimation", "Image Color Analysis", "Human Factors", "Context Aware", "Image Saliency", "Visual Saliency" ], "authors": [ { "givenName": "Stas", "surname": "Goferman", "fullName": "Stas Goferman", "affiliation": "Israel Institute of Technology, Haifa", "__typename": "ArticleAuthorType" }, { "givenName": "Lihi", "surname": "Zelnik-Manor", "fullName": "Lihi Zelnik-Manor", "affiliation": "Israel Institute of Technology, Haifa", "__typename": "ArticleAuthorType" }, { "givenName": "Ayellet", "surname": "Tal", "fullName": "Ayellet Tal", "affiliation": "Israel Institute of Technology, Haifa", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "10", "pubDate": "2012-10-01 00:00:00", 
"pubType": "trans", "pages": "1915-1926", "year": "2012", "issn": "0162-8828", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/iccvw/2013/3022/0/3022a347", "title": "Thematic Saliency Detection Using Spatial-Temporal Context", "doi": null, "abstractUrl": "/proceedings-article/iccvw/2013/3022a347/12OmNqIhG1Z", "parentPublication": { "id": "proceedings/iccvw/2013/3022/0", "title": "2013 IEEE International Conference on Computer Vision Workshops (ICCVW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2011/0394/0/05995743", "title": "Image saliency: From intrinsic to extrinsic context", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2011/05995743/12OmNrF2DMA", "parentPublication": { "id": "proceedings/cvpr/2011/0394/0", "title": "CVPR 2011", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cis/2011/4584/0/4584b062", "title": "A Saliency Detection Model Based on Multi-feature Fusion", "doi": null, "abstractUrl": "/proceedings-article/cis/2011/4584b062/12OmNwDACnH", "parentPublication": { "id": "proceedings/cis/2011/4584/0", "title": "2011 Seventh International Conference on Computational Intelligence and Security", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ism/2011/4589/0/4589a249", "title": "Saliency Detection Using Region-Based Incremental Center-Surround Distance", "doi": null, "abstractUrl": "/proceedings-article/ism/2011/4589a249/12OmNwtEEGB", "parentPublication": { "id": "proceedings/ism/2011/4589/0", "title": "2011 IEEE International Symposium on Multimedia", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2010/6984/0/05539929", "title": "Context-aware saliency detection", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2010/05539929/12OmNxXUhVQ", 
"parentPublication": { "id": "proceedings/cvpr/2010/6984/0", "title": "2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cis/2011/4584/0/4584b201", "title": "Saliency-based Adaptive Scaling for Image Retargeting", "doi": null, "abstractUrl": "/proceedings-article/cis/2011/4584b201/12OmNyrqzlP", "parentPublication": { "id": "proceedings/cis/2011/4584/0", "title": "2011 Seventh International Conference on Computational Intelligence and Security", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/07/08585158", "title": "Saliency-Aware Texture Smoothing", "doi": null, "abstractUrl": "/journal/tg/2020/07/08585158/17D45XeKgwj", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2018/3788/0/08545545", "title": "Saliency Detection using Iterative Dynamic Guided Filtering", "doi": null, "abstractUrl": "/proceedings-article/icpr/2018/08545545/17D45Xq6dzG", "parentPublication": { "id": "proceedings/icpr/2018/3788/0", "title": "2018 24th International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ispa-bdcloud-socialcom-sustaincom/2019/4328/0/09047464", "title": "Saliency Detection Based on Weighted Saliency Probability", "doi": null, "abstractUrl": "/proceedings-article/ispa-bdcloud-socialcom-sustaincom/2019/09047464/1iC6yUqkTja", "parentPublication": { "id": "proceedings/ispa-bdcloud-socialcom-sustaincom/2019/4328/0", "title": "2019 IEEE Intl Conf on Parallel & Distributed Processing with Applications, Big Data & Cloud Computing, Sustainable Computing & Communications, Social Computing & Networking (ISPA/BDCloud/SocialCom/SustainCom)", 
"__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccst/2020/8138/0/813800a045", "title": "RSF: A Novel Saliency Fusion Framework for Image Saliency Detection", "doi": null, "abstractUrl": "/proceedings-article/iccst/2020/813800a045/1p1gt8mtlNm", "parentPublication": { "id": "proceedings/iccst/2020/8138/0", "title": "2020 International Conference on Culture-oriented Science & Technology (ICCST)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "ttp2012101902", "articleId": "13rRUzp02pn", "__typename": "AdjacentArticleType" }, "next": { "fno": "ttp2012101927", "articleId": "13rRUEgs2Da", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNqMPfRq", "title": "Dec.", "year": "2019", "issueNum": "12", "idPrefix": "tp", "pubType": "journal", "volume": "41", "label": "Dec.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUy3gn8N", "doi": "10.1109/TPAMI.2018.2866563", "abstract": "Nearly all existing visual saliency models by far have focused on predicting a universal saliency map across all observers. Yet psychology studies suggest that visual attention of different observers can vary significantly under specific circumstances, especially a scene is composed of multiple salient objects. To study such heterogenous visual attention pattern across observers, we first construct a personalized saliency dataset and explore correlations between visual attention, personal preferences, and image contents. Specifically, we propose to decompose a personalized saliency map (referred to as PSM) into a universal saliency map (referred to as USM) predictable by existing saliency detection models and a new discrepancy map across users that characterizes personalized saliency. We then present two solutions towards predicting such discrepancy maps, i.e., a multi-task convolutional neural network (CNN) framework and an extended CNN with Person-specific Information Encoded Filters (CNN-PIEF). Extensive experimental results demonstrate the effectiveness of our models for PSM prediction as well their generalization capability for unseen observers.", "abstracts": [ { "abstractType": "Regular", "content": "Nearly all existing visual saliency models by far have focused on predicting a universal saliency map across all observers. Yet psychology studies suggest that visual attention of different observers can vary significantly under specific circumstances, especially a scene is composed of multiple salient objects. 
To study such heterogenous visual attention pattern across observers, we first construct a personalized saliency dataset and explore correlations between visual attention, personal preferences, and image contents. Specifically, we propose to decompose a personalized saliency map (referred to as PSM) into a universal saliency map (referred to as USM) predictable by existing saliency detection models and a new discrepancy map across users that characterizes personalized saliency. We then present two solutions towards predicting such discrepancy maps, i.e., a multi-task convolutional neural network (CNN) framework and an extended CNN with Person-specific Information Encoded Filters (CNN-PIEF). Extensive experimental results demonstrate the effectiveness of our models for PSM prediction as well their generalization capability for unseen observers.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Nearly all existing visual saliency models by far have focused on predicting a universal saliency map across all observers. Yet psychology studies suggest that visual attention of different observers can vary significantly under specific circumstances, especially a scene is composed of multiple salient objects. To study such heterogenous visual attention pattern across observers, we first construct a personalized saliency dataset and explore correlations between visual attention, personal preferences, and image contents. Specifically, we propose to decompose a personalized saliency map (referred to as PSM) into a universal saliency map (referred to as USM) predictable by existing saliency detection models and a new discrepancy map across users that characterizes personalized saliency. We then present two solutions towards predicting such discrepancy maps, i.e., a multi-task convolutional neural network (CNN) framework and an extended CNN with Person-specific Information Encoded Filters (CNN-PIEF). 
Extensive experimental results demonstrate the effectiveness of our models for PSM prediction as well their generalization capability for unseen observers.", "title": "Personalized Saliency and Its Prediction", "normalizedTitle": "Personalized Saliency and Its Prediction", "fno": "08444709", "hasPdf": true, "idPrefix": "tp", "keywords": [ "Convolutional Neural Nets", "Image Processing", "Prediction Theory", "Psychology", "Visual Saliency Models", "Universal Saliency Map", "Psychology Studies", "Heterogenous Visual Attention Pattern", "Personalized Saliency Dataset", "Personal Preferences", "Personalized Saliency Map", "Saliency Detection Models", "Discrepancy Map", "Multitask Convolutional Neural Network Framework", "PSM Prediction", "Generalization Capability", "Person Specific Information Encoded Filters", "Observers", "Saliency Detection", "Feature Extraction", "Visualization", "Semantics", "Predictive Models", "Image Color Analysis", "Universal Saliency", "Personalized Saliency", "Multi Task Learning", "Convolutional Neural Network" ], "authors": [ { "givenName": "Yanyu", "surname": "Xu", "fullName": "Yanyu Xu", "affiliation": "ShanghaiTech University, Pudong, Shanghai, China", "__typename": "ArticleAuthorType" }, { "givenName": "Shenghua", "surname": "Gao", "fullName": "Shenghua Gao", "affiliation": "ShanghaiTech University, Pudong, Shanghai, China", "__typename": "ArticleAuthorType" }, { "givenName": "Junru", "surname": "Wu", "fullName": "Junru Wu", "affiliation": "Texas A&M University, College Station, TX", "__typename": "ArticleAuthorType" }, { "givenName": "Nianyi", "surname": "Li", "fullName": "Nianyi Li", "affiliation": "University of Delaware, Newark, DE, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Jingyi", "surname": "Yu", "fullName": "Jingyi Yu", "affiliation": "ShanghaiTech University, Pudong, Shanghai, China", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, 
"isOpenAccess": false, "issueNum": "12", "pubDate": "2019-12-01 00:00:00", "pubType": "trans", "pages": "2975-2989", "year": "2019", "issn": "0162-8828", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/cvpr/2012/1226/0/061P1B08", "title": "Exploiting local and global patch rarities for saliency detection", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2012/061P1B08/12OmNrY3LuC", "parentPublication": { "id": "proceedings/cvpr/2012/1226/0", "title": "2012 IEEE Conference on Computer Vision and Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icig/2011/4541/0/4541a022", "title": "Saliency Modulated High Dynamic Range Image Tone Mapping", "doi": null, "abstractUrl": "/proceedings-article/icig/2011/4541a022/12OmNwDSdLL", "parentPublication": { "id": "proceedings/icig/2011/4541/0", "title": "Image and Graphics, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2012/2216/0/06460735", "title": "Visual saliency and categorisation of abstract images", "doi": null, "abstractUrl": "/proceedings-article/icpr/2012/06460735/12OmNxjjEfM", "parentPublication": { "id": "proceedings/icpr/2012/2216/0", "title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2016/0641/0/07477612", "title": "Fixation prediction with a combined model of bottom-up saliency and vanishing point", "doi": null, "abstractUrl": "/proceedings-article/wacv/2016/07477612/12OmNySG3Nb", "parentPublication": { "id": "proceedings/wacv/2016/0641/0", "title": "2016 IEEE Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/icme/2018/1737/0/08486603", "title": "Co-Saliency Detection via Hierarchical Consistency Measure", "doi": null, "abstractUrl": "/proceedings-article/icme/2018/08486603/14jQfRlKNy6", "parentPublication": { "id": "proceedings/icme/2018/1737/0", "title": "2018 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/07/08585158", "title": "Saliency-Aware Texture Smoothing", "doi": null, "abstractUrl": "/journal/tg/2020/07/08585158/17D45XeKgwj", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmew/2019/9214/0/921400a066", "title": "Saliency-Guided Image Style Transfer", "doi": null, "abstractUrl": "/proceedings-article/icmew/2019/921400a066/1cJ0zw9Ceru", "parentPublication": { "id": "proceedings/icmew/2019/9214/0", "title": "2019 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ispa-bdcloud-socialcom-sustaincom/2019/4328/0/09047464", "title": "Saliency Detection Based on Weighted Saliency Probability", "doi": null, "abstractUrl": "/proceedings-article/ispa-bdcloud-socialcom-sustaincom/2019/09047464/1iC6yUqkTja", "parentPublication": { "id": "proceedings/ispa-bdcloud-socialcom-sustaincom/2019/4328/0", "title": "2019 IEEE Intl Conf on Parallel & Distributed Processing with Applications, Big Data & Cloud Computing, Sustainable Computing & Communications, Social Computing & Networking (ISPA/BDCloud/SocialCom/SustainCom)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2020/7168/0/716800i579", "title": "UC-Net: Uncertainty Inspired RGB-D Saliency Detection via Conditional Variational Autoencoders", "doi": null, 
"abstractUrl": "/proceedings-article/cvpr/2020/716800i579/1m3nhGh6i1q", "parentPublication": { "id": "proceedings/cvpr/2020/7168/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccst/2020/8138/0/813800a045", "title": "RSF: A Novel Saliency Fusion Framework for Image Saliency Detection", "doi": null, "abstractUrl": "/proceedings-article/iccst/2020/813800a045/1p1gt8mtlNm", "parentPublication": { "id": "proceedings/iccst/2020/8138/0", "title": "2020 International Conference on Culture-oriented Science & Technology (ICCST)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "08466003", "articleId": "13zn9d0P6i5", "__typename": "AdjacentArticleType" }, "next": { "fno": "08481592", "articleId": "146z4FBGCW2", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNxEjY43", "title": "July", "year": "2019", "issueNum": "07", "idPrefix": "tg", "pubType": "journal", "volume": "25", "label": "July", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUEgs2C4", "doi": "10.1109/TVCG.2018.2839685", "abstract": "In this paper, we present a novel approach for 3D dental model segmentation via deep Convolutional Neural Networks (CNNs). Traditional geometry-based methods tend to receive undesirable results due to the complex appearance of human teeth (e.g., missing/rotten teeth, feature-less regions, crowding teeth, extra medical attachments, etc.). Furthermore, labeling of individual tooth is hardly enabled in traditional tooth segmentation methods. To address these issues, we propose to learn a generic and robust segmentation model by exploiting deep Neural Networks, namely NNs. The segmentation task is achieved by labeling each mesh face. We extract a set of geometry features as face feature representations. In the training step, the network is fed with those features, and produces a probability vector, of which each element indicates the probability a face belonging to the corresponding model part. To this end, we extensively experiment with various network structures, and eventually arrive at a 2-level hierarchical CNNs structure for tooth segmentation: one for teeth-gingiva labeling and the other for inter-teeth labeling. Further, we propose a novel boundary-aware tooth simplification method to significantly improve efficiency in the stage of feature extraction. After CNNs prediction, we do graph-based label optimization and further refine the boundary with an improved version of fuzzy clustering. The accuracy of our mesh labeling method exceeds that of the state-of-art geometry-based methods, reaching 99.06 percent measured by area which is directly applicable in orthodontic CAD systems. 
It is also robust to any possible foreign matters on model surface, e.g., air bubbles, dental accessories, and many more.", "abstracts": [ { "abstractType": "Regular", "content": "In this paper, we present a novel approach for 3D dental model segmentation via deep Convolutional Neural Networks (CNNs). Traditional geometry-based methods tend to receive undesirable results due to the complex appearance of human teeth (e.g., missing/rotten teeth, feature-less regions, crowding teeth, extra medical attachments, etc.). Furthermore, labeling of individual tooth is hardly enabled in traditional tooth segmentation methods. To address these issues, we propose to learn a generic and robust segmentation model by exploiting deep Neural Networks, namely NNs. The segmentation task is achieved by labeling each mesh face. We extract a set of geometry features as face feature representations. In the training step, the network is fed with those features, and produces a probability vector, of which each element indicates the probability a face belonging to the corresponding model part. To this end, we extensively experiment with various network structures, and eventually arrive at a 2-level hierarchical CNNs structure for tooth segmentation: one for teeth-gingiva labeling and the other for inter-teeth labeling. Further, we propose a novel boundary-aware tooth simplification method to significantly improve efficiency in the stage of feature extraction. After CNNs prediction, we do graph-based label optimization and further refine the boundary with an improved version of fuzzy clustering. The accuracy of our mesh labeling method exceeds that of the state-of-art geometry-based methods, reaching 99.06 percent measured by area which is directly applicable in orthodontic CAD systems. 
It is also robust to any possible foreign matters on model surface, e.g., air bubbles, dental accessories, and many more.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In this paper, we present a novel approach for 3D dental model segmentation via deep Convolutional Neural Networks (CNNs). Traditional geometry-based methods tend to receive undesirable results due to the complex appearance of human teeth (e.g., missing/rotten teeth, feature-less regions, crowding teeth, extra medical attachments, etc.). Furthermore, labeling of individual tooth is hardly enabled in traditional tooth segmentation methods. To address these issues, we propose to learn a generic and robust segmentation model by exploiting deep Neural Networks, namely NNs. The segmentation task is achieved by labeling each mesh face. We extract a set of geometry features as face feature representations. In the training step, the network is fed with those features, and produces a probability vector, of which each element indicates the probability a face belonging to the corresponding model part. To this end, we extensively experiment with various network structures, and eventually arrive at a 2-level hierarchical CNNs structure for tooth segmentation: one for teeth-gingiva labeling and the other for inter-teeth labeling. Further, we propose a novel boundary-aware tooth simplification method to significantly improve efficiency in the stage of feature extraction. After CNNs prediction, we do graph-based label optimization and further refine the boundary with an improved version of fuzzy clustering. The accuracy of our mesh labeling method exceeds that of the state-of-art geometry-based methods, reaching 99.06 percent measured by area which is directly applicable in orthodontic CAD systems. 
It is also robust to any possible foreign matters on model surface, e.g., air bubbles, dental accessories, and many more.", "title": "3D Tooth Segmentation and Labeling Using Deep Convolutional Neural Networks", "normalizedTitle": "3D Tooth Segmentation and Labeling Using Deep Convolutional Neural Networks", "fno": "08362667", "hasPdf": true, "idPrefix": "tg", "keywords": [ "CAD", "Convolutional Neural Nets", "Dentistry", "Feature Extraction", "Geometry", "Graph Theory", "Image Representation", "Image Segmentation", "Medical Image Processing", "Optimisation", "Pattern Clustering", "Probability", "Deep Convolutional Neural Networks", "3 D Dental Model Segmentation", "Human Teeth", "Feature Less Regions", "Generic Segmentation Model", "Robust Segmentation Model", "Segmentation Task", "Mesh Face", "Geometry Features", "Face Feature Representations", "Network Structures", "2 Level Hierarchical CN Ns Structure", "Teeth Gingiva Labeling", "Inter Teeth Labeling", "Feature Extraction", "Mesh Labeling Method", "Geometry Based Methods", "3 D Tooth Segmentation Methods", "Probability Vector", "Boundary Aware Tooth Simplification Method", "Graph Based Label Optimization", "Fuzzy Clustering", "Orthodontic CAD Systems", "Teeth", "Dentistry", "Feature Extraction", "Labeling", "Three Dimensional Displays", "Solid Modeling", "Image Segmentation", "Boundary Aware Simplification", "3 D Mesh Segmentation", "Deep Convolutional Neural Networks", "Fuzzy Clustering" ], "authors": [ { "givenName": "Xiaojie", "surname": "Xu", "fullName": "Xiaojie Xu", "affiliation": "Chinese Academy of Sciences, Shanghai, China", "__typename": "ArticleAuthorType" }, { "givenName": "Chang", "surname": "Liu", "fullName": "Chang Liu", "affiliation": "Chinese Academy of Sciences, Shanghai, China", "__typename": "ArticleAuthorType" }, { "givenName": "Youyi", "surname": "Zheng", "fullName": "Youyi Zheng", "affiliation": "State Key Lab of CAD&CG, Zhejiang University, Hangzhou, China", "__typename": 
"ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "07", "pubDate": "2019-07-01 00:00:00", "pubType": "trans", "pages": "2336-2348", "year": "2019", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/ism/2012/4875/0/4875a145", "title": "A New Approach to Teeth Segmentation", "doi": null, "abstractUrl": "/proceedings-article/ism/2012/4875a145/12OmNzlUKwe", "parentPublication": { "id": "proceedings/ism/2012/4875/0", "title": "2012 IEEE International Symposium on Multimedia", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/lt/2016/02/07345566", "title": "ToothPIC: An Interactive Application for Teaching Oral Anatomy", "doi": null, "abstractUrl": "/journal/lt/2016/02/07345566/13rRUxYrbIk", "parentPublication": { "id": "trans/lt", "title": "IEEE Transactions on Learning Technologies", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sibgrapi/2018/9264/0/926400a400", "title": "Deep Instance Segmentation of Teeth in Panoramic X-Ray Images", "doi": null, "abstractUrl": "/proceedings-article/sibgrapi/2018/926400a400/17D45W1Oa4w", "parentPublication": { "id": "proceedings/sibgrapi/2018/9264/0", "title": "2018 31st SIBGRAPI Conference on Graphics, Patterns and Images (SIBGRAPI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/09720214", "title": "TeethGNN: Semantic 3D Teeth Segmentation with Graph Neural Networks", "doi": null, "abstractUrl": "/journal/tg/5555/01/09720214/1BefbMXPO3C", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600u0720", "title": "DArch: Dental Arch 
Prior-assisted 3D Tooth Instance Segmentation with Weak Annotations", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600u0720/1H1kFKjFl16", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/09933028", "title": "Tooth Alignment Network Based on Landmark Constraints and Hierarchical Graph Structure", "doi": null, "abstractUrl": "/journal/tg/5555/01/09933028/1HVsnduN8e4", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2022/9062/0/09956708", "title": "Automatic teeth segmentation on panoramic X-rays using deep neural networks", "doi": null, "abstractUrl": "/proceedings-article/icpr/2022/09956708/1IHqmXCw89O", "parentPublication": { "id": "proceedings/icpr/2022/9062/0", "title": "2022 26th International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/hpcc-dss-smartcity-dependsys/2022/1993/0/199300a749", "title": "Automatic Individual Tooth Segmentation in Cone-Beam Computed Tomography Based on Multi-Task CNN and Watershed Transform", "doi": null, "abstractUrl": "/proceedings-article/hpcc-dss-smartcity-dependsys/2022/199300a749/1LSPkbunsdy", "parentPublication": { "id": "proceedings/hpcc-dss-smartcity-dependsys/2022/1993/0", "title": "2022 IEEE 24th Int Conf on High Performance Computing & Communications; 8th Int Conf on Data Science & Systems; 20th Int Conf on Smart City; 8th Int Conf on Dependability in Sensor, Cloud & Big Data Systems & Application (HPCC/DSS/SmartCity/DependSys)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/sibgrapi/2020/9274/0/927400a164", "title": "A study on tooth segmentation and numbering using end-to-end deep neural networks", "doi": null, "abstractUrl": "/proceedings-article/sibgrapi/2020/927400a164/1p2VzkB4pji", "parentPublication": { "id": "proceedings/sibgrapi/2020/9274/0", "title": "2020 33rd SIBGRAPI Conference on Graphics, Patterns and Images (SIBGRAPI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2022/10/09445658", "title": "A Fully Automated Method for 3D Individual Tooth Identification and Segmentation in Dental CBCT", "doi": null, "abstractUrl": "/journal/tp/2022/10/09445658/1uaajNYaeQw", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": null, "next": { "fno": "08354904", "articleId": "13rRUxYrbUP", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNwpGgJA", "title": "July", "year": "2013", "issueNum": "07", "idPrefix": "co", "pubType": "magazine", "volume": "46", "label": "July", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUxly90R", "doi": "10.1109/MC.2013.151", "abstract": "Because visual analytics has a broad scope and aims at knowledge discovery, evaluating the methods used in this field is challenging. Successful solutions are often found through trial and error, with solid guidelines and findings still lagging. The Web Extra document contains links with further information on visual analytics challenges and repositories.", "abstracts": [ { "abstractType": "Regular", "content": "Because visual analytics has a broad scope and aims at knowledge discovery, evaluating the methods used in this field is challenging. Successful solutions are often found through trial and error, with solid guidelines and findings still lagging. The Web Extra document contains links with further information on visual analytics challenges and repositories.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Because visual analytics has a broad scope and aims at knowledge discovery, evaluating the methods used in this field is challenging. Successful solutions are often found through trial and error, with solid guidelines and findings still lagging. The Web Extra document contains links with further information on visual analytics challenges and repositories.", "title": "Evaluation: A Challenge for Visual Analytics", "normalizedTitle": "Evaluation: A Challenge for Visual Analytics", "fno": "mco2013070056", "hasPdf": true, "idPrefix": "co", "keywords": [ "Visual Analytics", "Data Visualization", "Knowledge Discovery", "Information Analysis", "HASH 0 X 4 D 1 A 5 Fc" ], "authors": [ { "givenName": "Jarke J.", "surname": "van Wijk", "fullName": "Jarke J. van Wijk", "affiliation": "Dept. of Math. 
& Comput. Sci., Eindhoven Univ. of Technol., Eindhoven, Netherlands", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "07", "pubDate": "2013-07-01 00:00:00", "pubType": "mags", "pages": "56-60", "year": "2013", "issn": "0018-9162", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/vast/2012/4752/0/06400514", "title": "Big data exploration through visual analytics", "doi": null, "abstractUrl": "/proceedings-article/vast/2012/06400514/12OmNC3XhwY", "parentPublication": { "id": "proceedings/vast/2012/4752/0", "title": "2012 IEEE Conference on Visual Analytics Science and Technology (VAST 2012)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/hicss/2013/4892/0/4892b495", "title": "A Role for Reasoning in Visual Analytics", "doi": null, "abstractUrl": "/proceedings-article/hicss/2013/4892b495/12OmNqJ8tq4", "parentPublication": { "id": "proceedings/hicss/2013/4892/0", "title": "2013 46th Hawaii International Conference on System Sciences", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/csmr-wcre/2014/3752/0/06747208", "title": "in*Bug: Visual analytics of bug repositories", "doi": null, "abstractUrl": "/proceedings-article/csmr-wcre/2014/06747208/12OmNyKJijS", "parentPublication": { "id": "proceedings/csmr-wcre/2014/3752/0", "title": "2014 Software Evolution Week - IEEE Conference on Software Maintenance, Reengineering and Reverse Engineering (CSMR-WCRE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2012/12/ttg2012122908", "title": "The User Puzzle—Explaining the Interaction with Visual Analytics Systems", "doi": null, "abstractUrl": "/journal/tg/2012/12/ttg2012122908/13rRUIIVlcH", "parentPublication": { "id": "trans/tg", "title": "IEEE 
Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/co/2013/08/mco2013080090", "title": "Bixplorer: Visual Analytics with Biclusters", "doi": null, "abstractUrl": "/magazine/co/2013/08/mco2013080090/13rRUwcAqvs", "parentPublication": { "id": "mags/co", "title": "Computer", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/co/2013/07/mco2013070047", "title": "Real-Time Visual Analytics for Text Streams", "doi": null, "abstractUrl": "/magazine/co/2013/07/mco2013070047/13rRUxAStVJ", "parentPublication": { "id": "mags/co", "title": "Computer", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/co/2013/07/mco2013070030", "title": "Visual Analytics Support for Intelligence Analysis", "doi": null, "abstractUrl": "/magazine/co/2013/07/mco2013070030/13rRUxD9h0P", "parentPublication": { "id": "mags/co", "title": "Computer", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2009/02/mcg2009020084", "title": "Demystifying Visual Analytics", "doi": null, "abstractUrl": "/magazine/cg/2009/02/mcg2009020084/13rRUy3gn3z", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/big-data/2022/8045/0/10020637", "title": "Ishikawa, JESS, and Visual Analytics for Engineering", "doi": null, "abstractUrl": "/proceedings-article/big-data/2022/10020637/1KfRI7ZM8Ja", "parentPublication": { "id": "proceedings/big-data/2022/8045/0", "title": "2022 IEEE International Conference on Big Data (Big Data)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/skg/2017/2558/0/255801a208", "title": "Research on Equipment Knowledge Representation Based on Visual Analytics", "doi": null, "abstractUrl": 
"/proceedings-article/skg/2017/255801a208/1i5m8aChW6c", "parentPublication": { "id": "proceedings/skg/2017/2558/0", "title": "2017 13th International Conference on Semantics, Knowledge and Grids (SKG)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "mco2013070047", "articleId": "13rRUxAStVJ", "__typename": "AdjacentArticleType" }, "next": { "fno": "mco2013070062", "articleId": "13rRUytF44y", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "17ShDTXWRY5", "name": "mco2013070056s1.pdf", "location": "https://www.computer.org/csdl/api/v1/extra/mco2013070056s1.pdf", "extension": "pdf", "size": "14.2 kB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "12OmNvsDHDY", "title": "Jan.", "year": "2020", "issueNum": "01", "idPrefix": "tg", "pubType": "journal", "volume": "26", "label": "Jan.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1cG4z3ZA4k8", "doi": "10.1109/TVCG.2019.2934282", "abstract": "Data collection and analysis in the field is critical for operations in domains such as environmental science and public safety. However, field workers currently face dataand platform-oriented issues in efficient data collection and analysis in the field, such as limited connectivity, screen space, and attentional resources. In this paper, we explore how visual analytics tools might transform field practices by more deeply integrating data into these operations. We use a design probe coupling mobile, cloud, and immersive analytics components to guide interviews with ten experts from five domains to explore how visual analytics could support data collection and analysis needs in the field. The results identify shortcomings of current approaches and target scenarios and design considerations for future field analysis systems. We embody these findings in FieldView, an extensible, open-source prototype designed to support critical use cases for situated field analysis. Our findings suggest the potential for integrating mobile and immersive technologies to enhance data's utility for various field operations and new directions for visual analytics tools to transform fieldwork.", "abstracts": [ { "abstractType": "Regular", "content": "Data collection and analysis in the field is critical for operations in domains such as environmental science and public safety. However, field workers currently face dataand platform-oriented issues in efficient data collection and analysis in the field, such as limited connectivity, screen space, and attentional resources. 
In this paper, we explore how visual analytics tools might transform field practices by more deeply integrating data into these operations. We use a design probe coupling mobile, cloud, and immersive analytics components to guide interviews with ten experts from five domains to explore how visual analytics could support data collection and analysis needs in the field. The results identify shortcomings of current approaches and target scenarios and design considerations for future field analysis systems. We embody these findings in FieldView, an extensible, open-source prototype designed to support critical use cases for situated field analysis. Our findings suggest the potential for integrating mobile and immersive technologies to enhance data's utility for various field operations and new directions for visual analytics tools to transform fieldwork.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Data collection and analysis in the field is critical for operations in domains such as environmental science and public safety. However, field workers currently face dataand platform-oriented issues in efficient data collection and analysis in the field, such as limited connectivity, screen space, and attentional resources. In this paper, we explore how visual analytics tools might transform field practices by more deeply integrating data into these operations. We use a design probe coupling mobile, cloud, and immersive analytics components to guide interviews with ten experts from five domains to explore how visual analytics could support data collection and analysis needs in the field. The results identify shortcomings of current approaches and target scenarios and design considerations for future field analysis systems. We embody these findings in FieldView, an extensible, open-source prototype designed to support critical use cases for situated field analysis. 
Our findings suggest the potential for integrating mobile and immersive technologies to enhance data's utility for various field operations and new directions for visual analytics tools to transform fieldwork.", "title": "Designing for Mobile and Immersive Visual Analytics in the Field", "normalizedTitle": "Designing for Mobile and Immersive Visual Analytics in the Field", "fno": "08805467", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Cloud Computing", "Data Analysis", "Data Visualisation", "Interactive Systems", "Mobile Computing", "Cloud Analytics", "Field View", "Data Analysis", "Mobile Visual Analytics", "Data Integration", "Data Collection", "Public Safety", "Environmental Science", "Immersive Visual Analytics", "Data Visualization", "Data Collection", "Tools", "Decision Making", "Mobile Handsets", "Visual Analytics", "Emergency Services", "Immersive Analytics", "Augmented Reality", "Mobile Visualization", "Outdoor Visualization", "Emergency Response" ], "authors": [ { "givenName": "Matt", "surname": "Whitlock", "fullName": "Matt Whitlock", "affiliation": "University of Colorado", "__typename": "ArticleAuthorType" }, { "givenName": "Keke", "surname": "Wu", "fullName": "Keke Wu", "affiliation": "University of Colorado", "__typename": "ArticleAuthorType" }, { "givenName": "Danielle Albers", "surname": "Szafir", "fullName": "Danielle Albers Szafir", "affiliation": "University of Colorado", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "01", "pubDate": "2020-01-01 00:00:00", "pubType": "trans", "pages": "503-513", "year": "2020", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/hicss/2011/9618/0/05718616", "title": "Pair Analytics: Capturing Reasoning Processes in Collaborative Visual Analytics", "doi": null, "abstractUrl": 
"/proceedings-article/hicss/2011/05718616/12OmNvAiShB", "parentPublication": { "id": "proceedings/hicss/2011/9618/0", "title": "2011 44th Hawaii International Conference on System Sciences", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bdva/2015/7343/0/07314296", "title": "Immersive Analytics", "doi": null, "abstractUrl": "/proceedings-article/bdva/2015/07314296/12OmNzVXNSO", "parentPublication": { "id": "proceedings/bdva/2015/7343/0", "title": "2015 Big Data Visual Analytics (BDVA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/co/2013/07/mco2013070047", "title": "Real-Time Visual Analytics for Text Streams", "doi": null, "abstractUrl": "/magazine/co/2013/07/mco2013070047/13rRUxAStVJ", "parentPublication": { "id": "mags/co", "title": "Computer", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2019/03/08698351", "title": "Immersive Analytics", "doi": null, "abstractUrl": "/magazine/cg/2019/03/08698351/19utOsQX9Nm", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2022/5365/0/536500a155", "title": "HybridAxes: An Immersive Analytics Tool With Interoperability Between 2D and Immersive Reality Modes", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a155/1J7Wc83LEUo", "parentPublication": { "id": "proceedings/ismar-adjunct/2022/5365/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/03/08770302", "title": "Survey of Immersive Analytics", "doi": null, "abstractUrl": "/journal/tg/2021/03/08770302/1bTRatYkzoA", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on 
Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2019/2838/0/283800a216", "title": "Compositional Microservices for Immersive Social Visual Analytics", "doi": null, "abstractUrl": "/proceedings-article/iv/2019/283800a216/1cMFalENINq", "parentPublication": { "id": "proceedings/iv/2019/2838/0", "title": "2019 23rd International Conference Information Visualisation (IV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/10/08423105", "title": "Commercial Visual Analytics Systems&#x2013;Advances in the Big Data Analytics Field", "doi": null, "abstractUrl": "/journal/tg/2019/10/08423105/1cYd7bZMLp6", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2020/9134/0/913400a515", "title": "Knowledge-Driven Framework for Designing Visual Analytics Applications", "doi": null, "abstractUrl": "/proceedings-article/iv/2020/913400a515/1rSRbNnQM5q", "parentPublication": { "id": "proceedings/iv/2020/9134/0", "title": "2020 24th International Conference Information Visualisation (IV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2021/04/09487524", "title": "The Value of Immersive Visualization", "doi": null, "abstractUrl": "/magazine/cg/2021/04/09487524/1vg3n8rdAEU", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "08794607", "articleId": "1cumYxIQ9Ve", "__typename": "AdjacentArticleType" }, "next": { "fno": "08854316", "articleId": "1dM2fkHbAVa", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "1i4z6O9xsKA", "name": 
"ttg202001-08805467s1.mp4", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202001-08805467s1.mp4", "extension": "mp4", "size": "57 MB", "__typename": "WebExtraType" }, { "id": "1i4QkRUwCqY", "name": "ttg202001-08805467s2.pdf", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202001-08805467s2.pdf", "extension": "pdf", "size": "408 kB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "12OmNwFid7w", "title": "Jan.", "year": "2019", "issueNum": "01", "idPrefix": "tg", "pubType": "journal", "volume": "25", "label": "Jan.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "17D45WHONjL", "doi": "10.1109/TVCG.2018.2864844", "abstract": "Economic globalization is increasing connectedness among regions of the world, creating complex interdependencies within various supply chains. Recent studies have indicated that changes and disruptions within such networks can serve as indicators for increased risks of violence and armed conflicts. This is especially true of countries that may not be able to compete for scarce commodities during supply shocks. Thus, network-induced vulnerability to supply disruption is typically exported from wealthier populations to disadvantaged populations. As such, researchers and stakeholders concerned with supply chains, political science, environmental studies, etc. need tools to explore the complex dynamics within global trade networks and how the structure of these networks relates to regional instability. However, the multivariate, spatiotemporal nature of the network structure creates a bottleneck in the extraction and analysis of correlations and anomalies for exploratory data analysis and hypothesis generation. Working closely with experts in political science and sustainability, we have developed a highly coordinated, multi-view framework that utilizes anomaly detection, network analytics, and spatiotemporal visualization methods for exploring the relationship between global trade networks and regional instability. Requirements for analysis and initial research questions to be investigated are elicited from domain experts, and a variety of visual encoding techniques for rapid assessment of analysis and correlations between trade goods, network patterns, and time series signatures are explored. 
We demonstrate the application of our framework through case studies focusing on armed conflicts in Africa, regional instability measures, and their relationship to international global trade.", "abstracts": [ { "abstractType": "Regular", "content": "Economic globalization is increasing connectedness among regions of the world, creating complex interdependencies within various supply chains. Recent studies have indicated that changes and disruptions within such networks can serve as indicators for increased risks of violence and armed conflicts. This is especially true of countries that may not be able to compete for scarce commodities during supply shocks. Thus, network-induced vulnerability to supply disruption is typically exported from wealthier populations to disadvantaged populations. As such, researchers and stakeholders concerned with supply chains, political science, environmental studies, etc. need tools to explore the complex dynamics within global trade networks and how the structure of these networks relates to regional instability. However, the multivariate, spatiotemporal nature of the network structure creates a bottleneck in the extraction and analysis of correlations and anomalies for exploratory data analysis and hypothesis generation. Working closely with experts in political science and sustainability, we have developed a highly coordinated, multi-view framework that utilizes anomaly detection, network analytics, and spatiotemporal visualization methods for exploring the relationship between global trade networks and regional instability. Requirements for analysis and initial research questions to be investigated are elicited from domain experts, and a variety of visual encoding techniques for rapid assessment of analysis and correlations between trade goods, network patterns, and time series signatures are explored. 
We demonstrate the application of our framework through case studies focusing on armed conflicts in Africa, regional instability measures, and their relationship to international global trade.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Economic globalization is increasing connectedness among regions of the world, creating complex interdependencies within various supply chains. Recent studies have indicated that changes and disruptions within such networks can serve as indicators for increased risks of violence and armed conflicts. This is especially true of countries that may not be able to compete for scarce commodities during supply shocks. Thus, network-induced vulnerability to supply disruption is typically exported from wealthier populations to disadvantaged populations. As such, researchers and stakeholders concerned with supply chains, political science, environmental studies, etc. need tools to explore the complex dynamics within global trade networks and how the structure of these networks relates to regional instability. However, the multivariate, spatiotemporal nature of the network structure creates a bottleneck in the extraction and analysis of correlations and anomalies for exploratory data analysis and hypothesis generation. Working closely with experts in political science and sustainability, we have developed a highly coordinated, multi-view framework that utilizes anomaly detection, network analytics, and spatiotemporal visualization methods for exploring the relationship between global trade networks and regional instability. Requirements for analysis and initial research questions to be investigated are elicited from domain experts, and a variety of visual encoding techniques for rapid assessment of analysis and correlations between trade goods, network patterns, and time series signatures are explored. 
We demonstrate the application of our framework through case studies focusing on armed conflicts in Africa, regional instability measures, and their relationship to international global trade.", "title": "A Visual Analytics Framework for Spatiotemporal Trade Network Analysis", "normalizedTitle": "A Visual Analytics Framework for Spatiotemporal Trade Network Analysis", "fno": "08440040", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Data Analysis", "Data Visualisation", "Economics", "Financial Data Processing", "Globalisation", "International Trade", "Time Series", "Regional Instability Measures", "International Global Trade", "Visual Analytics Framework", "Spatiotemporal Trade Network Analysis", "Economic Globalization", "Supply Chains", "Supply Shocks", "Network Induced Vulnerability", "Political Science", "Environmental Studies", "Global Trade Networks", "Multivariate Nature", "Spatiotemporal Nature", "Network Structure", "Exploratory Data Analysis", "Multiview Framework", "Network Analytics", "Spatiotemporal Visualization Methods", "Visual Encoding Techniques", "Trade Goods", "Data Visualization", "Visual Analytics", "Correlation", "Anomaly Detection", "Time Series Analysis", "Spatiotemporal Phenomena", "Global Trade Network", "Anomaly Detection", "Visual Analytics" ], "authors": [ { "givenName": "Hong", "surname": "Wang", "fullName": "Hong Wang", "affiliation": "Arizona State University", "__typename": "ArticleAuthorType" }, { "givenName": "Yafeng", "surname": "Lu", "fullName": "Yafeng Lu", "affiliation": "Arizona State University", "__typename": "ArticleAuthorType" }, { "givenName": "Shade T.", "surname": "Shutters", "fullName": "Shade T. 
Shutters", "affiliation": "Arizona State University", "__typename": "ArticleAuthorType" }, { "givenName": "Michael", "surname": "Steptoe", "fullName": "Michael Steptoe", "affiliation": "Arizona State University", "__typename": "ArticleAuthorType" }, { "givenName": "Feng", "surname": "Wang", "fullName": "Feng Wang", "affiliation": "GE Global Research", "__typename": "ArticleAuthorType" }, { "givenName": "Steven", "surname": "Landis", "fullName": "Steven Landis", "affiliation": "University of Nevada", "__typename": "ArticleAuthorType" }, { "givenName": "Ross", "surname": "Maciejewski", "fullName": "Ross Maciejewski", "affiliation": "Arizona State University", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "01", "pubDate": "2019-01-01 00:00:00", "pubType": "trans", "pages": "331-341", "year": "2019", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/vast/2012/4752/0/06400491", "title": "A correlative analysis process in a visual analytics environment", "doi": null, "abstractUrl": "/proceedings-article/vast/2012/06400491/12OmNAkEU1K", "parentPublication": { "id": "proceedings/vast/2012/4752/0", "title": "2012 IEEE Conference on Visual Analytics Science and Technology (VAST 2012)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/pacificvis/2016/1451/0/07465251", "title": "EnsembleGraph: Interactive visual analysis of spatiotemporal behaviors in ensemble simulation data", "doi": null, "abstractUrl": "/proceedings-article/pacificvis/2016/07465251/12OmNCh0Pb9", "parentPublication": { "id": "proceedings/pacificvis/2016/1451/0", "title": "2016 IEEE Pacific Visualization Symposium (PacificVis)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/dsia/2017/2198/0/08339088", "title": 
"A client-based visual analytics framework for large spatiotemporal data under architectural constraints", "doi": null, "abstractUrl": "/proceedings-article/dsia/2017/08339088/12OmNrJAdU1", "parentPublication": { "id": "proceedings/dsia/2017/2198/0", "title": "2017 IEEE Workshop on Data Systems for Interactive Analysis (DSIA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vast/2014/6227/0/07042482", "title": "An insight- and task-based methodology for evaluating spatiotemporal visual analytics", "doi": null, "abstractUrl": "/proceedings-article/vast/2014/07042482/12OmNwp74wP", "parentPublication": { "id": "proceedings/vast/2014/6227/0", "title": "2014 IEEE Conference on Visual Analytics Science and Technology (VAST)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2014/12/06875970", "title": "Proactive Spatiotemporal Resource Allocation and Predictive Visual Analytics for Community Policing and Law Enforcement", "doi": null, "abstractUrl": "/journal/tg/2014/12/06875970/13rRUNvgyWo", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2018/09/08037991", "title": "A Visual Analytics Framework for Identifying Topic Drivers in Media Events", "doi": null, "abstractUrl": "/journal/tg/2018/09/08037991/13rRUxASuhI", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vast/2017/3163/0/08585564", "title": "Interactive Visual Analytics Application for Spatiotemporal Movement Data VAST Challenge 2017 Mini-Challenge 1: Award for Actionable and Detailed Analysis", "doi": null, "abstractUrl": "/proceedings-article/vast/2017/08585564/17D45VsBU7R", "parentPublication": { 
"id": "proceedings/vast/2017/3163/0", "title": "2017 IEEE Conference on Visual Analytics Science and Technology (VAST)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icvisp/2021/0770/0/077000a296", "title": "Visual Analytics for the International Trade", "doi": null, "abstractUrl": "/proceedings-article/icvisp/2021/077000a296/1APq2QCw3n2", "parentPublication": { "id": "proceedings/icvisp/2021/0770/0", "title": "2021 5th International Conference on Vision, Image and Signal Processing (ICVISP)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/bd/2022/02/08950124", "title": "Visual Analytics of Anomalous User Behaviors: A Survey", "doi": null, "abstractUrl": "/journal/bd/2022/02/08950124/1gKwHIY8sAo", "parentPublication": { "id": "trans/bd", "title": "IEEE Transactions on Big Data", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/06/09397369", "title": "Visual Cascade Analytics of Large-Scale Spatiotemporal Data", "doi": null, "abstractUrl": "/journal/tg/2022/06/09397369/1sA4WPUOESY", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "08440048", "articleId": "17D45W2WyxU", "__typename": "AdjacentArticleType" }, "next": { "fno": "08440085", "articleId": "17D45WaTkjk", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "1i4zrHs1alG", "name": "ttg201901-08440040s1.mp4", "location": "https://www.computer.org/csdl/api/v1/extra/ttg201901-08440040s1.mp4", "extension": "mp4", "size": "34.9 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "1M2Ido7rZde", "title": "May", "year": "2023", "issueNum": "05", "idPrefix": "tk", "pubType": "journal", "volume": "35", "label": "May", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1BWZdToFUVW", "doi": "10.1109/TKDE.2022.3154166", "abstract": "Detecting anomalies in large complex systems is a critical and challenging task. The difficulties arise from several aspects. First, collecting ground truth labels or prior knowledge for anomalies is hard in real-world systems, which often lead to limited or no anomaly labels in the dataset. Second, anomalies in large systems usually occur in a collective manner due to the underlying dependency structure among devices or sensors. Lastly, real-time anomaly detection for high-dimensional data requires efficient algorithms that are capable of handling different types of data (i.e. continuous and discrete). We propose a correlation structure-based collective anomaly detection (CSCAD) model for high-dimensional anomaly detection problem in large systems, which is also generalizable to semi-supervised or supervised settings. Our framework utilize graph convolutional network combining a variational autoencoder to jointly exploit the feature space correlation and reconstruction deficiency of samples to perform anomaly detection. We propose an extended mutual information (EMI) metric to mine the internal correlation structure among different data features, which enhances the data reconstruction capability of CSCAD. The reconstruction loss and latent standard deviation vector of a sample obtained from reconstruction network can be perceived as two natural anomalous degree measures. An anomaly discriminating network can then be trained using low anomalous degree samples as positive samples, and high anomalous degree samples as negative samples. 
Experimental results on five public datasets demonstrate that our approach consistently outperforms all the competing baselines.", "abstracts": [ { "abstractType": "Regular", "content": "Detecting anomalies in large complex systems is a critical and challenging task. The difficulties arise from several aspects. First, collecting ground truth labels or prior knowledge for anomalies is hard in real-world systems, which often lead to limited or no anomaly labels in the dataset. Second, anomalies in large systems usually occur in a collective manner due to the underlying dependency structure among devices or sensors. Lastly, real-time anomaly detection for high-dimensional data requires efficient algorithms that are capable of handling different types of data (i.e. continuous and discrete). We propose a correlation structure-based collective anomaly detection (CSCAD) model for high-dimensional anomaly detection problem in large systems, which is also generalizable to semi-supervised or supervised settings. Our framework utilize graph convolutional network combining a variational autoencoder to jointly exploit the feature space correlation and reconstruction deficiency of samples to perform anomaly detection. We propose an extended mutual information (EMI) metric to mine the internal correlation structure among different data features, which enhances the data reconstruction capability of CSCAD. The reconstruction loss and latent standard deviation vector of a sample obtained from reconstruction network can be perceived as two natural anomalous degree measures. An anomaly discriminating network can then be trained using low anomalous degree samples as positive samples, and high anomalous degree samples as negative samples. 
Experimental results on five public datasets demonstrate that our approach consistently outperforms all the competing baselines.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Detecting anomalies in large complex systems is a critical and challenging task. The difficulties arise from several aspects. First, collecting ground truth labels or prior knowledge for anomalies is hard in real-world systems, which often lead to limited or no anomaly labels in the dataset. Second, anomalies in large systems usually occur in a collective manner due to the underlying dependency structure among devices or sensors. Lastly, real-time anomaly detection for high-dimensional data requires efficient algorithms that are capable of handling different types of data (i.e. continuous and discrete). We propose a correlation structure-based collective anomaly detection (CSCAD) model for high-dimensional anomaly detection problem in large systems, which is also generalizable to semi-supervised or supervised settings. Our framework utilize graph convolutional network combining a variational autoencoder to jointly exploit the feature space correlation and reconstruction deficiency of samples to perform anomaly detection. We propose an extended mutual information (EMI) metric to mine the internal correlation structure among different data features, which enhances the data reconstruction capability of CSCAD. The reconstruction loss and latent standard deviation vector of a sample obtained from reconstruction network can be perceived as two natural anomalous degree measures. An anomaly discriminating network can then be trained using low anomalous degree samples as positive samples, and high anomalous degree samples as negative samples. 
Experimental results on five public datasets demonstrate that our approach consistently outperforms all the competing baselines.", "title": "CSCAD: Correlation Structure-Based Collective Anomaly Detection in Complex System", "normalizedTitle": "CSCAD: Correlation Structure-Based Collective Anomaly Detection in Complex System", "fno": "09740038", "hasPdf": true, "idPrefix": "tk", "keywords": [ "Convolutional Neural Nets", "Data Handling", "Feature Extraction", "Graph Neural Networks", "Semi Supervised Learning Artificial Intelligence", "Anomaly Discriminating Network", "Anomaly Labels", "Complex System", "Correlation Structure Based Collective Anomaly Detection Model", "Critical Task", "CSCAD", "Data Features", "Data Reconstruction Capability", "EMI", "Extended Mutual Information", "Feature Space Correlation", "Framework Utilize Graph Convolutional Network", "Ground Truth Labels", "High Dimensional Anomaly Detection Problem", "High Dimensional Data", "Internal Correlation Structure", "Latent Standard Deviation Vector", "Real Time Anomaly Detection", "Supervised Settings", "Anomaly Detection", "Correlation", "Sensors", "Feature Extraction", "Data Models", "Loss Measurement", "Complex Systems", "Anomaly Detection", "Complex System", "Correlation Mining", "Unsupervised Learning", "Urban Computing", "Variational Autoencoder" ], "authors": [ { "givenName": "Huiling", "surname": "Qin", "fullName": "Huiling Qin", "affiliation": "Xidian University and JD iCity, JD Technology, Beijing, China", "__typename": "ArticleAuthorType" }, { "givenName": "Xianyuan", "surname": "Zhan", "fullName": "Xianyuan Zhan", "affiliation": "Institute for AI Industry Research (AIR), Tsinghua University, Beijing, China", "__typename": "ArticleAuthorType" }, { "givenName": "Yu", "surname": "Zheng", "fullName": "Yu Zheng", "affiliation": "Xidian University and JD iCity, JD Technology, Beijing, China", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, 
"showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "05", "pubDate": "2023-05-01 00:00:00", "pubType": "trans", "pages": "4634-4645", "year": "2023", "issn": "1041-4347", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/pacificvis/2018/1424/0/142401a150", "title": "Visual Analysis of Collective Anomalies Through High-Order Correlation Graph", "doi": null, "abstractUrl": "/proceedings-article/pacificvis/2018/142401a150/12OmNym2c5B", "parentPublication": { "id": "proceedings/pacificvis/2018/1424/0", "title": "2018 IEEE Pacific Visualization Symposium (PacificVis)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2018/6420/0/642000g479", "title": "Real-World Anomaly Detection in Surveillance Videos", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2018/642000g479/17D45Wuc32Y", "parentPublication": { "id": "proceedings/cvpr/2018/6420/0", "title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2022/9062/0/09956420", "title": "Anomaly Detection via Learnable Pretext Task", "doi": null, "abstractUrl": "/proceedings-article/icpr/2022/09956420/1IHqECC9zWg", "parentPublication": { "id": "proceedings/icpr/2022/9062/0", "title": "2022 26th International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/mass/2022/7180/0/718000a530", "title": "Anomaly Detection for CPS via Memory-Augmented Reconstruction and Time Series Prediction", "doi": null, "abstractUrl": "/proceedings-article/mass/2022/718000a530/1JeE6wlcidy", "parentPublication": { "id": "proceedings/mass/2022/7180/0", "title": "2022 IEEE 19th International Conference on Mobile Ad Hoc and Smart Systems (MASS)", "__typename": "ParentPublication" 
}, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2020/1331/0/09102818", "title": "Glad: Global And Local Anomaly Detection", "doi": null, "abstractUrl": "/proceedings-article/icme/2020/09102818/1kwrlp2eJVK", "parentPublication": { "id": "proceedings/icme/2020/1331/0", "title": "2020 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/12/09468958", "title": "Interpretable Anomaly Detection in Event Sequences via Sequence Matching and Visual Comparison", "doi": null, "abstractUrl": "/journal/tg/2022/12/09468958/1uR9IWtyEi4", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2021/3864/0/09428386", "title": "Cross-Scene Person Trajectory Anomaly Detection Based on Re-Identification", "doi": null, "abstractUrl": "/proceedings-article/icme/2021/09428386/1uim16kMRVu", "parentPublication": { "id": "proceedings/icme/2021/3864/0", "title": "2021 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ccns/2021/2711/0/271100a098", "title": "Anomaly Detection with Partially Observed Anomaly Types", "doi": null, "abstractUrl": "/proceedings-article/ccns/2021/271100a098/1xIOHzCf4Uo", "parentPublication": { "id": "proceedings/ccns/2021/2711/0", "title": "2021 2nd International Conference on Computer Communication and Network Security (CCNS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccvw/2021/0191/0/019100a207", "title": "Synthetic Temporal Anomaly Guided End-to-End Video Anomaly Detection", "doi": null, "abstractUrl": "/proceedings-article/iccvw/2021/019100a207/1yNhrPQeCfm", "parentPublication": { "id": 
"proceedings/iccvw/2021/0191/0", "title": "2021 IEEE/CVF International Conference on Computer Vision Workshops (ICCVW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tk/2023/04/09669010", "title": "Time Series Anomaly Detection With Adversarial Reconstruction Networks", "doi": null, "abstractUrl": "/journal/tk/2023/04/09669010/1zTfSYY68Za", "parentPublication": { "id": "trans/tk", "title": "IEEE Transactions on Knowledge & Data Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "09714871", "articleId": "1B2CSl8NTOM", "__typename": "AdjacentArticleType" }, "next": { "fno": "09705125", "articleId": "1AII2mzg5eo", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNyGtjf5", "title": "April", "year": "2019", "issueNum": "04", "idPrefix": "tg", "pubType": "journal", "volume": "25", "label": "April", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "17YCN4eFKwN", "doi": "10.1109/TVCG.2018.2818156", "abstract": "We propose an end-to-end solution for presenting movie quality animated graphics to the user while still allowing the sense of presence afforded by free viewpoint head motion. By transforming offline rendered movie content into a novel immersive representation, we display the content in real-time according to the tracked head pose. For each frame, we generate a set of cubemap images per frame (colors and depths) using a sparse set of of cameras placed in the vicinity of the potential viewer locations. The cameras are placed with an optimization process so that the rendered data maximise coverage with minimum redundancy, depending on the lighting environment complexity. We compress the colors and depths separately, introducing an integrated spatial and temporal scheme tailored to high performance on GPUs for Virtual Reality applications. A view-dependent decompression algorithm decodes only the parts of the compressed video streams that are visible to users. We detail a real-time rendering algorithm using multi-view ray casting, with a variant that can handle strong view dependent effects such as mirror surfaces and glass. Compression rates of 150:1 and greater are demonstrated with quantitative analysis of image reconstruction quality and performance.", "abstracts": [ { "abstractType": "Regular", "content": "We propose an end-to-end solution for presenting movie quality animated graphics to the user while still allowing the sense of presence afforded by free viewpoint head motion. 
By transforming offline rendered movie content into a novel immersive representation, we display the content in real-time according to the tracked head pose. For each frame, we generate a set of cubemap images per frame (colors and depths) using a sparse set of of cameras placed in the vicinity of the potential viewer locations. The cameras are placed with an optimization process so that the rendered data maximise coverage with minimum redundancy, depending on the lighting environment complexity. We compress the colors and depths separately, introducing an integrated spatial and temporal scheme tailored to high performance on GPUs for Virtual Reality applications. A view-dependent decompression algorithm decodes only the parts of the compressed video streams that are visible to users. We detail a real-time rendering algorithm using multi-view ray casting, with a variant that can handle strong view dependent effects such as mirror surfaces and glass. Compression rates of 150:1 and greater are demonstrated with quantitative analysis of image reconstruction quality and performance.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We propose an end-to-end solution for presenting movie quality animated graphics to the user while still allowing the sense of presence afforded by free viewpoint head motion. By transforming offline rendered movie content into a novel immersive representation, we display the content in real-time according to the tracked head pose. For each frame, we generate a set of cubemap images per frame (colors and depths) using a sparse set of of cameras placed in the vicinity of the potential viewer locations. The cameras are placed with an optimization process so that the rendered data maximise coverage with minimum redundancy, depending on the lighting environment complexity. 
We compress the colors and depths separately, introducing an integrated spatial and temporal scheme tailored to high performance on GPUs for Virtual Reality applications. A view-dependent decompression algorithm decodes only the parts of the compressed video streams that are visible to users. We detail a real-time rendering algorithm using multi-view ray casting, with a variant that can handle strong view dependent effects such as mirror surfaces and glass. Compression rates of 150:1 and greater are demonstrated with quantitative analysis of image reconstruction quality and performance.", "title": "Compressed Animated Light Fields with Real-Time View-Dependent Reconstruction", "normalizedTitle": "Compressed Animated Light Fields with Real-Time View-Dependent Reconstruction", "fno": "08322310", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Computer Animation", "Data Compression", "Data Visualisation", "Image Coding", "Image Reconstruction", "Rendering Computer Graphics", "Video Streaming", "Virtual Reality", "Temporal Scheme", "View Dependent Decompression Algorithm", "Compressed Video Streams", "Real Time Rendering Algorithm", "Multiview Ray Casting", "Strong View Dependent Effects", "Compression Rates", "Image Reconstruction Quality", "Animated Light Fields", "Real Time View Dependent Reconstruction", "Movie Quality", "Free Viewpoint Head Motion", "Movie Content", "Novel Immersive Representation", "Tracked Head", "Cubemap Images", "Sparse Set", "Cameras", "Optimization Process", "Rendered Data Maximise Coverage", "Lighting Environment Complexity", "Integrated Spatial", "Virtual Reality Applications", "Viewer Locations", "Image Reconstruction Performance", "Compressed Animated Light Fields", "Rendering Computer Graphics", "Real Time Systems", "Image Reconstruction", "Cameras", "Streaming Media", "Probes", "Image Color Analysis", "Image Based Rendering", "Video Compression", "Light Field Rendering", "Multi View" ], "authors": [ { "givenName": "Charalampos", 
"surname": "Koniaris", "fullName": "Charalampos Koniaris", "affiliation": "Edinburgh University, Edinburgh, United Kingdom", "__typename": "ArticleAuthorType" }, { "givenName": "Maggie", "surname": "Kosek", "fullName": "Maggie Kosek", "affiliation": "Napier University, Edinburgh, United Kingdom", "__typename": "ArticleAuthorType" }, { "givenName": "David", "surname": "Sinclair", "fullName": "David Sinclair", "affiliation": "Edinburgh, Midlothian, United Kingdom", "__typename": "ArticleAuthorType" }, { "givenName": "Kenny", "surname": "Mitchell", "fullName": "Kenny Mitchell", "affiliation": "Napier University, Edinburgh, United Kingdom", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "04", "pubDate": "2019-04-01 00:00:00", "pubType": "trans", "pages": "1666-1680", "year": "2019", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/icme/2017/6067/0/08019417", "title": "Novel view synthesis with light-weight view-dependent texture mapping for a stereoscopic HMD", "doi": null, "abstractUrl": "/proceedings-article/icme/2017/08019417/12OmNx8wTwx", "parentPublication": { "id": "proceedings/icme/2017/6067/0", "title": "2017 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cad-graphics/2015/8020/0/07450419", "title": "View-Dependent Projective Atlases", "doi": null, "abstractUrl": "/proceedings-article/cad-graphics/2015/07450419/12OmNxWuihn", "parentPublication": { "id": "proceedings/cad-graphics/2015/8020/0", "title": "2015 14th International Conference on Computer-Aided Design and Computer Graphics (CAD/Graphics)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2004/8603/3/01394702", "title": "Reconstructing 
dense light field from a multi-focus images array", "doi": null, "abstractUrl": "/proceedings-article/icme/2004/01394702/12OmNyRxFDN", "parentPublication": { "id": "proceedings/icme/2004/8603/3", "title": "2004 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2017/1032/0/1032c262", "title": "Learning to Synthesize a 4D RGBD Light Field from a Single Image", "doi": null, "abstractUrl": "/proceedings-article/iccv/2017/1032c262/12OmNzmclkx", "parentPublication": { "id": "proceedings/iccv/2017/1032/0", "title": "2017 IEEE International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2010/06/ttg2010061578", "title": "View-Dependent Streamlines for 3D Vector Fields", "doi": null, "abstractUrl": "/journal/tg/2010/06/ttg2010061578/13rRUxASuGd", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2018/6420/0/642000g267", "title": "Inferring Light Fields from Shadows", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2018/642000g267/17D45XvMcaB", "parentPublication": { "id": "proceedings/cvpr/2018/6420/0", "title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600m2943", "title": "Towards Multimodal Depth Estimation from Light Fields", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600m2943/1H1k4uRP4sM", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/ismar/2022/5325/0/532500a584", "title": "PanoSynthVR: Toward Light-weight 360-Degree View Synthesis from a Single Panoramic Input", "doi": null, "abstractUrl": "/proceedings-article/ismar/2022/532500a584/1JrQVOFAlhu", "parentPublication": { "id": "proceedings/ismar/2022/5325/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2020/7168/0/716800c552", "title": "Learning Fused Pixel and Feature-Based View Reconstructions for Light Fields", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2020/716800c552/1m3o6tGQqMo", "parentPublication": { "id": "proceedings/cvpr/2020/7168/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccp/2021/1952/0/09466274", "title": "View-dependent Scene Appearance Synthesis using Inverse Rendering from Light Fields", "doi": null, "abstractUrl": "/proceedings-article/iccp/2021/09466274/1uSSV7tRhSw", "parentPublication": { "id": "proceedings/iccp/2021/1952/0", "title": "2021 IEEE International Conference on Computational Photography (ICCP)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "08322258", "articleId": "17YCN5E6cAE", "__typename": "AdjacentArticleType" }, "next": { "fno": "08322185", "articleId": "17YCN2UVh4c", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNAtstbj", "title": "Sept.", "year": "2012", "issueNum": "09", "idPrefix": "tg", "pubType": "journal", "volume": "18", "label": "Sept.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUx0xPTP", "doi": "10.1109/TVCG.2011.276", "abstract": "A curvature-adaptive implicit surface reconstruction for noisy and irregularly spaced points in 3D is introduced. The reconstructed surface traces the zero crossings of a signed field obtained from the sum of first-derivative anisotropic Gaussians centered at the points. The standard deviations of the anisotropic Gaussians are adapted to surface curvatures estimated from local data. A key characteristic of the formulation is its ability to smooth more along edges than across them, thereby preserving shape details while smoothing noise. The behavior of the proposed method under various density and organization of points is investigated and surface reconstruction results are compared with those obtained by well-known methods in the literature.", "abstracts": [ { "abstractType": "Regular", "content": "A curvature-adaptive implicit surface reconstruction for noisy and irregularly spaced points in 3D is introduced. The reconstructed surface traces the zero crossings of a signed field obtained from the sum of first-derivative anisotropic Gaussians centered at the points. The standard deviations of the anisotropic Gaussians are adapted to surface curvatures estimated from local data. A key characteristic of the formulation is its ability to smooth more along edges than across them, thereby preserving shape details while smoothing noise. 
The behavior of the proposed method under various density and organization of points is investigated and surface reconstruction results are compared with those obtained by well-known methods in the literature.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "A curvature-adaptive implicit surface reconstruction for noisy and irregularly spaced points in 3D is introduced. The reconstructed surface traces the zero crossings of a signed field obtained from the sum of first-derivative anisotropic Gaussians centered at the points. The standard deviations of the anisotropic Gaussians are adapted to surface curvatures estimated from local data. A key characteristic of the formulation is its ability to smooth more along edges than across them, thereby preserving shape details while smoothing noise. The behavior of the proposed method under various density and organization of points is investigated and surface reconstruction results are compared with those obtained by well-known methods in the literature.", "title": "A Curvature-Adaptive Implicit Surface Reconstruction for Irregularly Spaced Points", "normalizedTitle": "A Curvature-Adaptive Implicit Surface Reconstruction for Irregularly Spaced Points", "fno": "06081858", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Solid Modelling", "Gaussian Processes", "Smoothing Methods", "Computer Graphics", "Curvature Adaptive Implicit Surface Reconstruction", "Irregularly Spaced Points", "Noisy Spaced Points", "3 D Surface Reconstruction", "Signed Field Zero Crossings", "First Derivative Anisotropic Gaussians", "Standard Deviations", "Surface Curvatures", "Local Data", "Shape Details Preservation", "Noise Smoothing", "Surface Reconstruction", "Surface Treatment", "Shape", "Image Reconstruction", "Smoothing Methods", "Surface Roughness", "Rough Surfaces", "Smoothness Parameter", "Computer Graphics", "Surface Reconstruction", "Implicit Surface", "Point Cloud" ], "authors": [ { "givenName": "A. 
A.", "surname": "Goshtasby", "fullName": "A. A. Goshtasby", "affiliation": "Dept. of Comput. Sci. & Eng., Wright State Univ., Dayton, OH, USA", "__typename": "ArticleAuthorType" }, { "givenName": "L. G.", "surname": "Zagorchev", "fullName": "L. G. Zagorchev", "affiliation": "Thayer Sch. of Eng., Dartmouth Coll., Hanover, NH, USA", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "09", "pubDate": "2012-09-01 00:00:00", "pubType": "trans", "pages": "1460-1473", "year": "2012", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/isspit/2007/1834/0/04458028", "title": "A Framework for Implicit Surfaces Reconstruction form Large Clouds of Points", "doi": null, "abstractUrl": "/proceedings-article/isspit/2007/04458028/12OmNALCNso", "parentPublication": { "id": "proceedings/isspit/2007/1834/0", "title": "2007 IEEE International Symposium on Signal Processing and Information Technology", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wkdd/2010/5397/0/05432585", "title": "Scattered Points Denoising of TC-Bézier Surface Fitting", "doi": null, "abstractUrl": "/proceedings-article/wkdd/2010/05432585/12OmNAlNiMA", "parentPublication": { "id": "proceedings/wkdd/2010/5397/0", "title": "2010 3rd International Conference on Knowledge Discovery and Data Mining (WKDD 2010)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/1999/5897/0/58970055", "title": "Spiraling Edge: Fast Surface Reconstruction from Partially Organized Sample Points", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/1999/58970055/12OmNAoDhVE", "parentPublication": { "id": "proceedings/ieee-vis/1999/5897/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "proceedings/iih-msp/2008/3278/0/3278a913", "title": "Incomplete Points Cloud Data Surface Reconstruction Based on Neural Network", "doi": null, "abstractUrl": "/proceedings-article/iih-msp/2008/3278a913/12OmNBTawwc", "parentPublication": { "id": "proceedings/iih-msp/2008/3278/0", "title": "2008 Fourth International Conference on Intelligent Information Hiding and Multimedia Signal Processing (IIH-MSP)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/isdea/2013/4893/0/06456227", "title": "Estimating Discrete Surface Curvature Based on Voronoi Poles", "doi": null, "abstractUrl": "/proceedings-article/isdea/2013/06456227/12OmNvT2oWn", "parentPublication": { "id": "proceedings/isdea/2013/4893/0", "title": "2013 Third International Conference on Intelligent System Design and Engineering Applications (ISDEA 2013)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/isvd/2010/4112/0/4112a042", "title": "Nearest-Neighbor Queries with Well-Spaced Points", "doi": null, "abstractUrl": "/proceedings-article/isvd/2010/4112a042/12OmNz61cX6", "parentPublication": { "id": "proceedings/isvd/2010/4112/0", "title": "2010 International Symposium on Voronoi Diagrams in Science and Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/2003/2030/0/20300067", "title": "Curvature-Based Transfer Functions for Direct Volume Rendering: Methods and Applications", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2003/20300067/12OmNz61d84", "parentPublication": { "id": "proceedings/ieee-vis/2003/2030/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2001/02/mcg2001020060", "title": "Curvature-Dependent Triangulation of Implicit Surfaces", "doi": null, "abstractUrl": 
"/magazine/cg/2001/02/mcg2001020060/13rRUxk89gB", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2023/9346/0/934600e319", "title": "Recovering Fine Details for Neural Implicit Surface Reconstruction", "doi": null, "abstractUrl": "/proceedings-article/wacv/2023/934600e319/1KxUSVbk6He", "parentPublication": { "id": "proceedings/wacv/2023/9346/0", "title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/10113186", "title": "Patching Non-Uniform Extraordinary Points", "doi": null, "abstractUrl": "/journal/tg/5555/01/10113186/1MNbNVYb4sw", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "06095545", "articleId": "13rRUxASu0J", "__typename": "AdjacentArticleType" }, "next": { "fno": "06109250", "articleId": "13rRUwcAqqe", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNxI0KAN", "title": "May", "year": "2014", "issueNum": "05", "idPrefix": "tp", "pubType": "journal", "volume": "36", "label": "May", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUNvPLaQ", "doi": "10.1109/TPAMI.2013.179", "abstract": "This paper presents a novel approach that achieves dynamic surface alignment by geodesing mapping. The surfaces are 3D manifold meshes representing non-rigid objects in motion (e.g., humans) which can be obtained by multiview stereo reconstruction. The proposed framework consists of a geodesic mapping (i.e., geodesic diffeomorphism) between surfaces which carry a distance function (namely the global geodesic distance), and a geodesic-based coordinate system (namely the global geodesic coordinates) defined similarly to generalized barycentric coordinates. The coordinates are used to recursively choose correspondence points in non-ambiguous regions using a coarse-to-fine strategy to reliably locate all surface points and define a discrete mapping. Complete point-to-point surface alignment with smooth mapping is then derived by optimizing a piecewise objective function within a probabilistic framework. The proposed technique only relies on surface intrinsic geometrical properties, and does not require prior knowledge on surface appearance (e.g., color or texture), shape (e.g., topology) or parameterization (e.g., mesh connectivity or complexity). The method can be used for numerous applications, such as visual information (e.g., texture) transfer between surface models representing different objects, dense motion flow estimation of 3D dynamic surfaces, wide-timeframe matching, etc. 
Experiments show compelling results on challenging publicly available real-world datasets.", "abstracts": [ { "abstractType": "Regular", "content": "This paper presents a novel approach that achieves dynamic surface alignment by geodesing mapping. The surfaces are 3D manifold meshes representing non-rigid objects in motion (e.g., humans) which can be obtained by multiview stereo reconstruction. The proposed framework consists of a geodesic mapping (i.e., geodesic diffeomorphism) between surfaces which carry a distance function (namely the global geodesic distance), and a geodesic-based coordinate system (namely the global geodesic coordinates) defined similarly to generalized barycentric coordinates. The coordinates are used to recursively choose correspondence points in non-ambiguous regions using a coarse-to-fine strategy to reliably locate all surface points and define a discrete mapping. Complete point-to-point surface alignment with smooth mapping is then derived by optimizing a piecewise objective function within a probabilistic framework. The proposed technique only relies on surface intrinsic geometrical properties, and does not require prior knowledge on surface appearance (e.g., color or texture), shape (e.g., topology) or parameterization (e.g., mesh connectivity or complexity). The method can be used for numerous applications, such as visual information (e.g., texture) transfer between surface models representing different objects, dense motion flow estimation of 3D dynamic surfaces, wide-timeframe matching, etc. Experiments show compelling results on challenging publicly available real-world datasets.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "This paper presents a novel approach that achieves dynamic surface alignment by geodesing mapping. The surfaces are 3D manifold meshes representing non-rigid objects in motion (e.g., humans) which can be obtained by multiview stereo reconstruction. 
The proposed framework consists of a geodesic mapping (i.e., geodesic diffeomorphism) between surfaces which carry a distance function (namely the global geodesic distance), and a geodesic-based coordinate system (namely the global geodesic coordinates) defined similarly to generalized barycentric coordinates. The coordinates are used to recursively choose correspondence points in non-ambiguous regions using a coarse-to-fine strategy to reliably locate all surface points and define a discrete mapping. Complete point-to-point surface alignment with smooth mapping is then derived by optimizing a piecewise objective function within a probabilistic framework. The proposed technique only relies on surface intrinsic geometrical properties, and does not require prior knowledge on surface appearance (e.g., color or texture), shape (e.g., topology) or parameterization (e.g., mesh connectivity or complexity). The method can be used for numerous applications, such as visual information (e.g., texture) transfer between surface models representing different objects, dense motion flow estimation of 3D dynamic surfaces, wide-timeframe matching, etc. 
Experiments show compelling results on challenging publicly available real-world datasets.", "title": "Geodesic Mapping for Dynamic Surface Alignment", "normalizedTitle": "Geodesic Mapping for Dynamic Surface Alignment", "fno": "06605689", "hasPdf": true, "idPrefix": "tp", "keywords": [ "Stereo Image Processing", "Differential Geometry", "Image Reconstruction", "Probability", "Surface Intrinsic Geometrical Properties", "Geodesic Mapping", "Dynamic Surface Alignment", "3 D Manifold Mesh", "Multiview Stereo Reconstruction", "Geodesic Diffeomorphism", "Distance Function", "Global Geodesic Distance", "Geodesic Based Coordinate System", "Global Geodesic Coordinates", "Generalized Barycentric Coordinates", "Coarse To Fine Strategy", "Discrete Mapping", "Point To Point Surface Alignment", "Smooth Mapping", "Piecewise Objective Function Optimization", "Probabilistic Framework", "Surface Reconstruction", "Three Dimensional Displays", "Surface Treatment", "Surface Texture", "Shape", "Topology", "Robustness", "MRF", "Geodesic Mapping", "Non Rigid Surface Alignment", "Dynamic Surface", "Multiple View Stereo", "MRF", "Geodesic Mapping", "Surface Alignment", "Dynamic Surface", "Non Rigid Deformation", "3 D Video" ], "authors": [ { "givenName": "Tony", "surname": "Tung", "fullName": "Tony Tung", "affiliation": "Grad. Sch. of Inf., Kyoto Univ., Kyoto, Japan", "__typename": "ArticleAuthorType" }, { "givenName": "Takashi", "surname": "Matsuyama", "fullName": "Takashi Matsuyama", "affiliation": "Grad. Sch. 
of Inf., Kyoto Univ., Kyoto, Japan", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "05", "pubDate": "2014-05-01 00:00:00", "pubType": "trans", "pages": "901-913", "year": "2014", "issn": "0162-8828", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/cvpr/2010/6984/0/05539806", "title": "Dynamic surface matching by geodesic mapping for 3D animation transfer", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2010/05539806/12OmNAXxXiT", "parentPublication": { "id": "proceedings/cvpr/2010/6984/0", "title": "2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sibgrapi/2010/8420/0/05720356", "title": "Geotextures: A Multi-source Geodesic Distance Field Approach for Procedural Texturing of Complex Meshes", "doi": null, "abstractUrl": "/proceedings-article/sibgrapi/2010/05720356/12OmNBOUxso", "parentPublication": { "id": "proceedings/sibgrapi/2010/8420/0", "title": "2010 23rd SIBGRAPI Conference on Graphics, Patterns and Images", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/2005/2766/0/01532840", "title": "Topology-driven surface mappings with robust feature alignment", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2005/01532840/12OmNBgQFHv", "parentPublication": { "id": "proceedings/ieee-vis/2005/2766/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdh/2014/4284/0/4284a173", "title": "Texture Mapping Based on Projection and Viewpoints", "doi": null, "abstractUrl": "/proceedings-article/icdh/2014/4284a173/12OmNvjgWVu", "parentPublication": { "id": "proceedings/icdh/2014/4284/0", "title": 
"2014 5th International Conference on Digital Home (ICDH)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2017/1032/0/1032c344", "title": "3D Surface Detail Enhancement from a Single Normal Map", "doi": null, "abstractUrl": "/proceedings-article/iccv/2017/1032c344/12OmNwCsdKG", "parentPublication": { "id": "proceedings/iccv/2017/1032/0", "title": "2017 IEEE International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2017/05/07469384", "title": "Hyperbolic Harmonic Mapping for Surface Registration", "doi": null, "abstractUrl": "/journal/tp/2017/05/07469384/13rRUwd9CHh", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2012/10/ttg2012101664", "title": "A Triangulation-Invariant Method for Anisotropic Geodesic Map Computation on Surface Meshes", "doi": null, "abstractUrl": "/journal/tg/2012/10/ttg2012101664/13rRUwdIOUJ", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2000/02/v0181", "title": "Conformal Surface Parameterization for Texture Mapping", "doi": null, "abstractUrl": "/journal/tg/2000/02/v0181/13rRUxBJhFj", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2018/6420/0/642000c916", "title": "Deep Marching Cubes: Learning Explicit Surface Representations", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2018/642000c916/17D45WrVg22", "parentPublication": { "id": "proceedings/cvpr/2018/6420/0", "title": "2018 IEEE/CVF Conference on 
Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icceic/2020/8573/0/857300a199", "title": "Algorithm realization and application of geodesic", "doi": null, "abstractUrl": "/proceedings-article/icceic/2020/857300a199/1rCguSbHGOA", "parentPublication": { "id": "proceedings/icceic/2020/8573/0", "title": "2020 International Conference on Computer Engineering and Intelligent Control (ICCEIC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "06613486", "articleId": "13rRUy2YLZD", "__typename": "AdjacentArticleType" }, "next": { "fno": "06626306", "articleId": "13rRUwbs1TK", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNqJHFwx", "title": "April", "year": "2016", "issueNum": "04", "idPrefix": "tg", "pubType": "journal", "volume": "22", "label": "April", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUwvBy8W", "doi": "10.1109/TVCG.2016.2518099", "abstract": "Three-dimensional modeling has long been regarded as an ideal application for virtual reality (VR), but current VR-based 3D modeling tools suffer from two problems that limit creativity and applicability: (1) the lack of control for freehand modeling, and (2) the difficulty of starting from scratch. To address these challenges, we present Lift-Off, an immersive 3D interface for creating complex models with a controlled, handcrafted style. Artists start outside of VR with 2D sketches, which are then imported and positioned in VR. Then, using a VR interface built on top of image processing algorithms, 2D curves within the sketches are selected interactively and &#x201C;lifted&#x201D; into space to create a 3D scaffolding for the model. Finally, artists sweep surfaces along these curves to create 3D models. Evaluations are presented for both long-term users and for novices who each created a 3D sailboat model from the same starting sketch. Qualitative results are positive, with the visual style of the resulting models of animals and other organic subjects as well as architectural models matching what is possible with traditional fine art media. 
In addition, quantitative data from logging features built into the software are used to characterize typical tool use and suggest areas for further refinement of the interface.", "abstracts": [ { "abstractType": "Regular", "content": "Three-dimensional modeling has long been regarded as an ideal application for virtual reality (VR), but current VR-based 3D modeling tools suffer from two problems that limit creativity and applicability: (1) the lack of control for freehand modeling, and (2) the difficulty of starting from scratch. To address these challenges, we present Lift-Off, an immersive 3D interface for creating complex models with a controlled, handcrafted style. Artists start outside of VR with 2D sketches, which are then imported and positioned in VR. Then, using a VR interface built on top of image processing algorithms, 2D curves within the sketches are selected interactively and &#x201C;lifted&#x201D; into space to create a 3D scaffolding for the model. Finally, artists sweep surfaces along these curves to create 3D models. Evaluations are presented for both long-term users and for novices who each created a 3D sailboat model from the same starting sketch. Qualitative results are positive, with the visual style of the resulting models of animals and other organic subjects as well as architectural models matching what is possible with traditional fine art media. In addition, quantitative data from logging features built into the software are used to characterize typical tool use and suggest areas for further refinement of the interface.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Three-dimensional modeling has long been regarded as an ideal application for virtual reality (VR), but current VR-based 3D modeling tools suffer from two problems that limit creativity and applicability: (1) the lack of control for freehand modeling, and (2) the difficulty of starting from scratch. 
To address these challenges, we present Lift-Off, an immersive 3D interface for creating complex models with a controlled, handcrafted style. Artists start outside of VR with 2D sketches, which are then imported and positioned in VR. Then, using a VR interface built on top of image processing algorithms, 2D curves within the sketches are selected interactively and “lifted” into space to create a 3D scaffolding for the model. Finally, artists sweep surfaces along these curves to create 3D models. Evaluations are presented for both long-term users and for novices who each created a 3D sailboat model from the same starting sketch. Qualitative results are positive, with the visual style of the resulting models of animals and other organic subjects as well as architectural models matching what is possible with traditional fine art media. In addition, quantitative data from logging features built into the software are used to characterize typical tool use and suggest areas for further refinement of the interface.", "title": "Lift-Off: Using Reference Imagery and Freehand Sketching to Create 3D Models in VR", "normalizedTitle": "Lift-Off: Using Reference Imagery and Freehand Sketching to Create 3D Models in VR", "fno": "07383322", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Art", "Image Processing", "User Interfaces", "Virtual Reality", "Reference Imagery", "Freehand Sketching", "Virtual Reality", "VR Based 3 D Modeling Tool", "Lift Off", "Scratch", "Immersive 3 D Interface", "Handcrafted Style", "2 D Sketch", "VR Interface", "Image Processing Algorithm", "2 D Curve", "3 D Scaffolding", "3 D Sailboat Model", "Three Dimensional Displays", "Solid Modeling", "Computational Modeling", "User Interfaces", "Surface Treatment", "Art", "Shape", "Immersive 3 D Modeling", "Virtual Reality", "3 D User Interfaces", "Sketch Based Modeling", "Immersive 3 D Modeling", "Virtual Reality", "3 D User Interfaces", "Sketch Based Modeling" ], "authors": [ { "givenName": "Bret", "surname": 
"Jackson", "fullName": "Bret Jackson", "affiliation": ", Macalester College", "__typename": "ArticleAuthorType" }, { "givenName": "Daniel F.", "surname": "Keefe", "fullName": "Daniel F. Keefe", "affiliation": ", Macalester College", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "04", "pubDate": "2016-04-01 00:00:00", "pubType": "trans", "pages": "1442-1451", "year": "2016", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/vr/2018/3365/0/08446595", "title": "Fluid Sketching&#x2015;Immersive Sketching Based on Fluid Flow", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08446595/13bd1eOELL3", "parentPublication": { "id": "proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/10/08421591", "title": "Model-Guided 3D Sketching", "doi": null, "abstractUrl": "/journal/tg/2019/10/08421591/13rRUEgs2Mb", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2008/04/ttg2008040835", "title": "Scientific Sketching for Collaborative VR Visualization Design", "doi": null, "abstractUrl": "/journal/tg/2008/04/ttg2008040835/13rRUwI5UfX", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cw/2018/7315/0/731500a066", "title": "Enhancing Sketching and Sculpting for Shape Modeling", "doi": null, "abstractUrl": "/proceedings-article/cw/2018/731500a066/17D45WWzW7i", "parentPublication": { "id": "proceedings/cw/2018/7315/0", 
"title": "2018 International Conference on Cyberworlds (CW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2022/8402/0/840200a968", "title": "Asymmetric interfaces with stylus and gesture for VR sketching", "doi": null, "abstractUrl": "/proceedings-article/vrw/2022/840200a968/1CJdzTRQ9s4", "parentPublication": { "id": "proceedings/vrw/2022/8402/0", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cw/2019/2297/0/229700a155", "title": "Realistic Folded Surface Modeling from Sketching", "doi": null, "abstractUrl": "/proceedings-article/cw/2019/229700a155/1fHkotoz84w", "parentPublication": { "id": "proceedings/cw/2019/2297/0", "title": "2019 International Conference on Cyberworlds (CW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/visap/2020/8553/0/855300a019", "title": "Printmaking, Puzzles, and Studio Closets: Using Artistic Metaphors to Reimagine the User Interface for Designing Immersive Visualizations", "doi": null, "abstractUrl": "/proceedings-article/visap/2020/855300a019/1q7jxTJwiRO", "parentPublication": { "id": "proceedings/visap/2020/8553/0", "title": "2020 IEEE VIS Arts Program (VISAP)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2020/8128/0/812800a081", "title": "Towards 3D VR-Sketch to 3D Shape Retrieval", "doi": null, "abstractUrl": "/proceedings-article/3dv/2020/812800a081/1qyxlDtR0Ji", "parentPublication": { "id": "proceedings/3dv/2020/8128/0", "title": "2020 International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2021/1838/0/255600a826", "title": "Mid-Air Finger Sketching for Tree Modeling", "doi": null, "abstractUrl": 
"/proceedings-article/vr/2021/255600a826/1tuBbGEUWm4", "parentPublication": { "id": "proceedings/vr/2021/1838/0", "title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2021/0158/0/015800a329", "title": "BuildingSketch: Freehand Mid-Air Sketching for Building Modeling", "doi": null, "abstractUrl": "/proceedings-article/ismar/2021/015800a329/1yeCWcklIfm", "parentPublication": { "id": "proceedings/ismar/2021/0158/0", "title": "2021 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "07383324", "articleId": "13rRUwI5Ugg", "__typename": "AdjacentArticleType" }, "next": { "fno": "07383336", "articleId": "13rRUy0HYRt", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNwFid7w", "title": "Jan.", "year": "2019", "issueNum": "01", "idPrefix": "tg", "pubType": "journal", "volume": "25", "label": "Jan.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "17D45Vw15xs", "doi": "10.1109/TVCG.2018.2864813", "abstract": "We present a formal approach to the visual analysis of recirculation in flows by introducing recirculation surfaces for 3D unsteady flow fields. Recirculation surfaces are the loci where massless particle integration returns to its starting point after some variable, finite integration. We give a rigorous definition of recirculation surfaces as 2-manifolds embedded in 5D space and study their properties. Based on this we construct an algorithm for their extraction, which searches for intersections of a recirculation surface with lines defined in 3D. This reduces the problem to a repeated search for critical points in 3D vector fields. We provide a uniform sampling of the search space paired with a surface reconstruction and visualize results. This way, we present the first algorithm for a comprehensive feature extraction in the 5D flow map of a 3D flow. The problem of finding isolated closed orbits in steady vector fields occurs as a special case of recirculation surfaces. This includes isolated closed orbits with saddle behavior. We show recirculation surfaces for a number of artificial and real flow data sets.", "abstracts": [ { "abstractType": "Regular", "content": "We present a formal approach to the visual analysis of recirculation in flows by introducing recirculation surfaces for 3D unsteady flow fields. Recirculation surfaces are the loci where massless particle integration returns to its starting point after some variable, finite integration. We give a rigorous definition of recirculation surfaces as 2-manifolds embedded in 5D space and study their properties. 
Based on this we construct an algorithm for their extraction, which searches for intersections of a recirculation surface with lines defined in 3D. This reduces the problem to a repeated search for critical points in 3D vector fields. We provide a uniform sampling of the search space paired with a surface reconstruction and visualize results. This way, we present the first algorithm for a comprehensive feature extraction in the 5D flow map of a 3D flow. The problem of finding isolated closed orbits in steady vector fields occurs as a special case of recirculation surfaces. This includes isolated closed orbits with saddle behavior. We show recirculation surfaces for a number of artificial and real flow data sets.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We present a formal approach to the visual analysis of recirculation in flows by introducing recirculation surfaces for 3D unsteady flow fields. Recirculation surfaces are the loci where massless particle integration returns to its starting point after some variable, finite integration. We give a rigorous definition of recirculation surfaces as 2-manifolds embedded in 5D space and study their properties. Based on this we construct an algorithm for their extraction, which searches for intersections of a recirculation surface with lines defined in 3D. This reduces the problem to a repeated search for critical points in 3D vector fields. We provide a uniform sampling of the search space paired with a surface reconstruction and visualize results. This way, we present the first algorithm for a comprehensive feature extraction in the 5D flow map of a 3D flow. The problem of finding isolated closed orbits in steady vector fields occurs as a special case of recirculation surfaces. This includes isolated closed orbits with saddle behavior. 
We show recirculation surfaces for a number of artificial and real flow data sets.", "title": "Recirculation Surfaces for Flow Visualization", "normalizedTitle": "Recirculation Surfaces for Flow Visualization", "fno": "08440089", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Computational Fluid Dynamics", "Computational Geometry", "Feature Extraction", "Flow Instability", "Flow Simulation", "Flow Visualisation", "Vectors", "Flow Visualization", "3 D Vector Fields", "Uniform Sampling", "Feature Extraction", "5 D Flow Map", "3 D Flow", "Saddle Behavior", "3 D Unsteady Flow Fields", "Recirculation Surface", "Time 5 0 D", "Three Dimensional Displays", "Orbits", "Visualization", "Surface Treatment", "Two Dimensional Displays", "Null Space", "Data Visualization", "Flow Visualization", "Recirculation", "Unsteady Flow" ], "authors": [ { "givenName": "Thomas", "surname": "Wilde", "fullName": "Thomas Wilde", "affiliation": "Visual Computing groupUniversity of Magdeburg", "__typename": "ArticleAuthorType" }, { "givenName": "Christian", "surname": "Rössi", "fullName": "Christian Rössi", "affiliation": "Visual Computing groupUniversity of Magdeburg", "__typename": "ArticleAuthorType" }, { "givenName": "Holger", "surname": "Theisel", "fullName": "Holger Theisel", "affiliation": "Visual Computing groupUniversity of Magdeburg", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "01", "pubDate": "2019-01-01 00:00:00", "pubType": "trans", "pages": "946-955", "year": "2019", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/nicoint/2016/2305/0/2305a152", "title": "3D Simulator of a Rolling Baton on Cylindrical Surfaces", "doi": null, "abstractUrl": "/proceedings-article/nicoint/2016/2305a152/12OmNAWH9DN", "parentPublication": { "id": "proceedings/nicoint/2016/2305/0", "title": "2016 
Nicograph International (NicoInt)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2017/0733/0/0733b287", "title": "Generating 5D Light Fields in Scattering Media for Representing 3D Images", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2017/0733b287/12OmNAndigF", "parentPublication": { "id": "proceedings/cvprw/2017/0733/0", "title": "2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/2003/2030/0/20300018", "title": "Image Space Based Visualization of Unsteady Flow on Surfaces", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2003/20300018/12OmNxH9Xhw", "parentPublication": { "id": "proceedings/ieee-vis/2003/2030/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cad-graphics/2013/2576/0/06815040", "title": "Illusory Motions on Surfaces", "doi": null, "abstractUrl": "/proceedings-article/cad-graphics/2013/06815040/12OmNxXCGKC", "parentPublication": { "id": "proceedings/cad-graphics/2013/2576/0", "title": "2013 International Conference on Computer-Aided Design and Computer Graphics (CAD/Graphics)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2016/01/07102748", "title": "Gauge Invariant Framework for Shape Analysis of Surfaces", "doi": null, "abstractUrl": "/journal/tp/2016/01/07102748/13rRUwkxc6I", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2016/03/mcg2016030060", "title": "Evaluating Shape Alignment via Ensemble Visualization", "doi": null, "abstractUrl": "/magazine/cg/2016/03/mcg2016030060/13rRUxASu3j", "parentPublication": { "id": 
"mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/08/08396300", "title": "Decal-Lenses: Interactive Lenses on Surfaces for Multivariate Visualization", "doi": null, "abstractUrl": "/journal/tg/2019/08/08396300/13rRUyeCkap", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/scivis/2018/6882/0/08823618", "title": "3De Interactive Lenses for Visualization in Virtual Environments", "doi": null, "abstractUrl": "/proceedings-article/scivis/2018/08823618/1d5kwZvgfNm", "parentPublication": { "id": "proceedings/scivis/2018/6882/0", "title": "2018 IEEE Scientific Visualization Conference (SciVis)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/06/09410458", "title": "SurfRiver: Flattening Stream Surfaces for Comparative Visualization", "doi": null, "abstractUrl": "/journal/tg/2021/06/09410458/1sYYubtk9va", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/01/09552927", "title": "Feature Curves and Surfaces of 3D Asymmetric Tensor Fields", "doi": null, "abstractUrl": "/journal/tg/2022/01/09552927/1xic6oeRxnO", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "08440097", "articleId": "17D45WaTknK", "__typename": "AdjacentArticleType" }, "next": { "fno": "08440096", "articleId": "17D45WXIkH8", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "17ShDTXnFs1", "name": 
"ttg201901-08440089s1.zip", "location": "https://www.computer.org/csdl/api/v1/extra/ttg201901-08440089s1.zip", "extension": "zip", "size": "4.14 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "1JInFQ8f8Q0", "title": "Feb.", "year": "2023", "issueNum": "02", "idPrefix": "tp", "pubType": "journal", "volume": "45", "label": "Feb.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1CbVkWyt0LC", "doi": "10.1109/TPAMI.2022.3163720", "abstract": "We propose a novel framework to learn the spatiotemporal variability in longitudinal 3D shape data sets, which contain observations of objects that evolve and deform over time. This problem is challenging since surfaces come with arbitrary parameterizations and thus, they need to be spatially registered. Also, different deforming objects, hereinafter referred to as <italic>4D surfaces</italic>, evolve at different speeds and thus they need to be temporally aligned. We solve this spatiotemporal registration problem using a Riemannian approach. We treat a 3D surface as a point in a shape space equipped with an elastic Riemannian metric that measures the amount of bending and stretching that the surfaces undergo. A 4D surface can then be seen as a trajectory in this space. With this formulation, the statistical analysis of 4D surfaces can be cast as the problem of analyzing trajectories embedded in a nonlinear Riemannian manifold. However, performing the spatiotemporal registration, and subsequently computing statistics, on such nonlinear spaces is not straightforward as they rely on complex nonlinear optimizations. Our core contribution is the mapping of the surfaces to the space of Square-Root Normal Fields (SRNF) where the <inline-formula><tex-math notation=\"LaTeX\">Z_$\\mathbb {L}^{2}$_Z</tex-math></inline-formula> metric is equivalent to the partial elastic metric in the space of surfaces. Thus, by solving the spatial registration in the SRNF space, the problem of analyzing 4D surfaces becomes the problem of analyzing trajectories embedded in the SRNF space, which has a euclidean structure. 
In this paper, we develop the building blocks that enable such analysis. These include: <bold>(1)</bold> the spatiotemporal registration of arbitrarily parameterized 4D surfaces even in the presence of large elastic deformations and large variations in their execution rates; <bold>(2)</bold> the computation of geodesics between 4D surfaces; <bold>(3)</bold> the computation of statistical summaries, such as means and modes of variation, of collections of 4D surfaces; and <bold>(4)</bold> the synthesis of random 4D surfaces. We demonstrate the performance of the proposed framework using 4D facial surfaces and 4D human body shapes.", "abstracts": [ { "abstractType": "Regular", "content": "We propose a novel framework to learn the spatiotemporal variability in longitudinal 3D shape data sets, which contain observations of objects that evolve and deform over time. This problem is challenging since surfaces come with arbitrary parameterizations and thus, they need to be spatially registered. Also, different deforming objects, hereinafter referred to as <italic>4D surfaces</italic>, evolve at different speeds and thus they need to be temporally aligned. We solve this spatiotemporal registration problem using a Riemannian approach. We treat a 3D surface as a point in a shape space equipped with an elastic Riemannian metric that measures the amount of bending and stretching that the surfaces undergo. A 4D surface can then be seen as a trajectory in this space. With this formulation, the statistical analysis of 4D surfaces can be cast as the problem of analyzing trajectories embedded in a nonlinear Riemannian manifold. However, performing the spatiotemporal registration, and subsequently computing statistics, on such nonlinear spaces is not straightforward as they rely on complex nonlinear optimizations. 
Our core contribution is the mapping of the surfaces to the space of Square-Root Normal Fields (SRNF) where the <inline-formula><tex-math notation=\"LaTeX\">$\\mathbb {L}^{2}$</tex-math><alternatives><mml:math><mml:msup><mml:mi mathvariant=\"double-struck\">L</mml:mi><mml:mn>2</mml:mn></mml:msup></mml:math><inline-graphic xlink:href=\"laga-ieq1-3163720.gif\"/></alternatives></inline-formula> metric is equivalent to the partial elastic metric in the space of surfaces. Thus, by solving the spatial registration in the SRNF space, the problem of analyzing 4D surfaces becomes the problem of analyzing trajectories embedded in the SRNF space, which has a euclidean structure. In this paper, we develop the building blocks that enable such analysis. These include: <bold>(1)</bold> the spatiotemporal registration of arbitrarily parameterized 4D surfaces even in the presence of large elastic deformations and large variations in their execution rates; <bold>(2)</bold> the computation of geodesics between 4D surfaces; <bold>(3)</bold> the computation of statistical summaries, such as means and modes of variation, of collections of 4D surfaces; and <bold>(4)</bold> the synthesis of random 4D surfaces. We demonstrate the performance of the proposed framework using 4D facial surfaces and 4D human body shapes.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We propose a novel framework to learn the spatiotemporal variability in longitudinal 3D shape data sets, which contain observations of objects that evolve and deform over time. This problem is challenging since surfaces come with arbitrary parameterizations and thus, they need to be spatially registered. Also, different deforming objects, hereinafter referred to as 4D surfaces, evolve at different speeds and thus they need to be temporally aligned. We solve this spatiotemporal registration problem using a Riemannian approach. 
We treat a 3D surface as a point in a shape space equipped with an elastic Riemannian metric that measures the amount of bending and stretching that the surfaces undergo. A 4D surface can then be seen as a trajectory in this space. With this formulation, the statistical analysis of 4D surfaces can be cast as the problem of analyzing trajectories embedded in a nonlinear Riemannian manifold. However, performing the spatiotemporal registration, and subsequently computing statistics, on such nonlinear spaces is not straightforward as they rely on complex nonlinear optimizations. Our core contribution is the mapping of the surfaces to the space of Square-Root Normal Fields (SRNF) where the - metric is equivalent to the partial elastic metric in the space of surfaces. Thus, by solving the spatial registration in the SRNF space, the problem of analyzing 4D surfaces becomes the problem of analyzing trajectories embedded in the SRNF space, which has a euclidean structure. In this paper, we develop the building blocks that enable such analysis. These include: (1) the spatiotemporal registration of arbitrarily parameterized 4D surfaces even in the presence of large elastic deformations and large variations in their execution rates; (2) the computation of geodesics between 4D surfaces; (3) the computation of statistical summaries, such as means and modes of variation, of collections of 4D surfaces; and (4) the synthesis of random 4D surfaces. 
We demonstrate the performance of the proposed framework using 4D facial surfaces and 4D human body shapes.", "title": "4D Atlas: Statistical Analysis of the Spatiotemporal Variability in Longitudinal 3D Shape Data", "normalizedTitle": "4D Atlas: Statistical Analysis of the Spatiotemporal Variability in Longitudinal 3D Shape Data", "fno": "09745790", "hasPdf": true, "idPrefix": "tp", "keywords": [ "Image Registration", "Optimisation", "Statistical Analysis", "Stereo Image Processing", "4 D Atlas", "4 D Facial Surfaces", "4 D Human Body Shapes", "Arbitrary Parameterizations", "Bending Stretching", "Complex Nonlinear Optimizations", "Deforming Objects", "Elastic Deformations", "Longitudinal 3 D Shape Data Sets", "Nonlinear Riemannian Manifold", "Nonlinear Spaces", "Riemannian Approach", "Shape Space", "Spatial Registration", "Spatiotemporal Registration Problem", "Spatiotemporal Variability", "Square Root Normal Fields", "SRNF Space", "Statistical Analysis", "Statistical Summaries", "Shape", "Three Dimensional Displays", "Surface Treatment", "Solid Modeling", "Measurement", "Strain", "Spatiotemporal Phenomena", "Dynamic Surfaces", "Elastic Metric", "Square Root Normal Field", "Statistical Summaries", "Shape Synthesis And Generation", "4 D Surface", "Human 4 D", "Face 4 D", "Motion", "Growth" ], "authors": [ { "givenName": "Hamid", "surname": "Laga", "fullName": "Hamid Laga", "affiliation": "Information Technology Discipline, Murdoch University, Murdoch, Australia", "__typename": "ArticleAuthorType" }, { "givenName": "Marcel", "surname": "Padilla", "fullName": "Marcel Padilla", "affiliation": "Technische Universität Berlin, Berlin, Germany", "__typename": "ArticleAuthorType" }, { "givenName": "Ian H.", "surname": "Jermyn", "fullName": "Ian H. 
Jermyn", "affiliation": "Durham University, Durham, U.K.", "__typename": "ArticleAuthorType" }, { "givenName": "Sebastian", "surname": "Kurtek", "fullName": "Sebastian Kurtek", "affiliation": "Ohio State University, Columbus, OH, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Mohammed", "surname": "Bennamoun", "fullName": "Mohammed Bennamoun", "affiliation": "University of Western Australia, Perth, WA, Australia", "__typename": "ArticleAuthorType" }, { "givenName": "Anuj", "surname": "Srivastava", "fullName": "Anuj Srivastava", "affiliation": "Florida State University, Tallahassee, FL, USA", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "02", "pubDate": "2023-02-01 00:00:00", "pubType": "trans", "pages": "1335-1352", "year": "2023", "issn": "0162-8828", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/icpr/2004/2128/1/01333991", "title": "3D scanning using spatiotemporal orientation", "doi": null, "abstractUrl": "/proceedings-article/icpr/2004/01333991/12OmNBC8Au8", "parentPublication": { "id": "proceedings/icpr/2004/2128/1", "title": "Pattern Recognition, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/mnrao/1994/6435/0/00346253", "title": "Analyzing gait with spatiotemporal surfaces", "doi": null, "abstractUrl": "/proceedings-article/mnrao/1994/00346253/12OmNBqdrhf", "parentPublication": { "id": "proceedings/mnrao/1994/6435/0", "title": "Proceedings of 1994 IEEE Workshop on Motion of Non-rigid and Articulated Objects", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/big-data/2014/5666/0/07004398", "title": "Spatiotemporal indexing techniques for efficiently mining spatiotemporal co-occurrence patterns", "doi": null, "abstractUrl": 
"/proceedings-article/big-data/2014/07004398/12OmNwbLVmL", "parentPublication": { "id": "proceedings/big-data/2014/5666/0", "title": "2014 IEEE International Conference on Big Data (Big Data)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2012/2216/0/06460708", "title": "Feature-aligned 4D spatiotemporal image registration", "doi": null, "abstractUrl": "/proceedings-article/icpr/2012/06460708/12OmNzUPpFB", "parentPublication": { "id": "proceedings/icpr/2012/2216/0", "title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccvw/2011/0063/0/06130437", "title": "Event-driven feature analysis in a 4D spatiotemporal representation for ambient assisted living", "doi": null, "abstractUrl": "/proceedings-article/iccvw/2011/06130437/12OmNzVXNWz", "parentPublication": { "id": "proceedings/iccvw/2011/0063/0", "title": "2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/1990/2062/1/00118130", "title": "Cyclic motion detection using spatiotemporal surfaces and curves", "doi": null, "abstractUrl": "/proceedings-article/icpr/1990/00118130/12OmNzwZ6kF", "parentPublication": { "id": "proceedings/icpr/1990/2062/1", "title": "Proceedings 10th International Conference on Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2022/02/09151354", "title": "Spatiotemporal Bundle Adjustment for Dynamic 3D Human Reconstruction in the Wild", "doi": null, "abstractUrl": "/journal/tp/2022/02/09151354/1lPCkW5UbPG", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/cvpr/2020/7168/0/716800e725", "title": "A Spatiotemporal Volumetric Interpolation Network for 4D Dynamic Medical Image", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2020/716800e725/1m3o6jEDHEY", "parentPublication": { "id": "proceedings/cvpr/2020/7168/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bibe/2020/9574/0/957400b065", "title": "BPARC: A novel spatio-temporal (4D) data-driven brain parcellation scheme based on deep residual networks", "doi": null, "abstractUrl": "/proceedings-article/bibe/2020/957400b065/1pBMmIZzgRO", "parentPublication": { "id": "proceedings/bibe/2020/9574/0", "title": "2020 IEEE 20th International Conference on Bioinformatics and Bioengineering (BIBE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2020/9134/0/913400a467", "title": "Spatiotemporal Phenomena Summarization through Static Visual Narratives", "doi": null, "abstractUrl": "/proceedings-article/iv/2020/913400a467/1rSRaNwIpFK", "parentPublication": { "id": "proceedings/iv/2020/9134/0", "title": "2020 24th International Conference Information Visualisation (IV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": null, "next": { "fno": "09729603", "articleId": "1Bya4pRZxtK", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "1JInK9BurXa", "name": "ttp202302-09745790s1-supp2-3163720.pdf", "location": "https://www.computer.org/csdl/api/v1/extra/ttp202302-09745790s1-supp2-3163720.pdf", "extension": "pdf", "size": "54.2 MB", "__typename": "WebExtraType" }, { "id": "1JInKUbYzzG", "name": "ttp202302-09745790s1-supp1-3163720.mp4", "location": "https://www.computer.org/csdl/api/v1/extra/ttp202302-09745790s1-supp1-3163720.mp4", "extension": "mp4", "size": 
"50 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "12OmNqJHFwA", "title": "April-June", "year": "2018", "issueNum": "02", "idPrefix": "ta", "pubType": "journal", "volume": "9", "label": "April-June", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUxlgy26", "doi": "10.1109/TAFFC.2016.2601101", "abstract": "We examined the effects of the emotional facial expressions of a virtual character (VC) on human frontal electroencephalographic (EEG) asymmetry (putatively indexing approach/withdrawal motivation), facial electromyographic (EMG) activity (emotional expressions), and social decision making (cooperation/defection). In a within-subjects design, the participants played the Iterated Prisoner's Dilemma game with VCs with different dynamic facial expressions (predefined or dependent on the participant's electrodermal and facial EMG activity). In general, VC facial expressions elicited congruent facial muscle activity. However, both frontal EEG asymmetry and facial EMG activity elicited by an angry VC facial expression varied as a function of preceding interactional events (human collaboration/defection). Pre-decision inner emotional-motivational processes and emotional facial expressions were dissociated, suggesting that human goals influence pre-decision frontal asymmetry, whereas display rules may affect (pre-decision) emotional expressions in human-VC interaction. An angry VC facial expression, high pre-decision corrugator EMG activity, and relatively greater left frontal activation predicted the participant's decision to defect. Both post-decision frontal asymmetry and facial EMG activity were related to reciprocal cooperation. 
The results suggest that the justifiability of VC emotional expressions and the perceived fairness of VC actions influence human emotional responses.", "abstracts": [ { "abstractType": "Regular", "content": "We examined the effects of the emotional facial expressions of a virtual character (VC) on human frontal electroencephalographic (EEG) asymmetry (putatively indexing approach/withdrawal motivation), facial electromyographic (EMG) activity (emotional expressions), and social decision making (cooperation/defection). In a within-subjects design, the participants played the Iterated Prisoner's Dilemma game with VCs with different dynamic facial expressions (predefined or dependent on the participant's electrodermal and facial EMG activity). In general, VC facial expressions elicited congruent facial muscle activity. However, both frontal EEG asymmetry and facial EMG activity elicited by an angry VC facial expression varied as a function of preceding interactional events (human collaboration/defection). Pre-decision inner emotional-motivational processes and emotional facial expressions were dissociated, suggesting that human goals influence pre-decision frontal asymmetry, whereas display rules may affect (pre-decision) emotional expressions in human-VC interaction. An angry VC facial expression, high pre-decision corrugator EMG activity, and relatively greater left frontal activation predicted the participant's decision to defect. Both post-decision frontal asymmetry and facial EMG activity were related to reciprocal cooperation. 
The results suggest that the justifiability of VC emotional expressions and the perceived fairness of VC actions influence human emotional responses.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We examined the effects of the emotional facial expressions of a virtual character (VC) on human frontal electroencephalographic (EEG) asymmetry (putatively indexing approach/withdrawal motivation), facial electromyographic (EMG) activity (emotional expressions), and social decision making (cooperation/defection). In a within-subjects design, the participants played the Iterated Prisoner's Dilemma game with VCs with different dynamic facial expressions (predefined or dependent on the participant's electrodermal and facial EMG activity). In general, VC facial expressions elicited congruent facial muscle activity. However, both frontal EEG asymmetry and facial EMG activity elicited by an angry VC facial expression varied as a function of preceding interactional events (human collaboration/defection). Pre-decision inner emotional-motivational processes and emotional facial expressions were dissociated, suggesting that human goals influence pre-decision frontal asymmetry, whereas display rules may affect (pre-decision) emotional expressions in human-VC interaction. An angry VC facial expression, high pre-decision corrugator EMG activity, and relatively greater left frontal activation predicted the participant's decision to defect. Both post-decision frontal asymmetry and facial EMG activity were related to reciprocal cooperation. 
The results suggest that the justifiability of VC emotional expressions and the perceived fairness of VC actions influence human emotional responses.", "title": "Virtual Character Facial Expressions Influence Human Brain and Facial EMG Activity in a Decision-Making Game", "normalizedTitle": "Virtual Character Facial Expressions Influence Human Brain and Facial EMG Activity in a Decision-Making Game", "fno": "07547933", "hasPdf": true, "idPrefix": "ta", "keywords": [ "Electromyography", "Games", "Decision Making", "Electroencephalography", "Electronic Mail", "Context", "Facial Muscles", "Virtual Characters", "Emotions", "Neurophysiology", "Decision Making" ], "authors": [ { "givenName": "Niklas", "surname": "Ravaja", "fullName": "Niklas Ravaja", "affiliation": "Helsinki Collegium for Advanced Studies, University of Helsinki, Helsinki, Finland", "__typename": "ArticleAuthorType" }, { "givenName": "Gary", "surname": "Bente", "fullName": "Gary Bente", "affiliation": "University of Cologne, Cologne, Germany", "__typename": "ArticleAuthorType" }, { "givenName": "Jari", "surname": "Kätsyri", "fullName": "Jari Kätsyri", "affiliation": "School of Science, Aalto University, Helsinki, Finland", "__typename": "ArticleAuthorType" }, { "givenName": "Mikko", "surname": "Salminen", "fullName": "Mikko Salminen", "affiliation": "School of Science, Aalto University, Helsinki, Finland", "__typename": "ArticleAuthorType" }, { "givenName": "Tapio", "surname": "Takala", "fullName": "Tapio Takala", "affiliation": "School of Science, Aalto University, Helsinki, Finland", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "02", "pubDate": "2018-04-01 00:00:00", "pubType": "trans", "pages": "285-298", "year": "2018", "issn": "1949-3045", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/fg/2008/2153/0/04813317", "title": 
"Emotional contagion for unseen bodily expressions: Evidence from facial EMG", "doi": null, "abstractUrl": "/proceedings-article/fg/2008/04813317/12OmNAmmuOV", "parentPublication": { "id": "proceedings/fg/2008/2153/0", "title": "2008 8th IEEE International Conference on Automatic Face & Gesture Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/fg/2008/2153/0/04813388", "title": "Investigating the production of emotional facial expressions: a combined electroencephalographic (EEG) and electromyographic (EMG) approach", "doi": null, "abstractUrl": "/proceedings-article/fg/2008/04813388/12OmNAo45B8", "parentPublication": { "id": "proceedings/fg/2008/2153/0", "title": "2008 8th IEEE International Conference on Automatic Face & Gesture Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/acii/2009/4800/0/05349579", "title": "Pleasure-arousal-dominance driven facial expression simulation", "doi": null, "abstractUrl": "/proceedings-article/acii/2009/05349579/12OmNBLdKEh", "parentPublication": { "id": "proceedings/acii/2009/4800/0", "title": "2009 3rd International Conference on Affective Computing and Intelligent Interaction and Workshops (ACII 2009)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2002/1695/2/01048355", "title": "Mapping emotional status to facial expressions", "doi": null, "abstractUrl": "/proceedings-article/icpr/2002/01048355/12OmNBW0vFt", "parentPublication": { "id": "proceedings/icpr/2002/1695/2", "title": "Pattern Recognition, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/acii/2017/0563/0/08273651", "title": "Spontaneous and posed smile recognition based on spatial and temporal patterns of facial EMG", "doi": null, "abstractUrl": "/proceedings-article/acii/2017/08273651/12OmNBigFqV", 
"parentPublication": { "id": "proceedings/acii/2017/0563/0", "title": "2017 Seventh International Conference on Affective Computing and Intelligent Interaction (ACII)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/devlrn/2005/9226/0/01490973", "title": "Emotional elicitation by dynamic facial expressions", "doi": null, "abstractUrl": "/proceedings-article/devlrn/2005/01490973/12OmNBt3qpZ", "parentPublication": { "id": "proceedings/devlrn/2005/9226/0", "title": "International Conference on Development and Learning", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2016/1437/0/1437b452", "title": "Extended DISFA Dataset: Investigating Posed and Spontaneous Facial Expressions", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2016/1437b452/12OmNrYlmDm", "parentPublication": { "id": "proceedings/cvprw/2016/1437/0", "title": "2016 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cit-iucc-dasc-picom/2015/0154/0/07363189", "title": "Facial Expression Recognition with sEMG Method", "doi": null, "abstractUrl": "/proceedings-article/cit-iucc-dasc-picom/2015/07363189/12OmNscOUel", "parentPublication": { "id": "proceedings/cit-iucc-dasc-picom/2015/0154/0", "title": "2015 IEEE International Conference on Computer and Information Technology; Ubiquitous Computing and Communications; Dependable, Autonomic and Secure Computing; Pervasive Intelligence and Computing (CIT/IUCC/DASC/PICOM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ta/2014/03/06778017", "title": "Design of a Wearable Device for Reading Positive Expressions from Facial EMG Signals", "doi": null, "abstractUrl": "/journal/ta/2014/03/06778017/13rRUyY2937", "parentPublication": { "id": "trans/ta", "title": "IEEE Transactions on 
Affective Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/fg/2019/0089/0/08756541", "title": "A Boost in Revealing Subtle Facial Expressions: A Consolidated Eulerian Framework", "doi": null, "abstractUrl": "/proceedings-article/fg/2019/08756541/1bzYwpdafPa", "parentPublication": { "id": "proceedings/fg/2019/0089/0", "title": "2019 14th IEEE International Conference on Automatic Face & Gesture Recognition (FG 2019)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "07728015", "articleId": "13rRUxBJhEc", "__typename": "AdjacentArticleType" }, "next": null, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "17ShDTXWRV0", "name": "tta201802-07547933s1.zip", "location": "https://www.computer.org/csdl/api/v1/extra/tta201802-07547933s1.zip", "extension": "zip", "size": "43.5 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "12OmNzICEFu", "title": "July-Sept.", "year": "2014", "issueNum": "03", "idPrefix": "ta", "pubType": "journal", "volume": "5", "label": "July-Sept.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUyY2937", "doi": "10.1109/TAFFC.2014.2313557", "abstract": "In this paper we present the design of a wearable device that reads positive facial expressions using physiological signals. We first analyze facial morphology in 3 dimensions and facial electromyographic signals on different facial locations and show that we can detect electromyographic signals with high amplitude on areas of low facial mobility on the side of the face, which are correlated to ones obtained from electrodes on traditional surface electromyographic capturing positions on top of facial muscles on the front of the face. We use a multi-attribute decision-making method to find adequate electrode positions on the side of face to capture these signals. Based on this analysis, we design and implement an ergonomic wearable device with high reliability. Because the signals are recorded distally, the proposed device uses independent component analysis and an artificial neural network to analyze them and achieve a high facial expression recognition rate on the side of the face. The recognized emotional facial expressions through the wearable interface device can be recorded during therapeutic interventions and for long-term facial expression recognition to quantify and infer the user's affective state in order to support medical professionals.", "abstracts": [ { "abstractType": "Regular", "content": "In this paper we present the design of a wearable device that reads positive facial expressions using physiological signals. 
We first analyze facial morphology in 3 dimensions and facial electromyographic signals on different facial locations and show that we can detect electromyographic signals with high amplitude on areas of low facial mobility on the side of the face, which are correlated to ones obtained from electrodes on traditional surface electromyographic capturing positions on top of facial muscles on the front of the face. We use a multi-attribute decision-making method to find adequate electrode positions on the side of face to capture these signals. Based on this analysis, we design and implement an ergonomic wearable device with high reliability. Because the signals are recorded distally, the proposed device uses independent component analysis and an artificial neural network to analyze them and achieve a high facial expression recognition rate on the side of the face. The recognized emotional facial expressions through the wearable interface device can be recorded during therapeutic interventions and for long-term facial expression recognition to quantify and infer the user's affective state in order to support medical professionals.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In this paper we present the design of a wearable device that reads positive facial expressions using physiological signals. We first analyze facial morphology in 3 dimensions and facial electromyographic signals on different facial locations and show that we can detect electromyographic signals with high amplitude on areas of low facial mobility on the side of the face, which are correlated to ones obtained from electrodes on traditional surface electromyographic capturing positions on top of facial muscles on the front of the face. We use a multi-attribute decision-making method to find adequate electrode positions on the side of face to capture these signals. Based on this analysis, we design and implement an ergonomic wearable device with high reliability. 
Because the signals are recorded distally, the proposed device uses independent component analysis and an artificial neural network to analyze them and achieve a high facial expression recognition rate on the side of the face. The recognized emotional facial expressions through the wearable interface device can be recorded during therapeutic interventions and for long-term facial expression recognition to quantify and infer the user's affective state in order to support medical professionals.", "title": "Design of a Wearable Device for Reading Positive Expressions from Facial EMG Signals", "normalizedTitle": "Design of a Wearable Device for Reading Positive Expressions from Facial EMG Signals", "fno": "06778017", "hasPdf": true, "idPrefix": "ta", "keywords": [ "Decision Making", "Electromyography", "Ergonomics", "Face Recognition", "Independent Component Analysis", "Interactive Devices", "Medical Signal Detection", "Physiology", "Wearable Computers", "High Reliability", "Medical Professionals", "User Affective State", "Long Term Facial Expression Recognition", "Therapeutic Interventions", "Wearable Interface Device", "High Facial Expression Recognition Rate", "Artificial Neural Network", "Independent Component Analysis", "Ergonomic Wearable Device", "Electrode Positions", "Multiattribute Decision Making Method", "Facial Muscles", "Surface Electromyographic", "Low Facial Mobility", "Electromyographic Signal Detection", "Facial Locations", "Facial Electromyographic Signals", "Facial Morphology", "Positive Facial Expressions", "Physiological Signals", "Facial EMG Signals", "Wearable Device Design", "Electrodes", "Electromyography", "Face Recognition", "Emotion Recognition", "Facial Muscles", "Muscles", "Electromyography", "Face And Gesture Recognition", "Pattern Recognition", "Wearable Interface" ], "authors": [ { "givenName": "Anna", "surname": "Gruebler", "fullName": "Anna Gruebler", "affiliation": "School of Computer Science and Electronic Engineering, University 
of Essex, Wivenhoe Park, Colchester, United Kingdom", "__typename": "ArticleAuthorType" }, { "givenName": "Kenji", "surname": "Suzuki", "fullName": "Kenji Suzuki", "affiliation": "Center for Cybernics Research, University of Tsukuba, Tsukuba, Japan", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "03", "pubDate": "2014-07-01 00:00:00", "pubType": "trans", "pages": "227-237", "year": "2014", "issn": "1949-3045", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/fg/2008/2153/0/04813317", "title": "Emotional contagion for unseen bodily expressions: Evidence from facial EMG", "doi": null, "abstractUrl": "/proceedings-article/fg/2008/04813317/12OmNAmmuOV", "parentPublication": { "id": "proceedings/fg/2008/2153/0", "title": "2008 8th IEEE International Conference on Automatic Face & Gesture Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/acii/2017/0563/0/08273651", "title": "Spontaneous and posed smile recognition based on spatial and temporal patterns of facial EMG", "doi": null, "abstractUrl": "/proceedings-article/acii/2017/08273651/12OmNBigFqV", "parentPublication": { "id": "proceedings/acii/2017/0563/0", "title": "2017 Seventh International Conference on Affective Computing and Intelligent Interaction (ACII)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cit-iucc-dasc-picom/2015/0154/0/07363189", "title": "Facial Expression Recognition with sEMG Method", "doi": null, "abstractUrl": "/proceedings-article/cit-iucc-dasc-picom/2015/07363189/12OmNscOUel", "parentPublication": { "id": "proceedings/cit-iucc-dasc-picom/2015/0154/0", "title": "2015 IEEE International Conference on Computer and Information Technology; Ubiquitous Computing and Communications; Dependable, Autonomic and 
Secure Computing; Pervasive Intelligence and Computing (CIT/IUCC/DASC/PICOM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2015/7082/0/07177398", "title": "EMG based rehabilitation systems - approaches for ALS patients in different stages", "doi": null, "abstractUrl": "/proceedings-article/icme/2015/07177398/12OmNvBIRN3", "parentPublication": { "id": "proceedings/icme/2015/7082/0", "title": "2015 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/micai/2012/4731/0/06387218", "title": "EMG Pattern Recognition System Based on Neural Networks", "doi": null, "abstractUrl": "/proceedings-article/micai/2012/06387218/12OmNwoPtrz", "parentPublication": { "id": "proceedings/micai/2012/4731/0", "title": "2012 11th Mexican International Conference on Artificial Intelligence (MICAI 2012)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/est/2012/4791/0/4791a178", "title": "Analysis of Social Smile Sharing Using a Wearable Device that Captures Distal Electromyographic Signals", "doi": null, "abstractUrl": "/proceedings-article/est/2012/4791a178/12OmNxvwoNi", "parentPublication": { "id": "proceedings/est/2012/4791/0", "title": "2012 Third International Conference on Emerging Security Technologies", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iciev/2014/5179/0/06850687", "title": "Human hand gesture detection based on EMG signal using ANN", "doi": null, "abstractUrl": "/proceedings-article/iciev/2014/06850687/12OmNzvz6G0", "parentPublication": { "id": "proceedings/iciev/2014/5179/0", "title": "2014 International Conference on Informatics, Electronics & Vision (ICIEV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ta/2017/04/08049392", "title": "A Wearable 
Device for Fast and Subtle Spontaneous Smile Recognition", "doi": null, "abstractUrl": "/journal/ta/2017/04/08049392/13rRUILtJpl", "parentPublication": { "id": "trans/ta", "title": "IEEE Transactions on Affective Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ta/2016/04/07173007", "title": "The Automatic Detection of Chronic Pain-Related Expression: Requirements, Challenges and the Multimodal EmoPain Dataset", "doi": null, "abstractUrl": "/journal/ta/2016/04/07173007/13rRUx0getK", "parentPublication": { "id": "trans/ta", "title": "IEEE Transactions on Affective Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ta/2018/02/07547933", "title": "Virtual Character Facial Expressions Influence Human Brain and Facial EMG Activity in a Decision-Making Game", "doi": null, "abstractUrl": "/journal/ta/2018/02/07547933/13rRUxlgy26", "parentPublication": { "id": "trans/ta", "title": "IEEE Transactions on Affective Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "06784326", "articleId": "13rRUx0xPgz", "__typename": "AdjacentArticleType" }, "next": { "fno": "06851182", "articleId": "13rRUxE04s0", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNwpGgK8", "title": "Dec.", "year": "2014", "issueNum": "12", "idPrefix": "tg", "pubType": "journal", "volume": "20", "label": "Dec.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUxZ0o1D", "doi": "10.1109/TVCG.2014.2346422", "abstract": "Effectively showing the relationships between objects in a dataset is one of the main tasks in information visualization. Typically there is a well-defined notion of distance between pairs of objects, and traditional approaches such as principal component analysis or multi-dimensional scaling are used to place the objects as points in 2D space, so that similar objects are close to each other. In another typical setting, the dataset is visualized as a network graph, where related nodes are connected by links. More recently, datasets are also visualized as maps, where in addition to nodes and links, there is an explicit representation of groups and clusters. We consider these three Techniques, characterized by a progressive increase of the amount of encoded information: node diagrams, node-link diagrams and node-link-group diagrams. We assess these three types of diagrams with a controlled experiment that covers nine different tasks falling broadly in three categories: node-based tasks, network-based tasks and group-based tasks. Our findings indicate that adding links, or links and group representations, does not negatively impact performance (time and accuracy) of node-based tasks. Similarly, adding group representations does not negatively impact the performance of network-based tasks. Node-link-group diagrams outperform the others on group-based tasks. These conclusions contradict results in other studies, in similar but subtly different settings. 
Taken together, however, such results can have significant implications for the design of standard and domain specific visualizations tools.", "abstracts": [ { "abstractType": "Regular", "content": "Effectively showing the relationships between objects in a dataset is one of the main tasks in information visualization. Typically there is a well-defined notion of distance between pairs of objects, and traditional approaches such as principal component analysis or multi-dimensional scaling are used to place the objects as points in 2D space, so that similar objects are close to each other. In another typical setting, the dataset is visualized as a network graph, where related nodes are connected by links. More recently, datasets are also visualized as maps, where in addition to nodes and links, there is an explicit representation of groups and clusters. We consider these three Techniques, characterized by a progressive increase of the amount of encoded information: node diagrams, node-link diagrams and node-link-group diagrams. We assess these three types of diagrams with a controlled experiment that covers nine different tasks falling broadly in three categories: node-based tasks, network-based tasks and group-based tasks. Our findings indicate that adding links, or links and group representations, does not negatively impact performance (time and accuracy) of node-based tasks. Similarly, adding group representations does not negatively impact the performance of network-based tasks. Node-link-group diagrams outperform the others on group-based tasks. These conclusions contradict results in other studies, in similar but subtly different settings. 
Taken together, however, such results can have significant implications for the design of standard and domain specific visualizations tools.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Effectively showing the relationships between objects in a dataset is one of the main tasks in information visualization. Typically there is a well-defined notion of distance between pairs of objects, and traditional approaches such as principal component analysis or multi-dimensional scaling are used to place the objects as points in 2D space, so that similar objects are close to each other. In another typical setting, the dataset is visualized as a network graph, where related nodes are connected by links. More recently, datasets are also visualized as maps, where in addition to nodes and links, there is an explicit representation of groups and clusters. We consider these three Techniques, characterized by a progressive increase of the amount of encoded information: node diagrams, node-link diagrams and node-link-group diagrams. We assess these three types of diagrams with a controlled experiment that covers nine different tasks falling broadly in three categories: node-based tasks, network-based tasks and group-based tasks. Our findings indicate that adding links, or links and group representations, does not negatively impact performance (time and accuracy) of node-based tasks. Similarly, adding group representations does not negatively impact the performance of network-based tasks. Node-link-group diagrams outperform the others on group-based tasks. These conclusions contradict results in other studies, in similar but subtly different settings. 
Taken together, however, such results can have significant implications for the design of standard and domain specific visualizations tools.", "title": "Node, Node-Link, and Node-Link-Group Diagrams: An Evaluation", "normalizedTitle": "Node, Node-Link, and Node-Link-Group Diagrams: An Evaluation", "fno": "06876036", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Image Color Analysis", "Data Visualization", "Visualization", "Diagrams", "Layout", "Datasets", "Scatter Plots", "Graphs", "Networks", "Maps" ], "authors": [ { "givenName": "Bahador", "surname": "Saket", "fullName": "Bahador Saket", "affiliation": ", University of Arizona", "__typename": "ArticleAuthorType" }, { "givenName": "Paolo", "surname": "Simonetto", "fullName": "Paolo Simonetto", "affiliation": ", University of Arizona", "__typename": "ArticleAuthorType" }, { "givenName": "Stephen", "surname": "Kobourov", "fullName": "Stephen Kobourov", "affiliation": ", University of Arizona", "__typename": "ArticleAuthorType" }, { "givenName": "Katy", "surname": "Borner", "fullName": "Katy Borner", "affiliation": ", Indiana University", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "12", "pubDate": "2014-12-01 00:00:00", "pubType": "trans", "pages": "2231-2240", "year": "2014", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/vlhcc/2013/0369/0/06645246", "title": "Just model! 
— Putting automatic synthesis of node-link-diagrams into practice", "doi": null, "abstractUrl": "/proceedings-article/vlhcc/2013/06645246/12OmNCcKQH9", "parentPublication": { "id": "proceedings/vlhcc/2013/0369/0", "title": "2013 IEEE Symposium on Visual Languages and Human-Centric Computing (VL/HCC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2014/4103/0/4103a053", "title": "Partial Link Drawings for Nodes, Links, and Regions of Interest", "doi": null, "abstractUrl": "/proceedings-article/iv/2014/4103a053/12OmNqAU6pE", "parentPublication": { "id": "proceedings/iv/2014/4103/0", "title": "2014 18th International Conference on Information Visualisation (IV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cw/2015/9403/0/9403a034", "title": "Schematization of Node-Link Diagrams and Drawing Techniques for Geo-referenced Networks", "doi": null, "abstractUrl": "/proceedings-article/cw/2015/9403a034/12OmNwEJ0Ua", "parentPublication": { "id": "proceedings/cw/2015/9403/0", "title": "2015 International Conference on Cyberworlds (CW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2011/01/ttg2011010088", "title": "Inductively Generating Euler Diagrams", "doi": null, "abstractUrl": "/journal/tg/2011/01/ttg2011010088/13rRUNvgziB", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/10/08438968", "title": "Node-Link or Adjacency Matrices: Old Question, New Insights", "doi": null, "abstractUrl": "/journal/tg/2019/10/08438968/13rRUwjoNx8", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2014/11/06787045", 
"title": "How to Display Group Information on Node-Link Diagrams: An Evaluation", "doi": null, "abstractUrl": "/journal/tg/2014/11/06787045/13rRUyY28Yz", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2023/01/09908291", "title": "Comparative Evaluation of Bipartite, Node-Link, and Matrix-Based Network Representations", "doi": null, "abstractUrl": "/journal/tg/2023/01/09908291/1HbasfaWNX2", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/09944974", "title": "Color-encoded Links Improve Homophily Perception in Node-Link Diagrams", "doi": null, "abstractUrl": "/journal/tg/5555/01/09944974/1IbMbZhmPE4", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/04/08889698", "title": "Expressive Authoring of Node-Link Diagrams With Graphies", "doi": null, "abstractUrl": "/journal/tg/2021/04/08889698/1eBufwF6gne", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/02/09240072", "title": "Exemplar-based Layout Fine-tuning for Node-link Diagrams", "doi": null, "abstractUrl": "/journal/tg/2021/02/09240072/1oeZOPx1j0c", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "06875968", "articleId": "13rRUy3gn7y", "__typename": "AdjacentArticleType" }, "next": { "fno": "06876010", 
"articleId": "13rRUxD9gXK", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "17ShDTXWRUw", "name": "ttg201412-06876036s1.zip", "location": "https://www.computer.org/csdl/api/v1/extra/ttg201412-06876036s1.zip", "extension": "zip", "size": "2.12 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "12OmNvqEvRo", "title": "PrePrints", "year": "5555", "issueNum": "01", "idPrefix": "tg", "pubType": "journal", "volume": null, "label": "PrePrints", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1IbMbZhmPE4", "doi": "10.1109/TVCG.2022.3221014", "abstract": "Node-link diagrams enable visual assessment of homophily when viewers can identify and evaluate the relative number of intra-cluster and inter-cluster links. Our online experiment shows that a new design with link type encoded edge color leads to more accurate perception of homophily than a design with same-color edges.", "abstracts": [ { "abstractType": "Regular", "content": "Node-link diagrams enable visual assessment of homophily when viewers can identify and evaluate the relative number of intra-cluster and inter-cluster links. Our online experiment shows that a new design with link type encoded edge color leads to more accurate perception of homophily than a design with same-color edges.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Node-link diagrams enable visual assessment of homophily when viewers can identify and evaluate the relative number of intra-cluster and inter-cluster links. 
Our online experiment shows that a new design with link type encoded edge color leads to more accurate perception of homophily than a design with same-color edges.", "title": "Color-encoded Links Improve Homophily Perception in Node-Link Diagrams", "normalizedTitle": "Color-encoded Links Improve Homophily Perception in Node-Link Diagrams", "fno": "09944974", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Visualization", "Task Analysis", "Layout", "Image Color Analysis", "Standards", "Psychology", "Color", "Information Visualization", "Node Link Diagrams", "Homophily", "Perception" ], "authors": [ { "givenName": "Daniel", "surname": "Reimann", "fullName": "Daniel Reimann", "affiliation": "Department of Psychology, FernUniversität in Hagen, Hagen, Germany", "__typename": "ArticleAuthorType" }, { "givenName": "André", "surname": "Schulz", "fullName": "André Schulz", "affiliation": "Department of Mathematics and Computer Science, FernUniversität in Hagen, Hagen, Germany", "__typename": "ArticleAuthorType" }, { "givenName": "Nilam", "surname": "Ram", "fullName": "Nilam Ram", "affiliation": "Departments of Psychology and Communication, Stanford University, CA, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Robert", "surname": "Gaschler", "fullName": "Robert Gaschler", "affiliation": "Department of Psychology, FernUniversität in Hagen, Hagen, Germany", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "01", "pubDate": "2022-11-01 00:00:00", "pubType": "trans", "pages": "1-7", "year": "5555", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/iv/2014/4103/0/4103a034", "title": "Interactive Similarity Links in Treemap Visualizations", "doi": null, "abstractUrl": "/proceedings-article/iv/2014/4103a034/12OmNAnMuLr", "parentPublication": { "id": 
"proceedings/iv/2014/4103/0", "title": "2014 18th International Conference on Information Visualisation (IV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2014/4103/0/4103a053", "title": "Partial Link Drawings for Nodes, Links, and Regions of Interest", "doi": null, "abstractUrl": "/proceedings-article/iv/2014/4103a053/12OmNqAU6pE", "parentPublication": { "id": "proceedings/iv/2014/4103/0", "title": "2014 18th International Conference on Information Visualisation (IV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iai/2004/8387/0/01300947", "title": "Using inverse image frequency for perception-based color image quantization", "doi": null, "abstractUrl": "/proceedings-article/iai/2004/01300947/12OmNyaoDF1", "parentPublication": { "id": "proceedings/iai/2004/8387/0", "title": "2004 Southwest Symposium on Image Analysis and Interpretation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2014/12/06875960", "title": "Reinforcing Visual Grouping Cues to Communicate Complex Informational Structure", "doi": null, "abstractUrl": "/journal/tg/2014/12/06875960/13rRUwInvB8", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2014/12/06876036", "title": "Node, Node-Link, and Node-Link-Group Diagrams: An Evaluation", "doi": null, "abstractUrl": "/journal/tg/2014/12/06876036/13rRUxZ0o1D", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/01/08440853", "title": "Optimizing Color Assignment for Perception of Class Separability in Multiclass Scatterplots", "doi": null, "abstractUrl": 
"/journal/tg/2019/01/08440853/17D45VTRoxJ", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08797714", "title": "PILC Projector: RGB-IR Projector for Pixel-level Infrared Light Communication", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08797714/1cJ0L8WggAE", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vis/2019/4941/0/08933760", "title": "Evaluating Gradient Perception in Color-Coded Scalar Fields", "doi": null, "abstractUrl": "/proceedings-article/vis/2019/08933760/1fTgHHw1pSM", "parentPublication": { "id": "proceedings/vis/2019/4941/0", "title": "2019 IEEE Visualization Conference (VIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/12/09523761", "title": "Evaluating Effects of Background Stories on Graph Perception", "doi": null, "abstractUrl": "/journal/tg/2022/12/09523761/1wnLgUKA2fm", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccea/2021/2616/0/261600a224", "title": "Thematic Map Color Matching Design Based On Geese Swarm Optimization Algorithm", "doi": null, "abstractUrl": "/proceedings-article/iccea/2021/261600a224/1y4owqkadBC", "parentPublication": { "id": "proceedings/iccea/2021/2616/0", "title": "2021 International Conference on Computer Engineering and Application (ICCEA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "09944192", "articleId": "1Ia7fREeHHW", "__typename": 
"AdjacentArticleType" }, "next": { "fno": "09947006", "articleId": "1Idr5neUL5e", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "1rvygosnS8M", "title": "April", "year": "2021", "issueNum": "04", "idPrefix": "tg", "pubType": "journal", "volume": "27", "label": "April", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1eBufwF6gne", "doi": "10.1109/TVCG.2019.2950932", "abstract": "Expressive design environments enable visualization designers not only to specify chart types and visual mappings, but also to customize individual graphical marks, as they would in a vector graphics drawing tool. Prior work has mainly investigated how to support the expressive design of a wide range of charts generated from tabular data: bar charts, scatterplots, maps, etc. We focus here on an expressive design environment for node-link diagrams generated from multivariate networks. Such data structures raise specific challenges and opportunities in terms of visual design and interactive authoring. We discuss those specificities and describe the user-centered design process that led to Graphies, a prototype environment for expressive node-link diagram authoring. We then report on a study in which participants successfully reproduced several expressive designs, and created their own designs as well.", "abstracts": [ { "abstractType": "Regular", "content": "Expressive design environments enable visualization designers not only to specify chart types and visual mappings, but also to customize individual graphical marks, as they would in a vector graphics drawing tool. Prior work has mainly investigated how to support the expressive design of a wide range of charts generated from tabular data: bar charts, scatterplots, maps, etc. We focus here on an expressive design environment for node-link diagrams generated from multivariate networks. Such data structures raise specific challenges and opportunities in terms of visual design and interactive authoring. 
We discuss those specificities and describe the user-centered design process that led to Graphies, a prototype environment for expressive node-link diagram authoring. We then report on a study in which participants successfully reproduced several expressive designs, and created their own designs as well.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Expressive design environments enable visualization designers not only to specify chart types and visual mappings, but also to customize individual graphical marks, as they would in a vector graphics drawing tool. Prior work has mainly investigated how to support the expressive design of a wide range of charts generated from tabular data: bar charts, scatterplots, maps, etc. We focus here on an expressive design environment for node-link diagrams generated from multivariate networks. Such data structures raise specific challenges and opportunities in terms of visual design and interactive authoring. We discuss those specificities and describe the user-centered design process that led to Graphies, a prototype environment for expressive node-link diagram authoring. 
We then report on a study in which participants successfully reproduced several expressive designs, and created their own designs as well.", "title": "Expressive Authoring of Node-Link Diagrams With Graphies", "normalizedTitle": "Expressive Authoring of Node-Link Diagrams With Graphies", "fno": "08889698", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Data Structures", "Data Visualisation", "User Centred Design", "Expressive Authoring", "Node Link Diagrams", "Graphies", "Expressive Design Environment", "Visualization Designers", "Chart Types", "Visual Mappings", "Individual Graphical Marks", "Vector Graphics", "Bar Charts", "Visual Design", "Interactive Authoring", "User Centered Design Process", "Prototype Environment", "Expressive Node Link Diagram Authoring", "Visualization", "Tools", "Data Visualization", "Layout", "Data Structures", "User Centered Design", "Expressive Design", "Node Link Diagram", "Multivariate Networks" ], "authors": [ { "givenName": "Hugo", "surname": "Romat", "fullName": "Hugo Romat", "affiliation": "INRIA, Université Paris-Saclay CNRS, Orsay, France", "__typename": "ArticleAuthorType" }, { "givenName": "Caroline", "surname": "Appert", "fullName": "Caroline Appert", "affiliation": "INRIA, Université Paris-Saclay CNRS, Orsay, France", "__typename": "ArticleAuthorType" }, { "givenName": "Emmanuel", "surname": "Pietriga", "fullName": "Emmanuel Pietriga", "affiliation": "INRIA, Université Paris-Saclay CNRS, Orsay, France", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "04", "pubDate": "2021-04-01 00:00:00", "pubType": "trans", "pages": "2329-2340", "year": "2021", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/infvis/2005/9464/0/01532129", "title": "Elastic hierarchies: combining treemaps and node-link diagrams", "doi": null, "abstractUrl": 
"/proceedings-article/infvis/2005/01532129/12OmNqzcvFm", "parentPublication": { "id": "proceedings/infvis/2005/9464/0", "title": "IEEE Symposium on Information Visualization (InfoVis 05)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-infovis/2005/2790/0/01532129", "title": "Elastic hierarchies: combining treemaps and node-link diagrams", "doi": null, "abstractUrl": "/proceedings-article/ieee-infovis/2005/01532129/12OmNzYwcax", "parentPublication": { "id": "proceedings/ieee-infovis/2005/2790/0", "title": "Information Visualization, IEEE Symposium on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2014/12/06876036", "title": "Node, Node-Link, and Node-Link-Group Diagrams: An Evaluation", "doi": null, "abstractUrl": "/journal/tg/2014/12/06876036/13rRUxZ0o1D", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/beliv/2018/6884/0/08634297", "title": "Reflecting on the Evaluation of Visualization Authoring Systems : Position Paper", "doi": null, "abstractUrl": "/proceedings-article/beliv/2018/08634297/17D45VTRozT", "parentPublication": { "id": "proceedings/beliv/2018/6884/0", "title": "2018 IEEE Evaluation and Beyond - Methodological Approaches for Visualization (BELIV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/08/08611113", "title": "MARVisT: Authoring Glyph-Based Visualization in Mobile Augmented Reality", "doi": null, "abstractUrl": "/journal/tg/2020/08/08611113/17D45Wuc367", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2023/01/09903511", "title": "Supporting Expressive and Faithful 
Pictorial Visualization Design with Visual Style Transfer", "doi": null, "abstractUrl": "/journal/tg/2023/01/09903511/1GZokWw73mo", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2023/01/09912366", "title": "Towards Natural Language-Based Visualization Authoring", "doi": null, "abstractUrl": "/journal/tg/2023/01/09912366/1HeiWkRN3tC", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/09944974", "title": "Color-encoded Links Improve Homophily Perception in Node-Link Diagrams", "doi": null, "abstractUrl": "/journal/tg/5555/01/09944974/1IbMbZhmPE4", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/01/08807226", "title": "Critical Reflections on Visualization Authoring Systems", "doi": null, "abstractUrl": "/journal/tg/2020/01/08807226/1cG65OkeVu8", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/09/09035622", "title": "LADV: Deep Learning Assisted Authoring of Dashboard Visualizations From Images and Sketches", "doi": null, "abstractUrl": "/journal/tg/2021/09/09035622/1iaeAO11H6o", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "08869805", "articleId": "1e9hb0tlqpy", "__typename": "AdjacentArticleType" }, "next": { "fno": "08894500", "articleId": 
"1eNbh4fCqzu", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "1qL5hsvvVkc", "title": "Feb.", "year": "2021", "issueNum": "02", "idPrefix": "tg", "pubType": "journal", "volume": "27", "label": "Feb.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1oeZOPx1j0c", "doi": "10.1109/TVCG.2020.3030393", "abstract": "We design and evaluate a novel layout fine-tuning technique for node-link diagrams that facilitates exemplar-based adjustment of a group of substructures in batching mode. The key idea is to transfer user modifications on a local substructure to other substructures in the entire graph that are topologically similar to the exemplar. We first precompute a canonical representation for each substructure with node embedding techniques and then use it for on-the-fly substructure retrieval. We design and develop a light-weight interactive system to enable intuitive adjustment, modification transfer, and visual graph exploration. We also report some results of quantitative comparisons, three case studies, and a within-participant user study.", "abstracts": [ { "abstractType": "Regular", "content": "We design and evaluate a novel layout fine-tuning technique for node-link diagrams that facilitates exemplar-based adjustment of a group of substructures in batching mode. The key idea is to transfer user modifications on a local substructure to other substructures in the entire graph that are topologically similar to the exemplar. We first precompute a canonical representation for each substructure with node embedding techniques and then use it for on-the-fly substructure retrieval. We design and develop a light-weight interactive system to enable intuitive adjustment, modification transfer, and visual graph exploration. 
We also report some results of quantitative comparisons, three case studies, and a within-participant user study.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We design and evaluate a novel layout fine-tuning technique for node-link diagrams that facilitates exemplar-based adjustment of a group of substructures in batching mode. The key idea is to transfer user modifications on a local substructure to other substructures in the entire graph that are topologically similar to the exemplar. We first precompute a canonical representation for each substructure with node embedding techniques and then use it for on-the-fly substructure retrieval. We design and develop a light-weight interactive system to enable intuitive adjustment, modification transfer, and visual graph exploration. We also report some results of quantitative comparisons, three case studies, and a within-participant user study.", "title": "Exemplar-based Layout Fine-tuning for Node-link Diagrams", "normalizedTitle": "Exemplar-based Layout Fine-tuning for Node-link Diagrams", "fno": "09240072", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Graph Theory", "Interactive Systems", "Batching Mode", "User Modifications", "Local Substructure", "Node Embedding Techniques", "On The Fly Substructure", "Intuitive Adjustment", "Modification Transfer", "Node Link Diagrams", "Exemplar Based Layout Fine Tuning", "Layout Fine Tuning Technique", "Visual Graph Exploration", "Interactive System", "Layout", "Optimization", "Merging", "Topology", "Two Dimensional Displays", "Software Algorithms", "Measurement", "Node Link Diagram", "Graph Layout", "Graph Visualization", "User Interactions" ], "authors": [ { "givenName": "Jiacheng", "surname": "Pan", "fullName": "Jiacheng Pan", "affiliation": "State Key Lab of CAD&CG, Zhejiang University, China", "__typename": "ArticleAuthorType" }, { "givenName": "Wei", "surname": "Chen", "fullName": "Wei Chen", "affiliation": "State Key Lab of CAD&CG, Zhejiang 
University, China", "__typename": "ArticleAuthorType" }, { "givenName": "Xiaodong", "surname": "Zhao", "fullName": "Xiaodong Zhao", "affiliation": "State Key Lab of CAD&CG, Zhejiang University, China", "__typename": "ArticleAuthorType" }, { "givenName": "Shuyue", "surname": "Zhou", "fullName": "Shuyue Zhou", "affiliation": "State Key Lab of CAD&CG, Zhejiang University, China", "__typename": "ArticleAuthorType" }, { "givenName": "Wei", "surname": "Zeng", "fullName": "Wei Zeng", "affiliation": "Shenzhen Institutes of Advanced Technology, Chinese Academy of Sciences, China", "__typename": "ArticleAuthorType" }, { "givenName": "Minfeng", "surname": "Zhu", "fullName": "Minfeng Zhu", "affiliation": "State Key Lab of CAD&CG, Zhejiang University, China", "__typename": "ArticleAuthorType" }, { "givenName": "Jian", "surname": "Chen", "fullName": "Jian Chen", "affiliation": "Ohio State University, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Siwei", "surname": "Fu", "fullName": "Siwei Fu", "affiliation": "State Key Lab of CAD&CG, Zhejiang University, China", "__typename": "ArticleAuthorType" }, { "givenName": "Yingcai", "surname": "Wu", "fullName": "Yingcai Wu", "affiliation": "State Key Lab of CAD&CG, Zhejiang University, China", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "02", "pubDate": "2021-02-01 00:00:00", "pubType": "trans", "pages": "1655-1665", "year": "2021", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/iv/2014/4103/0/4103a001", "title": "Using Visual Cues on DOITree for Visualizing Large Hierarchical Data", "doi": null, "abstractUrl": "/proceedings-article/iv/2014/4103a001/12OmNBJNL1S", "parentPublication": { "id": "proceedings/iv/2014/4103/0", "title": "2014 18th International Conference on Information Visualisation (IV)", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ispdc/2016/4152/0/07904327", "title": "Key Characteristic Variable Based Learning Model for Exemplar Learning", "doi": null, "abstractUrl": "/proceedings-article/ispdc/2016/07904327/12OmNvrdI08", "parentPublication": { "id": "proceedings/ispdc/2016/4152/0", "title": "2016 15th International Symposium on Parallel and Distributed Computing (ISPDC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2017/0831/0/0831a224", "title": "Node Overlap Removal for 1D Graph Layout", "doi": null, "abstractUrl": "/proceedings-article/iv/2017/0831a224/12OmNxuXczJ", "parentPublication": { "id": "proceedings/iv/2017/0831/0", "title": "2017 21st International Conference Information Visualisation (IV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccvw/2017/1034/0/1034b173", "title": "Fast CNN-Based Document Layout Analysis", "doi": null, "abstractUrl": "/proceedings-article/iccvw/2017/1034b173/12OmNyGtjiv", "parentPublication": { "id": "proceedings/iccvw/2017/1034/0", "title": "2017 IEEE International Conference on Computer Vision Workshop (ICCVW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2016/8942/0/8942a094", "title": "On Edge Bundling and Node Layout for Mutually Connected Directed Graphs", "doi": null, "abstractUrl": "/proceedings-article/iv/2016/8942a094/12OmNzwZ6qg", "parentPublication": { "id": "proceedings/iv/2016/8942/0", "title": "2016 20th International Conference Information Visualisation (IV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2018/3365/0/08446523", "title": "COP: A New Continuous Packing Layout for 360 VR Videos", "doi": null, "abstractUrl": "/proceedings-article/vr/2018/08446523/13bd1fKQxs3", "parentPublication": { "id": 
"proceedings/vr/2018/3365/0", "title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2018/6420/0/642000c051", "title": "LayoutNet: Reconstructing the 3D Room Layout from a Single RGB Image", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2018/642000c051/17D45W9KVIS", "parentPublication": { "id": "proceedings/cvpr/2018/6420/0", "title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2020/7168/0/716800d753", "title": "End-to-End Optimization of Scene Layout", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2020/716800d753/1m3ooUhHlVC", "parentPublication": { "id": "proceedings/cvpr/2020/7168/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bigmm/2020/9325/0/09232526", "title": "LayART: Generating indoor layout using ARCore Transformations", "doi": null, "abstractUrl": "/proceedings-article/bigmm/2020/09232526/1o56y3bZgwo", "parentPublication": { "id": "proceedings/bigmm/2020/9325/0", "title": "2020 IEEE Sixth International Conference on Multimedia Big Data (BigMM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2021/2688/0/268800a042", "title": "Exemplar Fine-Tuning for 3D Human Model Fitting Towards In-the-Wild 3D Human Pose Estimation", "doi": null, "abstractUrl": "/proceedings-article/3dv/2021/268800a042/1zWEdaIowuY", "parentPublication": { "id": "proceedings/3dv/2021/2688/0", "title": "2021 International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "09226461", "articleId": 
"1nYrgS8Y9Py", "__typename": "AdjacentArticleType" }, "next": { "fno": "09282195", "articleId": "1phNGqqKjYY", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "1qMJQOwBp28", "name": "ttg202102-09240072s1-tvcg-3030393-mm.zip", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202102-09240072s1-tvcg-3030393-mm.zip", "extension": "zip", "size": "21.3 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "12OmNzFdtc6", "title": "November/December", "year": "2010", "issueNum": "06", "idPrefix": "tg", "pubType": "journal", "volume": "16", "label": "November/December", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUxcsYLL", "doi": "10.1109/TVCG.2010.182", "abstract": "A (3D) scalar grid is a regular Z_$n_1 x n_2 x n_3$_Z grid of vertices where each vertex v is associated with some scalar value Z_$s_v$_Z. ;Applying trilinear interpolation, the scalar grid determines a scalar function g where Z_$g(v) = s_v$_Z for each grid vertex v. An isosurface with ;isovalue s is a triangular mesh which approximates the level set Z_$g^{-1}(α)$_Z. The fractal dimension of an isosurface represents the growth ;in the isosurface as the number of grid cubes increases. We dene and discuss the fractal isosurface dimension. Plotting the fractal ;dimension as a function of the isovalues in a data set provides information about the isosurfaces determined by the data set. We present statistics on the average fractal dimension of 60 publicly available benchmark data sets. We also show the fractal dimension is highly correlated with topological noise in the benchmark data sets, measuring the topological noise by the number of connected components in the isosurface. Lastly, we present a formula predicting the fractal dimension as a function of noise and validate the formula with experimental results.", "abstracts": [ { "abstractType": "Regular", "content": "A (3D) scalar grid is a regular $n_1 x n_2 x n_3$ grid of vertices where each vertex v is associated with some scalar value $s_v$. ;Applying trilinear interpolation, the scalar grid determines a scalar function g where $g(v) = s_v$ for each grid vertex v. An isosurface with ;isovalue s is a triangular mesh which approximates the level set $g^{-1}(α)$. 
The fractal dimension of an isosurface represents the growth ;in the isosurface as the number of grid cubes increases. We dene and discuss the fractal isosurface dimension. Plotting the fractal ;dimension as a function of the isovalues in a data set provides information about the isosurfaces determined by the data set. We present statistics on the average fractal dimension of 60 publicly available benchmark data sets. We also show the fractal dimension is highly correlated with topological noise in the benchmark data sets, measuring the topological noise by the number of connected components in the isosurface. Lastly, we present a formula predicting the fractal dimension as a function of noise and validate the formula with experimental results.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "A (3D) scalar grid is a regular - grid of vertices where each vertex v is associated with some scalar value -. ;Applying trilinear interpolation, the scalar grid determines a scalar function g where - for each grid vertex v. An isosurface with ;isovalue s is a triangular mesh which approximates the level set -. The fractal dimension of an isosurface represents the growth ;in the isosurface as the number of grid cubes increases. We dene and discuss the fractal isosurface dimension. Plotting the fractal ;dimension as a function of the isovalues in a data set provides information about the isosurfaces determined by the data set. We present statistics on the average fractal dimension of 60 publicly available benchmark data sets. We also show the fractal dimension is highly correlated with topological noise in the benchmark data sets, measuring the topological noise by the number of connected components in the isosurface. 
Lastly, we present a formula predicting the fractal dimension as a function of noise and validate the formula with experimental results.", "title": "On the Fractal Dimension of Isosurfaces", "normalizedTitle": "On the Fractal Dimension of Isosurfaces", "fno": "ttg2010061198", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Isosurfaces", "Scalar Data", "Fractal Dimension" ], "authors": [ { "givenName": "Marc", "surname": "Khoury", "fullName": "Marc Khoury", "affiliation": "Computer and Information Science Department at The Ohio State University", "__typename": "ArticleAuthorType" }, { "givenName": "Rephael", "surname": "Wenger", "fullName": "Rephael Wenger", "affiliation": "Computer and Information Science Department at The Ohio State University", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "06", "pubDate": "2010-11-01 00:00:00", "pubType": "trans", "pages": "1198-1205", "year": "2010", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/isise/2010/4360/0/4360a175", "title": "Correction Calculation Model of Curve's Interval Fractal Dimension", "doi": null, "abstractUrl": "/proceedings-article/isise/2010/4360a175/12OmNAq3hPj", "parentPublication": { "id": "proceedings/isise/2010/4360/0", "title": "2010 Third International Symposium on Information Science and Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ifita/2010/4115/1/4115a019", "title": "A Method of Calculating Image Fractal Dimension Based on Fractal Brownian Model", "doi": null, "abstractUrl": "/proceedings-article/ifita/2010/4115a019/12OmNArthc8", "parentPublication": { "id": null, "title": null, "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icnc/2008/3304/4/3304d640", "title": "Image Edge 
Detection Based on Improved Local Fractal Dimension", "doi": null, "abstractUrl": "/proceedings-article/icnc/2008/3304d640/12OmNBf94UJ", "parentPublication": { "id": "proceedings/icnc/2008/3304/4", "title": "2008 Fourth International Conference on Natural Computation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bci/2009/3783/0/3783a093", "title": "Faster Estimation of the Correlation Fractal Dimension Using Box-counting", "doi": null, "abstractUrl": "/proceedings-article/bci/2009/3783a093/12OmNqBKU6z", "parentPublication": { "id": "proceedings/bci/2009/3783/0", "title": "Informatics, Balkan Conference in", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/isise/2008/3494/1/3494a066", "title": "A Grid and Fractal Dimension-Based Data Stream Clustering Algorithm", "doi": null, "abstractUrl": "/proceedings-article/isise/2008/3494a066/12OmNrJRPcq", "parentPublication": { "id": "proceedings/isise/2008/3494/1", "title": "2008 International Symposium on Information Science and Engieering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icesssymposia/2008/3288/0/3288a475", "title": "Study on Recognition Characteristics of Acoustic Emission Based on Fractal Dimension", "doi": null, "abstractUrl": "/proceedings-article/icesssymposia/2008/3288a475/12OmNvqW6T9", "parentPublication": { "id": "proceedings/icesssymposia/2008/3288/0", "title": "Embedded Software and Systems, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icci/2006/0475/1/04216412", "title": "A Relative Fractal Dimension Spectrum as a Complexity Measure", "doi": null, "abstractUrl": "/proceedings-article/icci/2006/04216412/12OmNx8wTnz", "parentPublication": { "id": "proceedings/icci/2006/0475/1", "title": "Cognitive Informatics, IEEE International Conference on", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icci/2006/0475/2/04216530", "title": "Traffic Image Classification Method Based on Fractal Dimension", "doi": null, "abstractUrl": "/proceedings-article/icci/2006/04216530/12OmNxZBSzG", "parentPublication": { "id": "proceedings/icci/2006/0475/2", "title": "Cognitive Informatics, IEEE International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bife/2010/4116/0/4116a328", "title": "An Improved K-Line Fractal Dimension and Its Application on Stock Time Series Segmentation", "doi": null, "abstractUrl": "/proceedings-article/bife/2010/4116a328/12OmNylKAZ4", "parentPublication": { "id": "proceedings/bife/2010/4116/0", "title": "2010 Third International Conference on Business Intelligence and Financial Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/1995/01/i0072", "title": "Texture Segmentation Using Fractal Dimension", "doi": null, "abstractUrl": "/journal/tp/1995/01/i0072/13rRUB6Sq1i", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "ttg2010061190", "articleId": "13rRUwvBy8R", "__typename": "AdjacentArticleType" }, "next": { "fno": "ttg2010061206", "articleId": "13rRUxYrbUA", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "17ShDTXnFwf", "name": "ttg2010061198s1.xlsx", "location": "https://www.computer.org/csdl/api/v1/extra/ttg2010061198s1.xlsx", "extension": "xlsx", "size": "65.5 kB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "12OmNyKJisD", "title": "Oct.-Dec.", "year": "2015", "issueNum": "04", "idPrefix": "th", "pubType": "journal", "volume": "8", "label": "Oct.-Dec.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUypp57K", "doi": "10.1109/TOH.2015.2466239", "abstract": "Haptic interfaces offer an intuitive way to interact with and manipulate 3D datasets, and may simplify the interpretation of visual information. This work proposes an algorithm to provide haptic feedback directly from volumetric datasets, as an aid to regular visualization. The haptic rendering algorithm lets users perceive isosurfaces in volumetric datasets, and it relies on several design features that ensure a robust and efficient rendering. A marching tetrahedra approach enables the dynamic extraction of a piecewise linear continuous isosurface. Robustness is achieved using a continuous collision detection step coupled with state-of-the-art proxy-based rendering methods over the extracted isosurface. The introduced marching tetrahedra approach guarantees that the extracted isosurface will match the topology of an equivalent isosurface computed using trilinear interpolation. The proposed haptic rendering algorithm improves the consistency between haptic and visual cues computing a second proxy on the isosurface displayed on screen. Our experiments demonstrate the improvements on the isosurface extraction stage as well as the robustness and the efficiency of the complete algorithm.", "abstracts": [ { "abstractType": "Regular", "content": "Haptic interfaces offer an intuitive way to interact with and manipulate 3D datasets, and may simplify the interpretation of visual information. This work proposes an algorithm to provide haptic feedback directly from volumetric datasets, as an aid to regular visualization. 
The haptic rendering algorithm lets users perceive isosurfaces in volumetric datasets, and it relies on several design features that ensure a robust and efficient rendering. A marching tetrahedra approach enables the dynamic extraction of a piecewise linear continuous isosurface. Robustness is achieved using a continuous collision detection step coupled with state-of-the-art proxy-based rendering methods over the extracted isosurface. The introduced marching tetrahedra approach guarantees that the extracted isosurface will match the topology of an equivalent isosurface computed using trilinear interpolation. The proposed haptic rendering algorithm improves the consistency between haptic and visual cues computing a second proxy on the isosurface displayed on screen. Our experiments demonstrate the improvements on the isosurface extraction stage as well as the robustness and the efficiency of the complete algorithm.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Haptic interfaces offer an intuitive way to interact with and manipulate 3D datasets, and may simplify the interpretation of visual information. This work proposes an algorithm to provide haptic feedback directly from volumetric datasets, as an aid to regular visualization. The haptic rendering algorithm lets users perceive isosurfaces in volumetric datasets, and it relies on several design features that ensure a robust and efficient rendering. A marching tetrahedra approach enables the dynamic extraction of a piecewise linear continuous isosurface. Robustness is achieved using a continuous collision detection step coupled with state-of-the-art proxy-based rendering methods over the extracted isosurface. The introduced marching tetrahedra approach guarantees that the extracted isosurface will match the topology of an equivalent isosurface computed using trilinear interpolation. 
The proposed haptic rendering algorithm improves the consistency between haptic and visual cues computing a second proxy on the isosurface displayed on screen. Our experiments demonstrate the improvements on the isosurface extraction stage as well as the robustness and the efficiency of the complete algorithm.", "title": "Volume Haptics with Topology-Consistent Isosurfaces", "normalizedTitle": "Volume Haptics with Topology-Consistent Isosurfaces", "fno": "07182778", "hasPdf": true, "idPrefix": "th", "keywords": [ "Isosurfaces", "Haptic Interfaces", "Rendering Computer Graphics", "Visualization", "Volume Measurement", "Proxy Based Haptic Rendering", "Volume Haptics", "Marching Tetrahedra", "Isosurface" ], "authors": [ { "givenName": "Loc", "surname": "Corenthy", "fullName": "Loc Corenthy", "affiliation": ", Universidad Politécnica de Madrid, Madrid, Spain", "__typename": "ArticleAuthorType" }, { "givenName": "Miguel A.", "surname": "Otaduy", "fullName": "Miguel A. Otaduy", "affiliation": ", Universidad Rey Juan Carlos, Madrid, Spain", "__typename": "ArticleAuthorType" }, { "givenName": "Luis", "surname": "Pastor", "fullName": "Luis Pastor", "affiliation": ", Universidad Rey Juan Carlos, Madrid, Spain", "__typename": "ArticleAuthorType" }, { "givenName": "Marcos", "surname": "Garcia", "fullName": "Marcos Garcia", "affiliation": ", Universidad Rey Juan Carlos, Madrid, Spain", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "04", "pubDate": "2015-10-01 00:00:00", "pubType": "trans", "pages": "480-491", "year": "2015", "issn": "1939-1412", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/ieee-vis/2000/6478/0/64780016", "title": "Hardware-Accelerated Volume and Isosurface Rendering Based on Cell-Projection", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2000/64780016/12OmNBuL1lz", 
"parentPublication": { "id": "proceedings/ieee-vis/2000/6478/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/2001/7200/0/7200pekar", "title": "Fast Detection of Meaningful Isosurfaces for Volume Data Visualization", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2001/7200pekar/12OmNCbU3bR", "parentPublication": { "id": "proceedings/ieee-vis/2001/7200/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vv/2004/8781/0/87810009", "title": "A Fast High Accuracy Volume Renderer for Unstructured Data", "doi": null, "abstractUrl": "/proceedings-article/vv/2004/87810009/12OmNs0kyFD", "parentPublication": { "id": "proceedings/vv/2004/8781/0", "title": "Volume Visualization and Graphics, IEEE Symposium on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/2004/8788/0/87880497", "title": "Simplifying Flexible Isosurfaces Using Local Geometric Measures", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2004/87880497/12OmNxUMHnw", "parentPublication": { "id": "proceedings/ieee-vis/2004/8788/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/2005/2766/0/27660038", "title": "Scale-Invariant Volume Rendering", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2005/27660038/12OmNxb5hu0", "parentPublication": { "id": "proceedings/ieee-vis/2005/2766/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/2004/8788/0/87880449", "title": "Volume Refinement Fairing Isosurfaces", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2004/87880449/12OmNyxFKc4", "parentPublication": { "id": 
"proceedings/ieee-vis/2004/8788/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/th/2015/04/07124506", "title": "Direct Visuo-Haptic 4D Volume Rendering Using Respiratory Motion Models", "doi": null, "abstractUrl": "/journal/th/2015/04/07124506/13rRUwInvfi", "parentPublication": { "id": "trans/th", "title": "IEEE Transactions on Haptics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/1998/01/v0037", "title": "A High Accuracy Volume Renderer for Unstructured Data", "doi": null, "abstractUrl": "/journal/tg/1998/01/v0037/13rRUwwaKsU", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2000/04/v0335", "title": "An Order of Magnitude Faster Isosurface Rendering in Software on a PC than Using Dedicated, General Purpose Rendering Hardware", "doi": null, "abstractUrl": "/journal/tg/2000/04/v0335/13rRUxBJhFk", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2007/02/04069241", "title": "Topology-Controlled Volume Rendering", "doi": null, "abstractUrl": "/journal/tg/2007/02/04069241/13rRUytF41s", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "07113879", "articleId": "13rRUyuvRoW", "__typename": "AdjacentArticleType" }, "next": { "fno": "07258388", "articleId": "13rRUwgyOju", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNwCsdFw", "title": "PrePrints", "year": "5555", "issueNum": "01", "idPrefix": "tk", "pubType": "journal", "volume": null, "label": "PrePrints", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1yeC6nu6NsA", "doi": "10.1109/TKDE.2021.3124061", "abstract": "Detecting anomalies for dynamic graphs has drawn increasing attention due to their wide applications in social networks, e-commerce, and cybersecurity. Recent deep learning-based approaches have shown promising results over shallow methods. However, they fail to address two core challenges of anomaly detection in dynamic graphs: the lack of informative encoding for unattributed nodes and the difficulty of learning discriminate knowledge from coupled spatial-temporal dynamic graphs. To overcome these challenges, in this paper, we present a novel transformer-based Anomaly Detection framework for dynamic graphs (TADDY). Our framework constructs a comprehensive node encoding strategy to better represent each nodes structural and temporal roles in an evolving graphs stream. Meanwhile, TADDY captures informative representation from dynamic graphs with coupled spatial-temporal patterns via a dynamic graph transformer model. The extensive experimental results demonstrate that our proposed TADDY framework outperforms the state-of-the-art methods by a large margin on six real-world datasets.", "abstracts": [ { "abstractType": "Regular", "content": "Detecting anomalies for dynamic graphs has drawn increasing attention due to their wide applications in social networks, e-commerce, and cybersecurity. Recent deep learning-based approaches have shown promising results over shallow methods. 
However, they fail to address two core challenges of anomaly detection in dynamic graphs: the lack of informative encoding for unattributed nodes and the difficulty of learning discriminate knowledge from coupled spatial-temporal dynamic graphs. To overcome these challenges, in this paper, we present a novel transformer-based Anomaly Detection framework for dynamic graphs (TADDY). Our framework constructs a comprehensive node encoding strategy to better represent each nodes structural and temporal roles in an evolving graphs stream. Meanwhile, TADDY captures informative representation from dynamic graphs with coupled spatial-temporal patterns via a dynamic graph transformer model. The extensive experimental results demonstrate that our proposed TADDY framework outperforms the state-of-the-art methods by a large margin on six real-world datasets.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Detecting anomalies for dynamic graphs has drawn increasing attention due to their wide applications in social networks, e-commerce, and cybersecurity. Recent deep learning-based approaches have shown promising results over shallow methods. However, they fail to address two core challenges of anomaly detection in dynamic graphs: the lack of informative encoding for unattributed nodes and the difficulty of learning discriminate knowledge from coupled spatial-temporal dynamic graphs. To overcome these challenges, in this paper, we present a novel transformer-based Anomaly Detection framework for dynamic graphs (TADDY). Our framework constructs a comprehensive node encoding strategy to better represent each nodes structural and temporal roles in an evolving graphs stream. Meanwhile, TADDY captures informative representation from dynamic graphs with coupled spatial-temporal patterns via a dynamic graph transformer model. 
The extensive experimental results demonstrate that our proposed TADDY framework outperforms the state-of-the-art methods by a large margin on six real-world datasets.", "title": "Anomaly Detection in Dynamic Graphs via Transformer", "normalizedTitle": "Anomaly Detection in Dynamic Graphs via Transformer", "fno": "09599560", "hasPdf": true, "idPrefix": "tk", "keywords": [ "Transformers", "Image Edge Detection", "Encoding", "Feature Extraction", "Anomaly Detection", "Task Analysis", "Solid Modeling", "Anomaly Detection", "Dynamic Graphs", "Transformer" ], "authors": [ { "givenName": "Yixin", "surname": "Liu", "fullName": "Yixin Liu", "affiliation": "Department of Data Science and AI, Monash University, 2541 Clayton, Victoria, Australia, (e-mail: yixin.liu@monash.edu)", "__typename": "ArticleAuthorType" }, { "givenName": "Shirui", "surname": "Pan", "fullName": "Shirui Pan", "affiliation": "Faculty of Information Technology, Monash University, 2541 Clayton, Victoria, Australia, 3800 (e-mail: shirui.pan@monash.edu)", "__typename": "ArticleAuthorType" }, { "givenName": "Yu Guang", "surname": "Wang", "fullName": "Yu Guang Wang", "affiliation": "School of Mathematics and Statistics, UNSW, 7800 Sydney, New South Wales, Australia, (e-mail: yuguang.wang@unsw.edu.au)", "__typename": "ArticleAuthorType" }, { "givenName": "Fei", "surname": "Xiong", "fullName": "Fei Xiong", "affiliation": "School of Electronic and Information Engineering, Beijing Jiaotong University, 47829 Beijing, Beijing, China, 100044 (e-mail: xiongf@bjtu.edu.cn)", "__typename": "ArticleAuthorType" }, { "givenName": "Liang", "surname": "Wang", "fullName": "Liang Wang", "affiliation": "School of Computer Science, Northwestern Polytechnical University, Xi'an, Shaanxi, China, (e-mail: liangwang@nwpu.edu.cn)", "__typename": "ArticleAuthorType" }, { "givenName": "Qingfeng", "surname": "Chen", "fullName": "Qingfeng Chen", "affiliation": "School of Computer, Electronic and Information, Guangxi University, 12664 
Nanning, Guangxi, China, 530005 (e-mail: qingfeng@gxu.edu.cn)", "__typename": "ArticleAuthorType" }, { "givenName": "Vincent CS", "surname": "Lee", "fullName": "Vincent CS Lee", "affiliation": "Clayton School of IT, Monash University, Melbourne, Victoria, Australia, VIC 3800 (e-mail: vincent.cs.lee@monash.edu)", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "01", "pubDate": "2021-11-01 00:00:00", "pubType": "trans", "pages": "1-1", "year": "5555", "issn": "1041-4347", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/ithings-greencom-cpscom-smartdata/2017/3066/0/08276899", "title": "Anomaly Detection in Evolving Heterogeneous Graphs", "doi": null, "abstractUrl": "/proceedings-article/ithings-greencom-cpscom-smartdata/2017/08276899/17D45VUZMU8", "parentPublication": { "id": "proceedings/ithings-greencom-cpscom-smartdata/2017/3066/0", "title": "2017 IEEE International Conference on Internet of Things (iThings) and IEEE Green Computing and Communications (GreenCom) and IEEE Cyber, Physical and Social Computing (CPSCom) and IEEE Smart Data (SmartData)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2021/2812/0/281200q6352", "title": "Spatial-Temporal Transformer for Dynamic Scene Graph Generation", "doi": null, "abstractUrl": "/proceedings-article/iccv/2021/281200q6352/1BmEvNHaTO8", "parentPublication": { "id": "proceedings/iccv/2021/2812/0", "title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2021/2812/0/281200n3557", "title": "VidTr: Video Transformer Without Convolutions", "doi": null, "abstractUrl": "/proceedings-article/iccv/2021/281200n3557/1BmFrycdf5S", "parentPublication": { "id": 
"proceedings/iccv/2021/2812/0", "title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2023/04/09815549", "title": "Adaptive Multi-View and Temporal Fusing Transformer for 3D Human Pose Estimation", "doi": null, "abstractUrl": "/journal/tp/2023/04/09815549/1ELg9lk0AeI", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aemcse/2022/8474/0/847400a711", "title": "VadTR: Video Anomaly Detection with Transformer", "doi": null, "abstractUrl": "/proceedings-article/aemcse/2022/847400a711/1IlNThd2dos", "parentPublication": { "id": "proceedings/aemcse/2022/8474/0", "title": "2022 5th International Conference on Advanced Electronic Materials, Computers and Software Engineering (AEMCSE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/big-data/2022/8045/0/10021063", "title": "Decomposed Transformer with Frequency Attention for Multivariate Time Series Anomaly Detection", "doi": null, "abstractUrl": "/proceedings-article/big-data/2022/10021063/1KfRWG876bS", "parentPublication": { "id": "proceedings/big-data/2022/8045/0", "title": "2022 IEEE International Conference on Big Data (Big Data)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2023/9346/0/934600e859", "title": "TransMOT: Spatial-Temporal Graph Transformer for Multiple Object Tracking", "doi": null, "abstractUrl": "/proceedings-article/wacv/2023/934600e859/1KxUGrr4jRK", "parentPublication": { "id": "proceedings/wacv/2023/9346/0", "title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/wacv/2023/9346/0/934600f119", "title": "Exploiting Long-Term Dependencies for Generating Dynamic Scene Graphs", "doi": null, "abstractUrl": "/proceedings-article/wacv/2023/934600f119/1KxVGWLEPAc", "parentPublication": { "id": "proceedings/wacv/2023/9346/0", "title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/big-data/2020/6251/0/09377893", "title": "Anomaly Detection in Large Graphs", "doi": null, "abstractUrl": "/proceedings-article/big-data/2020/09377893/1s64Ti4v00U", "parentPublication": { "id": "proceedings/big-data/2020/6251/0", "title": "2020 IEEE International Conference on Big Data (Big Data)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/smds/2021/0058/0/005800a184", "title": "DynGraphTrans: Dynamic Graph Embedding via Modified Universal Transformer Networks for Financial Transaction Data", "doi": null, "abstractUrl": "/proceedings-article/smds/2021/005800a184/1yeQwSQeMlq", "parentPublication": { "id": "proceedings/smds/2021/0058/0", "title": "2021 IEEE International Conference on Smart Data Services (SMDS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "09573443", "articleId": "1xH5BipuJ0I", "__typename": "AdjacentArticleType" }, "next": { "fno": "09611023", "articleId": "1ypYayRhnvq", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNrFBPWq", "title": "September-October", "year": "2006", "issueNum": "05", "idPrefix": "tg", "pubType": "journal", "volume": "12", "label": "September-October", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUwvBy8O", "doi": "10.1109/TVCG.2006.154", "abstract": "Computational simulations frequently generate solutions defined over very large tetrahedral volume meshes containing many millions of elements. Furthermore, such solutions may often be expressed using non-linear basis functions. Certain solution techniques, such as discontinuous Galerkin methods, may even produce non-conforming meshes. Such data is difficult to visualize interactively, as it is far too large to fit in memory and many common data reduction techniques, such as mesh simplification, cannot be applied to non-conforming meshes. We introduce a point-based visualization system for interactive rendering of large, potentially non-conforming, tetrahedral meshes. We propose methods for adaptively sampling points from non-linear solution data and for decimating points at run time to fit GPU memory limits. Because these are streaming processes, memory consumption is independent of the input size. We also present an order-independent point rendering method that can efficiently render volumes on the order of 20 million tetrahedra at interactive rates.", "abstracts": [ { "abstractType": "Regular", "content": "Computational simulations frequently generate solutions defined over very large tetrahedral volume meshes containing many millions of elements. Furthermore, such solutions may often be expressed using non-linear basis functions. Certain solution techniques, such as discontinuous Galerkin methods, may even produce non-conforming meshes. 
Such data is difficult to visualize interactively, as it is far too large to fit in memory and many common data reduction techniques, such as mesh simplification, cannot be applied to non-conforming meshes. We introduce a point-based visualization system for interactive rendering of large, potentially non-conforming, tetrahedral meshes. We propose methods for adaptively sampling points from non-linear solution data and for decimating points at run time to fit GPU memory limits. Because these are streaming processes, memory consumption is independent of the input size. We also present an order-independent point rendering method that can efficiently render volumes on the order of 20 million tetrahedra at interactive rates.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Computational simulations frequently generate solutions defined over very large tetrahedral volume meshes containing many millions of elements. Furthermore, such solutions may often be expressed using non-linear basis functions. Certain solution techniques, such as discontinuous Galerkin methods, may even produce non-conforming meshes. Such data is difficult to visualize interactively, as it is far too large to fit in memory and many common data reduction techniques, such as mesh simplification, cannot be applied to non-conforming meshes. We introduce a point-based visualization system for interactive rendering of large, potentially non-conforming, tetrahedral meshes. We propose methods for adaptively sampling points from non-linear solution data and for decimating points at run time to fit GPU memory limits. Because these are streaming processes, memory consumption is independent of the input size. 
We also present an order-independent point rendering method that can efficiently render volumes on the order of 20 million tetrahedra at interactive rates.", "title": "Interactive Point-Based Rendering of Higher-Order Tetrahedral Data", "normalizedTitle": "Interactive Point-Based Rendering of Higher-Order Tetrahedral Data", "fno": "v1229", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Data Visualization", "Finite Element Methods", "Moment Methods", "Sampling Methods", "Sorting", "Electric Shock", "Computer Graphics", "Piecewise Linear Techniques", "Cost Function", "Performance Loss", "Point Based Visualization", "Interactive Large Higher Order Tetrahedral Volume Visualization" ], "authors": [ { "givenName": "Yuan", "surname": "Zhou", "fullName": "Yuan Zhou", "affiliation": "Dept. of Comput. Sci., Illinois Univ., Urbana, IL", "__typename": "ArticleAuthorType" }, { "givenName": "Michael", "surname": "Garland", "fullName": "Michael Garland", "affiliation": null, "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "05", "pubDate": "2006-09-01 00:00:00", "pubType": "trans", "pages": "1229-1236", "year": "2006", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/ieee-vis/2005/2766/0/01532809", "title": "Rendering tetrahedral meshes with higher-order attenuation functions for digital radiograph reconstruction", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2005/01532809/12OmNAlNiKF", "parentPublication": { "id": "proceedings/ieee-vis/2005/2766/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/2005/2766/0/27660039", "title": "Rendering Tetrahedral Meshes with Higher-Order Attenuation Functions for Digital Radiograph Reconstruction", "doi": null, "abstractUrl": 
"/proceedings-article/ieee-vis/2005/27660039/12OmNAoUTua", "parentPublication": { "id": "proceedings/ieee-vis/2005/2766/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/visual/1999/5897/0/00809868", "title": "Tetrahedral mesh compression with the cut-border machine", "doi": null, "abstractUrl": "/proceedings-article/visual/1999/00809868/12OmNBr4ev1", "parentPublication": { "id": "proceedings/visual/1999/5897/0", "title": "Proceedings Visualization '99", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/1999/5897/0/58970009", "title": "Tetrahedral Mesh Compression with the Cut-Border Machine", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/1999/58970009/12OmNxeM46m", "parentPublication": { "id": "proceedings/ieee-vis/1999/5897/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vv/2004/8781/0/87810071", "title": "Texture-Encoded Tetrahedral Strips", "doi": null, "abstractUrl": "/proceedings-article/vv/2004/87810071/12OmNxwnctV", "parentPublication": { "id": "proceedings/vv/2004/8781/0", "title": "Volume Visualization and Graphics, IEEE Symposium on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vg/2005/26/0/01500537", "title": "Simplification of unstructured tetrahedral meshes by point sampling", "doi": null, "abstractUrl": "/proceedings-article/vg/2005/01500537/12OmNyywxC8", "parentPublication": { "id": "proceedings/vg/2005/26/0", "title": "Volume Graphics 2005", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/haptics/2002/1489/0/14890193", "title": "Haptic Rendering of Data on Unstructured Tetrahedral Grids", "doi": null, "abstractUrl": "/proceedings-article/haptics/2002/14890193/12OmNz5s0PQ", 
"parentPublication": { "id": "proceedings/haptics/2002/1489/0", "title": "Haptic Interfaces for Virtual Environment and Teleoperator Systems, International Symposium on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/2004/8788/0/87880433", "title": "TetSplat Real-Time Rendering and Volume Clipping of Large Unstructured Tetrahedral Meshes", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2004/87880433/12OmNzRHOOj", "parentPublication": { "id": "proceedings/ieee-vis/2004/8788/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2006/05/v1345", "title": "A Generic and Scalable Pipeline for GPU Tetrahedral Grid Rendering", "doi": null, "abstractUrl": "/journal/tg/2006/05/v1345/13rRUwgQpDj", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "v1221", "articleId": "13rRUxcsYLE", "__typename": "AdjacentArticleType" }, "next": { "fno": "v1237", "articleId": "13rRUwbs2gk", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNB9bvma", "title": "July-September", "year": "1999", "issueNum": "03", "idPrefix": "tg", "pubType": "journal", "volume": "5", "label": "July-September", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUxly95r", "doi": "10.1109/2945.795214", "abstract": "Abstract—We present a method for the construction of multiple levels of tetrahedral meshes approximating a trivariate scalar-valued function at different levels of detail. Starting with an initial, high-resolution triangulation of a three-dimensional region, we construct coarser representation levels by collapsing edges of the mesh. Each triangulation defines a linear spline function, where the function values associated with the vertices are the spline coefficients. Error bounds are stored for individual tetrahedra and are updated as the mesh is simplified. Two algorithms are presented that simplify the mesh within prescribed error bounds. Each algorithm treats simplification on the mesh boundary. The result is a hierarchical data description suited for efficient visualization of large data sets at varying levels of detail.", "abstracts": [ { "abstractType": "Regular", "content": "Abstract—We present a method for the construction of multiple levels of tetrahedral meshes approximating a trivariate scalar-valued function at different levels of detail. Starting with an initial, high-resolution triangulation of a three-dimensional region, we construct coarser representation levels by collapsing edges of the mesh. Each triangulation defines a linear spline function, where the function values associated with the vertices are the spline coefficients. Error bounds are stored for individual tetrahedra and are updated as the mesh is simplified. Two algorithms are presented that simplify the mesh within prescribed error bounds. Each algorithm treats simplification on the mesh boundary. 
The result is a hierarchical data description suited for efficient visualization of large data sets at varying levels of detail.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Abstract—We present a method for the construction of multiple levels of tetrahedral meshes approximating a trivariate scalar-valued function at different levels of detail. Starting with an initial, high-resolution triangulation of a three-dimensional region, we construct coarser representation levels by collapsing edges of the mesh. Each triangulation defines a linear spline function, where the function values associated with the vertices are the spline coefficients. Error bounds are stored for individual tetrahedra and are updated as the mesh is simplified. Two algorithms are presented that simplify the mesh within prescribed error bounds. Each algorithm treats simplification on the mesh boundary. The result is a hierarchical data description suited for efficient visualization of large data sets at varying levels of detail.", "title": "Simplification of Tetrahedral Meshes with Error Bounds", "normalizedTitle": "Simplification of Tetrahedral Meshes with Error Bounds", "fno": "v0224", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Mesh Simplification", "Hierarchical Representation", "Multiresolution Method", "Scattered Data", "Spline", "Tetrahedral Mesh", "Visualization" ], "authors": [ { "givenName": "Issac J.", "surname": "Trotts", "fullName": "Issac J. Trotts", "affiliation": null, "__typename": "ArticleAuthorType" }, { "givenName": "Bernd", "surname": "Hamann", "fullName": "Bernd Hamann", "affiliation": null, "__typename": "ArticleAuthorType" }, { "givenName": "Kenneth I.", "surname": "Joy", "fullName": "Kenneth I. 
Joy", "affiliation": null, "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": false, "isOpenAccess": false, "issueNum": "03", "pubDate": "1999-07-01 00:00:00", "pubType": "trans", "pages": "224-237", "year": "1999", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [], "adjacentArticles": { "previous": { "fno": "v0210", "articleId": "13rRUwgyOj7", "__typename": "AdjacentArticleType" }, "next": { "fno": "v0238", "articleId": "13rRUxOdD85", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNBVrjrd", "title": "Jan.-March", "year": "2018", "issueNum": "01", "idPrefix": "tn", "pubType": "journal", "volume": "5", "label": "Jan.-March", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUwfZC14", "doi": "10.1109/TNSE.2018.2801198", "abstract": null, "abstracts": [], "normalizedAbstract": null, "title": "Editorial: Message from the Editor-in-Chief", "normalizedTitle": "Editorial: Message from the Editor-in-Chief", "fno": "08306587", "hasPdf": true, "idPrefix": "tn", "keywords": [], "authors": [ { "givenName": "Dapeng Oliver", "surname": "Wu", "fullName": "Dapeng Oliver Wu", "affiliation": null, "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": false, "showRecommendedArticles": true, "isOpenAccess": true, "issueNum": "01", "pubDate": "2018-01-01 00:00:00", "pubType": "trans", "pages": "1", "year": "2018", "issn": "2327-4697", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "trans/tm/2011/01/ttm2011010002", "title": "Editorial: A Message from the New Editor-in-Chief", "doi": null, "abstractUrl": "/journal/tm/2011/01/ttm2011010002/13rRUwIF6lL", "parentPublication": { "id": "trans/tm", "title": "IEEE Transactions on Mobile Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/sc/2013/02/tsc2013020146", "title": "Editorial: A Message from the Editor-in-Chief", "doi": null, "abstractUrl": "/journal/sc/2013/02/tsc2013020146/13rRUwbs28N", "parentPublication": { "id": "trans/sc", "title": "IEEE Transactions on Services Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tm/2017/05/07891655", "title": "Editorial: A Message from the Incoming Editor-in-Chief", "doi": null, "abstractUrl": "/journal/tm/2017/05/07891655/13rRUwghd9N", "parentPublication": { 
"id": "trans/tm", "title": "IEEE Transactions on Mobile Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tm/2014/01/ttm2014010002", "title": "Editorial: A Message from the Incoming Editor-in-Chief", "doi": null, "abstractUrl": "/journal/tm/2014/01/ttm2014010002/13rRUwgyOjX", "parentPublication": { "id": "trans/tm", "title": "IEEE Transactions on Mobile Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ts/2006/01/e0002", "title": "Editorial: A Message from the New Editor-in-Chief", "doi": null, "abstractUrl": "/journal/ts/2006/01/e0002/13rRUxASujb", "parentPublication": { "id": "trans/ts", "title": "IEEE Transactions on Software Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/sc/2013/01/tsc2013010002", "title": "Editorial: A message from the new Editor-in-Chief", "doi": null, "abstractUrl": "/journal/sc/2013/01/tsc2013010002/13rRUxlgy96", "parentPublication": { "id": "trans/sc", "title": "IEEE Transactions on Services Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/sc/2017/01/07842729", "title": "Editorial: A Message from the New Editor-in-Chief", "doi": null, "abstractUrl": "/journal/sc/2017/01/07842729/13rRUyXKxRL", "parentPublication": { "id": "trans/sc", "title": "IEEE Transactions on Services Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tm/2008/04/ttm2008040386", "title": "Editorial: A Message from the New Editor-in-Chief", "doi": null, "abstractUrl": "/journal/tm/2008/04/ttm2008040386/13rRUygBwic", "parentPublication": { "id": "trans/tm", "title": "IEEE Transactions on Mobile Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tm/2014/01/ttm2014010001", "title": "Editorial: A Message from the Outgoing Editor-in-Chief and 
Associate Editor-in-Chief", "doi": null, "abstractUrl": "/journal/tm/2014/01/ttm2014010001/13rRUynHujJ", "parentPublication": { "id": "trans/tm", "title": "IEEE Transactions on Mobile Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/cc/2020/02/09109590", "title": "Editorial: A Message from the Incoming Editor-in-Chief", "doi": null, "abstractUrl": "/journal/cc/2020/02/09109590/1kpElzQzX4k", "parentPublication": { "id": "trans/cc", "title": "IEEE Transactions on Cloud Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": null, "next": { "fno": "07997937", "articleId": "13rRUwhpBEJ", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNBqdri8", "title": "Jan.-Feb.", "year": "2013", "issueNum": "01", "idPrefix": "tb", "pubType": "journal", "volume": "10", "label": "Jan.-Feb.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUwgQpBC", "doi": "10.1109/TCBB.2013.56", "abstract": null, "abstracts": [ { "abstractType": "Regular", "content": "", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": null, "title": "Editorial from the New Editor-in-Chief", "normalizedTitle": "Editorial from the New Editor-in-Chief", "fno": "ttb2013010001", "hasPdf": true, "idPrefix": "tb", "keywords": [], "authors": [ { "givenName": "Ying", "surname": "Xu", "fullName": "Ying Xu", "affiliation": null, "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": false, "showRecommendedArticles": true, "isOpenAccess": true, "issueNum": "01", "pubDate": "2013-01-01 00:00:00", "pubType": "trans", "pages": "1", "year": "2013", "issn": "1545-5963", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "trans/tb/2017/02/07887793", "title": "Editorial from the New Editor-in-Chief", "doi": null, "abstractUrl": "/journal/tb/2017/02/07887793/13rRUwIF6jw", "parentPublication": { "id": "trans/tb", "title": "IEEE/ACM Transactions on Computational Biology and Bioinformatics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tm/2011/01/ttm2011010002", "title": "Editorial: A Message from the New Editor-in-Chief", "doi": null, "abstractUrl": "/journal/tm/2011/01/ttm2011010002/13rRUwIF6lL", "parentPublication": { "id": "trans/tm", "title": "IEEE Transactions on Mobile Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tc/2015/01/06980146", "title": "Editorial from the New Editor in Chief", "doi": null, "abstractUrl": 
"/journal/tc/2015/01/06980146/13rRUxASu07", "parentPublication": { "id": "trans/tc", "title": "IEEE Transactions on Computers", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ts/2006/01/e0002", "title": "Editorial: A Message from the New Editor-in-Chief", "doi": null, "abstractUrl": "/journal/ts/2006/01/e0002/13rRUxASujb", "parentPublication": { "id": "trans/ts", "title": "IEEE Transactions on Software Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/sc/2013/01/tsc2013010002", "title": "Editorial: A message from the new Editor-in-Chief", "doi": null, "abstractUrl": "/journal/sc/2013/01/tsc2013010002/13rRUxlgy96", "parentPublication": { "id": "trans/sc", "title": "IEEE Transactions on Services Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/sc/2017/01/07842729", "title": "Editorial: A Message from the New Editor-in-Chief", "doi": null, "abstractUrl": "/journal/sc/2017/01/07842729/13rRUyXKxRL", "parentPublication": { "id": "trans/sc", "title": "IEEE Transactions on Services Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tm/2008/04/ttm2008040386", "title": "Editorial: A Message from the New Editor-in-Chief", "doi": null, "abstractUrl": "/journal/tm/2008/04/ttm2008040386/13rRUygBwic", "parentPublication": { "id": "trans/tm", "title": "IEEE Transactions on Mobile Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ts/2018/01/08249710", "title": "Editorial from the New Editor in Chief", "doi": null, "abstractUrl": "/journal/ts/2018/01/08249710/13rRUygT7uv", "parentPublication": { "id": "trans/ts", "title": "IEEE Transactions on Software Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ec/2021/01/09372372", "title": "Editorial from the New 
Editor in Chief", "doi": null, "abstractUrl": "/journal/ec/2021/01/09372372/1rNPKvWrRu0", "parentPublication": { "id": "trans/ec", "title": "IEEE Transactions on Emerging Topics in Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tb/2021/06/09642422", "title": "Editorial: From the New Editor-in-Chief", "doi": null, "abstractUrl": "/journal/tb/2021/06/09642422/1zarvQplbtC", "parentPublication": { "id": "trans/tb", "title": "IEEE/ACM Transactions on Computational Biology and Bioinformatics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "ttb20130100c2", "articleId": "13rRUyYSWrf", "__typename": "AdjacentArticleType" }, "next": { "fno": "ttb2013010002", "articleId": "13rRUzp02mN", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNCm7Bxu", "title": "July", "year": "2011", "issueNum": "07", "idPrefix": "tg", "pubType": "journal", "volume": "17", "label": "July", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUxYrbUC", "doi": "10.1109/TVCG.2011.87", "abstract": null, "abstracts": [ { "abstractType": "Regular", "content": "", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": null, "title": "Guest Editors' Introduction: Special Section on the ACM Symposium on Virtual Reality and Software Technology (VRST 2009)", "normalizedTitle": "Guest Editors' Introduction: Special Section on the ACM Symposium on Virtual Reality and Software Technology (VRST 2009)", "fno": "ttg2011070873", "hasPdf": true, "idPrefix": "tg", "keywords": [], "authors": [ { "givenName": "Daniel", "surname": "Thalmann", "fullName": "Daniel Thalmann", "affiliation": null, "__typename": "ArticleAuthorType" }, { "givenName": "Benjamin", "surname": "Lok", "fullName": "Benjamin Lok", "affiliation": null, "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": false, "showRecommendedArticles": true, "isOpenAccess": true, "issueNum": "07", "pubDate": "2011-07-01 00:00:00", "pubType": "trans", "pages": "873-874", "year": "2011", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "trans/tg/2016/07/07478595", "title": "Guest Editors’ Introduction: Special Section on the IEEE Pacific Visualization Symposium 2015", "doi": null, "abstractUrl": "/journal/tg/2016/07/07478595/13rRUEgarsL", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2012/03/ttg2012030354", "title": "Guest Editors' Introduction: Special Section on ACM VRST", "doi": null, "abstractUrl": 
"/journal/tg/2012/03/ttg2012030354/13rRUILLkvn", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2009/01/ttg2009010004", "title": "Guest Editor's Introduction: Special Section on VRST", "doi": null, "abstractUrl": "/journal/tg/2009/01/ttg2009010004/13rRUwI5TQR", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2007/01/v0003", "title": "Guest Editors' Introduction: Special Section on ACM VRST 2005", "doi": null, "abstractUrl": "/journal/tg/2007/01/v0003/13rRUwI5TXs", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2015/08/07138667", "title": "Guest Editors’ Introduction: Special Section on the IEEE Pacific Visualization Symposium 2014", "doi": null, "abstractUrl": "/journal/tg/2015/08/07138667/13rRUwI5Ugf", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2013/05/ttg2013050721", "title": "Guest Editors' Introduction: Special Section on the ACM SIGGRAPH Symposium on Interactive 3D Graphics and Games (I3D 2012)", "doi": null, "abstractUrl": "/journal/tg/2013/05/ttg2013050721/13rRUxBa5rV", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2014/08/06847259", "title": "Guest Editors&#x0027; Introduction: Special Section on the IEEE Pacific Visualization Symposium", "doi": null, "abstractUrl": 
"/journal/tg/2014/08/06847259/13rRUxD9gXJ", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2017/03/07835794", "title": "Guest Editors Introduction: Special Section on the ACM Symposium on Virtual Reality Software and Technology 2015", "doi": null, "abstractUrl": "/journal/tg/2017/03/07835794/13rRUxYIMV4", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2014/10/06881790", "title": "Guest Editors' Introduction: Special Section on the ACM SIGGRAPH/Eurographics Symposium on Computer Animation (SCA)", "doi": null, "abstractUrl": "/journal/tg/2014/10/06881790/13rRUy0HYRq", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2006/02/v0129", "title": "Guest Editors' Introduction: Special Section on ACM VRST", "doi": null, "abstractUrl": "/journal/tg/2006/02/v0129/13rRUy3gn7n", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": null, "next": { "fno": "ttg2011070875", "articleId": "13rRUyYSWsP", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNBVrjqW", "title": "March/April", "year": "2006", "issueNum": "02", "idPrefix": "tg", "pubType": "journal", "volume": "12", "label": "March/April", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUy3gn7n", "doi": "10.1109/TVCG.2006.31", "abstract": null, "abstracts": [ { "abstractType": "Regular", "content": "", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": null, "title": "Guest Editors' Introduction: Special Section on ACM VRST", "normalizedTitle": "Guest Editors' Introduction: Special Section on ACM VRST", "fno": "v0129", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Layout", "Virtual Reality", "Animation", "Liquid Crystal Displays", "Graphics", "Hair", "Electrostatics", "Deformable Models", "Lattices", "Context Modeling" ], "authors": [ { "givenName": "Rynson W.H.", "surname": "Lau", "fullName": "Rynson W.H. Lau", "affiliation": "Department of Computer Science, City University of Hong Kong, Tat Chee Avenue, Kowloon, Hong Kong", "__typename": "ArticleAuthorType" }, { "givenName": "Hans-Pete", "surname": "Seidel", "fullName": "Hans-Pete Seidel", "affiliation": "Max-Planck-Institut Informatik, Stuhlsatzenhausweg 85, 66123 Saarbruecken, Germany", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": false, "showRecommendedArticles": true, "isOpenAccess": true, "issueNum": "02", "pubDate": "2006-03-01 00:00:00", "pubType": "trans", "pages": "129-130", "year": "2006", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "trans/tg/2012/03/ttg2012030354", "title": "Guest Editors' Introduction: Special Section on ACM VRST", "doi": null, "abstractUrl": "/journal/tg/2012/03/ttg2012030354/13rRUILLkvn", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tc/2012/08/ttc2012081057", "title": "Guest Editors' Introduction: Special Section on Computer Arithmetic", "doi": null, "abstractUrl": "/journal/tc/2012/08/ttc2012081057/13rRUNvyajT", "parentPublication": { "id": "trans/tc", "title": "IEEE Transactions on Computers", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2009/01/ttg2009010004", "title": "Guest Editor's Introduction: Special Section on VRST", "doi": null, "abstractUrl": "/journal/tg/2009/01/ttg2009010004/13rRUwI5TQR", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2007/01/v0003", "title": "Guest Editors' Introduction: Special Section on ACM VRST 2005", "doi": null, "abstractUrl": "/journal/tg/2007/01/v0003/13rRUwI5TXs", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2007/03/v0420", "title": "Guest Editors' Introduction: Special Section on Virtual Reality", "doi": null, "abstractUrl": "/journal/tg/2007/03/v0420/13rRUwjoNwU", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tc/2009/02/ttc2009020145", "title": "Guest Editors' Introduction: Special Section on Computer Arithmetic", "doi": null, "abstractUrl": "/journal/tc/2009/02/ttc2009020145/13rRUxBa5bd", "parentPublication": { "id": "trans/tc", "title": "IEEE Transactions on Computers", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2011/07/ttg2011070873", "title": "Guest Editors' Introduction: Special Section on the ACM 
Symposium on Virtual Reality and Software Technology (VRST 2009)", "doi": null, "abstractUrl": "/journal/tg/2011/07/ttg2011070873/13rRUxYrbUC", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2014/10/06881790", "title": "Guest Editors' Introduction: Special Section on the ACM SIGGRAPH/Eurographics Symposium on Computer Animation (SCA)", "doi": null, "abstractUrl": "/journal/tg/2014/10/06881790/13rRUy0HYRq", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tc/2019/07/08733176", "title": "Guest Editors Introduction: Special Section on Computer Arithmetic", "doi": null, "abstractUrl": "/journal/tc/2019/07/08733176/1aFvsiBlmrS", "parentPublication": { "id": "trans/tc", "title": "IEEE Transactions on Computers", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "01580446", "articleId": "13rRUEgarnD", "__typename": "AdjacentArticleType" }, "next": { "fno": "v0131", "articleId": "13rRUxZzAhw", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNwGqBqi", "title": "April-June", "year": "2011", "issueNum": "02", "idPrefix": "pc", "pubType": "magazine", "volume": "10", "label": "April-June", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUytnsU6", "doi": "10.1109/MPRV.2011.30", "abstract": "The retail experience is undergoing significant changes due to a confluence of pervasive computing technologies, such as affordable smartphones with a plethora of retail applications, social media, sensing and analytics, and wireless technologies. This issue contains three articles that examine some of the opportunities and challenges in the pervasive retail space.", "abstracts": [ { "abstractType": "Regular", "content": "The retail experience is undergoing significant changes due to a confluence of pervasive computing technologies, such as affordable smartphones with a plethora of retail applications, social media, sensing and analytics, and wireless technologies. This issue contains three articles that examine some of the opportunities and challenges in the pervasive retail space.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "The retail experience is undergoing significant changes due to a confluence of pervasive computing technologies, such as affordable smartphones with a plethora of retail applications, social media, sensing and analytics, and wireless technologies. 
This issue contains three articles that examine some of the opportunities and challenges in the pervasive retail space.", "title": "Guest Editors' Introduction", "normalizedTitle": "Guest Editors' Introduction", "fno": "mpc2011020016", "hasPdf": true, "idPrefix": "pc", "keywords": [ "Pervasive Retail", "Mobile Computing", "Smartphones", "Sensor Networks", "Usability", "Privacy", "Security" ], "authors": [ { "givenName": "Chandra", "surname": "Narayanaswami", "fullName": "Chandra Narayanaswami", "affiliation": null, "__typename": "ArticleAuthorType" }, { "givenName": "Antonio", "surname": "Krüger", "fullName": "Antonio Krüger", "affiliation": null, "__typename": "ArticleAuthorType" }, { "givenName": "Natalia", "surname": "Marmasse", "fullName": "Natalia Marmasse", "affiliation": null, "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": false, "showRecommendedArticles": true, "isOpenAccess": true, "issueNum": "02", "pubDate": "2011-04-01 00:00:00", "pubType": "mags", "pages": "16-18", "year": "2011", "issn": "1536-1268", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "mags/pc/2007/01/b1017", "title": "Guest Editors' Introduction: Pervasive Computing in Healthcare", "doi": null, "abstractUrl": "/magazine/pc/2007/01/b1017/13rRUEgaryo", "parentPublication": { "id": "mags/pc", "title": "IEEE Pervasive Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/pc/2016/03/mpc2016030011", "title": "Pervasive Displays [Guest editors' introduction]", "doi": null, "abstractUrl": "/magazine/pc/2016/03/mpc2016030011/13rRUILtJiM", "parentPublication": { "id": "mags/pc", "title": "IEEE Pervasive Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/pc/2015/04/mpc2015040022", "title": "Pervasive Food [Guest editors' introduction]", "doi": null, "abstractUrl": 
"/magazine/pc/2015/04/mpc2015040022/13rRUNvgz7h", "parentPublication": { "id": "mags/pc", "title": "IEEE Pervasive Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/pc/2007/04/mpc2007040015", "title": "Guest Editors' Introduction: Security & Privacy", "doi": null, "abstractUrl": "/magazine/pc/2007/04/mpc2007040015/13rRUxAStXZ", "parentPublication": { "id": "mags/pc", "title": "IEEE Pervasive Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/pc/2006/04/b4018", "title": "Guest Editors' Introduction: Intelligent Transportation and Pervasive Computing", "doi": null, "abstractUrl": "/magazine/pc/2006/04/b4018/13rRUy0qnDn", "parentPublication": { "id": "mags/pc", "title": "IEEE Pervasive Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/mu/2009/01/mmu2009010014", "title": "Guest Editors' Introduction: Intelligent and Pervasive Multimedia Systems", "doi": null, "abstractUrl": "/magazine/mu/2009/01/mmu2009010014/13rRUyXKxO1", "parentPublication": { "id": "mags/mu", "title": "IEEE MultiMedia", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/pc/2003/02/b2032", "title": "Developing Consumer-Friendly Pervasive Retail Systems", "doi": null, "abstractUrl": "/magazine/pc/2003/02/b2032/13rRUyY28VE", "parentPublication": { "id": "mags/pc", "title": "IEEE Pervasive Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/pc/2011/02/mpc2011020044", "title": "Enhancing the Shopping Experience", "doi": null, "abstractUrl": "/magazine/pc/2011/02/mpc2011020044/13rRUyY28VG", "parentPublication": { "id": "mags/pc", "title": "IEEE Pervasive Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "mpc2011020012", "articleId": "13rRUxBJhDf", "__typename": 
"AdjacentArticleType" }, "next": { "fno": "mpc2011020019", "articleId": "13rRUwInvGo", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNC3Xhdt", "title": "July", "year": "2015", "issueNum": "07", "idPrefix": "tg", "pubType": "journal", "volume": "21", "label": "July", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUytWF9m", "doi": "10.1109/TVCG.2015.2403328", "abstract": "Human motion capture (mocap) is a widely used technique for digitalizing human movements. With growing usage, compressing mocap data has received increasing attention, since compact data size enables efficient storage and transmission. Our analysis shows that mocap data have some unique characteristics that distinguish themselves from images and videos. Therefore, directly borrowing image or video compression techniques, such as discrete cosine transform, does not work well. In this paper, we propose a novel mocap-tailored transform coding algorithm that takes advantage of these features. Our algorithm segments the input mocap sequences into clips, which are represented in 2D matrices. Then it computes a set of data-dependent orthogonal bases to transform the matrices to frequency domain, in which the transform coefficients have significantly less dependency. Finally, the compression is obtained by entropy coding of the quantized coefficients and the bases. Our method has low computational cost and can be easily extended to compress mocap databases. It also requires neither training nor complicated parameter setting. Experimental results demonstrate that the proposed scheme significantly outperforms state-of-the-art algorithms in terms of compression performance and speed.", "abstracts": [ { "abstractType": "Regular", "content": "Human motion capture (mocap) is a widely used technique for digitalizing human movements. With growing usage, compressing mocap data has received increasing attention, since compact data size enables efficient storage and transmission. 
Our analysis shows that mocap data have some unique characteristics that distinguish themselves from images and videos. Therefore, directly borrowing image or video compression techniques, such as discrete cosine transform, does not work well. In this paper, we propose a novel mocap-tailored transform coding algorithm that takes advantage of these features. Our algorithm segments the input mocap sequences into clips, which are represented in 2D matrices. Then it computes a set of data-dependent orthogonal bases to transform the matrices to frequency domain, in which the transform coefficients have significantly less dependency. Finally, the compression is obtained by entropy coding of the quantized coefficients and the bases. Our method has low computational cost and can be easily extended to compress mocap databases. It also requires neither training nor complicated parameter setting. Experimental results demonstrate that the proposed scheme significantly outperforms state-of-the-art algorithms in terms of compression performance and speed.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Human motion capture (mocap) is a widely used technique for digitalizing human movements. With growing usage, compressing mocap data has received increasing attention, since compact data size enables efficient storage and transmission. Our analysis shows that mocap data have some unique characteristics that distinguish themselves from images and videos. Therefore, directly borrowing image or video compression techniques, such as discrete cosine transform, does not work well. In this paper, we propose a novel mocap-tailored transform coding algorithm that takes advantage of these features. Our algorithm segments the input mocap sequences into clips, which are represented in 2D matrices. Then it computes a set of data-dependent orthogonal bases to transform the matrices to frequency domain, in which the transform coefficients have significantly less dependency. 
Finally, the compression is obtained by entropy coding of the quantized coefficients and the bases. Our method has low computational cost and can be easily extended to compress mocap databases. It also requires neither training nor complicated parameter setting. Experimental results demonstrate that the proposed scheme significantly outperforms state-of-the-art algorithms in terms of compression performance and speed.", "title": "Human Motion Capture Data Tailored Transform Coding", "normalizedTitle": "Human Motion Capture Data Tailored Transform Coding", "fno": "07042272", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Discrete Cosine Transforms", "Transform Coding", "Trajectory", "Correlation", "Videos", "Silicon", "Optimization", "Motion Capture", "Transform Coding", "Data Compression", "Optimization", "Motion Capture", "Transform Coding", "Data Compression" ], "authors": [ { "givenName": "Junhui", "surname": "Hou", "fullName": "Junhui Hou", "affiliation": "School of Electrical and Electronics Engineering, Nanyang Technological University, Singapore", "__typename": "ArticleAuthorType" }, { "givenName": "Lap-Pui", "surname": "Chau", "fullName": "Lap-Pui Chau", "affiliation": "School of Electrical and Electronics Engineering, Nanyang Technological University, Singapore", "__typename": "ArticleAuthorType" }, { "givenName": "Nadia", "surname": "Magnenat-Thalmann", "fullName": "Nadia Magnenat-Thalmann", "affiliation": "Institute for Media Innovation, Nanyang Technological University, Singapore", "__typename": "ArticleAuthorType" }, { "givenName": "Ying", "surname": "He", "fullName": "Ying He", "affiliation": "School of Computer Engineering, Nanyang Technological University, Singapore", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "07", "pubDate": "2015-07-01 00:00:00", "pubType": "trans", "pages": "848-859", "year": "2015", "issn": "1077-2626", "isbn": null, 
"notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/icme/2011/348/0/06011934", "title": "Hybrid low-delay compression of motion capture data", "doi": null, "abstractUrl": "/proceedings-article/icme/2011/06011934/12OmNqFrGC9", "parentPublication": { "id": "proceedings/icme/2011/348/0", "title": "2011 IEEE International Conference on Multimedia and Expo", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmew/2014/4717/0/06890691", "title": "Transform coding in AVS2", "doi": null, "abstractUrl": "/proceedings-article/icmew/2014/06890691/12OmNvA1hAB", "parentPublication": { "id": "proceedings/icmew/2014/4717/0", "title": "2014 IEEE International Conference on Multimedia and Expo Workshops (ICMEW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/acssc/1994/6405/2/00471599", "title": "Lapped multiple bases realizations for the transform coding of still images", "doi": null, "abstractUrl": "/proceedings-article/acssc/1994/00471599/12OmNxdm4Ke", "parentPublication": { "id": "proceedings/acssc/1994/6405/1", "title": "Proceedings of 1994 28th Asilomar Conference on Signals, Systems and Computers", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/etcs/2009/3557/3/3557e739", "title": "The Improved Method of FGS and Simulation Based on DCT and Wavelet Transform", "doi": null, "abstractUrl": "/proceedings-article/etcs/2009/3557e739/12OmNxzuMKx", "parentPublication": { "id": "proceedings/etcs/2009/3557/3", "title": "Education Technology and Computer Science, International Workshop on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2014/4761/0/06890222", "title": "Restoring corrupted motion capture data via jointly low-rank matrix completion", "doi": null, "abstractUrl": 
"/proceedings-article/icme/2014/06890222/12OmNy3iFw0", "parentPublication": { "id": "proceedings/icme/2014/4761/0", "title": "2014 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/paccs/2009/3614/0/3614a449", "title": "Generalized Discrete Cosine Transform", "doi": null, "abstractUrl": "/proceedings-article/paccs/2009/3614a449/12OmNyUWRa0", "parentPublication": { "id": "proceedings/paccs/2009/3614/0", "title": "2009 Pacific-Asia Conference on Circuits, Communications and Systems (PACCS 2009)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/acssc/1993/4120/0/00342322", "title": "Three-dimensional transform coding of multispectral data", "doi": null, "abstractUrl": "/proceedings-article/acssc/1993/00342322/12OmNykCccn", "parentPublication": { "id": "proceedings/acssc/1993/4120/0", "title": "Proceedings of 27th Asilomar Conference on Signals, Systems and Computers", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icip/1994/6952/2/00413593", "title": "Adaptively subsampled image coding with warped polynomials", "doi": null, "abstractUrl": "/proceedings-article/icip/1994/00413593/12OmNz61duH", "parentPublication": { "id": "proceedings/icip/1994/6952/2", "title": "Proceedings of 1st International Conference on Image Processing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/visual/1994/6627/0/00346332", "title": "Progressive transmission of scientific data using biorthogonal wavelet transform", "doi": null, "abstractUrl": "/proceedings-article/visual/1994/00346332/12OmNzVXNQ0", "parentPublication": { "id": "proceedings/visual/1994/6627/0", "title": "Proceedings Visualization '94", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2007/01/v0005", "title": 
"Human Motion Capture Data Compression by Model-Based Indexing: A Power Aware Approach", "doi": null, "abstractUrl": "/journal/tg/2007/01/v0005/13rRUB7a1fI", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "07027857", "articleId": "13rRUwcS1CX", "__typename": "AdjacentArticleType" }, "next": { "fno": "07018970", "articleId": "13rRUILLkvu", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNrkBwz4", "title": "July/August", "year": "2006", "issueNum": "04", "idPrefix": "cg", "pubType": "magazine", "volume": "26", "label": "July/August", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUxZ0o3V", "doi": "10.1109/MCG.2006.92", "abstract": "This article discusses the development of an immersive table tennis simulation. After describing the hardware necessities of the system, the authors delve into different aspects of the simulation. These include collision detection, physical simulation, and some aspects of the game design. Since table tennis a fast sports, the synchronization of the human player?s movements and the visual output on the projection wall is a challenging problem. The authors analyzed the latencies of all subcomponents of the system and designed a prediction method that allows for high speed interaction with the application.", "abstracts": [ { "abstractType": "Regular", "content": "This article discusses the development of an immersive table tennis simulation. After describing the hardware necessities of the system, the authors delve into different aspects of the simulation. These include collision detection, physical simulation, and some aspects of the game design. Since table tennis a fast sports, the synchronization of the human player?s movements and the visual output on the projection wall is a challenging problem. The authors analyzed the latencies of all subcomponents of the system and designed a prediction method that allows for high speed interaction with the application.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "This article discusses the development of an immersive table tennis simulation. After describing the hardware necessities of the system, the authors delve into different aspects of the simulation. 
These include collision detection, physical simulation, and some aspects of the game design. Since table tennis a fast sports, the synchronization of the human player's movements and the visual output on the projection wall is a challenging problem. The authors analyzed the latencies of all subcomponents of the system and designed a prediction method that allows for high speed interaction with the application.", "title": "V-Pong: An Immersive Table Tennis Simulation", "normalizedTitle": "V-Pong: An Immersive Table Tennis Simulation", "fno": "mcg2006040010", "hasPdf": true, "idPrefix": "cg", "keywords": [ "Target Tracking", "Cameras", "Layout", "Multicast Protocols", "Rendering Computer Graphics", "Feedback", "Physics", "Tiled Displays", "Real Time Interaction", "Table Tennis Simulation", "Prediction", "Tracking" ], "authors": [ { "givenName": "Guido", "surname": "Brunnett", "fullName": "Guido Brunnett", "affiliation": "Chemnitz University of Technology, Germany", "__typename": "ArticleAuthorType" }, { "givenName": "Stephan", "surname": "Rusdorf", "fullName": "Stephan Rusdorf", "affiliation": "Chemnitz University of Technology, Germany", "__typename": "ArticleAuthorType" }, { "givenName": "Mario", "surname": "Lorenz", "fullName": "Mario Lorenz", "affiliation": "Chemnitz University of Technology, Germany", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "04", "pubDate": "2006-07-01 00:00:00", "pubType": "mags", "pages": "10-13", "year": "2006", "issn": "0272-1716", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/cvprw/2013/4990/0/4990b019", "title": "Reconstruction of 3D Trajectories for Performance Analysis in Table Tennis", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2013/4990b019/12OmNAgoV7I", "parentPublication": { "id": "proceedings/cvprw/2013/4990/0", "title": "2013 IEEE 
Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icig/2009/3883/0/3883a652", "title": "Tennis Space: An Interactive and Immersive Environment for Tennis Simulation", "doi": null, "abstractUrl": "/proceedings-article/icig/2009/3883a652/12OmNwtEEyl", "parentPublication": { "id": "proceedings/icig/2009/3883/0", "title": "Image and Graphics, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icisce/2015/6850/0/6850a712", "title": "Discussion on Psychological Perception Representation and Knowledge Acquisition of Table Tennis Players", "doi": null, "abstractUrl": "/proceedings-article/icisce/2015/6850a712/12OmNyL0Tq5", "parentPublication": { "id": "proceedings/icisce/2015/6850/0", "title": "2015 2nd International Conference on Information Science and Control Engineering (ICISCE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2007/01/v0015", "title": "Real-Time Interaction with a Humanoid Avatar in an Immersive Table Tennis Simulation", "doi": null, "abstractUrl": "/journal/tg/2007/01/v0015/13rRUxOdD2y", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2018/01/08017600", "title": "iTTVis: Interactive Visualization of Table Tennis Data", "doi": null, "abstractUrl": "/journal/tg/2018/01/08017600/13rRUyY28YD", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icise-ie/2021/3829/0/382900b210", "title": "Application of Micro-lecture in Table Tennis Teaching for Children", "doi": null, "abstractUrl": 
"/proceedings-article/icise-ie/2021/382900b210/1C8GamvUKGI", "parentPublication": { "id": "proceedings/icise-ie/2021/3829/0", "title": "2021 2nd International Conference on Information Science and Education (ICISE-IE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2022/8402/0/840200a804", "title": "Assist Home Training Table Tennis Skill Acquisition via Immersive Learning and Web Technologies", "doi": null, "abstractUrl": "/proceedings-article/vrw/2022/840200a804/1CJd0JOwO9a", "parentPublication": { "id": "proceedings/vrw/2022/8402/0", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmtma/2021/3892/0/389200a632", "title": "Tactical Decision System of Table Tennis Match based on C4.5 Decision Tree", "doi": null, "abstractUrl": "/proceedings-article/icmtma/2021/389200a632/1t2nmIZ5RBe", "parentPublication": { "id": "proceedings/icmtma/2021/3892/0", "title": "2021 13th International Conference on Measuring Technology and Mechatronics Automation (ICMTMA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/12/09446582", "title": "Performance Improvement and Skill Transfer in Table Tennis Through Training in Virtual Reality", "doi": null, "abstractUrl": "/journal/tg/2022/12/09446582/1u8lz4qWghi", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccsmt/2020/8668/0/866800a149", "title": "Small Object Detection of Table Tennis Based on Deep Learning Network", "doi": null, "abstractUrl": "/proceedings-article/iccsmt/2020/866800a149/1u8pwyEampG", "parentPublication": { "id": "proceedings/iccsmt/2020/8668/0", "title": "2020 International Conference on 
Computer Science and Management Technology (ICCSMT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "mcg2006040006", "articleId": "13rRUxBa5zM", "__typename": "AdjacentArticleType" }, "next": { "fno": "mcg2006040014", "articleId": "13rRUxOvecc", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "1HMOit1lSk8", "title": "Dec.", "year": "2022", "issueNum": "12", "idPrefix": "tg", "pubType": "journal", "volume": "28", "label": "Dec.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1u8lz4qWghi", "doi": "10.1109/TVCG.2021.3086403", "abstract": "Sports professionals have been increasingly using Virtual Reality (VR) for training and assessment of skill-based sports. Yet fundamental questions about the virtue of VR training for skill-based sports remain unanswered: Can the complex motor skills in these sports be learned in VR? If so, do these skills transfer to the real world? We have developed a VR table tennis system that incorporates customized physics with realistic audio-visual stimuli, haptics, and motion capture to enhance VR immersion and collect information about the player&#x2019;s posture and technique. We have assessed skill acquisition and training transfer by comparing real table tennis performance between a control group (n=7) that received no training and an experimental group (n=8) trained for five sessions in VR. Results show a significant improvement in technique but no significant changes in the number of the returned balls in the experimental group in the real-life retention session. However, no significant differences are found in the control group. Our findings support the notion that complex skills can be learned in VR and that obtained skills can transfer to the real world. This work offers an inexpensive VR table tennis training platform, enabling effective training via real-time motor and ball returning technique feedback.", "abstracts": [ { "abstractType": "Regular", "content": "Sports professionals have been increasingly using Virtual Reality (VR) for training and assessment of skill-based sports. 
Yet fundamental questions about the virtue of VR training for skill-based sports remain unanswered: Can the complex motor skills in these sports be learned in VR? If so, do these skills transfer to the real world? We have developed a VR table tennis system that incorporates customized physics with realistic audio-visual stimuli, haptics, and motion capture to enhance VR immersion and collect information about the player&#x2019;s posture and technique. We have assessed skill acquisition and training transfer by comparing real table tennis performance between a control group (n=7) that received no training and an experimental group (n=8) trained for five sessions in VR. Results show a significant improvement in technique but no significant changes in the number of the returned balls in the experimental group in the real-life retention session. However, no significant differences are found in the control group. Our findings support the notion that complex skills can be learned in VR and that obtained skills can transfer to the real world. This work offers an inexpensive VR table tennis training platform, enabling effective training via real-time motor and ball returning technique feedback.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Sports professionals have been increasingly using Virtual Reality (VR) for training and assessment of skill-based sports. Yet fundamental questions about the virtue of VR training for skill-based sports remain unanswered: Can the complex motor skills in these sports be learned in VR? If so, do these skills transfer to the real world? We have developed a VR table tennis system that incorporates customized physics with realistic audio-visual stimuli, haptics, and motion capture to enhance VR immersion and collect information about the player’s posture and technique. 
We have assessed skill acquisition and training transfer by comparing real table tennis performance between a control group (n=7) that received no training and an experimental group (n=8) trained for five sessions in VR. Results show a significant improvement in technique but no significant changes in the number of the returned balls in the experimental group in the real-life retention session. However, no significant differences are found in the control group. Our findings support the notion that complex skills can be learned in VR and that obtained skills can transfer to the real world. This work offers an inexpensive VR table tennis training platform, enabling effective training via real-time motor and ball returning technique feedback.", "title": "Performance Improvement and Skill Transfer in Table Tennis Through Training in Virtual Reality", "normalizedTitle": "Performance Improvement and Skill Transfer in Table Tennis Through Training in Virtual Reality", "fno": "09446582", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Computer Games", "Human Computer Interaction", "Sport", "Virtual Reality", "Complex Motor Skills", "Complex Skills", "Control Group", "Effective Training", "Inexpensive VR Table Tennis Training Platform", "Obtained Skills", "Performance Improvement", "Skill Acquisition", "Skill Transfer", "Skill Based Sports", "Skills Transfer", "Sports Professionals", "Table Tennis Performance", "Virtual Reality", "VR Immersion", "VR Table Tennis System", "VR Training", "Sports", "Virtual Reality", "Physics", "Videos", "Games", "Visualization", "Real Time Systems", "Performance Evaluation", "Virtual Reality", "Training", "Motor Learning", "Performance Improvement", "Skill Transfer" ], "authors": [ { "givenName": "Hawkar", "surname": "Oagaz", "fullName": "Hawkar Oagaz", "affiliation": "Department of Computer Science and Engineering, University of Colorado Denver, Denver, CO, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Breawn", "surname": 
"Schoun", "fullName": "Breawn Schoun", "affiliation": "Department of Computer Science and Engineering, University of Colorado Denver, Denver, CO, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Min-Hyung", "surname": "Choi", "fullName": "Min-Hyung Choi", "affiliation": "Department of Computer Science and Engineering, University of Colorado Denver, Denver, CO, USA", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "12", "pubDate": "2022-12-01 00:00:00", "pubType": "trans", "pages": "4332-4343", "year": "2022", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/ismse/2004/2217/0/22170186", "title": "Detecting Tactics Patterns for Archiving Tennis Video Clips", "doi": null, "abstractUrl": "/proceedings-article/ismse/2004/22170186/12OmNBtCCBd", "parentPublication": { "id": "proceedings/ismse/2004/2217/0", "title": "Multimedia Software Engineering, International Symposium on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icisce/2015/6850/0/6850a712", "title": "Discussion on Psychological Perception Representation and Knowledge Acquisition of Table Tennis Players", "doi": null, "abstractUrl": "/proceedings-article/icisce/2015/6850a712/12OmNyL0Tq5", "parentPublication": { "id": "proceedings/icisce/2015/6850/0", "title": "2015 2nd International Conference on Information Science and Control Engineering (ICISCE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2006/04/mcg2006040010", "title": "V-Pong: An Immersive Table Tennis Simulation", "doi": null, "abstractUrl": "/magazine/cg/2006/04/mcg2006040010/13rRUxZ0o3V", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "trans/tg/2018/01/08017600", "title": "iTTVis: Interactive Visualization of Table Tennis Data", "doi": null, "abstractUrl": "/journal/tg/2018/01/08017600/13rRUyY28YD", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2022/8402/0/840200a804", "title": "Assist Home Training Table Tennis Skill Acquisition via Immersive Learning and Web Technologies", "doi": null, "abstractUrl": "/proceedings-article/vrw/2022/840200a804/1CJd0JOwO9a", "parentPublication": { "id": "proceedings/vrw/2022/8402/0", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aivr/2022/5725/0/572500a275", "title": "Table Tennis Skill Learning in VR with Step by Step Guides using Forehand Drive as a Case Study", "doi": null, "abstractUrl": "/proceedings-article/aivr/2022/572500a275/1KmFgOCBg1a", "parentPublication": { "id": "proceedings/aivr/2022/5725/0", "title": "2022 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/01/08807264", "title": "Tac-Simur: Tactic-based Simulative Visual Analytics of Table Tennis", "doi": null, "abstractUrl": "/journal/tg/2020/01/08807264/1cG6vo24hRC", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icnisc/2018/6956/0/695600a325", "title": "Experimental Research on Applying Imagery Training Method in Teenager Table Tennis Training Classes", "doi": null, "abstractUrl": "/proceedings-article/icnisc/2018/695600a325/1dUo2B83mqk", 
"parentPublication": { "id": "proceedings/icnisc/2018/6956/0", "title": "2018 4th Annual International Conference on Network and Information Systems for Computers (ICNISC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icaie/2020/6659/0/665900a406", "title": "Research on the Evaluation Index System of Table Tennis Classroom Teaching based on Core Literacy", "doi": null, "abstractUrl": "/proceedings-article/icaie/2020/665900a406/1oZBL0WTJK0", "parentPublication": { "id": "proceedings/icaie/2020/6659/0", "title": "2020 International Conference on Artificial Intelligence and Education (ICAIE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/05/09382892", "title": "SPinPong - Virtual Reality Table Tennis Skill Acquisition using Visual, Haptic and Temporal Cues", "doi": null, "abstractUrl": "/journal/tg/2021/05/09382892/1saZrRoiA3C", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "09445817", "articleId": "1u8lzby2Upy", "__typename": "AdjacentArticleType" }, "next": { "fno": "09447222", "articleId": "1ua0ob5v97O", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNzvhvFS", "title": "May", "year": "2013", "issueNum": "05", "idPrefix": "tg", "pubType": "journal", "volume": "19", "label": "May", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUwh80uy", "doi": "10.1109/TVCG.2012.163", "abstract": "Spatial judgments are important for many real-world tasks in engineering and scientific visualization. While existing research provides evidence that higher levels of display and interaction fidelity in virtual reality systems offer advantages for spatial understanding, few investigations have focused on small-scale spatial judgments or employed experimental tasks similar to those used in real-world applications. After an earlier study that considered a broad analysis of various spatial understanding tasks, we present the results of a follow-up study focusing on small-scale spatial judgments. In this research, we independently controlled field of regard, stereoscopy, and head-tracked rendering to study their effects on the performance of a task involving precise spatial inspections of complex 3D structures. Measuring time and errors, we asked participants to distinguish between structural gaps and intersections between components of 3D models designed to be similar to real underground cave systems. The overall results suggest that the addition of the higher fidelity system features support performance improvements in making small-scale spatial judgments. Through analyses of the effects of individual system components, the experiment shows that participants made significantly fewer errors with either an increased field of regard or with the addition of head-tracked rendering. 
The results also indicate that participants performed significantly faster when the system provided the combination of stereo and head-tracked rendering.", "abstracts": [ { "abstractType": "Regular", "content": "Spatial judgments are important for many real-world tasks in engineering and scientific visualization. While existing research provides evidence that higher levels of display and interaction fidelity in virtual reality systems offer advantages for spatial understanding, few investigations have focused on small-scale spatial judgments or employed experimental tasks similar to those used in real-world applications. After an earlier study that considered a broad analysis of various spatial understanding tasks, we present the results of a follow-up study focusing on small-scale spatial judgments. In this research, we independently controlled field of regard, stereoscopy, and head-tracked rendering to study their effects on the performance of a task involving precise spatial inspections of complex 3D structures. Measuring time and errors, we asked participants to distinguish between structural gaps and intersections between components of 3D models designed to be similar to real underground cave systems. The overall results suggest that the addition of the higher fidelity system features support performance improvements in making small-scale spatial judgments. Through analyses of the effects of individual system components, the experiment shows that participants made significantly fewer errors with either an increased field of regard or with the addition of head-tracked rendering. The results also indicate that participants performed significantly faster when the system provided the combination of stereo and head-tracked rendering.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Spatial judgments are important for many real-world tasks in engineering and scientific visualization. 
While existing research provides evidence that higher levels of display and interaction fidelity in virtual reality systems offer advantages for spatial understanding, few investigations have focused on small-scale spatial judgments or employed experimental tasks similar to those used in real-world applications. After an earlier study that considered a broad analysis of various spatial understanding tasks, we present the results of a follow-up study focusing on small-scale spatial judgments. In this research, we independently controlled field of regard, stereoscopy, and head-tracked rendering to study their effects on the performance of a task involving precise spatial inspections of complex 3D structures. Measuring time and errors, we asked participants to distinguish between structural gaps and intersections between components of 3D models designed to be similar to real underground cave systems. The overall results suggest that the addition of the higher fidelity system features support performance improvements in making small-scale spatial judgments. Through analyses of the effects of individual system components, the experiment shows that participants made significantly fewer errors with either an increased field of regard or with the addition of head-tracked rendering. 
The results also indicate that participants performed significantly faster when the system provided the combination of stereo and head-tracked rendering.", "title": "Studying the Effects of Stereo, Head Tracking, and Field of Regard on a Small-Scale Spatial Judgment Task", "normalizedTitle": "Studying the Effects of Stereo, Head Tracking, and Field of Regard on a Small-Scale Spatial Judgment Task", "fno": "ttg2013050886", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Visualization", "Electron Tubes", "Navigation", "Data Visualization", "Head", "Tracking", "Rendering Computer Graphics", "Graphical User Interfaces", "Artificial", "Augmented", "And Virtual Realities" ], "authors": [ { "givenName": "E. D.", "surname": "Ragan", "fullName": "E. D. Ragan", "affiliation": "Dept. of Comput. Sci., Virginia Tech, Blacksburg, VA, USA", "__typename": "ArticleAuthorType" }, { "givenName": "R.", "surname": "Kopper", "fullName": "R. Kopper", "affiliation": "Dept. of Comput. & Inf. Sci. & Eng., Univ. of Florida, Gainesville, FL, USA", "__typename": "ArticleAuthorType" }, { "givenName": "P.", "surname": "Schuchardt", "fullName": "P. Schuchardt", "affiliation": "Cavewhere, Blacksburg, VA, USA", "__typename": "ArticleAuthorType" }, { "givenName": "D. A.", "surname": "Bowman", "fullName": "D. A. Bowman", "affiliation": "Dept. of Comput. 
Sci., Virginia Tech, Blacksburg, VA, USA", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "05", "pubDate": "2013-05-01 00:00:00", "pubType": "trans", "pages": "886-896", "year": "2013", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/3dui/2012/1204/0/06184187", "title": "Democratizing rendering for multiple viewers in surround VR systems", "doi": null, "abstractUrl": "/proceedings-article/3dui/2012/06184187/12OmNBubOX9", "parentPublication": { "id": "proceedings/3dui/2012/1204/0", "title": "2012 IEEE Symposium on 3D User Interfaces (3DUI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2009/3943/0/04810998", "title": "Image Blending and View Clustering for Multi-Viewer Immersive Projection Environments", "doi": null, "abstractUrl": "/proceedings-article/vr/2009/04810998/12OmNCfSqFi", "parentPublication": { "id": "proceedings/vr/2009/3943/0", "title": "2009 IEEE Virtual Reality Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2016/7258/0/07552858", "title": "Depth augmented stereo panorama for cinematic virtual reality with head-motion parallax", "doi": null, "abstractUrl": "/proceedings-article/icme/2016/07552858/12OmNs0TKW6", "parentPublication": { "id": "proceedings/icme/2016/7258/0", "title": "2016 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/crv/2012/4683/0/4683a125", "title": "Coarse Head Pose Estimation using Image Abstraction", "doi": null, "abstractUrl": "/proceedings-article/crv/2012/4683a125/12OmNwE9ORM", "parentPublication": { "id": "proceedings/crv/2012/4683/0", "title": "2012 Ninth Conference on Computer and Robot 
Vision", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2013/2869/0/06671800", "title": "Subtle cueing for visual search in head-tracked head worn displays", "doi": null, "abstractUrl": "/proceedings-article/ismar/2013/06671800/12OmNylbovt", "parentPublication": { "id": "proceedings/ismar/2013/2869/0", "title": "2013 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2016/04/07384528", "title": "Visual Quality Adjustment for Volume Rendering in a Head-Tracked Virtual Environment", "doi": null, "abstractUrl": "/journal/tg/2016/04/07384528/13rRUxBrGh4", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/05/08643434", "title": "SGaze: A Data-Driven Eye-Head Coordination Model for Realtime Gaze Prediction", "doi": null, "abstractUrl": "/journal/tg/2019/05/08643434/18K0lRIKi7m", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2022/8402/0/840200a640", "title": "Towards Eye-Perspective Rendering for Optical See-Through Head-Mounted Displays", "doi": null, "abstractUrl": "/proceedings-article/vrw/2022/840200a640/1CJewzlI3CM", "parentPublication": { "id": "proceedings/vrw/2022/8402/0", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600u0342", "title": "HeadNeRF: A Realtime NeRF-based Parametric Head Model", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600u0342/1H1hITKdHGg", 
"parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2020/6532/0/09090625", "title": "Automatic Calibration of Commercial Optical See-Through Head-Mounted Displays for Medical Applications", "doi": null, "abstractUrl": "/proceedings-article/vrw/2020/09090625/1jIxwp2g0VO", "parentPublication": { "id": "proceedings/vrw/2020/6532/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "ttg2013050866", "articleId": "13rRUxBa561", "__typename": "AdjacentArticleType" }, "next": null, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNqJZgIg", "title": "May/June", "year": "2006", "issueNum": "03", "idPrefix": "tg", "pubType": "journal", "volume": "12", "label": "May/June", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUxBa5x4", "doi": "10.1109/TVCG.2006.55", "abstract": "Abstract—Accuracy of memory performance per se is an imperfect reflection of the cognitive activity (awareness states) that underlies performance in memory tasks. The aim of this research is to investigate the effect of varied visual and interaction fidelity of immersive virtual environments on memory awareness states. A between groups experiment was carried out to explore the effect of rendering quality on location-based recognition memory for objects and associated states of awareness. The experimental space, consisting of two interconnected rooms, was rendered either flat-shaded or using radiosity rendering. The computer graphics simulations were displayed on a stereo head-tracked Head Mounted Display. Participants completed a recognition memory task after exposure to the experimental space and reported one of four states of awareness following object recognition. These reflected the level of visual mental imagery involved during retrieval, the familiarity of the recollection, and also included guesses. Experimental results revealed variations in the distribution of participants' awareness states across conditions while memory performance failed to reveal any. Interestingly, results revealed a higher proportion of recollections associated with mental imagery in the flat-shaded condition. 
These findings comply with similar effects revealed in two earlier studies summarized here, which demonstrated that the less \"naturalistic” interaction interface or interface of low interaction fidelity provoked a higher proportion of recognitions based on visual mental images.", "abstracts": [ { "abstractType": "Regular", "content": "Abstract—Accuracy of memory performance per se is an imperfect reflection of the cognitive activity (awareness states) that underlies performance in memory tasks. The aim of this research is to investigate the effect of varied visual and interaction fidelity of immersive virtual environments on memory awareness states. A between groups experiment was carried out to explore the effect of rendering quality on location-based recognition memory for objects and associated states of awareness. The experimental space, consisting of two interconnected rooms, was rendered either flat-shaded or using radiosity rendering. The computer graphics simulations were displayed on a stereo head-tracked Head Mounted Display. Participants completed a recognition memory task after exposure to the experimental space and reported one of four states of awareness following object recognition. These reflected the level of visual mental imagery involved during retrieval, the familiarity of the recollection, and also included guesses. Experimental results revealed variations in the distribution of participants' awareness states across conditions while memory performance failed to reveal any. Interestingly, results revealed a higher proportion of recollections associated with mental imagery in the flat-shaded condition. 
These findings comply with similar effects revealed in two earlier studies summarized here, which demonstrated that the less \"naturalistic” interaction interface or interface of low interaction fidelity provoked a higher proportion of recognitions based on visual mental images.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Abstract—Accuracy of memory performance per se is an imperfect reflection of the cognitive activity (awareness states) that underlies performance in memory tasks. The aim of this research is to investigate the effect of varied visual and interaction fidelity of immersive virtual environments on memory awareness states. A between groups experiment was carried out to explore the effect of rendering quality on location-based recognition memory for objects and associated states of awareness. The experimental space, consisting of two interconnected rooms, was rendered either flat-shaded or using radiosity rendering. The computer graphics simulations were displayed on a stereo head-tracked Head Mounted Display. Participants completed a recognition memory task after exposure to the experimental space and reported one of four states of awareness following object recognition. These reflected the level of visual mental imagery involved during retrieval, the familiarity of the recollection, and also included guesses. Experimental results revealed variations in the distribution of participants' awareness states across conditions while memory performance failed to reveal any. Interestingly, results revealed a higher proportion of recollections associated with mental imagery in the flat-shaded condition. 
These findings comply with similar effects revealed in two earlier studies summarized here, which demonstrated that the less \"naturalistic” interaction interface or interface of low interaction fidelity provoked a higher proportion of recognitions based on visual mental images.", "title": "The Effect of Visual and Interaction Fidelity on Spatial Cognition in Immersive Virtual Environments", "normalizedTitle": "The Effect of Visual and Interaction Fidelity on Spatial Cognition in Immersive Virtual Environments", "fno": "v0396", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Cognition", "Helmet Mounted Displays", "Human Factors", "Object Recognition", "Rendering Computer Graphics", "User Interfaces", "Virtual Reality", "Visual Fidelity", "Interaction Fidelity", "Spatial Cognition", "Immersive Virtual Environment", "Memory Awareness States", "Rendering Quality", "Computer Graphics Simulation", "Stereo Head Tracked Head Mounted Display", "Object Recognition", "Mental Imagery", "User Interface", "Cognition", "Virtual Environment", "Rendering Computer Graphics", "Reflection", "Computer Graphics", "Computational Modeling", "Computer Simulation", "Head", "Computer Displays", "Object Recognition", "Three Dimensional Graphics And Realism", "Virtual Reality", "Methodology And Techniques", "Interaction Techniques" ], "authors": [ { "givenName": "Katerina", "surname": "Mania", "fullName": "Katerina Mania", "affiliation": "Dept. of Informatics, Sussex Univ., Brighton, UK", "__typename": "ArticleAuthorType" }, { "givenName": "Dave", "surname": "Wooldridge", "fullName": "Dave Wooldridge", "affiliation": "Dept. 
of Informatics, Sussex Univ., Brighton, UK", "__typename": "ArticleAuthorType" }, { "givenName": "Matthew", "surname": "Coxon", "fullName": "Matthew Coxon", "affiliation": null, "__typename": "ArticleAuthorType" }, { "givenName": "Andrew", "surname": "Robinson", "fullName": "Andrew Robinson", "affiliation": null, "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "03", "pubDate": "2006-05-01 00:00:00", "pubType": "trans", "pages": "396-404", "year": "2006", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/centric/2010/4141/0/4141a052", "title": "User-Centered Interface Reconfiguration for Error Reduction in Human-Computer Interaction", "doi": null, "abstractUrl": "/proceedings-article/centric/2010/4141a052/12OmNCu4nci", "parentPublication": { "id": "proceedings/centric/2010/4141/0", "title": "Advances in Human-oriented and Personalized Mechanisms, Technologies, and Services, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2015/1727/0/07223346", "title": "An evaluation of virtual human appearance fidelity on user's positive and negative affect in human-virtual human interaction", "doi": null, "abstractUrl": "/proceedings-article/vr/2015/07223346/12OmNvjyxwr", "parentPublication": { "id": "proceedings/vr/2015/1727/0", "title": "2015 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sitis/2016/5698/0/07907475", "title": "Goal Aware Context Respectful Counseling Agent", "doi": null, "abstractUrl": "/proceedings-article/sitis/2016/07907475/12OmNwHhoOg", "parentPublication": { "id": "proceedings/sitis/2016/5698/0", "title": "2016 12th International Conference on Signal-Image Technology & Internet-Based Systems (SITIS)", 
"__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/coginf/2010/8042/0/05599796", "title": "The effect of cognitive load on interaction pattern of emotion and working memory: An ERP study", "doi": null, "abstractUrl": "/proceedings-article/coginf/2010/05599796/12OmNz6iOoI", "parentPublication": { "id": "proceedings/coginf/2010/8042/0", "title": "2010 9th IEEE International Conference on Cognitive Informatics (ICCI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccit/2009/3896/0/3896b314", "title": "Analysis on Real Estate Investment Behavior Cognition Based on HS Model", "doi": null, "abstractUrl": "/proceedings-article/iccit/2009/3896b314/12OmNzBOicX", "parentPublication": { "id": "proceedings/iccit/2009/3896/0", "title": "Convergence Information Technology, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2016/04/07383334", "title": "Effects of Virtual Human Appearance Fidelity on Emotion Contagion in Affective Inter-Personal Simulations", "doi": null, "abstractUrl": "/journal/tg/2016/04/07383334/13rRUygBw7c", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aciiw/2021/0021/0/09666402", "title": "An Affect as Interaction Approach for Stress Management Among Paramedics", "doi": null, "abstractUrl": "/proceedings-article/aciiw/2021/09666402/1A3hP6VGtEs", "parentPublication": { "id": "proceedings/aciiw/2021/0021/0", "title": "2021 9th International Conference on Affective Computing and Intelligent Interaction Workshops and Demos (ACIIW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cogmi/2019/6737/0/673700a001", "title": "Machine Learning and Human Cognition Combined to 
Enhance Knowledge Discovery Fidelity", "doi": null, "abstractUrl": "/proceedings-article/cogmi/2019/673700a001/1htC7rMjR9S", "parentPublication": { "id": "proceedings/cogmi/2019/6737/0", "title": "2019 IEEE First International Conference on Cognitive Machine Intelligence (CogMI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2009/3943/0/04811057", "title": "Immersive Rear Projection on Curved Screens", "doi": null, "abstractUrl": "/proceedings-article/vr/2009/04811057/1lssAh0wwUg", "parentPublication": { "id": "proceedings/vr/2009/3943/0", "title": "2009 IEEE Virtual Reality Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icalt/2021/4106/0/410600a389", "title": "Effects of Immersive Spherical Video-based Virtual Reality on Cognition and Affect Outcomes of Learning: A Meta-analysis", "doi": null, "abstractUrl": "/proceedings-article/icalt/2021/410600a389/1vJZUuZGuWY", "parentPublication": { "id": "proceedings/icalt/2021/4106/0", "title": "2021 International Conference on Advanced Learning Technologies (ICALT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "v0386", "articleId": "13rRUygBw6Z", "__typename": "AdjacentArticleType" }, "next": { "fno": "v0405", "articleId": "13rRUwj7cp2", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "1qV36rtU49i", "title": "March", "year": "2021", "issueNum": "03", "idPrefix": "tp", "pubType": "journal", "volume": "43", "label": "March", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1eyaoql8wYE", "doi": "10.1109/TPAMI.2019.2949299", "abstract": "An automated fingerprint recognition system (AFRS) for 3D fingerprints is essential and highly promising for biometric security. Despite the progress in developing 3D AFRSs, achieving high-quality real-time reconstruction and high-accuracy recognition of 3D fingerprints remain two challenging issues. To address them, we propose a robust 3D AFRS based on ridge-valley (RV)-guided 3D fingerprint reconstruction and 3D topology polymer (TTP) feature extraction. The former considers the unique fingerprint characteristics of the RV and achieves real-time reconstruction. Unlike traditional triangulation-based methods that establish correspondences between points by cross-correlation-based searching, we propose to establish RV correspondences (RVCs) between ridges/valleys by defining and calculating a RVC matrix based on the topology of RV curves. To enhance depth reconstruction, curve-based smoothing is proposed to refine our novel RV disparity map. The TTP feature codes the 3D topology by projecting the 3D minutiae onto multiple planes and extracting their corresponding 2D topologies and has proven to be effective and efficient for 3D fingerprint recognition. Comprehensive experimental results demonstrate that our method outperforms the state-of-the-art methods in terms of both reconstruction and recognition accuracy. Also, due to its very short running time, it is appropriate for practical applications.", "abstracts": [ { "abstractType": "Regular", "content": "An automated fingerprint recognition system (AFRS) for 3D fingerprints is essential and highly promising for biometric security. 
Despite the progress in developing 3D AFRSs, achieving high-quality real-time reconstruction and high-accuracy recognition of 3D fingerprints remain two challenging issues. To address them, we propose a robust 3D AFRS based on ridge-valley (RV)-guided 3D fingerprint reconstruction and 3D topology polymer (TTP) feature extraction. The former considers the unique fingerprint characteristics of the RV and achieves real-time reconstruction. Unlike traditional triangulation-based methods that establish correspondences between points by cross-correlation-based searching, we propose to establish RV correspondences (RVCs) between ridges/valleys by defining and calculating a RVC matrix based on the topology of RV curves. To enhance depth reconstruction, curve-based smoothing is proposed to refine our novel RV disparity map. The TTP feature codes the 3D topology by projecting the 3D minutiae onto multiple planes and extracting their corresponding 2D topologies and has proven to be effective and efficient for 3D fingerprint recognition. Comprehensive experimental results demonstrate that our method outperforms the state-of-the-art methods in terms of both reconstruction and recognition accuracy. Also, due to its very short running time, it is appropriate for practical applications.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "An automated fingerprint recognition system (AFRS) for 3D fingerprints is essential and highly promising for biometric security. Despite the progress in developing 3D AFRSs, achieving high-quality real-time reconstruction and high-accuracy recognition of 3D fingerprints remain two challenging issues. To address them, we propose a robust 3D AFRS based on ridge-valley (RV)-guided 3D fingerprint reconstruction and 3D topology polymer (TTP) feature extraction. The former considers the unique fingerprint characteristics of the RV and achieves real-time reconstruction. 
Unlike traditional triangulation-based methods that establish correspondences between points by cross-correlation-based searching, we propose to establish RV correspondences (RVCs) between ridges/valleys by defining and calculating a RVC matrix based on the topology of RV curves. To enhance depth reconstruction, curve-based smoothing is proposed to refine our novel RV disparity map. The TTP feature codes the 3D topology by projecting the 3D minutiae onto multiple planes and extracting their corresponding 2D topologies and has proven to be effective and efficient for 3D fingerprint recognition. Comprehensive experimental results demonstrate that our method outperforms the state-of-the-art methods in terms of both reconstruction and recognition accuracy. Also, due to its very short running time, it is appropriate for practical applications.", "title": "3D Fingerprint Recognition based on Ridge-Valley-Guided 3D Reconstruction and 3D Topology Polymer Feature Extraction", "normalizedTitle": "3D Fingerprint Recognition based on Ridge-Valley-Guided 3D Reconstruction and 3D Topology Polymer Feature Extraction", "fno": "08887282", "hasPdf": true, "idPrefix": "tp", "keywords": [ "Curve Fitting", "Feature Extraction", "Fingerprint Identification", "Image Matching", "Image Reconstruction", "Recognition Accuracy", "3 D Fingerprint Recognition", "Ridge Valley Guided 3 D Reconstruction", "Automated Fingerprint Recognition System", "3 D AFR Ss", "High Quality Real Time Reconstruction", "High Accuracy Recognition", "Ridge Valley Guided 3 D Fingerprint Reconstruction", "Unique Fingerprint Characteristics", "Traditional Triangulation Based Methods", "Cross Correlation Based", "Depth Reconstruction", "Curve Based Smoothing", "Three Dimensional Displays", "Two Dimensional Displays", "Image Reconstruction", "Cameras", "Feature Extraction", "Topology", "Fingerprint Recognition", "Biometrics", "3 D Fingerprint Recognition", "Real Time 3 D Fingerprint Reconstruction", "3 D Topology Feature 
Extraction" ], "authors": [ { "givenName": "Xuefei", "surname": "Yin", "fullName": "Xuefei Yin", "affiliation": "School of Engineering and Information Technology, University of New South Wales, Canberra, ACT, Australia", "__typename": "ArticleAuthorType" }, { "givenName": "Yanming", "surname": "Zhu", "fullName": "Yanming Zhu", "affiliation": "School of Engineering and Information Technology, University of New South Wales, Canberra, ACT, Australia", "__typename": "ArticleAuthorType" }, { "givenName": "Jiankun", "surname": "Hu", "fullName": "Jiankun Hu", "affiliation": "School of Engineering and Information Technology, University of New South Wales, Canberra, ACT, Australia", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": false, "showRecommendedArticles": true, "isOpenAccess": true, "issueNum": "03", "pubDate": "2021-03-01 00:00:00", "pubType": "trans", "pages": "1085-1091", "year": "2021", "issn": "0162-8828", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/iscc/2013/3755/0/06754942", "title": "Efficient fingerprint extraction for high performance Intrusion Detection System", "doi": null, "abstractUrl": "/proceedings-article/iscc/2013/06754942/12OmNxxdZDK", "parentPublication": { "id": "proceedings/iscc/2013/3755/0", "title": "2013 IEEE Symposium on Computers and Communications (ISCC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2014/5209/0/5209a684", "title": "3D Fingerprint Phantoms", "doi": null, "abstractUrl": "/proceedings-article/icpr/2014/5209a684/12OmNyfdOQ8", "parentPublication": { "id": "proceedings/icpr/2014/5209/0", "title": "2014 22nd International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2011/02/ttp2011020209", "title": "Fingerprint Reconstruction: From Minutiae to Phase", "doi": null, 
"abstractUrl": "/journal/tp/2011/02/ttp2011020209/13rRUIIVldS", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2013/04/mcg2013040073", "title": "Extracting Valley-Ridge Lines from Point-Cloud-Based 3D Fingerprint Models", "doi": null, "abstractUrl": "/magazine/cg/2013/04/mcg2013040073/13rRUNvgzcs", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2011/01/ttp2011010072", "title": "Global Ridge Orientation Modeling for Partial Fingerprint Identification", "doi": null, "abstractUrl": "/journal/tp/2011/01/ttp2011010072/13rRUwInv5w", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2015/03/06857403", "title": "Towards Contactless, Low-Cost and Accurate 3D Fingerprint Identification", "doi": null, "abstractUrl": "/journal/tp/2015/03/06857403/13rRUyfbws0", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/acpr/2017/3354/0/3354a822", "title": "Fingerprint Image Enhancement Based on Classification DBMs Reconstruction", "doi": null, "abstractUrl": "/proceedings-article/acpr/2017/3354a822/17D45VTRoBg", "parentPublication": { "id": "proceedings/acpr/2017/3354/0", "title": "2017 4th IAPR Asian Conference on Pattern Recognition (ACPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2018/12/08100983", "title": "Tetrahedron Based Fast 3D Fingerprint Identification Using Colored LEDs Illumination", "doi": null, 
"abstractUrl": "/journal/tp/2018/12/08100983/17D45X2fUH9", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/5555/01/10005833", "title": "Monocular 3D Fingerprint Reconstruction and Unwarping", "doi": null, "abstractUrl": "/journal/tp/5555/01/10005833/1JF3RNqTuNy", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2020/08/08664143", "title": "Contactless Biometric Identification Using 3D Finger Knuckle Patterns", "doi": null, "abstractUrl": "/journal/tp/2020/08/08664143/1l6NYhQfLpe", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "08851288", "articleId": "1dFoya9cl9e", "__typename": "AdjacentArticleType" }, "next": { "fno": "08920005", "articleId": "1fsFnejO2IM", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "1qWHDLnpOBW", "name": "ttp202103-08887282s1-supplementary_pami.pdf", "location": "https://www.computer.org/csdl/api/v1/extra/ttp202103-08887282s1-supplementary_pami.pdf", "extension": "pdf", "size": "168 kB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "12OmNALlciD", "title": "July-September", "year": "2000", "issueNum": "03", "idPrefix": "tg", "pubType": "journal", "volume": "6", "label": "July-September", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUwj7coZ", "doi": "10.1109/2945.879786", "abstract": "Abstract—We describe a new approach for interactively approximating specular reflections in arbitrary curved surfaces. The technique is applicable to any smooth implicitly defined reflecting surface that is equipped with a ray intersection procedure; it is also extremely efficient as it employs local perturbations to interpolate point samples analytically. After ray tracing a sparse set of reflection paths with respect to a given vantage point and static reflecting surfaces, the algorithm rapidly approximates reflections of arbitrary points in 3-space by expressing them as perturbations of nearby points with known reflections. The reflection of each new point is approximated to second-order accuracy by applying a closed-form perturbation formula to one or more nearby reflection paths. This formula is derived from the Taylor expansion of a reflection path and is based on first and second-order path derivatives. After preprocessing, the approach is fast enough to compute reflections of tessellated diffuse objects in arbitrary curved surfaces at interactive rates using standard graphics hardware. The resulting images are nearly indistinguishable from ray traced images that take several orders of magnitude longer to generate.", "abstracts": [ { "abstractType": "Regular", "content": "Abstract—We describe a new approach for interactively approximating specular reflections in arbitrary curved surfaces. 
The technique is applicable to any smooth implicitly defined reflecting surface that is equipped with a ray intersection procedure; it is also extremely efficient as it employs local perturbations to interpolate point samples analytically. After ray tracing a sparse set of reflection paths with respect to a given vantage point and static reflecting surfaces, the algorithm rapidly approximates reflections of arbitrary points in 3-space by expressing them as perturbations of nearby points with known reflections. The reflection of each new point is approximated to second-order accuracy by applying a closed-form perturbation formula to one or more nearby reflection paths. This formula is derived from the Taylor expansion of a reflection path and is based on first and second-order path derivatives. After preprocessing, the approach is fast enough to compute reflections of tessellated diffuse objects in arbitrary curved surfaces at interactive rates using standard graphics hardware. The resulting images are nearly indistinguishable from ray traced images that take several orders of magnitude longer to generate.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Abstract—We describe a new approach for interactively approximating specular reflections in arbitrary curved surfaces. The technique is applicable to any smooth implicitly defined reflecting surface that is equipped with a ray intersection procedure; it is also extremely efficient as it employs local perturbations to interpolate point samples analytically. After ray tracing a sparse set of reflection paths with respect to a given vantage point and static reflecting surfaces, the algorithm rapidly approximates reflections of arbitrary points in 3-space by expressing them as perturbations of nearby points with known reflections. The reflection of each new point is approximated to second-order accuracy by applying a closed-form perturbation formula to one or more nearby reflection paths. 
This formula is derived from the Taylor expansion of a reflection path and is based on first and second-order path derivatives. After preprocessing, the approach is fast enough to compute reflections of tessellated diffuse objects in arbitrary curved surfaces at interactive rates using standard graphics hardware. The resulting images are nearly indistinguishable from ray traced images that take several orders of magnitude longer to generate.", "title": "Perturbation Methods for Interactive Specular Reflections", "normalizedTitle": "Perturbation Methods for Interactive Specular Reflections", "fno": "v0253", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Animation Systems", "Illumination Effects", "Implicit Surfaces", "Matting And Compositing", "Optics", "Ray Tracing" ], "authors": [ { "givenName": "Min", "surname": "Chen", "fullName": "Min Chen", "affiliation": null, "__typename": "ArticleAuthorType" }, { "givenName": "James", "surname": "Arvo", "fullName": "James Arvo", "affiliation": null, "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": false, "isOpenAccess": false, "issueNum": "03", "pubDate": "2000-07-01 00:00:00", "pubType": "trans", "pages": "253-264", "year": "2000", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [], "adjacentArticles": { "previous": { "fno": "v0236", "articleId": "13rRUwIF6dC", "__typename": "AdjacentArticleType" }, "next": { "fno": "v0265", "articleId": "13rRUyoPSOV", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNzmclnL", "title": "November/December", "year": "2006", "issueNum": "06", "idPrefix": "tg", "pubType": "journal", "volume": "12", "label": "November/December", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUxjQyp5", "doi": "10.1109/TVCG.2006.103", "abstract": "Abstract—This paper presents sample-based cameras for rendering high quality reflections on convex reflectors at interactive rates. The method supports change of view, moving objects and reflectors, higher order reflections, view-dependent lighting of reflected objects, and reflector surface properties. In order to render reflections with the feed forward graphics pipeline, one has to project reflected vertices. A sample-based camera is a collection of BSP trees of pinhole cameras that jointly approximate the projection function. It is constructed from the reflected rays defined by the desired view and the scene reflectors. A scene point is projected by invoking only the cameras that contain it in their frustums. Reflections are rendered by projecting the scene geometry and then rasterizing in hardware.", "abstracts": [ { "abstractType": "Regular", "content": "Abstract—This paper presents sample-based cameras for rendering high quality reflections on convex reflectors at interactive rates. The method supports change of view, moving objects and reflectors, higher order reflections, view-dependent lighting of reflected objects, and reflector surface properties. In order to render reflections with the feed forward graphics pipeline, one has to project reflected vertices. A sample-based camera is a collection of BSP trees of pinhole cameras that jointly approximate the projection function. It is constructed from the reflected rays defined by the desired view and the scene reflectors. A scene point is projected by invoking only the cameras that contain it in their frustums. 
Reflections are rendered by projecting the scene geometry and then rasterizing in hardware.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Abstract—This paper presents sample-based cameras for rendering high quality reflections on convex reflectors at interactive rates. The method supports change of view, moving objects and reflectors, higher order reflections, view-dependent lighting of reflected objects, and reflector surface properties. In order to render reflections with the feed forward graphics pipeline, one has to project reflected vertices. A sample-based camera is a collection of BSP trees of pinhole cameras that jointly approximate the projection function. It is constructed from the reflected rays defined by the desired view and the scene reflectors. A scene point is projected by invoking only the cameras that contain it in their frustums. Reflections are rendered by projecting the scene geometry and then rasterizing in hardware.", "title": "Sample-Based Cameras for Feed Forward Reflection Rendering", "normalizedTitle": "Sample-Based Cameras for Feed Forward Reflection Rendering", "fno": "v1590", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Cameras", "Feeds", "Optical Reflection", "Rendering Computer Graphics", "Layout", "Graphics", "Pipelines", "Tree Graphs", "Geometry", "Hardware", "Sample Based Graphics", "Reflections", "Interactive Rendering", "Image Based Rendering" ], "authors": [ { "givenName": "Voicu", "surname": "Popescu", "fullName": "Voicu Popescu", "affiliation": "Dept. of Comput. Sci., Purdue Univ., West Lafayette, IN", "__typename": "ArticleAuthorType" }, { "givenName": "Elisha", "surname": "Sacks", "fullName": "Elisha Sacks", "affiliation": "Dept. of Comput. Sci., Purdue Univ., West Lafayette, IN", "__typename": "ArticleAuthorType" }, { "givenName": "Chunhui", "surname": "Mei", "fullName": "Chunhui Mei", "affiliation": "Dept. of Comput. 
Sci., Purdue Univ., West Lafayette, IN", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "06", "pubDate": "2006-11-01 00:00:00", "pubType": "trans", "pages": "1590-1600", "year": "2006", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/cvpr/2015/6964/0/07298939", "title": "Reflection removal using ghosting cues", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2015/07298939/12OmNxRF6XF", "parentPublication": { "id": "proceedings/cvpr/2015/6964/0", "title": "2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2001/1143/1/00937522", "title": "New perspectives on geometric reflection theory from rough surfaces", "doi": null, "abstractUrl": "/proceedings-article/iccv/2001/00937522/12OmNzmclV4", "parentPublication": { "id": "proceedings/iccv/2001/1143/1", "title": "Computer Vision, IEEE International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2014/09/06803934", "title": "Second-Order Feed-Forward Renderingfor Specular and Glossy Reflections", "doi": null, "abstractUrl": "/journal/tg/2014/09/06803934/13rRUwInvyA", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2011/06/mcg2011060068", "title": "Nonpinhole Approximations for Interactive Rendering", "doi": null, "abstractUrl": "/magazine/cg/2011/06/mcg2011060068/13rRUxly9gf", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/cvpr/2022/6946/0/694600f481", "title": "Ref-NeRF: Structured View-Dependent Appearance for Neural Radiance Fields", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600f481/1H1jnh582jK", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600s8388", "title": "NeRFReN: Neural Radiance Fields with Reflections", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600s8388/1H1nhdo3vFe", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aiam/2022/6399/0/639900a518", "title": "Borehole Radar Response to Fracture Characteristics: A Forward Simulation Study", "doi": null, "abstractUrl": "/proceedings-article/aiam/2022/639900a518/1LRlN94EJnq", "parentPublication": { "id": "proceedings/aiam/2022/6399/0", "title": "2022 4th International Conference on Artificial Intelligence and Advanced Manufacturing (AIAM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2020/6532/0/09090544", "title": "Removal of the Infrared Light Reflection of Eyeglass Using Multi-Channel CycleGAN Applied for the Gaze Estimation Images", "doi": null, "abstractUrl": "/proceedings-article/vrw/2020/09090544/1jIxvetbThe", "parentPublication": { "id": "proceedings/vrw/2020/6532/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2020/7168/0/716800c395", "title": "Reflection Scene Separation From a Single Image", "doi": null, "abstractUrl": 
"/proceedings-article/cvpr/2020/716800c395/1m3nQCNCXXG", "parentPublication": { "id": "proceedings/cvpr/2020/7168/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2020/8508/0/850800a320", "title": "Improved vergence and accommodation via Purkinje Image tracking with multiple cameras for AR glasses", "doi": null, "abstractUrl": "/proceedings-article/ismar/2020/850800a320/1pysxaykIAo", "parentPublication": { "id": "proceedings/ismar/2020/8508/0", "title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "v1580", "articleId": "13rRUxCitJ4", "__typename": "AdjacentArticleType" }, "next": { "fno": "v1601", "articleId": "13rRUwfZC06", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNzw8iSS", "title": "July/August", "year": "2008", "issueNum": "04", "idPrefix": "cg", "pubType": "magazine", "volume": "28", "label": "July/August", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUytWFbC", "doi": "10.1109/MCG.2008.73", "abstract": "This system represents furry surfaces as nonuniform layers of texture slices, automatically adjusting the layers to achieve efficient, high-quality rendering. It employs layered shadow maps to simulate self-shadowing. Interactive tools let users intuitively create and edit furry objects and instantly view the rendered objects.", "abstracts": [ { "abstractType": "Regular", "content": "This system represents furry surfaces as nonuniform layers of texture slices, automatically adjusting the layers to achieve efficient, high-quality rendering. It employs layered shadow maps to simulate self-shadowing. Interactive tools let users intuitively create and edit furry objects and instantly view the rendered objects.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "This system represents furry surfaces as nonuniform layers of texture slices, automatically adjusting the layers to achieve efficient, high-quality rendering. It employs layered shadow maps to simulate self-shadowing. 
Interactive tools let users intuitively create and edit furry objects and instantly view the rendered objects.", "title": "Interactive Fur Shaping and Rendering Using Nonuniform-Layered Textures", "normalizedTitle": "Interactive Fur Shaping and Rendering Using Nonuniform-Layered Textures", "fno": "mcg2008040085", "hasPdf": true, "idPrefix": "cg", "keywords": [ "Textures", "Realistic Fur Synthesis", "Interactive Rendering", "Shadow Maps" ], "authors": [ { "givenName": "Gang", "surname": "Yang", "fullName": "Gang Yang", "affiliation": "Beijing Forestry University", "__typename": "ArticleAuthorType" }, { "givenName": "Hanqiu", "surname": "Sun", "fullName": "Hanqiu Sun", "affiliation": "Chinese University of Hong Kong", "__typename": "ArticleAuthorType" }, { "givenName": "Enhua", "surname": "Wu", "fullName": "Enhua Wu", "affiliation": "University of Macau", "__typename": "ArticleAuthorType" }, { "givenName": "Lifeng", "surname": "Wang", "fullName": "Lifeng Wang", "affiliation": "Autodesk Shanghai", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "04", "pubDate": "2008-07-01 00:00:00", "pubType": "mags", "pages": "85-93", "year": "2008", "issn": "0272-1716", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/ispdc/2010/4120/0/4120a079", "title": "Parallel ID Shadow-Map Decompression on GPU", "doi": null, "abstractUrl": "/proceedings-article/ispdc/2010/4120a079/12OmNxZkhtp", "parentPublication": { "id": "proceedings/ispdc/2010/4120/0", "title": "Parallel and Distributed Computing, International Symposium on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cgi/2004/2171/0/21710248", "title": "Real-Time Rendering of Human Hair Using Programmable Graphics Hardware", "doi": null, "abstractUrl": "/proceedings-article/cgi/2004/21710248/12OmNzdoMwf", 
"parentPublication": { "id": "proceedings/cgi/2004/2171/0", "title": "Proceedings. Computer Graphics International", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2009/10/ttp2009101862", "title": "Layered Dynamic Textures", "doi": null, "abstractUrl": "/journal/tp/2009/10/ttp2009101862/13rRUxASuiR", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "mcg2008040074", "articleId": "13rRUwInv6Q", "__typename": "AdjacentArticleType" }, "next": { "fno": "mcg2008040098", "articleId": "13rRUygBw22", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNzmclnL", "title": "November/December", "year": "2006", "issueNum": "06", "idPrefix": "tg", "pubType": "journal", "volume": "12", "label": "November/December", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUxASubv", "doi": "10.1109/TVCG.2006.90", "abstract": "Abstract—Synthesizing expressive facial animation is a very challenging topic within the graphics community. In this paper, we present an expressive facial animation synthesis system enabled by automated learning from facial motion capture data. Accurate 3D motions of the markers on the face of a human subject are captured while he/she recites a predesigned corpus, with specific spoken and visual expressions. We present a novel motion capture mining technique that \"learns” speech coarticulation models for diphones and triphones from the recorded data. A Phoneme-Independent Expression Eigenspace (PIEES) that encloses the dynamic expression signals is constructed by motion signal processing (phoneme-based time-warping and subtraction) and Principal Component Analysis (PCA) reduction. New expressive facial animations are synthesized as follows: First, the learned coarticulation models are concatenated to synthesize neutral visual speech according to novel speech input, then a texture-synthesis-based approach is used to generate a novel dynamic expression signal from the PIEES model, and finally the synthesized expression signal is blended with the synthesized neutral visual speech to create the final expressive facial animation. Our experiments demonstrate that the system can effectively synthesize realistic expressive facial animation.", "abstracts": [ { "abstractType": "Regular", "content": "Abstract—Synthesizing expressive facial animation is a very challenging topic within the graphics community. 
In this paper, we present an expressive facial animation synthesis system enabled by automated learning from facial motion capture data. Accurate 3D motions of the markers on the face of a human subject are captured while he/she recites a predesigned corpus, with specific spoken and visual expressions. We present a novel motion capture mining technique that \"learns” speech coarticulation models for diphones and triphones from the recorded data. A Phoneme-Independent Expression Eigenspace (PIEES) that encloses the dynamic expression signals is constructed by motion signal processing (phoneme-based time-warping and subtraction) and Principal Component Analysis (PCA) reduction. New expressive facial animations are synthesized as follows: First, the learned coarticulation models are concatenated to synthesize neutral visual speech according to novel speech input, then a texture-synthesis-based approach is used to generate a novel dynamic expression signal from the PIEES model, and finally the synthesized expression signal is blended with the synthesized neutral visual speech to create the final expressive facial animation. Our experiments demonstrate that the system can effectively synthesize realistic expressive facial animation.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Abstract—Synthesizing expressive facial animation is a very challenging topic within the graphics community. In this paper, we present an expressive facial animation synthesis system enabled by automated learning from facial motion capture data. Accurate 3D motions of the markers on the face of a human subject are captured while he/she recites a predesigned corpus, with specific spoken and visual expressions. We present a novel motion capture mining technique that \"learns” speech coarticulation models for diphones and triphones from the recorded data. 
A Phoneme-Independent Expression Eigenspace (PIEES) that encloses the dynamic expression signals is constructed by motion signal processing (phoneme-based time-warping and subtraction) and Principal Component Analysis (PCA) reduction. New expressive facial animations are synthesized as follows: First, the learned coarticulation models are concatenated to synthesize neutral visual speech according to novel speech input, then a texture-synthesis-based approach is used to generate a novel dynamic expression signal from the PIEES model, and finally the synthesized expression signal is blended with the synthesized neutral visual speech to create the final expressive facial animation. Our experiments demonstrate that the system can effectively synthesize realistic expressive facial animation.", "title": "Expressive Facial Animation Synthesis by Learning Speech Coarticulation and Expression Spaces", "normalizedTitle": "Expressive Facial Animation Synthesis by Learning Speech Coarticulation and Expression Spaces", "fno": "v1523", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Facial Animation", "Expressive Speech", "Animation Synthesis", "Speech Coarticulation", "Texture Synthesis", "Motion Capture", "Data Driven" ], "authors": [ { "givenName": "Zhigang", "surname": "Deng", "fullName": "Zhigang Deng", "affiliation": "IEEE Computer Society", "__typename": "ArticleAuthorType" }, { "givenName": "Ulrich", "surname": "Neumann", "fullName": "Ulrich Neumann", "affiliation": "IEEE Computer Society", "__typename": "ArticleAuthorType" }, { "givenName": "J.P.", "surname": "Lewis", "fullName": "J.P. 
Lewis", "affiliation": "IEEE Computer Society", "__typename": "ArticleAuthorType" }, { "givenName": "Tae-Yong", "surname": "Kim", "fullName": "Tae-Yong Kim", "affiliation": null, "__typename": "ArticleAuthorType" }, { "givenName": "Murtaza", "surname": "Bulut", "fullName": "Murtaza Bulut", "affiliation": null, "__typename": "ArticleAuthorType" }, { "givenName": "Shrikanth", "surname": "Narayanan", "fullName": "Shrikanth Narayanan", "affiliation": "IEEE", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "06", "pubDate": "2006-11-01 00:00:00", "pubType": "trans", "pages": "1523-1534", "year": "2006", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/cgi/2001/1007/0/10070038", "title": "Principal Components of Expressive Speech Animation", "doi": null, "abstractUrl": "/proceedings-article/cgi/2001/10070038/12OmNAOKnZf", "parentPublication": { "id": "proceedings/cgi/2001/1007/0", "title": "Proceedings. Computer Graphics International 2001", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/pg/2002/1784/0/17840077", "title": "\"May I talk to you? 
:-)\" — Facial Animation from Text", "doi": null, "abstractUrl": "/proceedings-article/pg/2002/17840077/12OmNAkWveH", "parentPublication": { "id": "proceedings/pg/2002/1784/0", "title": "Computer Graphics and Applications, Pacific Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icig/2007/2929/0/29290874", "title": "Modeling Expressive Wrinkles of Face For Animation", "doi": null, "abstractUrl": "/proceedings-article/icig/2007/29290874/12OmNBV9Iif", "parentPublication": { "id": "proceedings/icig/2007/2929/0", "title": "Image and Graphics, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2014/4761/0/06890335", "title": "Hierarchical facial expression animation by motion capture data", "doi": null, "abstractUrl": "/proceedings-article/icme/2014/06890335/12OmNx76TWi", "parentPublication": { "id": "proceedings/icme/2014/4761/0", "title": "2014 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cgiv/2010/4166/0/4166a009", "title": "Expressive MPEG-4 Facial Animation Using Quadratic Deformation Models", "doi": null, "abstractUrl": "/proceedings-article/cgiv/2010/4166a009/12OmNxH9Xgx", "parentPublication": { "id": "proceedings/cgiv/2010/4166/0", "title": "2010 Seventh International Conference on Computer Graphics, Imaging and Visualization", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/mu/2016/04/mmu2016040068", "title": "Expressive Modulation of Neutral Visual Speech", "doi": null, "abstractUrl": "/magazine/mu/2016/04/mmu2016040068/13rRUwbaqIM", "parentPublication": { "id": "mags/mu", "title": "IEEE MultiMedia", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2005/03/v0341", "title": "Creating Speech-Synchronized 
Animation", "doi": null, "abstractUrl": "/journal/tg/2005/03/v0341/13rRUxE04tq", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2012/02/mcg2012020076", "title": "Sparse Coding for Flexible, Robust 3D Facial-Expression Synthesis", "doi": null, "abstractUrl": "/magazine/cg/2012/02/mcg2012020076/13rRUyYBljd", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08798145", "title": "Speech-Driven Facial Animation by LSTM-RNN for Communication Use", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08798145/1cJ0YZ9Bfgs", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ta/5555/01/09497710", "title": "Facial Expression Animation by Landmark Guided Residual Module", "doi": null, "abstractUrl": "/journal/ta/5555/01/09497710/1vzY3CSt5Pq", "parentPublication": { "id": "trans/ta", "title": "IEEE Transactions on Affective Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "v1511", "articleId": "13rRUxBJhvl", "__typename": "AdjacentArticleType" }, "next": { "fno": "v1535", "articleId": "13rRUxNW1Zb", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNxWuinq", "title": "July-Sept.", "year": "2019", "issueNum": "03", "idPrefix": "ta", "pubType": "journal", "volume": "10", "label": "July-Sept.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUwd9CJZ", "doi": "10.1109/TAFFC.2017.2714671", "abstract": "Emotions have an important role in daily life, not only in human interaction, but also in decision-making processes, and in the perception of the world around us. Due to the recent interest shown by the research community in establishing emotional interactions between humans and computers, the identification of the emotional state of the former became a need. This can be achieved through multiple measures, such as subjective self-reports, autonomic and neurophysiological measurements. In the last years, Electroencephalography (EEG) received considerable attention from researchers, since it can provide a simple, cheap, portable, and ease-to-use solution for identifying emotions. In this paper, we present a survey of the neurophysiological research performed from 2009 to 2016, providing a comprehensive overview of the existing works in emotion recognition using EEG signals. We focus our analysis in the main aspects involved in the recognition process (e.g., subjects, features extracted, classifiers), and compare the works per them. From this analysis, we propose a set of good practice recommendations that researchers must follow to achieve reproducible, replicable, well-validated and high-quality results. 
We intend this survey to be useful for the research community working on emotion recognition through EEG signals, and in particular for those entering this field of research, since it offers a structured starting point.", "abstracts": [ { "abstractType": "Regular", "content": "Emotions have an important role in daily life, not only in human interaction, but also in decision-making processes, and in the perception of the world around us. Due to the recent interest shown by the research community in establishing emotional interactions between humans and computers, the identification of the emotional state of the former became a need. This can be achieved through multiple measures, such as subjective self-reports, autonomic and neurophysiological measurements. In the last years, Electroencephalography (EEG) received considerable attention from researchers, since it can provide a simple, cheap, portable, and ease-to-use solution for identifying emotions. In this paper, we present a survey of the neurophysiological research performed from 2009 to 2016, providing a comprehensive overview of the existing works in emotion recognition using EEG signals. We focus our analysis in the main aspects involved in the recognition process (e.g., subjects, features extracted, classifiers), and compare the works per them. From this analysis, we propose a set of good practice recommendations that researchers must follow to achieve reproducible, replicable, well-validated and high-quality results. We intend this survey to be useful for the research community working on emotion recognition through EEG signals, and in particular for those entering this field of research, since it offers a structured starting point.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Emotions have an important role in daily life, not only in human interaction, but also in decision-making processes, and in the perception of the world around us. 
Due to the recent interest shown by the research community in establishing emotional interactions between humans and computers, the identification of the emotional state of the former became a need. This can be achieved through multiple measures, such as subjective self-reports, autonomic and neurophysiological measurements. In the last years, Electroencephalography (EEG) received considerable attention from researchers, since it can provide a simple, cheap, portable, and ease-to-use solution for identifying emotions. In this paper, we present a survey of the neurophysiological research performed from 2009 to 2016, providing a comprehensive overview of the existing works in emotion recognition using EEG signals. We focus our analysis in the main aspects involved in the recognition process (e.g., subjects, features extracted, classifiers), and compare the works per them. From this analysis, we propose a set of good practice recommendations that researchers must follow to achieve reproducible, replicable, well-validated and high-quality results. 
We intend this survey to be useful for the research community working on emotion recognition through EEG signals, and in particular for those entering this field of research, since it offers a structured starting point.", "title": "Emotions Recognition Using EEG Signals: A Survey", "normalizedTitle": "Emotions Recognition Using EEG Signals: A Survey", "fno": "07946165", "hasPdf": true, "idPrefix": "ta", "keywords": [ "Electroencephalography", "Emotion Recognition", "Medical Signal Processing", "Neurophysiology", "Electroencephalography", "Recognition Process", "Emotion Recognition", "Neurophysiological Research", "Emotional State", "Emotional Interactions", "Decision Making Processes", "Human Interaction", "EEG Signals", "Electroencephalography", "Electrodes", "Electric Potential", "Emotion Recognition", "Feature Extraction", "Brain", "Human Computer Interaction", "Emotions", "Electroencephalography", "Identification", "Recognition" ], "authors": [ { "givenName": "Soraia M.", "surname": "Alarcao", "fullName": "Soraia M. Alarcao", "affiliation": "Universidade de Lisboa, Lisboa, Portugal", "__typename": "ArticleAuthorType" }, { "givenName": "Manuel J.", "surname": "Fonseca", "fullName": "Manuel J. 
Fonseca", "affiliation": "Universidade de Lisboa, Lisboa, Portugal", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "03", "pubDate": "2019-07-01 00:00:00", "pubType": "trans", "pages": "374-393", "year": "2019", "issn": "1949-3045", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/ism/2014/4311/0/4311a277", "title": "Personalized Music Emotion Recognition Using Electroencephalography (EEG)", "doi": null, "abstractUrl": "/proceedings-article/ism/2014/4311a277/12OmNwDj199", "parentPublication": { "id": "proceedings/ism/2014/4311/0", "title": "2014 IEEE International Symposium on Multimedia (ISM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iciev/2013/0400/0/06572658", "title": "Human emotion recognition using frequency & statistical measures of EEG signal", "doi": null, "abstractUrl": "/proceedings-article/iciev/2013/06572658/12OmNwHhoTZ", "parentPublication": { "id": "proceedings/iciev/2013/0400/0", "title": "2013 2nd International Conference on Informatics, Electronics and Vision (ICIEV 2013)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cw/2012/4814/0/4814a053", "title": "EEG-based Valence Level Recognition for Real-Time Applications", "doi": null, "abstractUrl": "/proceedings-article/cw/2012/4814a053/12OmNwp74MG", "parentPublication": { "id": "proceedings/cw/2012/4814/0", "title": "2012 International Conference on Cyberworlds", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2014/4761/0/06890301", "title": "Continuous emotion detection using EEG signals and facial expressions", "doi": null, "abstractUrl": "/proceedings-article/icme/2014/06890301/12OmNyQYtb1", "parentPublication": { "id": "proceedings/icme/2014/4761/0", 
"title": "2014 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ta/2018/03/08241761", "title": "Emotion Analysis for Personality Inference from EEG Signals", "doi": null, "abstractUrl": "/journal/ta/2018/03/08241761/13rRUytF47R", "parentPublication": { "id": "trans/ta", "title": "IEEE Transactions on Affective Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ta/2018/04/07835688", "title": "Real-Time Movie-Induced Discrete Emotion Recognition from EEG Signals", "doi": null, "abstractUrl": "/journal/ta/2018/04/07835688/17D45XvMceV", "parentPublication": { "id": "trans/ta", "title": "IEEE Transactions on Affective Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ainit/2021/1296/0/129600a217", "title": "Analysis of bimodal emotion recognition method based on EEG signals", "doi": null, "abstractUrl": "/proceedings-article/ainit/2021/129600a217/1BzWEXnfXmE", "parentPublication": { "id": "proceedings/ainit/2021/1296/0", "title": "2021 2nd International Seminar on Artificial Intelligence, Networking and Information Technology (AINIT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ta/5555/01/09946368", "title": "MMPosE: Movie-induced Multi-label Positive Emotion Classification Through EEG Signals", "doi": null, "abstractUrl": "/journal/ta/5555/01/09946368/1IdqYG8gCvC", "parentPublication": { "id": "trans/ta", "title": "IEEE Transactions on Affective Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ta/2022/03/09154557", "title": "An Efficient LSTM Network for Emotion Recognition From Multichannel EEG Signals", "doi": null, "abstractUrl": "/journal/ta/2022/03/09154557/1lZzEqvllxC", "parentPublication": { "id": "trans/ta", "title": "IEEE Transactions 
on Affective Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ta/2023/01/09385899", "title": "EEG Feature Selection via Global Redundancy Minimization for Emotion Recognition", "doi": null, "abstractUrl": "/journal/ta/2023/01/09385899/1seiezf9cNG", "parentPublication": { "id": "trans/ta", "title": "IEEE Transactions on Affective Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "07987785", "articleId": "13rRUxbCbrT", "__typename": "AdjacentArticleType" }, "next": { "fno": "07971947", "articleId": "13rRUxDqS6M", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "1GlbpNMaT7y", "title": "July-Sept.", "year": "2022", "issueNum": "03", "idPrefix": "ta", "pubType": "journal", "volume": "13", "label": "July-Sept.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1lZzEqvllxC", "doi": "10.1109/TAFFC.2020.3013711", "abstract": "Most previous EEG-based emotion recognition methods studied hand-crafted EEG features extracted from different electrodes. In this article, we study the relation among different EEG electrodes and propose a deep learning method to automatically extract the spatial features that characterize the functional relation between EEG signals at different electrodes. Our proposed deep model is called <bold>AT</bold>tention-based <bold>LSTM</bold> with <bold>D</bold>omain <bold>D</bold>iscriminator (ATDD-LSTM), a model based on Long Short-Term Memory (LSTM) for emotion recognition that can characterize nonlinear relations among EEG signals of different electrodes. To achieve state-of-the-art emotion recognition performance, the architecture of ATDD-LSTM has two distinguishing characteristics: (1) By applying the attention mechanism to the feature vectors produced by LSTM, ATDD-LSTM automatically selects suitable EEG channels for emotion recognition, which makes the learned model concentrate on the emotion related channels in response to a given emotion; (2) To minimize the significant feature distribution shift between different sessions and/or subjects, ATDD-LSTM uses a domain discriminator to modify the data representation space and generate domain-invariant features. We evaluate the proposed ATDD-LSTM model on three public EEG emotional databases (DEAP, SEED and CMEED) for emotion recognition. 
The experimental results demonstrate that our ATDD-LSTM model achieves superior performance on subject-dependent (for the same subject), subject-independent (for different subjects) and cross-session (for the same subject) evaluation.", "abstracts": [ { "abstractType": "Regular", "content": "Most previous EEG-based emotion recognition methods studied hand-crafted EEG features extracted from different electrodes. In this article, we study the relation among different EEG electrodes and propose a deep learning method to automatically extract the spatial features that characterize the functional relation between EEG signals at different electrodes. Our proposed deep model is called <bold>AT</bold>tention-based <bold>LSTM</bold> with <bold>D</bold>omain <bold>D</bold>iscriminator (ATDD-LSTM), a model based on Long Short-Term Memory (LSTM) for emotion recognition that can characterize nonlinear relations among EEG signals of different electrodes. To achieve state-of-the-art emotion recognition performance, the architecture of ATDD-LSTM has two distinguishing characteristics: (1) By applying the attention mechanism to the feature vectors produced by LSTM, ATDD-LSTM automatically selects suitable EEG channels for emotion recognition, which makes the learned model concentrate on the emotion related channels in response to a given emotion; (2) To minimize the significant feature distribution shift between different sessions and/or subjects, ATDD-LSTM uses a domain discriminator to modify the data representation space and generate domain-invariant features. We evaluate the proposed ATDD-LSTM model on three public EEG emotional databases (DEAP, SEED and CMEED) for emotion recognition. 
The experimental results demonstrate that our ATDD-LSTM model achieves superior performance on subject-dependent (for the same subject), subject-independent (for different subjects) and cross-session (for the same subject) evaluation.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Most previous EEG-based emotion recognition methods studied hand-crafted EEG features extracted from different electrodes. In this article, we study the relation among different EEG electrodes and propose a deep learning method to automatically extract the spatial features that characterize the functional relation between EEG signals at different electrodes. Our proposed deep model is called ATtention-based LSTM with Domain Discriminator (ATDD-LSTM), a model based on Long Short-Term Memory (LSTM) for emotion recognition that can characterize nonlinear relations among EEG signals of different electrodes. To achieve state-of-the-art emotion recognition performance, the architecture of ATDD-LSTM has two distinguishing characteristics: (1) By applying the attention mechanism to the feature vectors produced by LSTM, ATDD-LSTM automatically selects suitable EEG channels for emotion recognition, which makes the learned model concentrate on the emotion related channels in response to a given emotion; (2) To minimize the significant feature distribution shift between different sessions and/or subjects, ATDD-LSTM uses a domain discriminator to modify the data representation space and generate domain-invariant features. We evaluate the proposed ATDD-LSTM model on three public EEG emotional databases (DEAP, SEED and CMEED) for emotion recognition. 
The experimental results demonstrate that our ATDD-LSTM model achieves superior performance on subject-dependent (for the same subject), subject-independent (for different subjects) and cross-session (for the same subject) evaluation.", "title": "An Efficient LSTM Network for Emotion Recognition From Multichannel EEG Signals", "normalizedTitle": "An Efficient LSTM Network for Emotion Recognition From Multichannel EEG Signals", "fno": "09154557", "hasPdf": true, "idPrefix": "ta", "keywords": [ "Electroencephalography", "Feature Extraction", "Brain Modeling", "Emotion Recognition", "Electrodes", "Data Models", "Frequency Domain Analysis", "Emotion Recognition", "Multichannel EEG", "LSTM", "Attention Mechanism", "Domain Adaptation" ], "authors": [ { "givenName": "Xiaobing", "surname": "Du", "fullName": "Xiaobing Du", "affiliation": "Beijing Key Laboratory of Human Computer Interactions, Institute of Software, Chinese Academy of Sciences, Beijing, China", "__typename": "ArticleAuthorType" }, { "givenName": "Cuixia", "surname": "Ma", "fullName": "Cuixia Ma", "affiliation": "State Key Laboratory of Computer Science, Institute of Software, Chinese Academy of Sciences, Beijing, China", "__typename": "ArticleAuthorType" }, { "givenName": "Guanhua", "surname": "Zhang", "fullName": "Guanhua Zhang", "affiliation": "Department of Computer Science and Technology, BNRist, MOE-Key Laboratory of Pervasive Computing, Tsinghua University, Beijing, China", "__typename": "ArticleAuthorType" }, { "givenName": "Jinyao", "surname": "Li", "fullName": "Jinyao Li", "affiliation": "Beijing Key Laboratory of Human Computer Interactions, Institute of Software, Chinese Academy of Sciences, Beijing, China", "__typename": "ArticleAuthorType" }, { "givenName": "Yu-Kun", "surname": "Lai", "fullName": "Yu-Kun Lai", "affiliation": "School of Computer Science and Informatics, Cardiff University, Cardiff, Wales, U.K.", "__typename": "ArticleAuthorType" }, { "givenName": "Guozhen", "surname": "Zhao", 
"fullName": "Guozhen Zhao", "affiliation": "CAS Key Laboratory of Behavioral Science, Institute of Psychology, Beijing, China", "__typename": "ArticleAuthorType" }, { "givenName": "Xiaoming", "surname": "Deng", "fullName": "Xiaoming Deng", "affiliation": "Beijing Key Laboratory of Human Computer Interactions, Institute of Software, Chinese Academy of Sciences, Beijing, China", "__typename": "ArticleAuthorType" }, { "givenName": "Yong-Jin", "surname": "Liu", "fullName": "Yong-Jin Liu", "affiliation": "Department of Computer Science and Technology, BNRist, MOE-Key Laboratory of Pervasive Computing, Tsinghua University, Beijing, China", "__typename": "ArticleAuthorType" }, { "givenName": "Hongan", "surname": "Wang", "fullName": "Hongan Wang", "affiliation": "State Key Laboratory of Computer Science, Institute of Software, Chinese Academy of Sciences, Beijing, China", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "03", "pubDate": "2022-07-01 00:00:00", "pubType": "trans", "pages": "1528-1540", "year": "2022", "issn": "1949-3045", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/icme/2014/4761/0/06890301", "title": "Continuous emotion detection using EEG signals and facial expressions", "doi": null, "abstractUrl": "/proceedings-article/icme/2014/06890301/12OmNyQYtb1", "parentPublication": { "id": "proceedings/icme/2014/4761/0", "title": "2014 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ainit/2021/1296/0/129600a217", "title": "Analysis of bimodal emotion recognition method based on EEG signals", "doi": null, "abstractUrl": "/proceedings-article/ainit/2021/129600a217/1BzWEXnfXmE", "parentPublication": { "id": "proceedings/ainit/2021/1296/0", "title": "2021 2nd International 
Seminar on Artificial Intelligence, Networking and Information Technology (AINIT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/nana/2022/6131/0/613100a007", "title": "Spatiotemporal Emotion Recognition Method Based on EEG Signals During Music Listening Using 1D-CNN &#x0026; Stacked-LSTM", "doi": null, "abstractUrl": "/proceedings-article/nana/2022/613100a007/1JwPDoMlZeg", "parentPublication": { "id": "proceedings/nana/2022/6131/0", "title": "2022 International Conference on Networking and Network Applications (NaNA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cse/2022/9633/0/963300a032", "title": "Electroencephalogram Emotion Recognition Based on Three-Dimensional Feature Matrix and Multivariate Neural Network", "doi": null, "abstractUrl": "/proceedings-article/cse/2022/963300a032/1Lz249wyDeM", "parentPublication": { "id": "proceedings/cse/2022/9633/0", "title": "2022 IEEE 25th International Conference on Computational Science and Engineering (CSE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ta/2022/02/08736804", "title": "From Regional to Global Brain: A Novel Hierarchical Spatial-Temporal Neural Network Model for EEG Emotion Recognition", "doi": null, "abstractUrl": "/journal/ta/2022/02/08736804/1aR7Ba3OXNm", "parentPublication": { "id": "trans/ta", "title": "IEEE Transactions on Affective Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tb/2021/05/09174858", "title": "Subject-Independent Emotion Recognition of EEG Signals Based on Dynamic Empirical Convolutional Neural Network", "doi": null, "abstractUrl": "/journal/tb/2021/05/09174858/1myqDpLgICc", "parentPublication": { "id": "trans/tb", "title": "IEEE/ACM Transactions on Computational Biology and Bioinformatics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" 
}, { "id": "trans/ta/2023/01/09321519", "title": "SparseDGCNN: Recognizing Emotion From Multichannel EEG Signals", "doi": null, "abstractUrl": "/journal/ta/2023/01/09321519/1qmbfpFrHHi", "parentPublication": { "id": "trans/ta", "title": "IEEE Transactions on Affective Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ta/2023/01/09373917", "title": "Variational Instance-Adaptive Graph for EEG Emotion Recognition", "doi": null, "abstractUrl": "/journal/ta/2023/01/09373917/1rPsZrOrPgI", "parentPublication": { "id": "trans/ta", "title": "IEEE Transactions on Affective Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ta/2023/01/09385899", "title": "EEG Feature Selection via Global Redundancy Minimization for Emotion Recognition", "doi": null, "abstractUrl": "/journal/ta/2023/01/09385899/1seiezf9cNG", "parentPublication": { "id": "trans/ta", "title": "IEEE Transactions on Affective Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ec/2022/03/09448460", "title": "Graph-Embedded Convolutional Neural Network for Image-Based EEG Emotion Recognition", "doi": null, "abstractUrl": "/journal/ec/2022/03/09448460/1ugE9joZsl2", "parentPublication": { "id": "trans/ec", "title": "IEEE Transactions on Emerging Topics in Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "09143398", "articleId": "1lxmhmOGuhq", "__typename": "AdjacentArticleType" }, "next": { "fno": "09161416", "articleId": "1m4ymmATaY8", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "1L8lujshfos", "title": "Jan.-March", "year": "2023", "issueNum": "01", "idPrefix": "ta", "pubType": "journal", "volume": "14", "label": "Jan.-March", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1qmbfpFrHHi", "doi": "10.1109/TAFFC.2021.3051332", "abstract": "Emotion recognition from EEG signals has attracted much attention in affective computing. Recently, a novel dynamic graph convolutional neural network (DGCNN) model was proposed, which simultaneously optimized the network parameters and a weighted graph <inline-formula><tex-math notation=\"LaTeX\">Z_$G$_Z</tex-math></inline-formula> characterizing the strength of functional relation between each pair of two electrodes in the EEG recording equipment. In this article, we propose a sparse DGCNN model which modifies DGCNN by imposing a sparseness constraint on <inline-formula><tex-math notation=\"LaTeX\">Z_$G$_Z</tex-math></inline-formula> and improves the emotion recognition performance. Our work is based on an important observation: the tomography study reveals that different brain regions sampled by EEG electrodes may be related to different functions of the brain and then the functional relations among electrodes are possibly highly localized and sparse. However, introducing sparseness constraint into the graph <inline-formula><tex-math notation=\"LaTeX\">Z_$G$_Z</tex-math></inline-formula> makes the loss function of sparse DGCNN non-differentiable at some singular points. To ensure that the training process of sparse DGCNN converges, we apply the forward-backward splitting method. To evaluate the performance of sparse DGCNN, we compare it with four representative recognition methods (SVM, DBN, GELM and DGCNN). 
In addition to comparing different recognition methods, our experiments also compare different features and spectral bands, including EEG features in time-frequency domain (DE, PSD, DASM, RASM, ASM and DCAU on different bands) extracted from four representative EEG datasets (SEED, DEAP, DREAMER, and CMEED). The results show that (1) sparse DGCNN has consistently better accuracy than representative methods and has a good scalability, and (2) DE, PSD, and ASM features on <inline-formula><tex-math notation=\"LaTeX\">Z_$\\gamma$_Z</tex-math></inline-formula> band convey most discriminative emotional information, and fusion of separate features and frequency bands can improve recognition performance.", "abstracts": [ { "abstractType": "Regular", "content": "Emotion recognition from EEG signals has attracted much attention in affective computing. Recently, a novel dynamic graph convolutional neural network (DGCNN) model was proposed, which simultaneously optimized the network parameters and a weighted graph <inline-formula><tex-math notation=\"LaTeX\">$G$</tex-math><alternatives><mml:math><mml:mi>G</mml:mi></mml:math><inline-graphic xlink:href=\"zheng-ieq1-3051332.gif\"/></alternatives></inline-formula> characterizing the strength of functional relation between each pair of two electrodes in the EEG recording equipment. In this article, we propose a sparse DGCNN model which modifies DGCNN by imposing a sparseness constraint on <inline-formula><tex-math notation=\"LaTeX\">$G$</tex-math><alternatives><mml:math><mml:mi>G</mml:mi></mml:math><inline-graphic xlink:href=\"zheng-ieq2-3051332.gif\"/></alternatives></inline-formula> and improves the emotion recognition performance. Our work is based on an important observation: the tomography study reveals that different brain regions sampled by EEG electrodes may be related to different functions of the brain and then the functional relations among electrodes are possibly highly localized and sparse. 
However, introducing sparseness constraint into the graph <inline-formula><tex-math notation=\"LaTeX\">$G$</tex-math><alternatives><mml:math><mml:mi>G</mml:mi></mml:math><inline-graphic xlink:href=\"zheng-ieq3-3051332.gif\"/></alternatives></inline-formula> makes the loss function of sparse DGCNN non-differentiable at some singular points. To ensure that the training process of sparse DGCNN converges, we apply the forward-backward splitting method. To evaluate the performance of sparse DGCNN, we compare it with four representative recognition methods (SVM, DBN, GELM and DGCNN). In addition to comparing different recognition methods, our experiments also compare different features and spectral bands, including EEG features in time-frequency domain (DE, PSD, DASM, RASM, ASM and DCAU on different bands) extracted from four representative EEG datasets (SEED, DEAP, DREAMER, and CMEED). The results show that (1) sparse DGCNN has consistently better accuracy than representative methods and has a good scalability, and (2) DE, PSD, and ASM features on <inline-formula><tex-math notation=\"LaTeX\">$\\gamma$</tex-math><alternatives><mml:math><mml:mi>&#x03B3;</mml:mi></mml:math><inline-graphic xlink:href=\"zheng-ieq4-3051332.gif\"/></alternatives></inline-formula> band convey most discriminative emotional information, and fusion of separate features and frequency bands can improve recognition performance.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Emotion recognition from EEG signals has attracted much attention in affective computing. Recently, a novel dynamic graph convolutional neural network (DGCNN) model was proposed, which simultaneously optimized the network parameters and a weighted graph - characterizing the strength of functional relation between each pair of two electrodes in the EEG recording equipment. 
In this article, we propose a sparse DGCNN model which modifies DGCNN by imposing a sparseness constraint on - and improves the emotion recognition performance. Our work is based on an important observation: the tomography study reveals that different brain regions sampled by EEG electrodes may be related to different functions of the brain and then the functional relations among electrodes are possibly highly localized and sparse. However, introducing sparseness constraint into the graph - makes the loss function of sparse DGCNN non-differentiable at some singular points. To ensure that the training process of sparse DGCNN converges, we apply the forward-backward splitting method. To evaluate the performance of sparse DGCNN, we compare it with four representative recognition methods (SVM, DBN, GELM and DGCNN). In addition to comparing different recognition methods, our experiments also compare different features and spectral bands, including EEG features in time-frequency domain (DE, PSD, DASM, RASM, ASM and DCAU on different bands) extracted from four representative EEG datasets (SEED, DEAP, DREAMER, and CMEED). 
The results show that (1) sparse DGCNN has consistently better accuracy than representative methods and has a good scalability, and (2) DE, PSD, and ASM features on - band convey most discriminative emotional information, and fusion of separate features and frequency bands can improve recognition performance.", "title": "SparseDGCNN: Recognizing Emotion From Multichannel EEG Signals", "normalizedTitle": "SparseDGCNN: Recognizing Emotion From Multichannel EEG Signals", "fno": "09321519", "hasPdf": true, "idPrefix": "ta", "keywords": [ "Convolutional Neural Nets", "Electroencephalography", "Emotion Recognition", "Feature Extraction", "Graph Theory", "Medical Signal Processing", "Signal Classification", "Support Vector Machines", "Affective Computing", "Different Brain Regions", "Different Recognition Methods", "Discriminative Emotional Information", "Dynamic Graph Convolutional Neural Network Model", "EEG Electrodes", "EEG Recording Equipment", "Emotion Recognition Performance", "Forward Backward Splitting Method", "Functional Relation", "Important Observation", "Loss Function", "Multichannel EEG Signals", "Network Parameters", "Representative Methods", "Representative Recognition Methods", "Sparse DGCNN Converges", "Sparse DGCNN Model", "Sparse DGCNN Nondifferentiable", "Sparseness Constraint", "Tomography Study", "Weighted Graph", "Electroencephalography", "Electrodes", "Brain Modeling", "Emotion Recognition", "Feature Extraction", "Physiology", "Convolution", "Emotion Recognition", "Multichannel EEG Signals", "Graph Convolutional Neural Network", "Sparse Constraints" ], "authors": [ { "givenName": "Guanhua", "surname": "Zhang", "fullName": "Guanhua Zhang", "affiliation": "Department of Computer Science and Technology, BNRist, MOE Key Laboratory of Pervasive Computing, Tsinghua University, Beijing, China", "__typename": "ArticleAuthorType" }, { "givenName": "Minjing", "surname": "Yu", "fullName": "Minjing Yu", "affiliation": "College of Intelligence and Computing, 
Tianjin University, Tianjin, China", "__typename": "ArticleAuthorType" }, { "givenName": "Yong-Jin", "surname": "Liu", "fullName": "Yong-Jin Liu", "affiliation": "Department of Computer Science and Technology, BNRist, MOE Key Laboratory of Pervasive Computing, Tsinghua University, Beijing, China", "__typename": "ArticleAuthorType" }, { "givenName": "Guozhen", "surname": "Zhao", "fullName": "Guozhen Zhao", "affiliation": "CAS Key Laboratory of Behavioral Science, Institute of Psychology, Beijing, P. R. China", "__typename": "ArticleAuthorType" }, { "givenName": "Dan", "surname": "Zhang", "fullName": "Dan Zhang", "affiliation": "Department of Psychology, Tsinghua University, Beijing, P.R. China", "__typename": "ArticleAuthorType" }, { "givenName": "Wenming", "surname": "Zheng", "fullName": "Wenming Zheng", "affiliation": "MOE Key Laboratory of Child Development and Learning Science, Southeast University, Nanjing, China", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "01", "pubDate": "2023-01-01 00:00:00", "pubType": "trans", "pages": "537-548", "year": "2023", "issn": "1949-3045", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "trans/tb/2019/06/08371302", "title": "Efficient Algorithms for Finding the Closest <inline-formula><tex-math notation=\"LaTeX\">Z_$l$_Z</tex-math></inline-formula>-Mers in Biological Data", "doi": null, "abstractUrl": "/journal/tb/2019/06/08371302/13rRUxlgyai", "parentPublication": { "id": "trans/tb", "title": "IEEE/ACM Transactions on Computational Biology and Bioinformatics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ec/2022/03/09827923", "title": "PMNS for Efficient Arithmetic and Small Memory Cost", "doi": null, "abstractUrl": "/journal/ec/2022/03/09827923/1EWSBFUfd6M", "parentPublication": { "id": "trans/ec", "title": 
"IEEE Transactions on Emerging Topics in Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tk/2022/01/09037115", "title": "Aligning Points to Lines: Provable Approximations", "doi": null, "abstractUrl": "/journal/tk/2022/01/09037115/1igMO6tI3Is", "parentPublication": { "id": "trans/tk", "title": "IEEE Transactions on Knowledge & Data Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ta/2022/03/09154557", "title": "An Efficient LSTM Network for Emotion Recognition From Multichannel EEG Signals", "doi": null, "abstractUrl": "/journal/ta/2022/03/09154557/1lZzEqvllxC", "parentPublication": { "id": "trans/ta", "title": "IEEE Transactions on Affective Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ta/2023/01/09286502", "title": "Multi-Target Positive Emotion Recognition From EEG Signals", "doi": null, "abstractUrl": "/journal/ta/2023/01/09286502/1poqIBFs1Ne", "parentPublication": { "id": "trans/ta", "title": "IEEE Transactions on Affective Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tk/2023/01/09452800", "title": "Core Decomposition on Uncertain Graphs Revisited", "doi": null, "abstractUrl": "/journal/tk/2023/01/09452800/1ulCu0Hdqs8", "parentPublication": { "id": "trans/tk", "title": "IEEE Transactions on Knowledge & Data Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tk/2023/03/09534476", "title": "Discovering Significant Communities on Bipartite Graphs: An Index-Based Approach", "doi": null, "abstractUrl": "/journal/tk/2023/03/09534476/1wLbitNpdle", "parentPublication": { "id": "trans/tk", "title": "IEEE Transactions on Knowledge & Data Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/td/2022/07/09585362", "title": "A 
Fast <inline-formula><tex-math notation=\"LaTeX\">Z_$f(r,k+1)/k$_Z</tex-math></inline-formula>-Diagnosis for Interconnection Networks Under MM* Model", "doi": null, "abstractUrl": "/journal/td/2022/07/09585362/1y11LlQdiGk", "parentPublication": { "id": "trans/td", "title": "IEEE Transactions on Parallel & Distributed Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/td/2022/07/09609660", "title": "FFNLFD: Fault Diagnosis of Multiprocessor Systems at Local Node With Fault-Free Neighbors Under PMC Model and MM* Model", "doi": null, "abstractUrl": "/journal/td/2022/07/09609660/1yoxL0ygC2c", "parentPublication": { "id": "trans/td", "title": "IEEE Transactions on Parallel & Distributed Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/td/2022/07/09609537", "title": "Hamiltonian Paths of <inline-formula><tex-math notation=\"LaTeX\">Z_$k$_Z</tex-math></inline-formula>-ary <inline-formula><tex-math notation=\"LaTeX\">Z_$n$_Z</tex-math></inline-formula>-cubes Avoiding Faulty Links and Passing Through Prescribed Linear Forests", "doi": null, "abstractUrl": "/journal/td/2022/07/09609537/1yoxLa2YFO0", "parentPublication": { "id": "trans/td", "title": "IEEE Transactions on Parallel & Distributed Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "09260961", "articleId": "1oNUPQ9zvZ6", "__typename": "AdjacentArticleType" }, "next": { "fno": "09226486", "articleId": "1nYoz5XBxjW", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "1L8lESxDsZO", "name": "tta202301-09321519s1-supp1-3051332.pdf", "location": "https://www.computer.org/csdl/api/v1/extra/tta202301-09321519s1-supp1-3051332.pdf", "extension": "pdf", "size": "329 kB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "12OmNyv7moB", "title": "November/December", "year": "2005", "issueNum": "06", "idPrefix": "tg", "pubType": "journal", "volume": "11", "label": "November/December", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUyp7tWN", "doi": "10.1109/TVCG.2005.106", "abstract": "This paper describes a haptic rendering algorithm for arbitrary polygonal models using a six degree-of-freedom haptic interface. The algorithm supports activities such as virtual prototyping of complex polygonal models and adding haptic interaction to virtual environments. The underlying collision system computes local extrema in distance between the model controlled by the haptic device and the rest of the scene. The haptic rendering computes forces and torques on the moving model based on these local extrema. The system is demonstrated on models with tens of thousands of triangles and developed in an accessibility application for finding collision-free paths.", "abstracts": [ { "abstractType": "Regular", "content": "This paper describes a haptic rendering algorithm for arbitrary polygonal models using a six degree-of-freedom haptic interface. The algorithm supports activities such as virtual prototyping of complex polygonal models and adding haptic interaction to virtual environments. The underlying collision system computes local extrema in distance between the model controlled by the haptic device and the rest of the scene. The haptic rendering computes forces and torques on the moving model based on these local extrema. The system is demonstrated on models with tens of thousands of triangles and developed in an accessibility application for finding collision-free paths.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "This paper describes a haptic rendering algorithm for arbitrary polygonal models using a six degree-of-freedom haptic interface. 
The algorithm supports activities such as virtual prototyping of complex polygonal models and adding haptic interaction to virtual environments. The underlying collision system computes local extrema in distance between the model controlled by the haptic device and the rest of the scene. The haptic rendering computes forces and torques on the moving model based on these local extrema. The system is demonstrated on models with tens of thousands of triangles and developed in an accessibility application for finding collision-free paths.", "title": "Six Degree-of-Freedom Haptic Rendering Using Spatialized Normal Cone Search", "normalizedTitle": "Six Degree-of-Freedom Haptic Rendering Using Spatialized Normal Cone Search", "fno": "v0661", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Index Terms Haptic I O", "Virtual Reality", "Computer Aided Design" ], "authors": [ { "givenName": "David E.", "surname": "Johnson", "fullName": "David E. Johnson", "affiliation": null, "__typename": "ArticleAuthorType" }, { "givenName": "Peter", "surname": "Willemsen", "fullName": "Peter Willemsen", "affiliation": null, "__typename": "ArticleAuthorType" }, { "givenName": "Elaine", "surname": "Cohen", "fullName": "Elaine Cohen", "affiliation": null, "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "06", "pubDate": "2005-11-01 00:00:00", "pubType": "trans", "pages": "661-670", "year": "2005", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/ieee-vis/2000/6478/0/64780020", "title": "Six Degree-of-Freedom Haptic Display", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2000/64780020/12OmNA0vnOj", "parentPublication": { "id": "proceedings/ieee-vis/2000/6478/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { 
"id": "proceedings/haptics/2003/1890/0/18900229", "title": "Six Degree-of-Freedom Haptic Rendering of Complex Polygonal Models", "doi": null, "abstractUrl": "/proceedings-article/haptics/2003/18900229/12OmNAHEpCy", "parentPublication": { "id": "proceedings/haptics/2003/1890/0", "title": "Haptic Interfaces for Virtual Environment and Teleoperator Systems, International Symposium on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/haptics/2006/0226/0/02260033", "title": "Standardized Evaluation of Haptic Rendering Systems", "doi": null, "abstractUrl": "/proceedings-article/haptics/2006/02260033/12OmNAObbDP", "parentPublication": { "id": "proceedings/haptics/2006/0226/0", "title": "2006 14th Symposium on Haptic Interfaces for Virtual Environment and Teleoperator Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/haptics/2006/0226/0/02260058", "title": "Performance Enhancement of a Haptic Arm Exoskeleton", "doi": null, "abstractUrl": "/proceedings-article/haptics/2006/02260058/12OmNscxj93", "parentPublication": { "id": "proceedings/haptics/2006/0226/0", "title": "2006 14th Symposium on Haptic Interfaces for Virtual Environment and Teleoperator Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/whc/2005/2310/0/23100247", "title": "Stable and Responsive Six-Degree-of-Freedom Haptic Manipulation Using Implicit Integration", "doi": null, "abstractUrl": "/proceedings-article/whc/2005/23100247/12OmNvnwVnx", "parentPublication": { "id": "proceedings/whc/2005/2310/0", "title": "Proceedings. First Joint Eurohaptics Conference and Symposium on Haptic Interfaces for Virtual Environment and Teleoperator Systems. 
World Haptics Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/haptics/2006/0226/0/02260023", "title": "Adaptation of Haptic Interfaces for a LabVIEW-based System Dynamics Course", "doi": null, "abstractUrl": "/proceedings-article/haptics/2006/02260023/12OmNxXl5BV", "parentPublication": { "id": "proceedings/haptics/2006/0226/0", "title": "2006 14th Symposium on Haptic Interfaces for Virtual Environment and Teleoperator Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/th/2017/02/07742944", "title": "Six Degree-of-Freedom Haptic Simulation of a Stringed Musical Instrument for Triggering Sounds", "doi": null, "abstractUrl": "/journal/th/2017/02/07742944/13rRUNvgyWy", "parentPublication": { "id": "trans/th", "title": "IEEE Transactions on Haptics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/th/2013/02/tth2013020167", "title": "Configuration-Based Optimization for Six Degree-of-Freedom Haptic Rendering for Fine Manipulation", "doi": null, "abstractUrl": "/journal/th/2013/02/tth2013020167/13rRUwjGoGd", "parentPublication": { "id": "trans/th", "title": "IEEE Transactions on Haptics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2011/11/ttg2011111714", "title": "Six Degrees-of-Freedom Haptic Interaction with Fluids", "doi": null, "abstractUrl": "/journal/tg/2011/11/ttg2011111714/13rRUxNW1Zj", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2007/03/v0458", "title": "A Six Degree-of-Freedom God-Object Method for Haptic Display of Rigid Bodies with Surface Properties", "doi": null, "abstractUrl": "/journal/tg/2007/03/v0458/13rRUyY28Yl", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions 
on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "v0649", "articleId": "13rRUB7a1fH", "__typename": "AdjacentArticleType" }, "next": { "fno": "v0671", "articleId": "13rRUwgyOja", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNC0PGNr", "title": "January-March", "year": "1997", "issueNum": "01", "idPrefix": "tg", "pubType": "journal", "volume": "3", "label": "January-March", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUILc8f6", "doi": "10.1109/2945.582332", "abstract": "Abstract—We study the topology of symmetric, second-order tensor fields. The results of this study can be readily extended to include general tensor fields through linear combination of symmetric tensor fields and vector fields. The goal is to represent their complex structure by a simple set of carefully chosen points, lines, and surfaces analogous to approaches in vector field topology. We extract topological skeletons of the eigenvector fields and use them for a compact, comprehensive description of the tensor field. Our approach is based on the premise: \"Analyze, then visualize.\"The basic constituents of tensor topology are the degenerate points, or points where eigenvalues are equal to each other. Degenerate points play a similar role as critical points in vector fields. In tensor fields we identify two kinds of elementary degenerate points, which we call wedge points and trisector points. They can combine to form more familiar singularities—such as saddles, nodes, centers, or foci. However, these are generally unstable structures in tensor fields. Based on the notions developed for 2D tensor fields, we extend the theory to include 3D degenerate points. Examples are given on the use of tensor field topology for the interpretation of physical systems.", "abstracts": [ { "abstractType": "Regular", "content": "Abstract—We study the topology of symmetric, second-order tensor fields. The results of this study can be readily extended to include general tensor fields through linear combination of symmetric tensor fields and vector fields. 
The goal is to represent their complex structure by a simple set of carefully chosen points, lines, and surfaces analogous to approaches in vector field topology. We extract topological skeletons of the eigenvector fields and use them for a compact, comprehensive description of the tensor field. Our approach is based on the premise: \"Analyze, then visualize.\"The basic constituents of tensor topology are the degenerate points, or points where eigenvalues are equal to each other. Degenerate points play a similar role as critical points in vector fields. In tensor fields we identify two kinds of elementary degenerate points, which we call wedge points and trisector points. They can combine to form more familiar singularities—such as saddles, nodes, centers, or foci. However, these are generally unstable structures in tensor fields. Based on the notions developed for 2D tensor fields, we extend the theory to include 3D degenerate points. Examples are given on the use of tensor field topology for the interpretation of physical systems.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Abstract—We study the topology of symmetric, second-order tensor fields. The results of this study can be readily extended to include general tensor fields through linear combination of symmetric tensor fields and vector fields. The goal is to represent their complex structure by a simple set of carefully chosen points, lines, and surfaces analogous to approaches in vector field topology. We extract topological skeletons of the eigenvector fields and use them for a compact, comprehensive description of the tensor field. Our approach is based on the premise: \"Analyze, then visualize.\"The basic constituents of tensor topology are the degenerate points, or points where eigenvalues are equal to each other. Degenerate points play a similar role as critical points in vector fields. 
In tensor fields we identify two kinds of elementary degenerate points, which we call wedge points and trisector points. They can combine to form more familiar singularities—such as saddles, nodes, centers, or foci. However, these are generally unstable structures in tensor fields. Based on the notions developed for 2D tensor fields, we extend the theory to include 3D degenerate points. Examples are given on the use of tensor field topology for the interpretation of physical systems.", "title": "The Topology of Symmetric, Second-Order 3D Tensor Fields", "normalizedTitle": "The Topology of Symmetric, Second-Order 3D Tensor Fields", "fno": "v0001", "hasPdf": true, "idPrefix": "tg", "keywords": [], "authors": [ { "givenName": "Lambertus", "surname": "Hesselink", "fullName": "Lambertus Hesselink", "affiliation": null, "__typename": "ArticleAuthorType" }, { "givenName": "Yuval", "surname": "Levy", "fullName": "Yuval Levy", "affiliation": null, "__typename": "ArticleAuthorType" }, { "givenName": "Yingmei", "surname": "Lavin", "fullName": "Yingmei Lavin", "affiliation": null, "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": false, "isOpenAccess": false, "issueNum": "01", "pubDate": "1997-01-01 00:00:00", "pubType": "trans", "pages": "1-11", "year": "1997", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [], "adjacentArticles": { "previous": null, "next": { "fno": "v0012", "articleId": "13rRUxC0SVZ", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "1zBamVZHyne", "title": "Jan.", "year": "2022", "issueNum": "01", "idPrefix": "tg", "pubType": "journal", "volume": "28", "label": "Jan.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1xic6oeRxnO", "doi": "10.1109/TVCG.2021.3114808", "abstract": "3D asymmetric tensor fields have found many applications in science and engineering domains, such as fluid dynamics and solid mechanics. 3D asymmetric tensors can have complex eigenvalues, which makes their analysis and visualization more challenging than 3D symmetric tensors. Existing research in tensor field visualization focuses on 2D asymmetric tensor fields and 3D symmetric tensor fields. In this paper, we address the analysis and visualization of 3D asymmetric tensor fields. We introduce six topological surfaces and one topological curve, which lead to an eigenvalue space based on the tensor mode that we define. In addition, we identify several non-topological feature surfaces that are nonetheless physically important. Included in our analysis are the realizations that triple degenerate tensors are structurally stable and form curves, unlike the case for 3D symmetric tensors fields. Furthermore, there are two different ways of measuring the relative strengths of rotation and angular deformation in the tensor fields, unlike the case for 2D asymmetric tensor fields. We extract these feature surfaces using the A-patches algorithm. However, since three of our feature surfaces are quadratic, we develop a method to extract quadratic surfaces at any given accuracy. To facilitate the analysis of eigenvector fields, we visualize a hyperstreamline as a tree stem with the other two eigenvectors represented as thorns in the real domain or the dual-eigenvectors as leaves in the complex domain. 
To demonstrate the effectiveness of our analysis and visualization, we apply our approach to datasets from solid mechanics and fluid dynamics.", "abstracts": [ { "abstractType": "Regular", "content": "3D asymmetric tensor fields have found many applications in science and engineering domains, such as fluid dynamics and solid mechanics. 3D asymmetric tensors can have complex eigenvalues, which makes their analysis and visualization more challenging than 3D symmetric tensors. Existing research in tensor field visualization focuses on 2D asymmetric tensor fields and 3D symmetric tensor fields. In this paper, we address the analysis and visualization of 3D asymmetric tensor fields. We introduce six topological surfaces and one topological curve, which lead to an eigenvalue space based on the tensor mode that we define. In addition, we identify several non-topological feature surfaces that are nonetheless physically important. Included in our analysis are the realizations that triple degenerate tensors are structurally stable and form curves, unlike the case for 3D symmetric tensors fields. Furthermore, there are two different ways of measuring the relative strengths of rotation and angular deformation in the tensor fields, unlike the case for 2D asymmetric tensor fields. We extract these feature surfaces using the A-patches algorithm. However, since three of our feature surfaces are quadratic, we develop a method to extract quadratic surfaces at any given accuracy. To facilitate the analysis of eigenvector fields, we visualize a hyperstreamline as a tree stem with the other two eigenvectors represented as thorns in the real domain or the dual-eigenvectors as leaves in the complex domain. 
To demonstrate the effectiveness of our analysis and visualization, we apply our approach to datasets from solid mechanics and fluid dynamics.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "3D asymmetric tensor fields have found many applications in science and engineering domains, such as fluid dynamics and solid mechanics. 3D asymmetric tensors can have complex eigenvalues, which makes their analysis and visualization more challenging than 3D symmetric tensors. Existing research in tensor field visualization focuses on 2D asymmetric tensor fields and 3D symmetric tensor fields. In this paper, we address the analysis and visualization of 3D asymmetric tensor fields. We introduce six topological surfaces and one topological curve, which lead to an eigenvalue space based on the tensor mode that we define. In addition, we identify several non-topological feature surfaces that are nonetheless physically important. Included in our analysis are the realizations that triple degenerate tensors are structurally stable and form curves, unlike the case for 3D symmetric tensors fields. Furthermore, there are two different ways of measuring the relative strengths of rotation and angular deformation in the tensor fields, unlike the case for 2D asymmetric tensor fields. We extract these feature surfaces using the A-patches algorithm. However, since three of our feature surfaces are quadratic, we develop a method to extract quadratic surfaces at any given accuracy. To facilitate the analysis of eigenvector fields, we visualize a hyperstreamline as a tree stem with the other two eigenvectors represented as thorns in the real domain or the dual-eigenvectors as leaves in the complex domain. 
To demonstrate the effectiveness of our analysis and visualization, we apply our approach to datasets from solid mechanics and fluid dynamics.", "title": "Feature Curves and Surfaces of 3D Asymmetric Tensor Fields", "normalizedTitle": "Feature Curves and Surfaces of 3D Asymmetric Tensor Fields", "fno": "09552927", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Data Visualisation", "Eigenvalues And Eigenfunctions", "Tensors", "Topology", "3 D Asymmetric Tensor Fields", "3 D Asymmetric Tensors", "Tensor Field Visualization", "3 D Symmetric Tensor Fields", "Tensor Mode", "Nontopological Feature Surfaces", "Triple Degenerate Tensors", "3 D Symmetric Tensors Fields", "Tensors", "Eigenvalues And Eigenfunctions", "Three Dimensional Displays", "Visualization", "Feature Extraction", "Solids", "Topology", "Tensor Field Visualization", "3 D Asymmetric Tensor Fields", "Tensor Field Topology", "Traceless Tensors", "Feature Surface Extraction", "Degenerate Surfaces", "Neutral Surfaces", "Balanced Surfaces", "Triple Degenerate Curves" ], "authors": [ { "givenName": "Shih-Hsuan", "surname": "Hung", "fullName": "Shih-Hsuan Hung", "affiliation": "School of Electrical Engineering and Computer Science, Oregon State University, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Yue", "surname": "Zhang", "fullName": "Yue Zhang", "affiliation": "School of Electrical Engineering and Computer Science, Oregon State University, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Harry", "surname": "Yeh", "fullName": "Harry Yeh", "affiliation": "School of Civil and Construction Engineering, Oregon State University, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Eugene", "surname": "Zhang", "fullName": "Eugene Zhang", "affiliation": "School of Electrical Engineering and Computer Science, Oregon State University, USA", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": 
"01", "pubDate": "2022-01-01 00:00:00", "pubType": "trans", "pages": "33-42", "year": "2022", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/ieee-vis/2005/2766/0/27660001", "title": "2D Asymmetric Tensor Analysis", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2005/27660001/12OmNAY79q0", "parentPublication": { "id": "proceedings/ieee-vis/2005/2766/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/2004/8788/0/87880313", "title": "Topological Lines in 3D Tensor Fields", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2004/87880313/12OmNApLGKA", "parentPublication": { "id": "proceedings/ieee-vis/2004/8788/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/2005/2766/0/01532770", "title": "2D asymmetric tensor analysis", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2005/01532770/12OmNCw3z9K", "parentPublication": { "id": "proceedings/ieee-vis/2005/2766/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/visual/1994/6627/0/00346326", "title": "The topology of symmetric, second-order tensor fields", "doi": null, "abstractUrl": "/proceedings-article/visual/1994/00346326/12OmNvxKu1Q", "parentPublication": { "id": "proceedings/visual/1994/6627/0", "title": "Proceedings Visualization '94", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/2005/2766/0/01532841", "title": "Topological structures of 3D tensor fields", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2005/01532841/12OmNx5GTXp", "parentPublication": { "id": "proceedings/ieee-vis/2005/2766/0", "title": 
"Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/2005/2766/0/27660070", "title": "Topological Structures of 3D Tensor Fields", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2005/27660070/12OmNxeusY2", "parentPublication": { "id": "proceedings/ieee-vis/2005/2766/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2016/03/07286850", "title": "Feature Surfaces in Symmetric Tensor Fields Based on Eigenvalue Manifold", "doi": null, "abstractUrl": "/journal/tg/2016/03/07286850/13rRUwhpBE8", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/01/08453873", "title": "Robust and Fast Extraction of 3D Symmetric Tensor Field Topology", "doi": null, "abstractUrl": "/journal/tg/2019/01/08453873/17D45WHONif", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/01/08805436", "title": "Multi-Scale Topological Analysis of Asymmetric Tensor Fields on Surfaces", "doi": null, "abstractUrl": "/journal/tg/2020/01/08805436/1cG4IGNd2Y8", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/02/09224154", "title": "Mode Surfaces of Symmetric Tensor Fields: Topological Analysis and Seamless Extraction", "doi": null, "abstractUrl": "/journal/tg/2021/02/09224154/1nV63QG11le", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "09552195", "articleId": "1xic0yNxnws", "__typename": "AdjacentArticleType" }, "next": { "fno": "09555491", "articleId": "1xjQX1LHQJi", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "1zBaTxP3Sms", "name": "ttg202201-09552927s1-supp1-3114808.mp4", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202201-09552927s1-supp1-3114808.mp4", "extension": "mp4", "size": "122 MB", "__typename": "WebExtraType" }, { "id": "1zBaTjMcxiw", "name": "ttg202201-09552927s1-supp2-3114808.pdf", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202201-09552927s1-supp2-3114808.pdf", "extension": "pdf", "size": "14.2 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "12OmNyxXlp2", "title": "July", "year": "2014", "issueNum": "07", "idPrefix": "tg", "pubType": "journal", "volume": "20", "label": "July", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUILtJmb", "doi": "10.1109/TVCG.2014.2312008", "abstract": "We introduce a web-based computing infrastructure to assist the visual integration, mining and interactive navigation of large-scale astronomy observations. Following an analysis of the application domain, we design a client-server architecture to fetch distributed image data and to partition local data into a spatial index structure that allows prefix-matching of spatial objects. In conjunction with hardware-accelerated pixel-based overlays and an online cross-registration pipeline, this approach allows the fetching, displaying, panning and zooming of gigabit panoramas of the sky in real time. To further facilitate the integration and mining of spatial and non-spatial data, we introduce interactive trend images—compact visual representations for identifying outlier objects and for studying trends within large collections of spatial objects of a given class. In a demonstration, images from three sky surveys (SDSS, FIRST and simulated LSST results) are cross-registered and integrated as overlays, allowing cross-spectrum analysis of astronomy observations. Trend images are interactively generated from catalog data and used to visually mine astronomy observations of similar type. The front-end of the infrastructure uses the web technologies WebGL and HTML5 to enable cross-platform, web-based functionality. Our approach attains interactive rendering framerates; its power and flexibility enables it to serve the needs of the astronomy community. 
Evaluation on three case studies, as well as feedback from domain experts emphasize the benefits of this visual approach to the observational astronomy field; and its potential benefits to large scale geospatial visualization in general.", "abstracts": [ { "abstractType": "Regular", "content": "We introduce a web-based computing infrastructure to assist the visual integration, mining and interactive navigation of large-scale astronomy observations. Following an analysis of the application domain, we design a client-server architecture to fetch distributed image data and to partition local data into a spatial index structure that allows prefix-matching of spatial objects. In conjunction with hardware-accelerated pixel-based overlays and an online cross-registration pipeline, this approach allows the fetching, displaying, panning and zooming of gigabit panoramas of the sky in real time. To further facilitate the integration and mining of spatial and non-spatial data, we introduce interactive trend images—compact visual representations for identifying outlier objects and for studying trends within large collections of spatial objects of a given class. In a demonstration, images from three sky surveys (SDSS, FIRST and simulated LSST results) are cross-registered and integrated as overlays, allowing cross-spectrum analysis of astronomy observations. Trend images are interactively generated from catalog data and used to visually mine astronomy observations of similar type. The front-end of the infrastructure uses the web technologies WebGL and HTML5 to enable cross-platform, web-based functionality. Our approach attains interactive rendering framerates; its power and flexibility enables it to serve the needs of the astronomy community. 
Evaluation on three case studies, as well as feedback from domain experts emphasize the benefits of this visual approach to the observational astronomy field; and its potential benefits to large scale geospatial visualization in general.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We introduce a web-based computing infrastructure to assist the visual integration, mining and interactive navigation of large-scale astronomy observations. Following an analysis of the application domain, we design a client-server architecture to fetch distributed image data and to partition local data into a spatial index structure that allows prefix-matching of spatial objects. In conjunction with hardware-accelerated pixel-based overlays and an online cross-registration pipeline, this approach allows the fetching, displaying, panning and zooming of gigabit panoramas of the sky in real time. To further facilitate the integration and mining of spatial and non-spatial data, we introduce interactive trend images—compact visual representations for identifying outlier objects and for studying trends within large collections of spatial objects of a given class. In a demonstration, images from three sky surveys (SDSS, FIRST and simulated LSST results) are cross-registered and integrated as overlays, allowing cross-spectrum analysis of astronomy observations. Trend images are interactively generated from catalog data and used to visually mine astronomy observations of similar type. The front-end of the infrastructure uses the web technologies WebGL and HTML5 to enable cross-platform, web-based functionality. Our approach attains interactive rendering framerates; its power and flexibility enables it to serve the needs of the astronomy community. 
Evaluation on three case studies, as well as feedback from domain experts emphasize the benefits of this visual approach to the observational astronomy field; and its potential benefits to large scale geospatial visualization in general.", "title": "Large-Scale Overlays and Trends: Visually Mining, Panning and Zoomingthe Observable Universe", "normalizedTitle": "Large-Scale Overlays and Trends: Visually Mining, Panning and Zoomingthe Observable Universe", "fno": "06767150", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Catalogs", "Astronomy", "Market Research", "Visualization", "Servers", "Distributed Databases", "Data Visualization", "Geographic Geospatial Visualization", "Data Fusion And Integration", "Scalability Issues" ], "authors": [ { "givenName": "Timothy Basil", "surname": "Luciani", "fullName": "Timothy Basil Luciani", "affiliation": "Department of Computer Science , University of Pittsburgh, Pittsburgh,", "__typename": "ArticleAuthorType" }, { "givenName": "Brian", "surname": "Cherinka", "fullName": "Brian Cherinka", "affiliation": "Department of Physics and Astronomy , University of Pittsburgh, Pittsburgh,", "__typename": "ArticleAuthorType" }, { "givenName": "Daniel", "surname": "Oliphant", "fullName": "Daniel Oliphant", "affiliation": "Google, Pittsburgh,", "__typename": "ArticleAuthorType" }, { "givenName": "Sean", "surname": "Myers", "fullName": "Sean Myers", "affiliation": "Department of Computer Science , University of Pittsburgh, Pittsburgh,", "__typename": "ArticleAuthorType" }, { "givenName": "W. Michael", "surname": "Wood-Vasey", "fullName": "W. Michael Wood-Vasey", "affiliation": "Department of Physics and Astronomy , University of Pittsburgh, Pittsburgh,", "__typename": "ArticleAuthorType" }, { "givenName": "Alexandros", "surname": "Labrinidis", "fullName": "Alexandros Labrinidis", "affiliation": "Department of Computer Science , University of Pittsburgh, Pittsburgh,", "__typename": "ArticleAuthorType" }, { "givenName": "G. 
Elisabeta", "surname": "Marai", "fullName": "G. Elisabeta Marai", "affiliation": "Department of Computer Science , University of Pittsburgh, Pittsburgh,", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "07", "pubDate": "2014-07-01 00:00:00", "pubType": "trans", "pages": "1048-1061", "year": "2014", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/ldav/2012/4733/0/06378961", "title": "Panning and zooming the observable universe with prefix-matching indices and pixel-based overlays", "doi": null, "abstractUrl": "/proceedings-article/ldav/2012/06378961/12OmNApu5tJ", "parentPublication": { "id": "proceedings/ldav/2012/4733/0", "title": "2012 IEEE Symposium on Large Data Analysis and Visualization (LDAV 2012)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ldav/2011/0155/0/06092337", "title": "Pixel-based overlays for navigating a galaxy of observations", "doi": null, "abstractUrl": "/proceedings-article/ldav/2011/06092337/12OmNBKEytn", "parentPublication": { "id": "proceedings/ldav/2011/0155/0", "title": "IEEE Symposium on Large Data Analysis and Visualization (LDAV 2011)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/b&b/2017/2480/0/08120424", "title": "Creating engaging science projects with netsblox", "doi": null, "abstractUrl": "/proceedings-article/b&b/2017/08120424/12OmNBf94Yi", "parentPublication": { "id": "proceedings/b&b/2017/2480/0", "title": "2017 IEEE Blocks and Beyond Workshop (B&B)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/e-science/2014/4288/2/06972103", "title": "NACluster: A Non-supervised Clustering Algorithm for Matching Multi Catalogues", "doi": null, "abstractUrl": 
"/proceedings-article/e-science/2014/06972103/12OmNrJiCRA", "parentPublication": { "id": "proceedings/e-science/2014/4288/2", "title": "2014 IEEE 10th International Conference on e-Science (e-Science)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2017/6647/0/07892264", "title": "Group immersive education with digital fulldome planetariums", "doi": null, "abstractUrl": "/proceedings-article/vr/2017/07892264/12OmNxiKs1k", "parentPublication": { "id": "proceedings/vr/2017/6647/0", "title": "2017 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cw/2016/2303/0/2303a223", "title": "StarWatch 2.0: RFI Filter for SETI Signals", "doi": null, "abstractUrl": "/proceedings-article/cw/2016/2303a223/12OmNyKrH69", "parentPublication": { "id": "proceedings/cw/2016/2303/0", "title": "2016 International Conference on Cyberworlds (CW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2012/12/ttg2012122631", "title": "Graphical Overlays: Using Layered Elements to Aid Chart Reading", "doi": null, "abstractUrl": "/journal/tg/2012/12/ttg2012122631/13rRUyfKIHJ", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2006/05/v1005", "title": "Scalable WIM: Effective Exploration in Large-scale Astrophysical Environments", "doi": null, "abstractUrl": "/journal/tg/2006/05/v1005/13rRUygBw70", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2007/06/04376206", "title": "Visualization of Cosmological Particle-Based Datasets", "doi": null, "abstractUrl": "/journal/tg/2007/06/04376206/13rRUyuegh4", 
"parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2021/3827/0/382700a035", "title": "ContourDiff: Revealing Differential Trends in Spatiotemporal Data", "doi": null, "abstractUrl": "/proceedings-article/iv/2021/382700a035/1y4oFxfTrOw", "parentPublication": { "id": "proceedings/iv/2021/3827/0", "title": "2021 25th International Conference Information Visualisation (IV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "06654163", "articleId": "13rRUygBwhK", "__typename": "AdjacentArticleType" }, "next": { "fno": "06658748", "articleId": "13rRUB7a112", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNvSbBJE", "title": "September/October", "year": "2005", "issueNum": "05", "idPrefix": "tg", "pubType": "journal", "volume": "11", "label": "September/October", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUwbaqLp", "doi": "10.1109/TVCG.2005.84", "abstract": "From our terrestrially confined viewpoint, the actual three-dimensional shape of distant astronomical objects is, in general, very challenging to determine. For one class of astronomical objects, however, spatial structure can be recovered from conventional 2D images alone. So-called planetary nebulae (PNe) exhibit pronounced symmetry characteristics that come about due to fundamental physical processes. Making use of this symmetry constraint, we present a technique to automatically recover the axisymmetric structure of many planetary nebulae from photographs. With GPU-based volume rendering driving a nonlinear optimization, we estimate the nebula's local emission density as a function of its radial and axial coordinates and we recover the orientation of the nebula relative to Earth. The optimization refines the nebula model and its orientation by minimizing the differences between the rendered image and the original astronomical image. The resulting model allows creating realistic 3D visualizations of these nebulae, for example, for planetarium shows and other educational purposes. In addition, the recovered spatial distribution of the emissive gas can help astrophysicists gain deeper insight into the formation processes of planetary nebulae.", "abstracts": [ { "abstractType": "Regular", "content": "From our terrestrially confined viewpoint, the actual three-dimensional shape of distant astronomical objects is, in general, very challenging to determine. For one class of astronomical objects, however, spatial structure can be recovered from conventional 2D images alone. 
So-called planetary nebulae (PNe) exhibit pronounced symmetry characteristics that come about due to fundamental physical processes. Making use of this symmetry constraint, we present a technique to automatically recover the axisymmetric structure of many planetary nebulae from photographs. With GPU-based volume rendering driving a nonlinear optimization, we estimate the nebula's local emission density as a function of its radial and axial coordinates and we recover the orientation of the nebula relative to Earth. The optimization refines the nebula model and its orientation by minimizing the differences between the rendered image and the original astronomical image. The resulting model allows creating realistic 3D visualizations of these nebulae, for example, for planetarium shows and other educational purposes. In addition, the recovered spatial distribution of the emissive gas can help astrophysicists gain deeper insight into the formation processes of planetary nebulae.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "From our terrestrially confined viewpoint, the actual three-dimensional shape of distant astronomical objects is, in general, very challenging to determine. For one class of astronomical objects, however, spatial structure can be recovered from conventional 2D images alone. So-called planetary nebulae (PNe) exhibit pronounced symmetry characteristics that come about due to fundamental physical processes. Making use of this symmetry constraint, we present a technique to automatically recover the axisymmetric structure of many planetary nebulae from photographs. With GPU-based volume rendering driving a nonlinear optimization, we estimate the nebula's local emission density as a function of its radial and axial coordinates and we recover the orientation of the nebula relative to Earth. 
The optimization refines the nebula model and its orientation by minimizing the differences between the rendered image and the original astronomical image. The resulting model allows creating realistic 3D visualizations of these nebulae, for example, for planetarium shows and other educational purposes. In addition, the recovered spatial distribution of the emissive gas can help astrophysicists gain deeper insight into the formation processes of planetary nebulae.", "title": "Reconstruction and Visualization of Planetary Nebulae", "normalizedTitle": "Reconstruction and Visualization of Planetary Nebulae", "fno": "v0485", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Index Terms Astronomy", "Constrained Optimization", "Volumetric Image Representation", "Scene Analysis", "Volume Visualization" ], "authors": [ { "givenName": "Marcus", "surname": "Magnor", "fullName": "Marcus Magnor", "affiliation": "IEEE", "__typename": "ArticleAuthorType" }, { "givenName": "Gordon", "surname": "Kindlmann", "fullName": "Gordon Kindlmann", "affiliation": null, "__typename": "ArticleAuthorType" }, { "givenName": "Charles", "surname": "Hansen", "fullName": "Charles Hansen", "affiliation": "IEEE", "__typename": "ArticleAuthorType" }, { "givenName": "Neb", "surname": "Duric", "fullName": "Neb Duric", "affiliation": null, "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "05", "pubDate": "2005-09-01 00:00:00", "pubType": "trans", "pages": "485-496", "year": "2005", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/mss/1995/7064/0/70640263", "title": "The Planetary Data System Web catalog interface-another use of the Planetary Data System Data Model", "doi": null, "abstractUrl": "/proceedings-article/mss/1995/70640263/12OmNBRbko0", "parentPublication": { "id": "proceedings/mss/1995/7064/0", 
"title": "Proceedings of IEEE 14th Symposium on Mass Storage Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/2004/8788/0/87880083", "title": "Constrained Inverse Volume Rendering for Planetary Nebulae", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2004/87880083/12OmNButq4x", "parentPublication": { "id": "proceedings/ieee-vis/2004/8788/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cgi/2000/0643/0/06430081", "title": "Visualization of Eclipses and Planetary Conjunction Events: The Interplay between Model Coherence, Scaling and Animation", "doi": null, "abstractUrl": "/proceedings-article/cgi/2000/06430081/12OmNqyUUww", "parentPublication": { "id": "proceedings/cgi/2000/0643/0", "title": "Computer Graphics International Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccms/2010/5642/2/05421076", "title": "The Study About the Gear Mesh Nonlinearity and Dynamic Characteristics of Planetary Gearbox", "doi": null, "abstractUrl": "/proceedings-article/iccms/2010/05421076/12OmNrNh0xM", "parentPublication": { "id": "proceedings/iccms/2010/5642/2", "title": "2010 Second International Conference on Computer Modeling and Simulation (ICCMS 2010)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icinis/2015/8221/0/8221a105", "title": "Visualization Technology and Its Application for Massive Astronomical Data Analyses", "doi": null, "abstractUrl": "/proceedings-article/icinis/2015/8221a105/12OmNwpGgN3", "parentPublication": { "id": "proceedings/icinis/2015/8221/0", "title": "2015 8th International Conference on Intelligent Networks and Intelligent Systems (ICINIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/icpads/2012/4903/0/4903a284", "title": "CUDA Acceleration of 3D Dynamic Scene Reconstruction and 3D Motion Estimation for Motion Capture", "doi": null, "abstractUrl": "/proceedings-article/icpads/2012/4903a284/12OmNxETahN", "parentPublication": { "id": "proceedings/icpads/2012/4903/0", "title": "Parallel and Distributed Systems, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2012/12/ttg2012122188", "title": "Visualization of Astronomical Nebulae via Distributed Multi-GPU Compressed Sensing Tomography", "doi": null, "abstractUrl": "/journal/tg/2012/12/ttg2012122188/13rRUwcS1CT", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cs/2012/03/mcs2012030078", "title": "Interactive Visualization and Simulation of Astronomical Nebulae", "doi": null, "abstractUrl": "/magazine/cs/2012/03/mcs2012030078/13rRUzpzeIT", "parentPublication": { "id": "mags/cs", "title": "Computing in Science & Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icectt/2020/9928/0/992800a029", "title": "Configuration Design of Multi-Stage Planetary Differential Gear Train with Double Planetary Gears and Scheme Optimization Based on Fuzzy Analytic Hierarchy Process", "doi": null, "abstractUrl": "/proceedings-article/icectt/2020/992800a029/1oa5kLueVoY", "parentPublication": { "id": "proceedings/icectt/2020/9928/0", "title": "2020 5th International Conference on Electromechanical Control Technology and Transportation (ICECTT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "01471697", "articleId": "1htxECRyK5i", "__typename": "AdjacentArticleType" }, "next": { "fno": "v0497", "articleId": "13rRUxNmPDJ", "__typename": 
"AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNyPQ4Dx", "title": "Dec.", "year": "2012", "issueNum": "12", "idPrefix": "tg", "pubType": "journal", "volume": "18", "label": "Dec.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUwcS1CT", "doi": "10.1109/TVCG.2012.281", "abstract": "The 3D visualization of astronomical nebulae is a challenging problem since only a single 2D projection is observable from our fixed vantage point on Earth. We attempt to generate plausible and realistic looking volumetric visualizations via a tomographic approach that exploits the spherical or axial symmetry prevalent in some relevant types of nebulae. Different types of symmetry can be implemented by using different randomized distributions of virtual cameras. Our approach is based on an iterative compressed sensing reconstruction algorithm that we extend with support for position-dependent volumetric regularization and linear equality constraints. We present a distributed multi-GPU implementation that is capable of reconstructing high-resolution datasets from arbitrary projections. Its robustness and scalability are demonstrated for astronomical imagery from the Hubble Space Telescope. The resulting volumetric data is visualized using direct volume rendering. Compared to previous approaches, our method preserves a much higher amount of detail and visual variety in the 3D visualization, especially for objects with only approximate symmetry.", "abstracts": [ { "abstractType": "Regular", "content": "The 3D visualization of astronomical nebulae is a challenging problem since only a single 2D projection is observable from our fixed vantage point on Earth. We attempt to generate plausible and realistic looking volumetric visualizations via a tomographic approach that exploits the spherical or axial symmetry prevalent in some relevant types of nebulae. 
Different types of symmetry can be implemented by using different randomized distributions of virtual cameras. Our approach is based on an iterative compressed sensing reconstruction algorithm that we extend with support for position-dependent volumetric regularization and linear equality constraints. We present a distributed multi-GPU implementation that is capable of reconstructing high-resolution datasets from arbitrary projections. Its robustness and scalability are demonstrated for astronomical imagery from the Hubble Space Telescope. The resulting volumetric data is visualized using direct volume rendering. Compared to previous approaches, our method preserves a much higher amount of detail and visual variety in the 3D visualization, especially for objects with only approximate symmetry.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "The 3D visualization of astronomical nebulae is a challenging problem since only a single 2D projection is observable from our fixed vantage point on Earth. We attempt to generate plausible and realistic looking volumetric visualizations via a tomographic approach that exploits the spherical or axial symmetry prevalent in some relevant types of nebulae. Different types of symmetry can be implemented by using different randomized distributions of virtual cameras. Our approach is based on an iterative compressed sensing reconstruction algorithm that we extend with support for position-dependent volumetric regularization and linear equality constraints. We present a distributed multi-GPU implementation that is capable of reconstructing high-resolution datasets from arbitrary projections. Its robustness and scalability are demonstrated for astronomical imagery from the Hubble Space Telescope. The resulting volumetric data is visualized using direct volume rendering. 
Compared to previous approaches, our method preserves a much higher amount of detail and visual variety in the 3D visualization, especially for objects with only approximate symmetry.", "title": "Visualization of Astronomical Nebulae via Distributed Multi-GPU Compressed Sensing Tomography", "normalizedTitle": "Visualization of Astronomical Nebulae via Distributed Multi-GPU Compressed Sensing Tomography", "fno": "ttg2012122188", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Rendering Computer Graphics", "Astronomy Computing", "Compressed Sensing", "Data Visualisation", "Graphics Processing Units", "Nebulae", "Direct Volume Rendering", "Astronomical Nebulae Visualization", "Distributed Multi GPU Compressed Sensing Tomography", "3 D Visualization", "Single 2 D Projection", "Earth", "Volumetric Visualizations", "Tomographic Approach", "Spherical Symmetry", "Axial Symmetry", "Compressed Sensing Reconstruction", "Position Dependent Volumetric Regularization", "Linear Equality Constraints", "Distributed Multi GPU Implementation", "High Resolution Datasets", "Astronomical Imagery", "Hubble Space Telescope", "Image Reconstruction", "Compressed Sensing", "Graphics Processing Unit", "Memory Management", "Reconstruction Algorithms", "Direct Volume Rendering", "Astronomical Visualization", "Distributed Volume Reconstruction" ], "authors": [ { "givenName": "S.", "surname": "Wenger", "fullName": "S. Wenger", "affiliation": "Inst. fur Computergraphik, Tech. Univ. Braunschweig, Braunschweig, Germany", "__typename": "ArticleAuthorType" }, { "givenName": "M.", "surname": "Ament", "fullName": "M. Ament", "affiliation": "VISUS, Univ. of Stuttgart, Stuttgart, Germany", "__typename": "ArticleAuthorType" }, { "givenName": "S.", "surname": "Guthe", "fullName": "S. Guthe", "affiliation": "Inst. fur Computergraphik, Tech. Univ. Braunschweig, Braunschweig, Germany", "__typename": "ArticleAuthorType" }, { "givenName": "D.", "surname": "Lorenz", "fullName": "D. Lorenz", "affiliation": "Inst. 
for Anal. & Algebra, Tech. Univ. Braunschweig, Braunschweig, Germany", "__typename": "ArticleAuthorType" }, { "givenName": "A.", "surname": "Tillmann", "fullName": "A. Tillmann", "affiliation": "Res. Group Optimization, Tech. Univ. Darmstadt, Darmstadt, Germany", "__typename": "ArticleAuthorType" }, { "givenName": "D.", "surname": "Weiskopf", "fullName": "D. Weiskopf", "affiliation": "VISUS, Univ. of Stuttgart, Stuttgart, Germany", "__typename": "ArticleAuthorType" }, { "givenName": "M.", "surname": "Magnor", "fullName": "M. Magnor", "affiliation": "Inst. fur Computergraphik, Tech. Univ. Braunschweig, Braunschweig, Germany", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "12", "pubDate": "2012-12-01 00:00:00", "pubType": "trans", "pages": "2188-2197", "year": "2012", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/icpr/2014/5209/0/5209b085", "title": "Effective Image Block Compressed Sensing", "doi": null, "abstractUrl": "/proceedings-article/icpr/2014/5209b085/12OmNAOKnZz", "parentPublication": { "id": "proceedings/icpr/2014/5209/0", "title": "2014 22nd International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icisce/2017/3013/0/3013a368", "title": "Image Super-Resolution Reconstruction Based on Compressed Sensing", "doi": null, "abstractUrl": "/proceedings-article/icisce/2017/3013a368/12OmNBE7Mr2", "parentPublication": { "id": "proceedings/icisce/2017/3013/0", "title": "2017 4th International Conference on Information Science and Control Engineering (ICISCE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/2004/8788/0/87880083", "title": "Constrained Inverse Volume Rendering for Planetary Nebulae", "doi": 
null, "abstractUrl": "/proceedings-article/ieee-vis/2004/87880083/12OmNButq4x", "parentPublication": { "id": "proceedings/ieee-vis/2004/8788/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2017/6067/0/08019428", "title": "Deep networks for compressed image sensing", "doi": null, "abstractUrl": "/proceedings-article/icme/2017/08019428/12OmNvkpl2L", "parentPublication": { "id": "proceedings/icme/2017/6067/0", "title": "2017 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/dcc/2017/6721/0/07923742", "title": "Deep Blind Compressed Sensing", "doi": null, "abstractUrl": "/proceedings-article/dcc/2017/07923742/12OmNy5hRcs", "parentPublication": { "id": "proceedings/dcc/2017/6721/0", "title": "2017 Data Compression Conference (DCC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmew/2014/4717/0/06890579", "title": "Distributed compressed sensing for image signals", "doi": null, "abstractUrl": "/proceedings-article/icmew/2014/06890579/12OmNyqiaX1", "parentPublication": { "id": "proceedings/icmew/2014/4717/0", "title": "2014 IEEE International Conference on Multimedia and Expo Workshops (ICMEW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icbmi/2011/4623/0/4623a019", "title": "Compressed Sensing for RF Signal Reconstruction in B-model Ultrasound Imaging", "doi": null, "abstractUrl": "/proceedings-article/icbmi/2011/4623a019/12OmNz4SOpU", "parentPublication": { "id": "proceedings/icbmi/2011/4623/0", "title": "Intelligent Computation and Bio-Medical Instrumentation, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icicta/2011/4353/2/05750930", "title": "Compressed Sensing 
of Images Using Nonuniform Sampling", "doi": null, "abstractUrl": "/proceedings-article/icicta/2011/05750930/12OmNzWx07b", "parentPublication": { "id": "icicta/2011/4353/2", "title": "Intelligent Computation Technology and Automation, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2011/04/ttg2011040487", "title": "Compressive Rendering: A Rendering Application of Compressed Sensing", "doi": null, "abstractUrl": "/journal/tg/2011/04/ttg2011040487/13rRUwInv4k", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cs/2012/03/mcs2012030078", "title": "Interactive Visualization and Simulation of Astronomical Nebulae", "doi": null, "abstractUrl": "/magazine/cs/2012/03/mcs2012030078/13rRUzpzeIT", "parentPublication": { "id": "mags/cs", "title": "Computing in Science & Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "ttg2012122178", "articleId": "13rRUyeTVhZ", "__typename": "AdjacentArticleType" }, "next": { "fno": "ttg2012122198", "articleId": "13rRUwghd4X", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "17ShDTXFgFY", "name": "ttg2012122188s1.mpg", "location": "https://www.computer.org/csdl/api/v1/extra/ttg2012122188s1.mpg", "extension": "mpg", "size": "36 MB", "__typename": "WebExtraType" }, { "id": "17ShDTXFgFZ", "name": "ttg2012122188s1.pdf", "location": "https://www.computer.org/csdl/api/v1/extra/ttg2012122188s1.pdf", "extension": "pdf", "size": "3.52 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "12OmNvDI3IO", "title": "Sept.-Oct.", "year": "2016", "issueNum": "05", "idPrefix": "so", "pubType": "magazine", "volume": "33", "label": "Sept.-Oct.", "downloadables": { "hasCover": true, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUyuegnp", "doi": "10.1109/MS.2016.113", "abstract": "Simulation software is important to our understanding of the universe. The intrinsic multiphysics aspects are spiced with a range of temporal scales and spatial scales, both of which cover more digits than are available in the standard hardware. This, together with the intrinsic chaotic nature of many physical processes, poses quite a challenge. To meet this challenge, researchers developed the Astronomical Multipurpose Environment (AMUSE). Instead of writing a suite of multiphysics solvers from scratch, AMUSE's developers coupled existing solvers for each physical ingredient. The result is a highly inhomogeneous collection of dedicated solvers with a homogeneous protocol that scales to supercomputers.", "abstracts": [ { "abstractType": "Regular", "content": "Simulation software is important to our understanding of the universe. The intrinsic multiphysics aspects are spiced with a range of temporal scales and spatial scales, both of which cover more digits than are available in the standard hardware. This, together with the intrinsic chaotic nature of many physical processes, poses quite a challenge. To meet this challenge, researchers developed the Astronomical Multipurpose Environment (AMUSE). Instead of writing a suite of multiphysics solvers from scratch, AMUSE's developers coupled existing solvers for each physical ingredient. The result is a highly inhomogeneous collection of dedicated solvers with a homogeneous protocol that scales to supercomputers.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Simulation software is important to our understanding of the universe. 
The intrinsic multiphysics aspects are spiced with a range of temporal scales and spatial scales, both of which cover more digits than are available in the standard hardware. This, together with the intrinsic chaotic nature of many physical processes, poses quite a challenge. To meet this challenge, researchers developed the Astronomical Multipurpose Environment (AMUSE). Instead of writing a suite of multiphysics solvers from scratch, AMUSE's developers coupled existing solvers for each physical ingredient. The result is a highly inhomogeneous collection of dedicated solvers with a homogeneous protocol that scales to supercomputers.", "title": "Creating the Virtual Universe", "normalizedTitle": "Creating the Virtual Universe", "fno": "mso2016050025", "hasPdf": true, "idPrefix": "so", "keywords": [ "Astronomy Computing", "Digital Simulation", "Virtual Universe", "Simulation Software", "Astronomical Multipurpose Environment", "AMUSE", "Multiphysics Solver", "Graphics Processing Units", "Computational Modeling", "Software Development", "Astrophysics", "Software Engineering", "Astronomy", "Simulation Software", "Astronomical Multipurpose Environment", "AMUSE", "Astrophysics", "Astrophysics Simulations", "Software Development", "Software Engineering" ], "authors": [ { "givenName": "Simon Portegies", "surname": "Zwart", "fullName": "Simon Portegies Zwart", "affiliation": "Leiden Observatory", "__typename": "ArticleAuthorType" }, { "givenName": "Jeroen", "surname": "Bedorf", "fullName": "Jeroen Bedorf", "affiliation": "Leiden Observatory", "__typename": "ArticleAuthorType" }, { "givenName": "Michiel", "surname": "van Genuchten", "fullName": "Michiel van Genuchten", "affiliation": "VitalHealth Software", "__typename": "ArticleAuthorType" }, { "givenName": "Les", "surname": "Hatton", "fullName": "Les Hatton", "affiliation": "Oakwood Computing Associates", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, 
"isOpenAccess": false, "issueNum": "05", "pubDate": "2016-09-01 00:00:00", "pubType": "mags", "pages": "25-29", "year": "2016", "issn": "0740-7459", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/ccgrid/2013/4996/0/4996a202", "title": "The Astronomical Multipurpose Software Environment and the Ecology of Star Clusters", "doi": null, "abstractUrl": "/proceedings-article/ccgrid/2013/4996a202/12OmNA0vo0L", "parentPublication": { "id": "proceedings/ccgrid/2013/4996/0", "title": "Cluster Computing and the Grid, IEEE International Symposium on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icppw/2014/5615/0/5615a403", "title": "HLanc: Heterogeneous Parallel Implementation of the Implicitly Restarted Lanczos Method", "doi": null, "abstractUrl": "/proceedings-article/icppw/2014/5615a403/12OmNAR1aTg", "parentPublication": { "id": "proceedings/icppw/2014/5615/0", "title": "2014 43nd International Conference on Parallel Processing Workshops (ICCPW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/isvd/2007/2869/0/28690230", "title": "Voronoi Tessellations and the Cosmic Web: Spatial Patterns and Clustering across the Universe", "doi": null, "abstractUrl": "/proceedings-article/isvd/2007/28690230/12OmNz2TCEY", "parentPublication": { "id": "proceedings/isvd/2007/2869/0", "title": "4th International Symposium on Voronoi Diagrams in Science and Engineering (ISVD 2007)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cs/2009/02/mcs2009020015", "title": "Advances in Computational Astrophysics", "doi": null, "abstractUrl": "/magazine/cs/2009/02/mcs2009020015/13rRUILtJuO", "parentPublication": { "id": "mags/cs", "title": "Computing in Science & Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"trans/tg/2007/01/v0108", "title": "A Transparently Scalable Visualization Architecture for Exploring the Universe", "doi": null, "abstractUrl": "/journal/tg/2007/01/v0108/13rRUwIF69b", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sc/2018/8384/0/838400a697", "title": "Simulating the Weak Death of the Neutron in a Femtoscale Universe with Near-Exascale Computing", "doi": null, "abstractUrl": "/proceedings-article/sc/2018/838400a697/17D45WrVg5L", "parentPublication": { "id": "proceedings/sc/2018/8384/0", "title": "2018 SC18: The International Conference for High Performance Computing, Networking, Storage, and Analysis (SC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/e-science/2018/9156/0/915600a374", "title": "Development of the OMUSE/AMUSE Modeling System", "doi": null, "abstractUrl": "/proceedings-article/e-science/2018/915600a374/17D45Wuc39M", "parentPublication": { "id": "proceedings/e-science/2018/9156/0", "title": "2018 IEEE 14th International Conference on e-Science (e-Science)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2022/8739/0/873900e467", "title": "Trust Your IMU: Consequences of Ignoring the IMU Drift", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2022/873900e467/1G56HJ5jZ0Q", "parentPublication": { "id": "proceedings/cvprw/2022/8739/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/se4science/2019/2276/0/227600a001", "title": "Creating Stable Productive CSE Software Development and Integration Processes in Unstable Environments on the Path to Exascale", "doi": null, "abstractUrl": 
"/proceedings-article/se4science/2019/227600a001/1d5ktuBJGAU", "parentPublication": { "id": "proceedings/se4science/2019/2276/0", "title": "2019 IEEE/ACM 14th International Workshop on Software Engineering for Science (SE4Science)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cs/2021/03/09459442", "title": "Insights From the Software Design of a Multiphysics Multicomponent Scientific Code", "doi": null, "abstractUrl": "/magazine/cs/2021/03/09459442/1uvzZfLf4U8", "parentPublication": { "id": "mags/cs", "title": "Computing in Science & Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "mso2016050022", "articleId": "13rRUwhpBCb", "__typename": "AdjacentArticleType" }, "next": { "fno": "mso2016050030", "articleId": "13rRUy3gn5D", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNCbCrUN", "title": "Dec.", "year": "2013", "issueNum": "12", "idPrefix": "tg", "pubType": "journal", "volume": "19", "label": "Dec.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUxD9gXI", "doi": "10.1109/TVCG.2013.127", "abstract": "We present a framework for acuity-driven visualization of super-high resolution image data on gigapixel displays. Tiled display walls offer a large workspace that can be navigated physically by the user. Based on head tracking information, the physical characteristics of the tiled display and the formulation of visual acuity, we guide an out-of-core gigapixel rendering scheme by delivering high levels of detail only in places where it is perceivable to the user. We apply this principle to gigapixel image rendering through adaptive level of detail selection. Additionally, we have developed an acuity-driven tessellation scheme for high-quality Focus-and-Context (F+C) lenses that significantly reduces visual artifacts while accurately capturing the underlying lens function. We demonstrate this framework on the Reality Deck, an immersive gigapixel display. We present the results of a user study designed to quantify the impact of our acuity-driven rendering optimizations in the visual exploration process. We discovered no evidence suggesting a difference in search task performance between our framework and naive rendering of gigapixel resolution data, while realizing significant benefits in terms of data transfer overhead. Additionally, we show that our acuity-driven tessellation scheme offers substantially increased frame rates when compared to naive pre-tessellation, while providing indistinguishable image quality.", "abstracts": [ { "abstractType": "Regular", "content": "We present a framework for acuity-driven visualization of super-high resolution image data on gigapixel displays. 
Tiled display walls offer a large workspace that can be navigated physically by the user. Based on head tracking information, the physical characteristics of the tiled display and the formulation of visual acuity, we guide an out-of-core gigapixel rendering scheme by delivering high levels of detail only in places where it is perceivable to the user. We apply this principle to gigapixel image rendering through adaptive level of detail selection. Additionally, we have developed an acuity-driven tessellation scheme for high-quality Focus-and-Context (F+C) lenses that significantly reduces visual artifacts while accurately capturing the underlying lens function. We demonstrate this framework on the Reality Deck, an immersive gigapixel display. We present the results of a user study designed to quantify the impact of our acuity-driven rendering optimizations in the visual exploration process. We discovered no evidence suggesting a difference in search task performance between our framework and naive rendering of gigapixel resolution data, while realizing significant benefits in terms of data transfer overhead. Additionally, we show that our acuity-driven tessellation scheme offers substantially increased frame rates when compared to naive pre-tessellation, while providing indistinguishable image quality.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We present a framework for acuity-driven visualization of super-high resolution image data on gigapixel displays. Tiled display walls offer a large workspace that can be navigated physically by the user. Based on head tracking information, the physical characteristics of the tiled display and the formulation of visual acuity, we guide an out-of-core gigapixel rendering scheme by delivering high levels of detail only in places where it is perceivable to the user. We apply this principle to gigapixel image rendering through adaptive level of detail selection. 
Additionally, we have developed an acuity-driven tessellation scheme for high-quality Focus-and-Context (F+C) lenses that significantly reduces visual artifacts while accurately capturing the underlying lens function. We demonstrate this framework on the Reality Deck, an immersive gigapixel display. We present the results of a user study designed to quantify the impact of our acuity-driven rendering optimizations in the visual exploration process. We discovered no evidence suggesting a difference in search task performance between our framework and naive rendering of gigapixel resolution data, while realizing significant benefits in terms of data transfer overhead. Additionally, we show that our acuity-driven tessellation scheme offers substantially increased frame rates when compared to naive pre-tessellation, while providing indistinguishable image quality.", "title": "Acuity-Driven Gigapixel Visualization", "normalizedTitle": "Acuity-Driven Gigapixel Visualization", "fno": "ttg2013122886", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Lenses", "Rendering Computer Graphics", "Pixels", "Data Visualization", "Image Resolution", "Context Awareness", "Reality Deck", "Lenses", "Rendering Computer Graphics", "Pixels", "Data Visualization", "Image Resolution", "Context Awareness", "Gigapixel Display", "Gigapixel Visualization", "Visual Acuity", "Focus And Context" ], "authors": [ { "givenName": "Charilaos", "surname": "Papadopoulos", "fullName": "Charilaos Papadopoulos", "affiliation": "Stony Brook Univ., Stony Brook, NY, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Arie E.", "surname": "Kaufman", "fullName": "Arie E. 
Kaufman", "affiliation": "Stony Brook Univ., Stony Brook, NY, USA", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "12", "pubDate": "2013-12-01 00:00:00", "pubType": "trans", "pages": "2886-2895", "year": "2013", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/icitcs/2014/6541/0/07021710", "title": "A Height-Map Based Terrain Rendering with Tessellation Hardware", "doi": null, "abstractUrl": "/proceedings-article/icitcs/2014/07021710/12OmNBE7Ms6", "parentPublication": { "id": "proceedings/icitcs/2014/6541/0", "title": "2014 International Conference on IT Convergence and Security (ICITCS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrais/1996/7295/0/72950103", "title": "Gaze-directed Adaptive Rendering for Interacting with Virtual Space", "doi": null, "abstractUrl": "/proceedings-article/vrais/1996/72950103/12OmNBKW9z2", "parentPublication": { "id": "proceedings/vrais/1996/7295/0", "title": "Virtual Reality Annual International Symposium", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccp/2011/707/0/05753115", "title": "Gigapixel Computational Imaging", "doi": null, "abstractUrl": "/proceedings-article/iccp/2011/05753115/12OmNzgwmPd", "parentPublication": { "id": "proceedings/iccp/2011/707/0", "title": "IEEE International Conference on Computational Photography (ICCP)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2015/01/mcg2015010033", "title": "The Reality Deck--an Immersive Gigapixel Display", "doi": null, "abstractUrl": "/magazine/cg/2015/01/mcg2015010033/13rRUzphDsw", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, 
"__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600e643", "title": "Speed up Object Detection on Gigapixel-level Images with Patch Arrangement", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600e643/1H1j2J77tJe", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmew/2019/9214/0/921400a426", "title": "Gigapixel-Level Image Crowd Counting using Csrnet", "doi": null, "abstractUrl": "/proceedings-article/icmew/2019/921400a426/1cJ0DlWmsak", "parentPublication": { "id": "proceedings/icmew/2019/9214/0", "title": "2019 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccp/2020/5230/0/09105244", "title": "Multiscale-VR: Multiscale Gigapixel 3D Panoramic Videography for Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/iccp/2020/09105244/1kkJTsDwBtS", "parentPublication": { "id": "proceedings/iccp/2020/5230/0", "title": "2020 IEEE International Conference on Computational Photography (ICCP)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2020/7168/0/716800d265", "title": "PANDA: A Gigapixel-Level Human-Centric Video Dataset", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2020/716800d265/1m3nTNCstNe", "parentPublication": { "id": "proceedings/cvpr/2020/7168/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/11/09523836", "title": "Foveated Photon Mapping", "doi": null, "abstractUrl": "/journal/tg/2021/11/09523836/1wpquR1qr1S", "parentPublication": { "id": "trans/tg", "title": "IEEE 
Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2022/11/09547729", "title": "GigaMVS: A Benchmark for Ultra-Large-Scale Gigapixel-Level 3D Reconstruction", "doi": null, "abstractUrl": "/journal/tp/2022/11/09547729/1x9Tv1542aY", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "ttg2013122878", "articleId": "13rRUwbs2gt", "__typename": "AdjacentArticleType" }, "next": { "fno": "ttg2013122896", "articleId": "13rRUwvT9gt", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNrkBwzc", "title": "Nov.-Dec.", "year": "2012", "issueNum": "06", "idPrefix": "cs", "pubType": "magazine", "volume": "14", "label": "Nov.-Dec.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUNvya4V", "doi": "10.1109/MCSE.2012.118", "abstract": "Computational dye advection helps engineers understand fluid dynamics simulations by providing interactive tools that mimic physical experiments.", "abstracts": [ { "abstractType": "Regular", "content": "Computational dye advection helps engineers understand fluid dynamics simulations by providing interactive tools that mimic physical experiments.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Computational dye advection helps engineers understand fluid dynamics simulations by providing interactive tools that mimic physical experiments.", "title": "Dye-Based Flow Visualization", "normalizedTitle": "Dye-Based Flow Visualization", "fno": "mcs2012060080", "hasPdf": true, "idPrefix": "cs", "keywords": [ "Scientific Computing", "Computational Flow Visualization", "Dye Based Flow", "Flow Tracers", "Computational Dye Advection", "Fluid Dynamics" ], "authors": [ { "givenName": "Grzegorz K.", "surname": "Karch", "fullName": "Grzegorz K. Karch", "affiliation": "University of Stuttgart", "__typename": "ArticleAuthorType" }, { "givenName": "Filip", "surname": "Sadlo", "fullName": "Filip Sadlo", "affiliation": "University of Stuttgart", "__typename": "ArticleAuthorType" }, { "givenName": "Daniel", "surname": "Weiskopf", "fullName": "Daniel Weiskopf", "affiliation": "University of Stuttgart", "__typename": "ArticleAuthorType" }, { "givenName": "Charles D.", "surname": "Hansen", "fullName": "Charles D. 
Hansen", "affiliation": "University of Utah", "__typename": "ArticleAuthorType" }, { "givenName": "Guo-Shi", "surname": "Li", "fullName": "Guo-Shi Li", "affiliation": "Exxon Mobil Upstream Research Company", "__typename": "ArticleAuthorType" }, { "givenName": "Thomas", "surname": "Ertl", "fullName": "Thomas Ertl", "affiliation": "University of Stuttgart", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "06", "pubDate": "2012-11-01 00:00:00", "pubType": "mags", "pages": "80-86", "year": "2012", "issn": "1521-9615", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/sc/1988/0882/0/00044646", "title": "Interactive scientific visualization and parallel display techniques", "doi": null, "abstractUrl": "/proceedings-article/sc/1988/00044646/12OmNAZx8Me", "parentPublication": { "id": "proceedings/sc/1988/0882/0", "title": "SC Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/2005/2766/0/27660082", "title": "Texture-Based Visualization of Uncertainty in Flow Fields", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2005/27660082/12OmNB9KHue", "parentPublication": { "id": "proceedings/ieee-vis/2005/2766/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/2000/6478/0/64780026", "title": "Hardware-Accelerated Texture Advection for Unsteady Flow Visualization", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2000/64780026/12OmNCbCrWR", "parentPublication": { "id": "proceedings/ieee-vis/2000/6478/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/2003/2030/0/20300031", "title": "3D IBFV: Hardware-Accelerated 
3D Flow Visualization", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2003/20300031/12OmNrMZpBU", "parentPublication": { "id": "proceedings/ieee-vis/2003/2030/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/2003/2030/0/20300018", "title": "Image Space Based Visualization of Unsteady Flow on Surfaces", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2003/20300018/12OmNxH9Xhw", "parentPublication": { "id": "proceedings/ieee-vis/2003/2030/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/pacificvis/2010/6685/0/05429599", "title": "Physically-based interactive schlieren flow visualization", "doi": null, "abstractUrl": "/proceedings-article/pacificvis/2010/05429599/12OmNyLiuAd", "parentPublication": { "id": "proceedings/pacificvis/2010/6685/0", "title": "2010 IEEE Pacific Visualization Symposium (PacificVis 2010)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/2005/2766/0/01532853", "title": "Texture-based visualization of uncertainty in flow fields", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2005/01532853/12OmNzXWZGL", "parentPublication": { "id": "proceedings/ieee-vis/2005/2766/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2008/05/ttg2008051067", "title": "Flow Charts: Visualization of Vector Fields on Arbitrary Surfaces", "doi": null, "abstractUrl": "/journal/tg/2008/05/ttg2008051067/13rRUwhpBE3", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2002/03/v0211", "title": "Lagrangian-Eulerian Advection of Noise and 
Dye Textures for Unsteady Flow Visualization", "doi": null, "abstractUrl": "/journal/tg/2002/03/v0211/13rRUxD9h4X", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2007/01/v0135", "title": "Advections with Significantly Reduced Dissipation and Diffusion", "doi": null, "abstractUrl": "/journal/tg/2007/01/v0135/13rRUygT7y2", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "mcs2012060076", "articleId": "13rRUwgQpym", "__typename": "AdjacentArticleType" }, "next": { "fno": "mcs2012060088", "articleId": "13rRUwh80MH", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNrJRP28", "title": "Nov.-Dec.", "year": "2012", "issueNum": "06", "idPrefix": "cg", "pubType": "magazine", "volume": "32", "label": "Nov.-Dec.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUygT7Ap", "doi": "10.1109/MCG.2012.110", "abstract": "The Beaming project recreates, virtually, a real environment; using immersive VR, remote participants can visit the virtual model and interact with the people in the real environment. The real environment doesn't need extensive equipment and can be a space such as an office or meeting room, domestic environment, or social space.", "abstracts": [ { "abstractType": "Regular", "content": "The Beaming project recreates, virtually, a real environment; using immersive VR, remote participants can visit the virtual model and interact with the people in the real environment. The real environment doesn't need extensive equipment and can be a space such as an office or meeting room, domestic environment, or social space.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "The Beaming project recreates, virtually, a real environment; using immersive VR, remote participants can visit the virtual model and interact with the people in the real environment. 
The real environment doesn't need extensive equipment and can be a space such as an office or meeting room, domestic environment, or social space.", "title": "Beaming: An Asymmetric Telepresence System", "normalizedTitle": "Beaming: An Asymmetric Telepresence System", "fno": "mcg2012060010", "hasPdf": true, "idPrefix": "cg", "keywords": [ "Virtual Environments", "Avatars", "Haptic Interfaces", "Robots", "Human Computer Interaction", "Visualization", "Multimedia Communication", "Virtual Reality", "Collaboration", "Robotics", "Virtual Environments", "Avatars", "Haptic Interfaces", "Robots", "Human Computer Interaction", "Visualization", "Multimedia Communication", "Virtual Reality", "Collaboration", "Computer Graphics", "Beaming", "Collaborative Virtual Reality", "Virtual Reality", "Haptic Devices", "Human Computer Interfaces", "Head Mounted Displays", "Sphere Avatar", "Kinect", "Visualization", "Encountered Type Haptic Devices", "Spatial Interfaces", "Multimedia" ], "authors": [ { "givenName": "A.", "surname": "Steed", "fullName": "A. Steed", "affiliation": null, "__typename": "ArticleAuthorType" }, { "givenName": "W.", "surname": "Steptoe", "fullName": "W. Steptoe", "affiliation": null, "__typename": "ArticleAuthorType" }, { "givenName": "W.", "surname": "Oyekoya", "fullName": "W. Oyekoya", "affiliation": null, "__typename": "ArticleAuthorType" }, { "givenName": "F.", "surname": "Pece", "fullName": "F. Pece", "affiliation": null, "__typename": "ArticleAuthorType" }, { "givenName": "T.", "surname": "Weyrich", "fullName": "T. Weyrich", "affiliation": null, "__typename": "ArticleAuthorType" }, { "givenName": "J.", "surname": "Kautz", "fullName": "J. Kautz", "affiliation": null, "__typename": "ArticleAuthorType" }, { "givenName": "D.", "surname": "Friedman", "fullName": "D. Friedman", "affiliation": null, "__typename": "ArticleAuthorType" }, { "givenName": "A.", "surname": "Peer", "fullName": "A. 
Peer", "affiliation": null, "__typename": "ArticleAuthorType" }, { "givenName": "M.", "surname": "Solazzi", "fullName": "M. Solazzi", "affiliation": null, "__typename": "ArticleAuthorType" }, { "givenName": "F.", "surname": "Tecchia", "fullName": "F. Tecchia", "affiliation": null, "__typename": "ArticleAuthorType" }, { "givenName": "M.", "surname": "Bergamasco", "fullName": "M. Bergamasco", "affiliation": null, "__typename": "ArticleAuthorType" }, { "givenName": "M.", "surname": "Slater", "fullName": "M. Slater", "affiliation": null, "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "06", "pubDate": "2012-11-01 00:00:00", "pubType": "mags", "pages": "10-17", "year": "2012", "issn": "0272-1716", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/haptics/2002/1489/0/14890010", "title": "Comparison of Human Haptic Size Identification and Discrimination Performance in Real and Simulated Environments", "doi": null, "abstractUrl": "/proceedings-article/haptics/2002/14890010/12OmNBzRNrh", "parentPublication": { "id": "proceedings/haptics/2002/1489/0", "title": "Haptic Interfaces for Virtual Environment and Teleoperator Systems, International Symposium on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vs-games/2017/5812/0/08056613", "title": "An immersive virtual environment for collaborative geovisualization", "doi": null, "abstractUrl": "/proceedings-article/vs-games/2017/08056613/12OmNvCi45l", "parentPublication": { "id": "proceedings/vs-games/2017/5812/0", "title": "2017 9th International Conference on Virtual Worlds and Games for Serious Applications (VS-Games)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2017/6647/0/07892233", "title": "VRRobot: Robot actuated props in an infinite 
virtual environment", "doi": null, "abstractUrl": "/proceedings-article/vr/2017/07892233/12OmNwkhTh6", "parentPublication": { "id": "proceedings/vr/2017/6647/0", "title": "2017 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2015/1727/0/07223402", "title": "Collaborative telepresence workspaces for space operation and science", "doi": null, "abstractUrl": "/proceedings-article/vr/2015/07223402/12OmNyvY9ys", "parentPublication": { "id": "proceedings/vr/2015/1727/0", "title": "2015 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2018/04/08283639", "title": "The Critical Role of Self-Contact for Embodiment in Virtual Reality", "doi": null, "abstractUrl": "/journal/tg/2018/04/08283639/13rRUwI5TXC", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2018/11/08466636", "title": "Superman vs Giant: A Study on Spatial Perception for a Multi-Scale Mixed Reality Flying Telepresence Interface", "doi": null, "abstractUrl": "/journal/tg/2018/11/08466636/14M3DZXcLXa", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2022/5325/0/532500a576", "title": "Leaning-Based Control of an Immersive-Telepresence Robot", "doi": null, "abstractUrl": "/proceedings-article/ismar/2022/532500a576/1JrR64XrANW", "parentPublication": { "id": "proceedings/ismar/2022/5325/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08797716", "title": "Reconciling Being in-Control vs. 
Being Helped for the Execution of Complex Movements in VR", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08797716/1cJ1dFOKU3m", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2020/5608/0/09089660", "title": "Determining Peripersonal Space Boundaries and Their Plasticity in Relation to Object and Agent Characteristics in an Immersive Virtual Environment", "doi": null, "abstractUrl": "/proceedings-article/vr/2020/09089660/1jIxcGTb5Dy", "parentPublication": { "id": "proceedings/vr/2020/5608/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2021/1298/0/129800a052", "title": "Assessing Telepresence, Social Presence and Stress Response in a Virtual Reality Store", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2021/129800a052/1yfxI1Gbi4o", "parentPublication": { "id": "proceedings/ismar-adjunct/2021/1298/0", "title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "mcg2012060006", "articleId": "13rRUxBrGjs", "__typename": "AdjacentArticleType" }, "next": { "fno": "mcg2012060018", "articleId": "13rRUxZRbre", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNvvc5OL", "title": "April", "year": "2013", "issueNum": "04", "idPrefix": "tg", "pubType": "journal", "volume": "19", "label": "April", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUzp02ok", "doi": "10.1109/TVCG.2013.33", "abstract": "We present a novel immersive telepresence system that allows distributed groups of users to meet in a shared virtual 3D world. Our approach is based on two coupled projection-based multi-user setups, each providing multiple users with perspectively correct stereoscopic images. At each site the users and their local interaction space are continuously captured using a cluster of registered depth and color cameras. The captured 3D information is transferred to the respective other location, where the remote participants are virtually reconstructed. We explore the use of these virtual user representations in various interaction scenarios in which local and remote users are face-to-face, side-by-side or decoupled. Initial experiments with distributed user groups indicate the mutual understanding of pointing and tracing gestures independent of whether they were performed by local or remote participants. Our users were excited about the new possibilities of jointly exploring a virtual city, where they relied on a world-in-miniature metaphor for mutual awareness of their respective locations.", "abstracts": [ { "abstractType": "Regular", "content": "We present a novel immersive telepresence system that allows distributed groups of users to meet in a shared virtual 3D world. Our approach is based on two coupled projection-based multi-user setups, each providing multiple users with perspectively correct stereoscopic images. At each site the users and their local interaction space are continuously captured using a cluster of registered depth and color cameras. 
The captured 3D information is transferred to the respective other location, where the remote participants are virtually reconstructed. We explore the use of these virtual user representations in various interaction scenarios in which local and remote users are face-to-face, side-by-side or decoupled. Initial experiments with distributed user groups indicate the mutual understanding of pointing and tracing gestures independent of whether they were performed by local or remote participants. Our users were excited about the new possibilities of jointly exploring a virtual city, where they relied on a world-in-miniature metaphor for mutual awareness of their respective locations.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We present a novel immersive telepresence system that allows distributed groups of users to meet in a shared virtual 3D world. Our approach is based on two coupled projection-based multi-user setups, each providing multiple users with perspectively correct stereoscopic images. At each site the users and their local interaction space are continuously captured using a cluster of registered depth and color cameras. The captured 3D information is transferred to the respective other location, where the remote participants are virtually reconstructed. We explore the use of these virtual user representations in various interaction scenarios in which local and remote users are face-to-face, side-by-side or decoupled. Initial experiments with distributed user groups indicate the mutual understanding of pointing and tracing gestures independent of whether they were performed by local or remote participants. 
Our users were excited about the new possibilities of jointly exploring a virtual city, where they relied on a world-in-miniature metaphor for mutual awareness of their respective locations.", "title": "Immersive Group-to-Group Telepresence", "normalizedTitle": "Immersive Group-to-Group Telepresence", "fno": "ttg2013040616", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Calibration", "Cameras", "Servers", "Streaming Media", "Image Reconstruction", "Image Color Analysis", "Virtual Reality", "3 D Capture", "Multi User Virtual Reality", "Telepresence" ], "authors": [ { "givenName": "S.", "surname": "Beck", "fullName": "S. Beck", "affiliation": null, "__typename": "ArticleAuthorType" }, { "givenName": "A.", "surname": "Kunert", "fullName": "A. Kunert", "affiliation": null, "__typename": "ArticleAuthorType" }, { "givenName": "A.", "surname": "Kulik", "fullName": "A. Kulik", "affiliation": null, "__typename": "ArticleAuthorType" }, { "givenName": "B.", "surname": "Froehlich", "fullName": "B. 
Froehlich", "affiliation": null, "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "04", "pubDate": "2013-04-01 00:00:00", "pubType": "trans", "pages": "616-625", "year": "2013", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/ismar-adjunct/2017/6327/0/6327a218", "title": "[POSTER] CoVAR: Mixed-Platform Remote Collaborative Augmented and Virtual Realities System with Shared Collaboration Cues", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2017/6327a218/12OmNzV70Kh", "parentPublication": { "id": "proceedings/ismar-adjunct/2017/6327/0", "title": "2017 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bigmm/2017/6549/0/07966773", "title": "RGB-D Camera Network Calibration and Streaming for 3D Telepresence in Large Environment", "doi": null, "abstractUrl": "/proceedings-article/bigmm/2017/07966773/12OmNzcPADw", "parentPublication": { "id": "proceedings/bigmm/2017/6549/0", "title": "2017 IEEE Third International Conference on Multimedia Big Data (BigMM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ism/2014/4311/0/4311a175", "title": "An Immersive Telepresence System Using a Real-Time Omnidirectional Camera and a Virtual Reality Head-Mounted Display", "doi": null, "abstractUrl": "/proceedings-article/ism/2014/4311a175/12OmNzn38Pl", "parentPublication": { "id": "proceedings/ism/2014/4311/0", "title": "2014 IEEE International Symposium on Multimedia (ISM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2017/03/07792698", "title": "JackIn Head: Immersive Visual Telepresence System with Omnidirectional Wearable Camera", "doi": 
null, "abstractUrl": "/journal/tg/2017/03/07792698/13rRUx0geq0", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/co/2014/07/mco2014070046", "title": "Immersive 3D Telepresence", "doi": null, "abstractUrl": "/magazine/co/2014/07/mco2014070046/13rRUy0ZzW3", "parentPublication": { "id": "mags/co", "title": "Computer", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2022/5365/0/536500a524", "title": "Synthesizing Novel Spaces for Remote Telepresence Experiences", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a524/1J7WaFB7xNC", "parentPublication": { "id": "proceedings/ismar-adjunct/2022/5365/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wevr/2019/4050/0/08809591", "title": "Immersive Gastronomic Experience with Distributed Reality", "doi": null, "abstractUrl": "/proceedings-article/wevr/2019/08809591/1cI62dVXsB2", "parentPublication": { "id": "proceedings/wevr/2019/4050/0", "title": "2019 IEEE 5th Workshop on Everyday Virtual Reality (WEVR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2020/6532/0/09090433", "title": "Virtual Tour: An Immersive Low Cost Telepresence System", "doi": null, "abstractUrl": "/proceedings-article/vrw/2020/09090433/1jIxrSY8cZa", "parentPublication": { "id": "proceedings/vrw/2020/6532/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/07/09257094", "title": "Output-Sensitive Avatar Representations for Immersive Telepresence", 
"doi": null, "abstractUrl": "/journal/tg/2022/07/09257094/1oFCABrJUmA", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2021/1298/0/129800a451", "title": "The Owl: Immersive Telepresence Communication for Hybrid Conferences", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2021/129800a451/1yeQG4fi6Dm", "parentPublication": { "id": "proceedings/ismar-adjunct/2021/1298/0", "title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "ttg2013040606", "articleId": "13rRUILLkvq", "__typename": "AdjacentArticleType" }, "next": { "fno": "ttg2013040626", "articleId": "13rRUwd9CG2", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "17ShDTXnFvO", "name": "ttg2013040616s1.mp4", "location": "https://www.computer.org/csdl/api/v1/extra/ttg2013040616s1.mp4", "extension": "mp4", "size": "26.3 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "12OmNvA1hrM", "title": "May/June", "year": "2007", "issueNum": "03", "idPrefix": "cg", "pubType": "magazine", "volume": "27", "label": "May/June", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUy3gmZO", "doi": "10.1109/MCG.2007.66", "abstract": "A novel texture synthesis method produces high-quality results by introducing a multiscaled texture similarity measurement. Compared with other multiscaled methods, this approach focuses on measuring texture properties at different scales ranging from local to global using an adaptive similarity metric that accounts for texture variations across different image regions.", "abstracts": [ { "abstractType": "Regular", "content": "A novel texture synthesis method produces high-quality results by introducing a multiscaled texture similarity measurement. Compared with other multiscaled methods, this approach focuses on measuring texture properties at different scales ranging from local to global using an adaptive similarity metric that accounts for texture variations across different image regions.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "A novel texture synthesis method produces high-quality results by introducing a multiscaled texture similarity measurement. 
Compared with other multiscaled methods, this approach focuses on measuring texture properties at different scales ranging from local to global using an adaptive similarity metric that accounts for texture variations across different image regions.", "title": "Multiscaled Texture Synthesis Using Multisized Pixel Neighborhoods", "normalizedTitle": "Multiscaled Texture Synthesis Using Multisized Pixel Neighborhoods", "fno": "mcg2007030041", "hasPdf": true, "idPrefix": "cg", "keywords": [ "Texture Synthesis", "Texture Mapping", "Image Based Rendering", "Image Processing" ], "authors": [ { "givenName": "Feng", "surname": "Dong", "fullName": "Feng Dong", "affiliation": "Brunel University", "__typename": "ArticleAuthorType" }, { "givenName": "Xujiong", "surname": "Ye", "fullName": "Xujiong Ye", "affiliation": "Medicsight", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "03", "pubDate": "2007-05-01 00:00:00", "pubType": "mags", "pages": "41-47", "year": "2007", "issn": "0272-1716", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/isise/2008/3494/2/3494b516", "title": "A Mapping Technique Based on Texture Synthesis", "doi": null, "abstractUrl": "/proceedings-article/isise/2008/3494b516/12OmNC1Gufa", "parentPublication": { "id": "proceedings/isise/2008/3494/2", "title": "2008 International Symposium on Information Science and Engineering (ISISE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cgiv/2009/3789/0/3789a018", "title": "Skeletal Texture Synthesis", "doi": null, "abstractUrl": "/proceedings-article/cgiv/2009/3789a018/12OmNwIHopo", "parentPublication": { "id": "proceedings/cgiv/2009/3789/0", "title": "2009 Sixth International Conference on Computer Graphics, Imaging and Visualization", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "proceedings/isccs/2011/4443/0/4443a287", "title": "Fast Texture Synthesis Using Feature Matching", "doi": null, "abstractUrl": "/proceedings-article/isccs/2011/4443a287/12OmNyO8tOn", "parentPublication": { "id": "proceedings/isccs/2011/4443/0", "title": "Computer Science and Society, International Symposium on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icip/1997/8183/3/81833186", "title": "Image replacement through texture synthesis", "doi": null, "abstractUrl": "/proceedings-article/icip/1997/81833186/12OmNyrIatD", "parentPublication": { "id": "proceedings/icip/1997/8183/3", "title": "Proceedings of International Conference on Image Processing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iciap/1999/0040/0/00401055", "title": "Texture Extraction from Photographs and Rendering with Dynamic Texture Mapping", "doi": null, "abstractUrl": "/proceedings-article/iciap/1999/00401055/12OmNz61drx", "parentPublication": { "id": "proceedings/iciap/1999/0040/0", "title": "Image Analysis and Processing, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/isvd/2009/3781/0/3781a165", "title": "Feature-Based Texture Synthesis and Editing Using Voronoi Diagrams", "doi": null, "abstractUrl": "/proceedings-article/isvd/2009/3781a165/12OmNzRHOSU", "parentPublication": { "id": "proceedings/isvd/2009/3781/0", "title": "2009 Sixth International Symposium on Voronoi Diagrams", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icip/1995/7310/3/73103648", "title": "Pyramid-based texture analysis/synthesis", "doi": null, "abstractUrl": "/proceedings-article/icip/1995/73103648/12OmNznkKfg", "parentPublication": { "id": "proceedings/icip/1995/7310/3", "title": "Image Processing, International Conference on", 
"__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2003/04/v0512", "title": "Texture Synthesis for 3D Shape Representation", "doi": null, "abstractUrl": "/journal/tg/2003/04/v0512/13rRUEgarsB", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2001/02/v0120", "title": "Texture Mixing and Texture Movie Synthesis Using Statistical Learning", "doi": null, "abstractUrl": "/journal/tg/2001/02/v0120/13rRUwbaqLn", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2010/04/ttp2010040733", "title": "Texture Synthesis with Grouplets", "doi": null, "abstractUrl": "/journal/tp/2010/04/ttp2010040733/13rRUx0xQ0D", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "mcg2007030018", "articleId": "13rRUwx1xJN", "__typename": "AdjacentArticleType" }, "next": { "fno": "mcg2007030057", "articleId": "13rRUwhpBIo", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNvvc5OL", "title": "April", "year": "2013", "issueNum": "04", "idPrefix": "tg", "pubType": "journal", "volume": "19", "label": "April", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUEgs2tr", "doi": "10.1109/TVCG.2013.55", "abstract": "The publication offers a note of thanks and lists its reviewers.", "abstracts": [ { "abstractType": "Regular", "content": "The publication offers a note of thanks and lists its reviewers.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "The publication offers a note of thanks and lists its reviewers.", "title": "Paper reviewers", "normalizedTitle": "Paper reviewers", "fno": "ttg2013040000x", "hasPdf": true, "idPrefix": "tg", "keywords": [], "authors": [], "replicability": null, "showBuyMe": false, "showRecommendedArticles": false, "isOpenAccess": true, "issueNum": "04", "pubDate": "2013-04-01 00:00:00", "pubType": "trans", "pages": "x-xi", "year": "2013", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [], "adjacentArticles": { "previous": { "fno": "ttg2013040000ix", "articleId": "13rRUyYSWsV", "__typename": "AdjacentArticleType" }, "next": { "fno": "ttg2013040000xii", "articleId": "13rRUILLkvp", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNvGPE8n", "title": "Jan.", "year": "2016", "issueNum": "01", "idPrefix": "tg", "pubType": "journal", "volume": "22", "label": "Jan.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUwInvBa", "doi": "10.1109/TVCG.2015.2467771", "abstract": "We propose TrajGraph, a new visual analytics method, for studying urban mobility patterns by integrating graph modeling and visual analysis with taxi trajectory data. A special graph is created to store and manifest real traffic information recorded by taxi trajectories over city streets. It conveys urban transportation dynamics which can be discovered by applying graph analysis algorithms. To support interactive, multiscale visual analytics, a graph partitioning algorithm is applied to create region-level graphs which have smaller size than the original street-level graph. Graph centralities, including Pagerank and betweenness, are computed to characterize the time-varying importance of different urban regions. The centralities are visualized by three coordinated views including a node-link graph view, a map view and a temporal information view. Users can interactively examine the importance of streets to discover and assess city traffic patterns. We have implemented a fully working prototype of this approach and evaluated it using massive taxi trajectories of Shenzhen, China. TrajGraph's capability in revealing the importance of city streets was evaluated by comparing the calculated centralities with the subjective evaluations from a group of drivers in Shenzhen. Feedback from a domain expert was collected. The effectiveness of the visual interface was evaluated through a formal user study. 
We also present several examples and a case study to demonstrate the usefulness of TrajGraph in urban transportation analysis.", "abstracts": [ { "abstractType": "Regular", "content": "We propose TrajGraph, a new visual analytics method, for studying urban mobility patterns by integrating graph modeling and visual analysis with taxi trajectory data. A special graph is created to store and manifest real traffic information recorded by taxi trajectories over city streets. It conveys urban transportation dynamics which can be discovered by applying graph analysis algorithms. To support interactive, multiscale visual analytics, a graph partitioning algorithm is applied to create region-level graphs which have smaller size than the original street-level graph. Graph centralities, including Pagerank and betweenness, are computed to characterize the time-varying importance of different urban regions. The centralities are visualized by three coordinated views including a node-link graph view, a map view and a temporal information view. Users can interactively examine the importance of streets to discover and assess city traffic patterns. We have implemented a fully working prototype of this approach and evaluated it using massive taxi trajectories of Shenzhen, China. TrajGraph's capability in revealing the importance of city streets was evaluated by comparing the calculated centralities with the subjective evaluations from a group of drivers in Shenzhen. Feedback from a domain expert was collected. The effectiveness of the visual interface was evaluated through a formal user study. We also present several examples and a case study to demonstrate the usefulness of TrajGraph in urban transportation analysis.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We propose TrajGraph, a new visual analytics method, for studying urban mobility patterns by integrating graph modeling and visual analysis with taxi trajectory data. 
A special graph is created to store and manifest real traffic information recorded by taxi trajectories over city streets. It conveys urban transportation dynamics which can be discovered by applying graph analysis algorithms. To support interactive, multiscale visual analytics, a graph partitioning algorithm is applied to create region-level graphs which have smaller size than the original street-level graph. Graph centralities, including Pagerank and betweenness, are computed to characterize the time-varying importance of different urban regions. The centralities are visualized by three coordinated views including a node-link graph view, a map view and a temporal information view. Users can interactively examine the importance of streets to discover and assess city traffic patterns. We have implemented a fully working prototype of this approach and evaluated it using massive taxi trajectories of Shenzhen, China. TrajGraph's capability in revealing the importance of city streets was evaluated by comparing the calculated centralities with the subjective evaluations from a group of drivers in Shenzhen. Feedback from a domain expert was collected. The effectiveness of the visual interface was evaluated through a formal user study. 
We also present several examples and a case study to demonstrate the usefulness of TrajGraph in urban transportation analysis.", "title": "TrajGraph: A Graph-Based Visual Analytics Approach to Studying Urban Network Centralities Using Taxi Trajectory Data", "normalizedTitle": "TrajGraph: A Graph-Based Visual Analytics Approach to Studying Urban Network Centralities Using Taxi Trajectory Data", "fno": "07192687", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Data Visualisation", "Graph Theory", "Interactive Systems", "Public Transport", "Road Traffic", "Town And Country Planning", "Traffic Information Systems", "Graph Based Visual Analytics Approach", "Urban Network Centralities", "Traj Graph", "Urban Mobility Patterns", "Graph Modeling", "Visual Analysis", "Taxi Trajectory Data", "Real Traffic Information", "City Streets", "Urban Transportation Dynamics", "Graph Analysis Algorithms", "Interactive Multiscale Visual Analytics", "Graph Partitioning Algorithm", "Region Level Graphs", "Street Level Graph", "Graph Centralities", "Pagerank", "Betweenness", "Urban Regions", "Node Link Graph View", "Map View", "Temporal Information View", "City Traffic Patterns", "Shenzhen", "China", "Visual Interface", "Urban Transportation Analysis", "Urban Areas", "Public Transportation", "Roads", "Trajectory", "Visual Analytics", "Graph Based Visual Analytics", "Centrality", "Taxi Trajectories", "Urban Network", "Transportation Assessment", "Graph Based Visual Analytics", "Centrality", "Taxi Trajectories", "Urban Network", "Transportation Assessment" ], "authors": [ { "givenName": "Xiaoke", "surname": "Huang", "fullName": "Xiaoke Huang", "affiliation": "Department of Computer Science, Kent State University", "__typename": "ArticleAuthorType" }, { "givenName": "Ye", "surname": "Zhao", "fullName": "Ye Zhao", "affiliation": "Department of Computer Science, Kent State University", "__typename": "ArticleAuthorType" }, { "givenName": "Chao", "surname": "Ma", "fullName": "Chao Ma", 
"affiliation": "Department of Computer Science, Kent State University", "__typename": "ArticleAuthorType" }, { "givenName": "Jing", "surname": "Yang", "fullName": "Jing Yang", "affiliation": "Department of Computer Science, University of North Carolina at Charlotte", "__typename": "ArticleAuthorType" }, { "givenName": "Xinyue", "surname": "Ye", "fullName": "Xinyue Ye", "affiliation": "Department of Geography, Kent State University", "__typename": "ArticleAuthorType" }, { "givenName": "Chong", "surname": "Zhang", "fullName": "Chong Zhang", "affiliation": "Department of Computer Science, University of North Carolina at Charlotte", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "01", "pubDate": "2016-01-01 00:00:00", "pubType": "trans", "pages": "160-169", "year": "2016", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/uic-atc-scalcom/2015/7211/0/07518255", "title": "Discovering Urban Social Functional Regions Using Taxi Trajectories", "doi": null, "abstractUrl": "/proceedings-article/uic-atc-scalcom/2015/07518255/12OmNwpXRXJ", "parentPublication": { "id": "proceedings/uic-atc-scalcom/2015/7211/0", "title": "2015 IEEE 12th Intl Conf on Ubiquitous Intelligence and Computing and 2015 IEEE 12th Intl Conf on Autonomic and Trusted Computing and 2015 IEEE 15th Intl Conf on Scalable Computing and Communications and Its Associated Workshops (UIC-ATC-ScalCom)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/dsc/2017/1600/0/1600a615", "title": "Detecting Congestion and Detour of Taxi Trip via GPS Data", "doi": null, "abstractUrl": "/proceedings-article/dsc/2017/1600a615/12OmNybfr4x", "parentPublication": { "id": "proceedings/dsc/2017/1600/0", "title": "2017 IEEE Second International Conference on Data Science in Cyberspace 
(DSC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/mdm/2016/0883/1/0883a318", "title": "Understanding Urban Mobility via Taxi Trip Clustering", "doi": null, "abstractUrl": "/proceedings-article/mdm/2016/0883a318/12OmNzvQI7o", "parentPublication": { "id": "proceedings/mdm/2016/0883/1", "title": "2016 17th IEEE International Conference on Mobile Data Management (MDM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/big-data/2017/2715/0/08257968", "title": "Detecting unmetered taxi rides from trajectory data", "doi": null, "abstractUrl": "/proceedings-article/big-data/2017/08257968/17D45WODapu", "parentPublication": { "id": "proceedings/big-data/2017/2715/0", "title": "2017 IEEE International Conference on Big Data (Big Data)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ictai/2018/7449/0/744900a864", "title": "Co-Ride: Collaborative Preference-Based Taxi-Sharing and Taxi-Dispatch", "doi": null, "abstractUrl": "/proceedings-article/ictai/2018/744900a864/17D45WWzW3R", "parentPublication": { "id": "proceedings/ictai/2018/7449/0", "title": "2018 IEEE 30th International Conference on Tools with Artificial Intelligence (ICTAI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/hase/2019/8540/0/854000a009", "title": "Towards an Efficient Cyber-Physical System for First-Mile Taxi Transit in Urban Complex", "doi": null, "abstractUrl": "/proceedings-article/hase/2019/854000a009/18IoXQezeA8", "parentPublication": { "id": "proceedings/hase/2019/8540/0", "title": "2019 IEEE 19th International Symposium on High Assurance Systems Engineering (HASE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/pacificvis/2019/9226/0/922600a174", "title": "Visual Analytics of Taxi Trajectory Data via Topical Sub-trajectories", 
"doi": null, "abstractUrl": "/proceedings-article/pacificvis/2019/922600a174/1cMF7meccAo", "parentPublication": { "id": "proceedings/pacificvis/2019/9226/0", "title": "2019 IEEE Pacific Visualization Symposium (PacificVis)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icde/2020/2903/0/09101623", "title": "Mobility-Aware Dynamic Taxi Ridesharing", "doi": null, "abstractUrl": "/proceedings-article/icde/2020/09101623/1kaMziiyYz6", "parentPublication": { "id": "proceedings/icde/2020/2903/0", "title": "2020 IEEE 36th International Conference on Data Engineering (ICDE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/compsac/2020/7303/0/730300b719", "title": "Taxi Demand Prediction using an LSTM-Based Deep Sequence Model and Points of Interest", "doi": null, "abstractUrl": "/proceedings-article/compsac/2020/730300b719/1nkDqaA4mfC", "parentPublication": { "id": "proceedings/compsac/2020/7303/0", "title": "2020 IEEE 44th Annual Computers, Software, and Applications Conference (COMPSAC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bdacs/2021/2561/0/256100a039", "title": "Prediction and Detection of Urban Trajectory Using Data Mining and Deep Neural Network", "doi": null, "abstractUrl": "/proceedings-article/bdacs/2021/256100a039/1wiRueah6Du", "parentPublication": { "id": "proceedings/bdacs/2021/2561/0", "title": "2021 International Conference on Big Data Analysis and Computer Science (BDACS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "07192719", "articleId": "13rRUwfZBVo", "__typename": "AdjacentArticleType" }, "next": { "fno": "07192706", "articleId": "13rRUwvBy8V", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "17ShDTXFgDD", "name": "ttg201601-07192687s1.zip", "location": 
"https://www.computer.org/csdl/api/v1/extra/ttg201601-07192687s1.zip", "extension": "zip", "size": "34 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "12OmNzayN2h", "title": "June", "year": "2016", "issueNum": "06", "idPrefix": "td", "pubType": "journal", "volume": "27", "label": "June", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUxYINeY", "doi": "10.1109/TPDS.2015.2467392", "abstract": "Mobile advertising in vehicular networks is of great interest with which timely information can be fast spread into the network. Given a limited budget for hiring seed vehicles, how to achieve the maximum advertising coverage within a given period of time is NP-hard. In this paper, we propose an innovative scheme, POST, for mobile advertising in vehicular networks. The POST design is based on two key observations we have found by analyzing three large-scale vehicular traces. First, vehicles demonstrate dynamic sociality in the network; second, such vehicular sociality has strong temporal correlations. With the knowledge, POST uses Markov chains to infer future vehicular sociality and adopts two greedy heuristics to select the most “centric” vehicles as seeds for mobile advertising. Extensive simulations based on three real data sets of taxi and bus traces have been carried out. The results show that POST can greatly improve the coverage and the intensity of advertising. For all the three involved data sets, it achieves an average gain of 64 percent comparing with the state-of-art schemes.", "abstracts": [ { "abstractType": "Regular", "content": "Mobile advertising in vehicular networks is of great interest with which timely information can be fast spread into the network. Given a limited budget for hiring seed vehicles, how to achieve the maximum advertising coverage within a given period of time is NP-hard. In this paper, we propose an innovative scheme, POST, for mobile advertising in vehicular networks. 
The POST design is based on two key observations we have found by analyzing three large-scale vehicular traces. First, vehicles demonstrate dynamic sociality in the network; second, such vehicular sociality has strong temporal correlations. With the knowledge, POST uses Markov chains to infer future vehicular sociality and adopts two greedy heuristics to select the most “centric” vehicles as seeds for mobile advertising. Extensive simulations based on three real data sets of taxi and bus traces have been carried out. The results show that POST can greatly improve the coverage and the intensity of advertising. For all the three involved data sets, it achieves an average gain of 64 percent comparing with the state-of-art schemes.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Mobile advertising in vehicular networks is of great interest with which timely information can be fast spread into the network. Given a limited budget for hiring seed vehicles, how to achieve the maximum advertising coverage within a given period of time is NP-hard. In this paper, we propose an innovative scheme, POST, for mobile advertising in vehicular networks. The POST design is based on two key observations we have found by analyzing three large-scale vehicular traces. First, vehicles demonstrate dynamic sociality in the network; second, such vehicular sociality has strong temporal correlations. With the knowledge, POST uses Markov chains to infer future vehicular sociality and adopts two greedy heuristics to select the most “centric” vehicles as seeds for mobile advertising. Extensive simulations based on three real data sets of taxi and bus traces have been carried out. The results show that POST can greatly improve the coverage and the intensity of advertising. 
For all the three involved data sets, it achieves an average gain of 64 percent comparing with the state-of-art schemes.", "title": "POST: Exploiting Dynamic Sociality for Mobile Advertising in Vehicular Networks", "normalizedTitle": "POST: Exploiting Dynamic Sociality for Mobile Advertising in Vehicular Networks", "fno": "07192632", "hasPdf": true, "idPrefix": "td", "keywords": [ "Vehicles", "Advertising", "Mobile Communication", "Public Transportation", "Mobile Computing", "Social Network Services", "Correlation", "Social Network Analysis", "Vehicular Networks", "Mobile Advertising", "Dynamic Sociality", "Social Network Analysis", "Vehicular Networks", "Mobile Advertising", "Dynamic Sociality" ], "authors": [ { "givenName": "Jun", "surname": "Qin", "fullName": "Jun Qin", "affiliation": "Department of Computer Science and Engineering, Shanghai Jiao Tong University, Shanghai, P. R. China", "__typename": "ArticleAuthorType" }, { "givenName": "Hongzi", "surname": "Zhu", "fullName": "Hongzi Zhu", "affiliation": "Department of Computer Science and Engineering, Shanghai Jiao Tong University, Shanghai, P. R. China", "__typename": "ArticleAuthorType" }, { "givenName": "Yanmin", "surname": "Zhu", "fullName": "Yanmin Zhu", "affiliation": "Department of Computer Science and Engineering, Shanghai Jiao Tong University, Shanghai, P. R. China", "__typename": "ArticleAuthorType" }, { "givenName": "Li", "surname": "Lu", "fullName": "Li Lu", "affiliation": "School of Computer Science and Engineering, University of Electronic Science and Technology of China, Chengdu, P. R. China", "__typename": "ArticleAuthorType" }, { "givenName": "Guangtao", "surname": "Xue", "fullName": "Guangtao Xue", "affiliation": "Department of Computer Science and Engineering, Shanghai Jiao Tong University, Shanghai, P. R. 
China", "__typename": "ArticleAuthorType" }, { "givenName": "Minglu", "surname": "Li", "fullName": "Minglu Li", "affiliation": "Department of Computer Science and Engineering, Shanghai Jiao Tong University, Shanghai, P. R. China", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "06", "pubDate": "2016-06-01 00:00:00", "pubType": "trans", "pages": "1770-1782", "year": "2016", "issn": "1045-9219", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/icpads/2016/4457/0/4457a354", "title": "TaxiCast: Efficient Broadcasting of Multimedia Advertisements in Vehicular Ad-Hoc Networks", "doi": null, "abstractUrl": "/proceedings-article/icpads/2016/4457a354/12OmNCgrCUR", "parentPublication": { "id": "proceedings/icpads/2016/4457/0", "title": "2016 IEEE 22nd International Conference on Parallel and Distributed Systems (ICPADS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/gpc-workshops/2008/3177/0/3177a164", "title": "SmartMobile-AD: An Intelligent Mobile Advertising System", "doi": null, "abstractUrl": "/proceedings-article/gpc-workshops/2008/3177a164/12OmNzUPpih", "parentPublication": { "id": "proceedings/gpc-workshops/2008/3177/0", "title": "GPC Workshops - 2008 3rd International Conference on Grid and Pervasive Computing Workshops", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdm/2017/3835/0/3835b183", "title": "An Automatic Approach for Transit Advertising in Public Transportation Systems", "doi": null, "abstractUrl": "/proceedings-article/icdm/2017/3835b183/12OmNzsrwnF", "parentPublication": { "id": "proceedings/icdm/2017/3835/0", "title": "2017 IEEE International Conference on Data Mining (ICDM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/msn/2015/0329/0/0329a079", "title": "Towards Secure and Practical Targeted Mobile Advertising", "doi": null, "abstractUrl": "/proceedings-article/msn/2015/0329a079/12OmNzuZUr7", "parentPublication": { "id": "proceedings/msn/2015/0329/0", "title": "2015 11th International Conference on Mobile Ad-hoc and Sensor Networks (MSN)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/sc/2020/03/07909009", "title": "A Practical System for Privacy-Aware Targeted Mobile Advertising Services", "doi": null, "abstractUrl": "/journal/sc/2020/03/07909009/13rRUwjGoDF", "parentPublication": { "id": "trans/sc", "title": "IEEE Transactions on Services Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/td/2015/01/06747403", "title": "TMC: Exploiting Trajectories for Multicast in Sparse Vehicular Networks", "doi": null, "abstractUrl": "/journal/td/2015/01/06747403/13rRUxNW1YX", "parentPublication": { "id": "trans/td", "title": "IEEE Transactions on Parallel & Distributed Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/td/2014/12/06714420", "title": "Exploiting Trajectory-Based Coverage for Geocast in Vehicular Networks", "doi": null, "abstractUrl": "/journal/td/2014/12/06714420/13rRUyeTVhK", "parentPublication": { "id": "trans/td", "title": "IEEE Transactions on Parallel & Distributed Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tm/2018/12/08345228", "title": "Scheduling Advertisement Delivery in Vehicular Networks", "doi": null, "abstractUrl": "/journal/tm/2018/12/08345228/17D45WHONiS", "parentPublication": { "id": "trans/tm", "title": "IEEE Transactions on Mobile Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/percom-workshops/2017/4338/0/07917508", "title": "Vehicular fog computing: Vision and 
challenges", "doi": null, "abstractUrl": "/proceedings-article/percom-workshops/2017/07917508/19wAJISMW64", "parentPublication": { "id": "proceedings/percom-workshops/2017/4338/0", "title": "2017 IEEE International Conference on Pervasive Computing and Communications: Workshops (PerCom Workshops)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/bd/2022/05/09320611", "title": "DMP: Content Delivery With Dynamic Movement Pattern in Vehicular Networks", "doi": null, "abstractUrl": "/journal/bd/2022/05/09320611/1qkwm61R732", "parentPublication": { "id": "trans/bd", "title": "IEEE Transactions on Big Data", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "07161378", "articleId": "13rRUxC0SvJ", "__typename": "AdjacentArticleType" }, "next": { "fno": "07152926", "articleId": "13rRUyeCka2", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "1EOzVCQBwyI", "title": "Aug.", "year": "2022", "issueNum": "08", "idPrefix": "tk", "pubType": "journal", "volume": "34", "label": "Aug.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1nwbbtaE7N6", "doi": "10.1109/TKDE.2020.3026235", "abstract": "With widespread deployment of GPS devices, massive spatiotemporal trajectories became more accessible. This booming trend paved the solid data ground for researchers to discover the regularities or patterns of human mobility. However, there are still three challenges in semantic pattern extraction including semantic absence, semantic bias and semantic complexity. In this paper, we invent and apply a novel data structure namely <italic>City Semantic Diagram</italic> to overcome above three challenges. First, our approach resolves semantic absence by exactly identifying semantic behaviours from raw trajectories. Second, the design of semantic purification helps us to detect semantic complexity from human mobility. Third, we avoid semantic bias using objective data source such as ubiquitous GPS trajectories. Comprehensive and massive experiments have been conducted based on real taxi trajectories and points of interest in Shanghai. Compared with existing approaches, <italic>City Semantic Diagram</italic> is able to discover fine-grained semantic patterns effectively and accurately.", "abstracts": [ { "abstractType": "Regular", "content": "With widespread deployment of GPS devices, massive spatiotemporal trajectories became more accessible. This booming trend paved the solid data ground for researchers to discover the regularities or patterns of human mobility. However, there are still three challenges in semantic pattern extraction including semantic absence, semantic bias and semantic complexity. 
In this paper, we invent and apply a novel data structure namely <italic>City Semantic Diagram</italic> to overcome above three challenges. First, our approach resolves semantic absence by exactly identifying semantic behaviours from raw trajectories. Second, the design of semantic purification helps us to detect semantic complexity from human mobility. Third, we avoid semantic bias using objective data source such as ubiquitous GPS trajectories. Comprehensive and massive experiments have been conducted based on real taxi trajectories and points of interest in Shanghai. Compared with existing approaches, <italic>City Semantic Diagram</italic> is able to discover fine-grained semantic patterns effectively and accurately.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "With widespread deployment of GPS devices, massive spatiotemporal trajectories became more accessible. This booming trend paved the solid data ground for researchers to discover the regularities or patterns of human mobility. However, there are still three challenges in semantic pattern extraction including semantic absence, semantic bias and semantic complexity. In this paper, we invent and apply a novel data structure namely City Semantic Diagram to overcome above three challenges. First, our approach resolves semantic absence by exactly identifying semantic behaviours from raw trajectories. Second, the design of semantic purification helps us to detect semantic complexity from human mobility. Third, we avoid semantic bias using objective data source such as ubiquitous GPS trajectories. Comprehensive and massive experiments have been conducted based on real taxi trajectories and points of interest in Shanghai. 
Compared with existing approaches, City Semantic Diagram is able to discover fine-grained semantic patterns effectively and accurately.", "title": "Extract Human Mobility Patterns Powered by City Semantic Diagram", "normalizedTitle": "Extract Human Mobility Patterns Powered by City Semantic Diagram", "fno": "09208690", "hasPdf": true, "idPrefix": "tk", "keywords": [ "Semantics", "Trajectory", "Global Positioning System", "Urban Areas", "Clustering Algorithms", "Data Mining", "Public Transportation", "Human Mobility", "Fine Grained Semantic Pattern", "GPS Trajectory", "Point Of Interest" ], "authors": [ { "givenName": "Zhangqing", "surname": "Shan", "fullName": "Zhangqing Shan", "affiliation": "School of Computer Science, Fudan University, Shanghai, China", "__typename": "ArticleAuthorType" }, { "givenName": "Weiwei", "surname": "Sun", "fullName": "Weiwei Sun", "affiliation": "School of Computer Science, Fudan University, Shanghai, China", "__typename": "ArticleAuthorType" }, { "givenName": "Baihua", "surname": "Zheng", "fullName": "Baihua Zheng", "affiliation": "School of Information Systems, Singapore Management University, Singapore", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "08", "pubDate": "2022-08-01 00:00:00", "pubType": "trans", "pages": "3765-3778", "year": "2022", "issn": "1041-4347", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/mdm/2016/0883/1/0883a369", "title": "BusesinRio: Buses as Mobile Traffic Sensors: Managing the Bus GPS Data in the City of Rio de Janeiro", "doi": null, "abstractUrl": "/proceedings-article/mdm/2016/0883a369/12OmNB06l9J", "parentPublication": { "id": "proceedings/mdm/2016/0883/1", "title": "2016 17th IEEE International Conference on Mobile Data Management (MDM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { 
"id": "proceedings/ccbd/2016/3555/0/3555a019", "title": "When Taxi Meets Bus: Night Bus Stop Planning over Large-Scale Traffic Data", "doi": null, "abstractUrl": "/proceedings-article/ccbd/2016/3555a019/12OmNvA1hzb", "parentPublication": { "id": "proceedings/ccbd/2016/3555/0", "title": "2016 7th International Conference on Cloud Computing and Big Data (CCBD)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ms/2016/2625/0/2625a057", "title": "Big Data Mobile Services for New York City Taxi Riders and Drivers", "doi": null, "abstractUrl": "/proceedings-article/ms/2016/2625a057/12OmNwD1pXR", "parentPublication": { "id": "proceedings/ms/2016/2625/0", "title": "2016 IEEE International Conference on Mobile Services (MS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/mdm/2016/0883/1/0883a318", "title": "Understanding Urban Mobility via Taxi Trip Clustering", "doi": null, "abstractUrl": "/proceedings-article/mdm/2016/0883a318/12OmNzvQI7o", "parentPublication": { "id": "proceedings/mdm/2016/0883/1", "title": "2016 17th IEEE International Conference on Mobile Data Management (MDM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2017/01/07534822", "title": "SemanticTraj: A New Approach to Interacting with Massive Taxi Trajectories", "doi": null, "abstractUrl": "/journal/tg/2017/01/07534822/13rRUygT7sI", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/big-data/2017/2715/0/08258062", "title": "Travel purpose inference with GPS trajectories, POIs, and geo-tagged social media data", "doi": null, "abstractUrl": "/proceedings-article/big-data/2017/08258062/17D45WYQJ81", "parentPublication": { "id": "proceedings/big-data/2017/2715/0", "title": "2017 IEEE International 
Conference on Big Data (Big Data)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/dasc-picom-datacom-cyberscitech/2017/1956/0/08328498", "title": "Understanding Travel Behavior of Private Cars via Trajectory Big Data Analysis in Urban Environments", "doi": null, "abstractUrl": "/proceedings-article/dasc-picom-datacom-cyberscitech/2017/08328498/17D45Wc1IKg", "parentPublication": { "id": "proceedings/dasc-picom-datacom-cyberscitech/2017/1956/0", "title": "2017 IEEE 15th Intl Conf on Dependable, Autonomic and Secure Computing, 15th Intl Conf on Pervasive Intelligence and Computing, 3rd Intl Conf on Big Data Intelligence and Computing and Cyber Science and Technology Congress(DASC/PiCom/DataCom/CyberSciTech)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iciev-&-icivpr/2018/5163/0/08640961", "title": "Understanding Real Time Traffic Characteristics of Urban Zones Using GPS Data: A Computational Study on Dhaka City", "doi": null, "abstractUrl": "/proceedings-article/iciev-&-icivpr/2018/08640961/17PYEjURy8m", "parentPublication": { "id": "proceedings/iciev-&-icivpr/2018/5163/0", "title": "2018 Joint 7th International Conference on Informatics, Electronics & Vision (ICIEV) and 2018 2nd International Conference on Imaging, Vision & Pattern Recognition (icIVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/hpcc-smartcity-dss/2019/2058/0/205800b197", "title": "On Predicting Taxi Movements Modes in Porto City Using Classification and Periodic Pattern Mining", "doi": null, "abstractUrl": "/proceedings-article/hpcc-smartcity-dss/2019/205800b197/1dPomA15AHu", "parentPublication": { "id": "proceedings/hpcc-smartcity-dss/2019/2058/0", "title": "2019 IEEE 21st International Conference on High Performance Computing and Communications; IEEE 17th International Conference on Smart City; IEEE 5th International Conference on 
Data Science and Systems (HPCC/SmartCity/DSS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tk/2023/04/09662971", "title": "Crowd-Sensing Enhanced Parking Patrol Using Sharing Bikes&#x2019; Trajectories", "doi": null, "abstractUrl": "/journal/tk/2023/04/09662971/1zBahPDbTd6", "parentPublication": { "id": "trans/tk", "title": "IEEE Transactions on Knowledge & Data Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "09242309", "articleId": "1oijnUrjUEo", "__typename": "AdjacentArticleType" }, "next": { "fno": "09240055", "articleId": "1oeZABekF0Y", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNAle6Qx", "title": "November/December", "year": "2007", "issueNum": "06", "idPrefix": "tg", "pubType": "journal", "volume": "13", "label": "November/December", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUEgarnG", "doi": "10.1109/TVCG.2007.70589", "abstract": "This paper presents scented widgets, graphical user interface controls enhanced with embedded visualizations that facilitate navigation in information spaces. We describe design guidelines for adding visual cues to common user interface widgets such as radio buttons, sliders, and combo boxes and contribute a general software framework for applying scented widgets within applications with minimal modifications to existing source code. We provide a number of example applications and describe a controlled experiment which finds that users exploring unfamiliar data make up to twice as many unique discoveries using widgets imbued with social navigation data. However, these differences equalize as familiarity with the data increases.", "abstracts": [ { "abstractType": "Regular", "content": "This paper presents scented widgets, graphical user interface controls enhanced with embedded visualizations that facilitate navigation in information spaces. We describe design guidelines for adding visual cues to common user interface widgets such as radio buttons, sliders, and combo boxes and contribute a general software framework for applying scented widgets within applications with minimal modifications to existing source code. We provide a number of example applications and describe a controlled experiment which finds that users exploring unfamiliar data make up to twice as many unique discoveries using widgets imbued with social navigation data. 
However, these differences equalize as familiarity with the data increases.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "This paper presents scented widgets, graphical user interface controls enhanced with embedded visualizations that facilitate navigation in information spaces. We describe design guidelines for adding visual cues to common user interface widgets such as radio buttons, sliders, and combo boxes and contribute a general software framework for applying scented widgets within applications with minimal modifications to existing source code. We provide a number of example applications and describe a controlled experiment which finds that users exploring unfamiliar data make up to twice as many unique discoveries using widgets imbued with social navigation data. However, these differences equalize as familiarity with the data increases.", "title": "Scented Widgets: Improving Navigation Cues with Embedded Visualizations", "normalizedTitle": "Scented Widgets: Improving Navigation Cues with Embedded Visualizations", "fno": "v1129", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Data Visualisation", "Graphical User Interfaces", "Scented Widgets", "Navigation Cues", "Embedded Visualizations", "Graphical User Interface Controls", "Information Spaces Navigation", "Design Guidelines", "Visual Cues", "Data Visualization", "Radio Navigation", "User Interfaces", "Data Analysis", "Application Software", "Switches", "Guidelines", "Costs", "Animal Structures", "Collaboration", "Information Visualization", "User Interface Toolkits", "Information Foraging", "Social Navigation", "Social Data Analysis" ], "authors": [ { "givenName": "Wesley", "surname": "Willett", "fullName": "Wesley Willett", "affiliation": "Computer Science Division at the University of California at Berkeley", "__typename": "ArticleAuthorType" }, { "givenName": "Jeffrey", "surname": "Heer", "fullName": "Jeffrey Heer", "affiliation": "Computer Science Division at the University of 
California at Berkeley", "__typename": "ArticleAuthorType" }, { "givenName": "Maneesh", "surname": "Agrawala", "fullName": "Maneesh Agrawala", "affiliation": "Computer Science Division at the University of California at Berkeley", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "06", "pubDate": "2007-11-01 00:00:00", "pubType": "trans", "pages": "1129-1136", "year": "2007", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/3dui/2014/3624/0/06798869", "title": "Poster: Investigating viewpoint visualizations for click & go navigation", "doi": null, "abstractUrl": "/proceedings-article/3dui/2014/06798869/12OmNAWpyuN", "parentPublication": { "id": "proceedings/3dui/2014/3624/0", "title": "2014 IEEE Symposium on 3D User Interfaces (3DUI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/tpcg/2003/1942/0/19420026", "title": "Guided navigation in task-oriented 3D graph visualizations", "doi": null, "abstractUrl": "/proceedings-article/tpcg/2003/19420026/12OmNx0A7M7", "parentPublication": { "id": "proceedings/tpcg/2003/1942/0", "title": "Theory and Practice of Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/mu/2005/02/u2080", "title": "Navigation with Auditory Cues in a Virtual Environment", "doi": null, "abstractUrl": "/magazine/mu/2005/02/u2080/13rRUwInvi2", "parentPublication": { "id": "mags/mu", "title": "IEEE MultiMedia", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2010/05/mcg2010050020", "title": "Spatial Navigation for Context-Aware Video Surveillance", "doi": null, "abstractUrl": "/magazine/cg/2010/05/mcg2010050020/13rRUx0xPOd", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and 
Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2017/01/07534787", "title": "Visualizing Dimension Coverage to Support Exploratory Analysis", "doi": null, "abstractUrl": "/journal/tg/2017/01/07534787/13rRUxcsYLV", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2009/06/mcg2009060054", "title": "Spatial Input for Temporal Navigation in Scientific Visualizations", "doi": null, "abstractUrl": "/magazine/cg/2009/06/mcg2009060054/13rRUyZaxsX", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bdva/2018/9194/0/08533895", "title": "Evaluating Navigation Techniques for 3D Graph Visualizations in Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/bdva/2018/08533895/17D45VN31ge", "parentPublication": { "id": "proceedings/bdva/2018/9194/0", "title": "2018 International Symposium on Big Data Visual and Immersive Analytics (BDVA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bigcomp/2019/7789/0/08679289", "title": "Event Venue Navigation for Visually Impaired People", "doi": null, "abstractUrl": "/proceedings-article/bigcomp/2019/08679289/18XkgEGveJW", "parentPublication": { "id": "proceedings/bigcomp/2019/7789/0", "title": "2019 IEEE International Conference on Big Data and Smart Computing (BigComp)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/01/08809750", "title": "Pattern-Driven Navigation in 2D Multiscale Visualizations with Scalable Insets", "doi": null, "abstractUrl": "/journal/tg/2020/01/08809750/1cHEu5CRoFq", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on 
Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2019/2506/0/250600a257", "title": "Modelling Multi-Channel Emotions Using Facial Expression and Trajectory Cues for Improving Socially-Aware Robot Navigation", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2019/250600a257/1iTvkpsDyY8", "parentPublication": { "id": "proceedings/cvprw/2019/2506/0", "title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "v1121", "articleId": "13rRUwI5UfW", "__typename": "AdjacentArticleType" }, "next": { "fno": "v1137", "articleId": "13rRUx0gefh", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNwudQUj", "title": "March", "year": "2014", "issueNum": "03", "idPrefix": "tp", "pubType": "journal", "volume": "36", "label": "March", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUEgarCx", "doi": "10.1109/TPAMI.2013.143", "abstract": "In recent years, there has been extensive research on sparse representation of vector-valued signals. In the matrix case, the data points are merely vectorized and treated as vectors thereafter (for example, image patches). However, this approach cannot be used for all matrices, as it may destroy the inherent structure of the data. Symmetric positive definite (SPD) matrices constitute one such class of signals, where their implicit structure of positive eigenvalues is lost upon vectorization. This paper proposes a novel sparse coding technique for positive definite matrices, which respects the structure of the Riemannian manifold and preserves the positivity of their eigenvalues, without resorting to vectorization. Synthetic and real-world computer vision experiments with region covariance descriptors demonstrate the need for and the applicability of the new sparse coding model. This work serves to bridge the gap between the sparse modeling paradigm and the space of positive definite matrices.", "abstracts": [ { "abstractType": "Regular", "content": "In recent years, there has been extensive research on sparse representation of vector-valued signals. In the matrix case, the data points are merely vectorized and treated as vectors thereafter (for example, image patches). However, this approach cannot be used for all matrices, as it may destroy the inherent structure of the data. Symmetric positive definite (SPD) matrices constitute one such class of signals, where their implicit structure of positive eigenvalues is lost upon vectorization. 
This paper proposes a novel sparse coding technique for positive definite matrices, which respects the structure of the Riemannian manifold and preserves the positivity of their eigenvalues, without resorting to vectorization. Synthetic and real-world computer vision experiments with region covariance descriptors demonstrate the need for and the applicability of the new sparse coding model. This work serves to bridge the gap between the sparse modeling paradigm and the space of positive definite matrices.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In recent years, there has been extensive research on sparse representation of vector-valued signals. In the matrix case, the data points are merely vectorized and treated as vectors thereafter (for example, image patches). However, this approach cannot be used for all matrices, as it may destroy the inherent structure of the data. Symmetric positive definite (SPD) matrices constitute one such class of signals, where their implicit structure of positive eigenvalues is lost upon vectorization. This paper proposes a novel sparse coding technique for positive definite matrices, which respects the structure of the Riemannian manifold and preserves the positivity of their eigenvalues, without resorting to vectorization. Synthetic and real-world computer vision experiments with region covariance descriptors demonstrate the need for and the applicability of the new sparse coding model. 
This work serves to bridge the gap between the sparse modeling paradigm and the space of positive definite matrices.", "title": "Tensor Sparse Coding for Positive Definite Matrices", "normalizedTitle": "Tensor Sparse Coding for Positive Definite Matrices", "fno": "ttp2014030592", "hasPdf": true, "idPrefix": "tp", "keywords": [ "Covariance Matrices", "Sparse Matrices", "Vectors", "Symmetric Matrices", "Dictionaries", "Encoding", "Tin", "Optimization", "Sparse Coding", "Positive Definite Matrices", "Region Covariance Descriptors", "Computer Vision" ], "authors": [ { "givenName": "Ravishankar", "surname": "Sivalingam", "fullName": "Ravishankar Sivalingam", "affiliation": "Dept. of Comput. Sci. & Eng., Univ. of Minnesota, Twin Cities, Minneapolis, MN, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Daniel", "surname": "Boley", "fullName": "Daniel Boley", "affiliation": "Dept. of Comput. Sci. & Eng., Univ. of Minnesota, Twin Cities, Minneapolis, MN, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Vassilios", "surname": "Morellas", "fullName": "Vassilios Morellas", "affiliation": "Dept. of Comput. Sci. & Eng., Univ. of Minnesota, Twin Cities, Minneapolis, MN, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Nikolaos", "surname": "Papanikolopoulos", "fullName": "Nikolaos Papanikolopoulos", "affiliation": "Dept. of Comput. Sci. & Eng., Univ. 
of Minnesota, Twin Cities, Minneapolis, MN, USA", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "03", "pubDate": "2014-03-01 00:00:00", "pubType": "trans", "pages": "592-605", "year": "2014", "issn": "0162-8828", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/aici/2010/4225/3/4225c547", "title": "The Generalized Inverse Inequalities for Symmetric Nonnegative Definite Matrices", "doi": null, "abstractUrl": "/proceedings-article/aici/2010/4225c547/12OmNAndilS", "parentPublication": { "id": "proceedings/aici/2010/4225/3", "title": "Artificial Intelligence and Computational Intelligence, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2016/8851/0/8851f157", "title": "Kernel Sparse Subspace Clustering on Symmetric Positive Definite Manifolds", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2016/8851f157/12OmNBlFQUa", "parentPublication": { "id": "proceedings/cvpr/2016/8851/0", "title": "2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/isspit/2017/4662/0/08388651", "title": "Sparse representation based classification with intra-class variation dictionary on symmetric positive definite manifolds", "doi": null, "abstractUrl": "/proceedings-article/isspit/2017/08388651/12OmNqJHFte", "parentPublication": { "id": "proceedings/isspit/2017/4662/0", "title": "2017 IEEE International Symposium on Signal Processing and Information Technology (ISSPIT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2014/4985/0/06836085", "title": "Random projections on manifolds of Symmetric Positive Definite matrices for image classification", 
"doi": null, "abstractUrl": "/proceedings-article/wacv/2014/06836085/12OmNvmowR5", "parentPublication": { "id": "proceedings/wacv/2014/4985/0", "title": "2014 IEEE Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2016/0641/0/07477621", "title": "Image set classification by symmetric positive semi-definite matrices", "doi": null, "abstractUrl": "/proceedings-article/wacv/2016/07477621/12OmNx5Yv4o", "parentPublication": { "id": "proceedings/wacv/2016/0641/0", "title": "2016 IEEE Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/splc/1993/4980/0/00365576", "title": "Distributed solution of sparse symmetric positive definite systems", "doi": null, "abstractUrl": "/proceedings-article/splc/1993/00365576/12OmNym2bUx", "parentPublication": { "id": "proceedings/splc/1993/4980/0", "title": "Proceedings of Scalable Parallel Libraries Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2017/1032/0/08237720", "title": "Learning Discriminative αβ-Divergences for Positive Definite Matrices", "doi": null, "abstractUrl": "/proceedings-article/iccv/2017/08237720/12OmNzmLxEW", "parentPublication": { "id": "proceedings/iccv/2017/1032/0", "title": "2017 IEEE International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2016/05/07159063", "title": "Bayesian Nonparametric Clustering for Positive Definite Matrices", "doi": null, "abstractUrl": "/journal/tp/2016/05/07159063/13rRUx0xPof", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/icme/2018/1737/0/08486518", "title": "Support Vector Metric Learning on Symmetric Positive Definite Manifold", "doi": null, "abstractUrl": "/proceedings-article/icme/2018/08486518/14jQfOuD6nK", "parentPublication": { "id": "proceedings/icme/2018/1737/0", "title": "2018 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2022/09/09405430", "title": "Learning Log-Determinant Divergences for Positive Definite Matrices", "doi": null, "abstractUrl": "/journal/tp/2022/09/09405430/1sP16sJ7CCs", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "ttp2014030577", "articleId": "13rRUxASuNM", "__typename": "AdjacentArticleType" }, "next": { "fno": "ttp2014030606", "articleId": "13rRUxC0SPN", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "1KsRzJZl0ly", "title": "March", "year": "2023", "issueNum": "03", "idPrefix": "tk", "pubType": "journal", "volume": "35", "label": "March", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1x4UGS0CE3m", "doi": "10.1109/TKDE.2021.3113943", "abstract": "Symmetric nonnegative matrix factorization (SNMF) is effective to cluster nonlinearly separable data, which uses the constructed graph to capture the structure of inherent clusters. Nevertheless, many SNMF-based clustering approaches implicitly enforce either the sparseness constraint or the smoothness constraint with the limited supervised information in the form of cannot-link or must-link in a semi-supervised manner, which may not be quite satisfactory in many applications where sparseness and smoothness are demanded explicitly and simultaneously. In this paper, we propose a new semi-supervised SNMF-based approach termed Semi-supervised Structured SNMF-based clustering (S3NMF). The method flexibly enforces the block-diagonal structure to the similarity matrix, where the sparseness and smoothness are simultaneously considered, so that we can obtain the desirable assignment matrix by simultaneously learning similarity and assignment matrices in a constrained optimization problem. We formulate S3NMF with a semi-supervised manner and utilize the indirect constraints of sparseness and smoothness by cannot-link and must-link. To effectively solve S3NMF, we present an alternating iterative algorithm with theoretically proved convergence to seek for the solution of the optimization problem. 
Experiments on five benchmark data sets show better performance and satisfactory stability of the proposed method.", "abstracts": [ { "abstractType": "Regular", "content": "Symmetric nonnegative matrix factorization (SNMF) is effective to cluster nonlinearly separable data, which uses the constructed graph to capture the structure of inherent clusters. Nevertheless, many SNMF-based clustering approaches implicitly enforce either the sparseness constraint or the smoothness constraint with the limited supervised information in the form of cannot-link or must-link in a semi-supervised manner, which may not be quite satisfactory in many applications where sparseness and smoothness are demanded explicitly and simultaneously. In this paper, we propose a new semi-supervised SNMF-based approach termed Semi-supervised Structured SNMF-based clustering (S3NMF). The method flexibly enforces the block-diagonal structure to the similarity matrix, where the sparseness and smoothness are simultaneously considered, so that we can obtain the desirable assignment matrix by simultaneously learning similarity and assignment matrices in a constrained optimization problem. We formulate S3NMF with a semi-supervised manner and utilize the indirect constraints of sparseness and smoothness by cannot-link and must-link. To effectively solve S3NMF, we present an alternating iterative algorithm with theoretically proved convergence to seek for the solution of the optimization problem. Experiments on five benchmark data sets show better performance and satisfactory stability of the proposed method.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Symmetric nonnegative matrix factorization (SNMF) is effective to cluster nonlinearly separable data, which uses the constructed graph to capture the structure of inherent clusters. 
Nevertheless, many SNMF-based clustering approaches implicitly enforce either the sparseness constraint or the smoothness constraint with the limited supervised information in the form of cannot-link or must-link in a semi-supervised manner, which may not be quite satisfactory in many applications where sparseness and smoothness are demanded explicitly and simultaneously. In this paper, we propose a new semi-supervised SNMF-based approach termed Semi-supervised Structured SNMF-based clustering (S3NMF). The method flexibly enforces the block-diagonal structure to the similarity matrix, where the sparseness and smoothness are simultaneously considered, so that we can obtain the desirable assignment matrix by simultaneously learning similarity and assignment matrices in a constrained optimization problem. We formulate S3NMF with a semi-supervised manner and utilize the indirect constraints of sparseness and smoothness by cannot-link and must-link. To effectively solve S3NMF, we present an alternating iterative algorithm with theoretically proved convergence to seek for the solution of the optimization problem. 
Experiments on five benchmark data sets show better performance and satisfactory stability of the proposed method.", "title": "Block-Diagonal Guided Symmetric Nonnegative Matrix Factorization", "normalizedTitle": "Block-Diagonal Guided Symmetric Nonnegative Matrix Factorization", "fno": "09543530", "hasPdf": true, "idPrefix": "tk", "keywords": [ "Graph Theory", "Iterative Methods", "Matrix Decomposition", "Optimisation", "Pattern Clustering", "Semi Supervised Learning Artificial Intelligence", "Sparse Matrices", "Alternating Iterative Algorithm", "Assignment Matrices", "Block Diagonal Guided Symmetric Nonnegative Matrix Factorization", "Block Diagonal Structure", "Cluster Nonlinearly Separable Data", "Constrained Optimization Problem", "Constructed Graph", "Desirable Assignment Matrix", "Indirect Constraints", "Inherent Clusters", "S 3 NMF", "Semisupervised Manner", "Semisupervised SNMF Based Approach", "Semisupervised Structured SNMF Based Clustering", "Similarity Matrix", "Smoothness Constraint", "Sparseness Constraint", "Supervised Information", "Symmetric Matrices", "Sparse Matrices", "Optimization", "Linear Programming", "Dimensionality Reduction", "Convergence", "Matrix Decomposition", "Block Diagonal Structure", "Symmetric Nonnegative Matrix Factorization SNMF", "Semi Supervised Clustering" ], "authors": [ { "givenName": "Yalan", "surname": "Qin", "fullName": "Yalan Qin", "affiliation": "School of Communication and Information Engineering, Shanghai University, Shanghai, China", "__typename": "ArticleAuthorType" }, { "givenName": "Guorui", "surname": "Feng", "fullName": "Guorui Feng", "affiliation": "School of Communication and Information Engineering, Shanghai University, Shanghai, China", "__typename": "ArticleAuthorType" }, { "givenName": "Yanli", "surname": "Ren", "fullName": "Yanli Ren", "affiliation": "School of Communication and Information Engineering, Shanghai University, Shanghai, China", "__typename": "ArticleAuthorType" }, { "givenName": 
"Xinpeng", "surname": "Zhang", "fullName": "Xinpeng Zhang", "affiliation": "School of Communication and Information Engineering, Shanghai University, Shanghai, China", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "03", "pubDate": "2023-03-01 00:00:00", "pubType": "trans", "pages": "2313-2325", "year": "2023", "issn": "1041-4347", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/iccvw/2009/4442/0/05457714", "title": "Nonnegative Matrix Factorization with Gibbs Random Field modeling", "doi": null, "abstractUrl": "/proceedings-article/iccvw/2009/05457714/12OmNBRbkpm", "parentPublication": { "id": "proceedings/iccvw/2009/4442/0", "title": "2009 IEEE 12th International Conference on Computer Vision Workshops, ICCV Workshops", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdm/2015/9504/0/9504b123", "title": "Part-Level Regularized Semi-Nonnegative Coding for Semi-Supervised Learning", "doi": null, "abstractUrl": "/proceedings-article/icdm/2015/9504b123/12OmNBqMDyM", "parentPublication": { "id": "proceedings/icdm/2015/9504/0", "title": "2015 IEEE International Conference on Data Mining (ICDM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2014/5209/0/5209b431", "title": "Average Overlap for Clustering Incomplete Data Using Symmetric Non-negative Matrix Factorization", "doi": null, "abstractUrl": "/proceedings-article/icpr/2014/5209b431/12OmNrkjVkq", "parentPublication": { "id": "proceedings/icpr/2014/5209/0", "title": "2014 22nd International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/isspit/2013/4796/0/06781913", "title": "Robust music signal separation based on supervised 
nonnegative matrix factorization with prevention of basis sharing", "doi": null, "abstractUrl": "/proceedings-article/isspit/2013/06781913/12OmNvs4vsO", "parentPublication": { "id": "proceedings/isspit/2013/4796/0", "title": "2013 IEEE International Symposium on Signal Processing and Information Technology (ISSPIT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bibm/2016/1611/0/07822591", "title": "Multi-view clustering microbiome data by joint symmetric nonnegative matrix factorization with Laplacian regularization", "doi": null, "abstractUrl": "/proceedings-article/bibm/2016/07822591/12OmNwdbV6d", "parentPublication": { "id": "proceedings/bibm/2016/1611/0", "title": "2016 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tb/2020/03/08051103", "title": "Clustering and Integrating of Heterogeneous Microbiome Data by Joint Symmetric Nonnegative Matrix Factorization with Laplacian Regularization", "doi": null, "abstractUrl": "/journal/tb/2020/03/08051103/13rRUIJcWvu", "parentPublication": { "id": "trans/tb", "title": "IEEE/ACM Transactions on Computational Biology and Bioinformatics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ispa-iucc/2017/3790/0/379001b144", "title": "CuSNMF: A Sparse Non-Negative Matrix Factorization Approach for Large-Scale Collaborative Filtering Recommender Systems on Multi-GPU", "doi": null, "abstractUrl": "/proceedings-article/ispa-iucc/2017/379001b144/17D45VUZMZt", "parentPublication": { "id": "proceedings/ispa-iucc/2017/3790/0", "title": "2017 IEEE International Symposium on Parallel and Distributed Processing with Applications and 2017 IEEE International Conference on Ubiquitous Computing and Communications (ISPA/IUCC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/big-data/2017/2715/0/08258332", "title": "Discovering dynamic patterns of urban space via semi-nonnegative matrix factorization", "doi": null, "abstractUrl": "/proceedings-article/big-data/2017/08258332/17D45VsBTXv", "parentPublication": { "id": "proceedings/big-data/2017/2715/0", "title": "2017 IEEE International Conference on Big Data (Big Data)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2023/05/09891776", "title": "A Progressive Hierarchical Alternating Least Squares Method for Symmetric Nonnegative Matrix Factorization", "doi": null, "abstractUrl": "/journal/tp/2023/05/09891776/1GF6LiZPb0s", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tk/2022/01/09044402", "title": "Label Propagated Nonnegative Matrix Factorization for Clustering", "doi": null, "abstractUrl": "/journal/tk/2022/01/09044402/1isum89Ss4U", "parentPublication": { "id": "trans/tk", "title": "IEEE Transactions on Knowledge & Data Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "09527110", "articleId": "1wzrJvKSJOw", "__typename": "AdjacentArticleType" }, "next": { "fno": "09546546", "articleId": "1x6zzmXG7Fm", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNxb5hpx", "title": "May/June", "year": "2009", "issueNum": "03", "idPrefix": "cg", "pubType": "magazine", "volume": "29", "label": "May/June", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUxBrGjm", "doi": "10.1109/MCG.2009.43", "abstract": "This system generates synthetic syndromic-surveillance data for evaluating visualization and visual-analytics techniques. Modeling data from emergency room departments, the system generates two years of patient data, into which system users can inject spatiotemporal disease outbreak signals. The result is a data set with known seasonal trends and irregular outbreak patterns.", "abstracts": [ { "abstractType": "Regular", "content": "This system generates synthetic syndromic-surveillance data for evaluating visualization and visual-analytics techniques. Modeling data from emergency room departments, the system generates two years of patient data, into which system users can inject spatiotemporal disease outbreak signals. The result is a data set with known seasonal trends and irregular outbreak patterns.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "This system generates synthetic syndromic-surveillance data for evaluating visualization and visual-analytics techniques. Modeling data from emergency room departments, the system generates two years of patient data, into which system users can inject spatiotemporal disease outbreak signals. 
The result is a data set with known seasonal trends and irregular outbreak patterns.", "title": "Generating Synthetic Syndromic-Surveillance Data for Evaluating Visual-Analytics Techniques", "normalizedTitle": "Generating Synthetic Syndromic-Surveillance Data for Evaluating Visual-Analytics Techniques", "fno": "mcg2009030018", "hasPdf": true, "idPrefix": "cg", "keywords": [ "Computer Graphics", "Visual Analytics", "Syndromic Surveillance", "Synthetic Data" ], "authors": [ { "givenName": "Ross", "surname": "Maciejewski", "fullName": "Ross Maciejewski", "affiliation": "Purdue University", "__typename": "ArticleAuthorType" }, { "givenName": "Ryan", "surname": "Hafen", "fullName": "Ryan Hafen", "affiliation": "Purdue University", "__typename": "ArticleAuthorType" }, { "givenName": "Stephen", "surname": "Rudolph", "fullName": "Stephen Rudolph", "affiliation": "Purdue University", "__typename": "ArticleAuthorType" }, { "givenName": "George", "surname": "Tebbetts", "fullName": "George Tebbetts", "affiliation": "Purdue University", "__typename": "ArticleAuthorType" }, { "givenName": "William S.", "surname": "Cleveland", "fullName": "William S. Cleveland", "affiliation": "Purdue University", "__typename": "ArticleAuthorType" }, { "givenName": "Shaun J.", "surname": "Grannis", "fullName": "Shaun J. Grannis", "affiliation": "Indiana University", "__typename": "ArticleAuthorType" }, { "givenName": "David S.", "surname": "Ebert", "fullName": "David S. 
Ebert", "affiliation": "Purdue University", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "03", "pubDate": "2009-05-01 00:00:00", "pubType": "mags", "pages": "18-28", "year": "2009", "issn": "0272-1716", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/icalt/2010/4055/0/4055a048", "title": "A Mobile Learning System for Syndromic Surveillance and Diagnosis", "doi": null, "abstractUrl": "/proceedings-article/icalt/2010/4055a048/12OmNxQOjDG", "parentPublication": { "id": "proceedings/icalt/2010/4055/0", "title": "Advanced Learning Technologies, IEEE International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2008/3268/0/3268a273", "title": "Visual Analytics for the Detection of Anomalous Maritime Behavior", "doi": null, "abstractUrl": "/proceedings-article/iv/2008/3268a273/12OmNyVerZM", "parentPublication": { "id": "proceedings/iv/2008/3268/0", "title": "2008 12th International Conference Information Visualisation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdmw/2010/4257/0/4257a442", "title": "BODY -- Buckets of Disease Symptoms for Disease Outbreak Analysis", "doi": null, "abstractUrl": "/proceedings-article/icdmw/2010/4257a442/12OmNyvGymA", "parentPublication": { "id": "proceedings/icdmw/2010/4257/0", "title": "2010 IEEE International Conference on Data Mining Workshops", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2011/04/ttg2011040440", "title": "Forecasting Hotspots—A Predictive Analytics Approach", "doi": null, "abstractUrl": "/journal/tg/2011/04/ttg2011040440/13rRUwdrdSv", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2010/05/mcg2010050018", "title": "Guest Editors' Introduction: Multimedia Analytics", "doi": null, "abstractUrl": "/magazine/cg/2010/05/mcg2010050018/13rRUxCRFQg", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/ex/2005/05/x5026", "title": "Ontology-Centered Syndromic Surveillance for Bioterrorism", "doi": null, "abstractUrl": "/magazine/ex/2005/05/x5026/13rRUzpQPHL", "parentPublication": { "id": "mags/ex", "title": "IEEE Intelligent Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2018/6100/0/610000b830", "title": "Soccer: Who Has the Ball? Generating Visual Analytics and Player Statistics", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2018/610000b830/17D45VObpOM", "parentPublication": { "id": "proceedings/cvprw/2018/6100/0", "title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2018/7202/0/720200a251", "title": "Visual Analytics for Decomposing Temporal Event Series of Production Lines", "doi": null, "abstractUrl": "/proceedings-article/iv/2018/720200a251/17D45WcjjRK", "parentPublication": { "id": "proceedings/iv/2018/7202/0", "title": "2018 22nd International Conference Information Visualisation (IV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/smartcomp/2020/6997/0/699700a025", "title": "Analytics-Aware Storage of Surveillance Videos: Implementation and Optimization", "doi": null, "abstractUrl": "/proceedings-article/smartcomp/2020/699700a025/1oxocwBNR1C", "parentPublication": { "id": "proceedings/smartcomp/2020/6997/0", "title": "2020 IEEE International Conference on 
Smart Computing (SMARTCOMP)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/02/09286903", "title": "In Search of Patient Zero: Visual Analytics of Pathogen Transmission Pathways in Hospitals", "doi": null, "abstractUrl": "/journal/tg/2021/02/09286903/1por2UO3Q4M", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "mcg2009030016", "articleId": "13rRUwcS1uS", "__typename": "AdjacentArticleType" }, "next": { "fno": "mcg2009030029", "articleId": "13rRUwvT9jx", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNwpGgK8", "title": "Dec.", "year": "2014", "issueNum": "12", "idPrefix": "tg", "pubType": "journal", "volume": "20", "label": "Dec.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUyv53Fr", "doi": "10.1109/TVCG.2014.2346575", "abstract": "Visual analytics is inherently a collaboration between human and computer. However, in current visual analytics systems, the computer has limited means of knowing about its users and their analysis processes. While existing research has shown that a user's interactions with a system reflect a large amount of the user's reasoning process, there has been limited advancement in developing automated, real-time techniques that mine interactions to learn about the user. In this paper, we demonstrate that we can accurately predict a user's task performance and infer some user personality traits by using machine learning techniques to analyze interaction data. Specifically, we conduct an experiment in which participants perform a visual search task, and apply well-known machine learning algorithms to three encodings of the users' interaction data. We achieve, depending on algorithm and encoding, between 62% and 83% accuracy at predicting whether each user will be fast or slow at completing the task. Beyond predicting performance, we demonstrate that using the same techniques, we can infer aspects of the user's personality factors, including locus of control, extraversion, and neuroticism. Further analyses show that strong results can be attained with limited observation time: in one case 95% of the final accuracy is gained after a quarter of the average task completion time. 
Overall, our findings show that interactions can provide information to the computer about its human collaborator, and establish a foundation for realizing mixed-initiative visual analytics systems.", "abstracts": [ { "abstractType": "Regular", "content": "Visual analytics is inherently a collaboration between human and computer. However, in current visual analytics systems, the computer has limited means of knowing about its users and their analysis processes. While existing research has shown that a user's interactions with a system reflect a large amount of the user's reasoning process, there has been limited advancement in developing automated, real-time techniques that mine interactions to learn about the user. In this paper, we demonstrate that we can accurately predict a user's task performance and infer some user personality traits by using machine learning techniques to analyze interaction data. Specifically, we conduct an experiment in which participants perform a visual search task, and apply well-known machine learning algorithms to three encodings of the users' interaction data. We achieve, depending on algorithm and encoding, between 62% and 83% accuracy at predicting whether each user will be fast or slow at completing the task. Beyond predicting performance, we demonstrate that using the same techniques, we can infer aspects of the user's personality factors, including locus of control, extraversion, and neuroticism. Further analyses show that strong results can be attained with limited observation time: in one case 95% of the final accuracy is gained after a quarter of the average task completion time. Overall, our findings show that interactions can provide information to the computer about its human collaborator, and establish a foundation for realizing mixed-initiative visual analytics systems.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Visual analytics is inherently a collaboration between human and computer. 
However, in current visual analytics systems, the computer has limited means of knowing about its users and their analysis processes. While existing research has shown that a user's interactions with a system reflect a large amount of the user's reasoning process, there has been limited advancement in developing automated, real-time techniques that mine interactions to learn about the user. In this paper, we demonstrate that we can accurately predict a user's task performance and infer some user personality traits by using machine learning techniques to analyze interaction data. Specifically, we conduct an experiment in which participants perform a visual search task, and apply well-known machine learning algorithms to three encodings of the users' interaction data. We achieve, depending on algorithm and encoding, between 62% and 83% accuracy at predicting whether each user will be fast or slow at completing the task. Beyond predicting performance, we demonstrate that using the same techniques, we can infer aspects of the user's personality factors, including locus of control, extraversion, and neuroticism. Further analyses show that strong results can be attained with limited observation time: in one case 95% of the final accuracy is gained after a quarter of the average task completion time. 
Overall, our findings show that interactions can provide information to the computer about its human collaborator, and establish a foundation for realizing mixed-initiative visual analytics systems.", "title": "Finding Waldo: Learning about Users from their Interactions", "normalizedTitle": "Finding Waldo: Learning about Users from their Interactions", "fno": "06875913", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Mice", "Visual Analytics", "Accuracy", "Data Visualization", "Computers", "Encoding", "Applied Machine Learning", "User Interactions", "Analytic Provenance", "Visualization" ], "authors": [ { "givenName": "Eli T", "surname": "Brown", "fullName": "Eli T Brown", "affiliation": ", Tufts U", "__typename": "ArticleAuthorType" }, { "givenName": "Alvitta", "surname": "Ottley", "fullName": "Alvitta Ottley", "affiliation": ", Tufts U", "__typename": "ArticleAuthorType" }, { "givenName": "Helen", "surname": "Zhao", "fullName": "Helen Zhao", "affiliation": ", Tufts U", "__typename": "ArticleAuthorType" }, { "givenName": "Quan", "surname": "Lin", "fullName": "Quan Lin", "affiliation": ", Tufts U", "__typename": "ArticleAuthorType" }, { "givenName": "Richard", "surname": "Souvenir", "fullName": "Richard Souvenir", "affiliation": ", U.N.C. 
Charlotte", "__typename": "ArticleAuthorType" }, { "givenName": "Alex", "surname": "Endert", "fullName": "Alex Endert", "affiliation": ", Pacific Northwest National Lab", "__typename": "ArticleAuthorType" }, { "givenName": "Remco", "surname": "Chang", "fullName": "Remco Chang", "affiliation": ", Tufts U", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "12", "pubDate": "2014-12-01 00:00:00", "pubType": "trans", "pages": "1663-1672", "year": "2014", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/vast/2014/6227/0/07042510", "title": "PIVE: Per-Iteration visualization environment for supporting real-time interactions with computational methods", "doi": null, "abstractUrl": "/proceedings-article/vast/2014/07042510/12OmNBgz4CP", "parentPublication": { "id": "proceedings/vast/2014/6227/0", "title": "2014 IEEE Conference on Visual Analytics Science and Technology (VAST)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wi/2016/4470/0/4470a709", "title": "An Interactive Circular Visual Analytic Tool for Visualization of Web Data", "doi": null, "abstractUrl": "/proceedings-article/wi/2016/4470a709/12OmNxwENqO", "parentPublication": { "id": "proceedings/wi/2016/4470/0", "title": "2016 IEEE/WIC/ACM International Conference on Web Intelligence (WI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vast/2012/4752/0/06400535", "title": "Priming Locus of Control to affect performance", "doi": null, "abstractUrl": "/proceedings-article/vast/2012/06400535/12OmNxwWoCm", "parentPublication": { "id": "proceedings/vast/2012/4752/0", "title": "2012 IEEE Conference on Visual Analytics Science and Technology (VAST 2012)", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "mags/cg/2009/03/mcg2009030052", "title": "Recovering Reasoning Processes from User Interactions", "doi": null, "abstractUrl": "/magazine/cg/2009/03/mcg2009030052/13rRUNvyans", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2017/01/07534759", "title": "A Grammar-based Approach for Modeling User Interactions and Generating Suggestions During the Data Exploration Process", "doi": null, "abstractUrl": "/journal/tg/2017/01/07534759/13rRUxE04tG", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2013/06/ttg2013061034", "title": "PIWI: Visually Exploring Graphs Based on Their Community Structure", "doi": null, "abstractUrl": "/journal/tg/2013/06/ttg2013061034/13rRUxd2aYZ", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2016/01/07192716", "title": "The Role of Uncertainty, Awareness, and Trust in Visual Analytics", "doi": null, "abstractUrl": "/journal/tg/2016/01/07192716/13rRUxlgxTo", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2013/07/ttg2013071109", "title": "How Visualization Layout Relates to Locus of Control and Other Personality Factors", "doi": null, "abstractUrl": "/journal/tg/2013/07/ttg2013071109/13rRUy3xY2O", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"trans/tg/2016/01/07192662", "title": "A Case Study Using Visualization Interaction Logs and Insight Metrics to Understand How Analysts Arrive at Insights", "doi": null, "abstractUrl": "/journal/tg/2016/01/07192662/13rRUyuegha", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/mlui/2018/4063/0/10075648", "title": "Using Hidden Markov Models to Determine Cognitive States of Visual Analytic Users", "doi": null, "abstractUrl": "/proceedings-article/mlui/2018/10075648/1LIRyUMEbxm", "parentPublication": { "id": "proceedings/mlui/2018/4063/0", "title": "2018 IEEE Workshop on Machine Learning from User Interaction for Visualization and Analytics (MLUI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "06876049", "articleId": "13rRUyogGAd", "__typename": "AdjacentArticleType" }, "next": { "fno": "06876009", "articleId": "13rRUxASu0L", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "17ShDTYet4v", "name": "ttg201412-06875913s1.zip", "location": "https://www.computer.org/csdl/api/v1/extra/ttg201412-06875913s1.zip", "extension": "zip", "size": "6.5 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "1txPs9C3tok", "title": "June", "year": "2021", "issueNum": "06", "idPrefix": "tg", "pubType": "journal", "volume": "27", "label": "June", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1r3l972fCk8", "doi": "10.1109/TVCG.2021.3057483", "abstract": "To convey neural network architectures in publications, appropriate visualizations are of great importance. While most current deep learning papers contain such visualizations, these are usually handcrafted just before publication, which results in a lack of a common visual grammar, significant time investment, errors, and ambiguities. Current automatic network visualization tools focus on debugging the network itself and are not ideal for generating publication visualizations. Therefore, we present an approach to automate this process by translating network architectures specified in Keras into visualizations that can directly be embedded into any publication. To do so, we propose a visual grammar for convolutional neural networks (CNNs), which has been derived from an analysis of such figures extracted from all ICCV and CVPR papers published between 2013 and 2019. The proposed grammar incorporates visual encoding, network layout, layer aggregation, and legend generation. We have further realized our approach in an online system available to the community, which we have evaluated through expert feedback, and a quantitative study. It not only reduces the time needed to generate network visualizations for publications, but also enables a unified and unambiguous visualization design.", "abstracts": [ { "abstractType": "Regular", "content": "To convey neural network architectures in publications, appropriate visualizations are of great importance. 
While most current deep learning papers contain such visualizations, these are usually handcrafted just before publication, which results in a lack of a common visual grammar, significant time investment, errors, and ambiguities. Current automatic network visualization tools focus on debugging the network itself and are not ideal for generating publication visualizations. Therefore, we present an approach to automate this process by translating network architectures specified in Keras into visualizations that can directly be embedded into any publication. To do so, we propose a visual grammar for convolutional neural networks (CNNs), which has been derived from an analysis of such figures extracted from all ICCV and CVPR papers published between 2013 and 2019. The proposed grammar incorporates visual encoding, network layout, layer aggregation, and legend generation. We have further realized our approach in an online system available to the community, which we have evaluated through expert feedback, and a quantitative study. It not only reduces the time needed to generate network visualizations for publications, but also enables a unified and unambiguous visualization design.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "To convey neural network architectures in publications, appropriate visualizations are of great importance. While most current deep learning papers contain such visualizations, these are usually handcrafted just before publication, which results in a lack of a common visual grammar, significant time investment, errors, and ambiguities. Current automatic network visualization tools focus on debugging the network itself and are not ideal for generating publication visualizations. Therefore, we present an approach to automate this process by translating network architectures specified in Keras into visualizations that can directly be embedded into any publication. 
To do so, we propose a visual grammar for convolutional neural networks (CNNs), which has been derived from an analysis of such figures extracted from all ICCV and CVPR papers published between 2013 and 2019. The proposed grammar incorporates visual encoding, network layout, layer aggregation, and legend generation. We have further realized our approach in an online system available to the community, which we have evaluated through expert feedback, and a quantitative study. It not only reduces the time needed to generate network visualizations for publications, but also enables a unified and unambiguous visualization design.", "title": "Net2Vis &#x2013; A Visual Grammar for Automatically Generating Publication-Tailored CNN Architecture Visualizations", "normalizedTitle": "Net2Vis – A Visual Grammar for Automatically Generating Publication-Tailored CNN Architecture Visualizations", "fno": "09350177", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Convolutional Neural Nets", "Data Visualisation", "Deep Learning Artificial Intelligence", "Grammars", "Neural Net Architecture", "Automatic Network Visualization Tools", "Net 2 Vis", "Unambiguous Visualization Design", "Unified Visualization Design", "Legend Generation", "Network Layout", "Visual Encoding", "Convolutional Neural Networks", "Publication Visualizations", "Visual Grammar", "Deep Learning", "Neural Network Architectures", "Automatically Generating Publication Tailored CNN Architecture Visualizations", "Data Visualization", "Visualization", "Network Architecture", "Computer Architecture", "Grammar", "Layout", "Encoding", "Neural Networks", "Architecture Visualization", "Graph Layouting" ], "authors": [ { "givenName": "Alex", "surname": "Bäuerle", "fullName": "Alex Bäuerle", "affiliation": "Visual Computing Group at Ulm University, Ulm, Germany", "__typename": "ArticleAuthorType" }, { "givenName": "Christian", "surname": "van Onzenoodt", "fullName": "Christian van Onzenoodt", "affiliation": "Visual Computing 
Group at Ulm University, Ulm, Germany", "__typename": "ArticleAuthorType" }, { "givenName": "Timo", "surname": "Ropinski", "fullName": "Timo Ropinski", "affiliation": "Visual Computing Group at Ulm University, Ulm, Germany", "__typename": "ArticleAuthorType" } ], "replicability": { "isEnabled": true, "codeDownloadUrl": "https://github.com/viscom-ulm/Net2Vis.git", "codeRepositoryUrl": "https://github.com/viscom-ulm/Net2Vis", "__typename": "ArticleReplicabilityType" }, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "06", "pubDate": "2021-06-01 00:00:00", "pubType": "trans", "pages": "2980-2991", "year": "2021", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "trans/tg/2017/01/07539624", "title": "Vega-Lite: A Grammar of Interactive Graphics", "doi": null, "abstractUrl": "/journal/tg/2017/01/07539624/13rRUIJuxvn", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2018/12/08233127", "title": "Atom: A Grammar for Unit Visualizations", "doi": null, "abstractUrl": "/journal/tg/2018/12/08233127/14H4WLzSYsE", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/01/08440063", "title": "A Declarative Grammar of Flexible Volume Visualization Pipelines", "doi": null, "abstractUrl": "/journal/tg/2019/01/08440063/17D45XacGi1", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/09916137", "title": "Revisiting the Design Patterns of Composite Visualizations", "doi": null, "abstractUrl": 
"/journal/tg/5555/01/09916137/1HojAjSAGNq", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2019/05/08744242", "title": "Data2Vis: Automatic Generation of Data Visualizations Using Sequence-to-Sequence Recurrent Neural Networks", "doi": null, "abstractUrl": "/magazine/cg/2019/05/08744242/1cFV5domibu", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/02/09222038", "title": "Kyrix-S: Authoring Scalable Scatterplot Visualizations of Big Data", "doi": null, "abstractUrl": "/journal/tg/2021/02/09222038/1nTq1lYLbEY", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vis/2020/8014/0/801400a131", "title": "Encodable: Configurable Grammar for Visualization Components", "doi": null, "abstractUrl": "/proceedings-article/vis/2020/801400a131/1qRNXTuFymI", "parentPublication": { "id": "proceedings/vis/2020/8014/0", "title": "2020 IEEE Visualization Conference (VIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/12/09417674", "title": "Nebula: A Coordinating Grammar of Graphics", "doi": null, "abstractUrl": "/journal/tg/2022/12/09417674/1taANyFFcmQ", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/01/09557192", "title": "Gosling: A Grammar-based Toolkit for Scalable and Interactive Genomics Data Visualization", "doi": null, "abstractUrl": "/journal/tg/2022/01/09557192/1xlw1UFWxDa", "parentPublication": { "id": 
"trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vis/2021/3335/0/333500a171", "title": "Atlas: Grammar-based Procedural Generation of Data Visualizations", "doi": null, "abstractUrl": "/proceedings-article/vis/2021/333500a171/1yXulf0d488", "parentPublication": { "id": "proceedings/vis/2021/3335/0", "title": "2021 IEEE Visualization Conference (VIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "08907402", "articleId": "1f75TiiWgik", "__typename": "AdjacentArticleType" }, "next": { "fno": "08936883", "articleId": "1fTdX59qZUs", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "1txPGTwsJoY", "name": "ttg202106-09350177s1-tvcg-3057483-mm.zip", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202106-09350177s1-tvcg-3057483-mm.zip", "extension": "zip", "size": "236 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "1Fp5V6TYCEE", "title": "Sept.", "year": "2022", "issueNum": "09", "idPrefix": "tg", "pubType": "journal", "volume": "28", "label": "Sept.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1Eb192ypuzS", "doi": "10.1109/TVCG.2022.3182488", "abstract": "The base learners and labeled samples (shots) in an ensemble few-shot classifier greatly affect the model performance. When the performance is not satisfactory, it is usually difficult to understand the underlying causes and make improvements. To tackle this issue, we propose a visual analysis method, FSLDiagnotor. Given a set of base learners and a collection of samples with a few shots, we consider two problems: 1) finding a subset of base learners that well predict the sample collections; and 2) replacing the low-quality shots with more representative ones to adequately represent the sample collections. We formulate both problems as sparse subset selection and develop two selection algorithms to recommend appropriate learners and shots, respectively. A matrix visualization and a scatterplot are combined to explain the recommended learners and shots in context and facilitate users in adjusting them. Based on the adjustment, the algorithm updates the recommendation results for another round of improvement. Two case studies are conducted to demonstrate that FSLDiagnotor helps build a few-shot classifier efficiently and increases the accuracy by 12&#x0025; and 21&#x0025;, respectively.", "abstracts": [ { "abstractType": "Regular", "content": "The base learners and labeled samples (shots) in an ensemble few-shot classifier greatly affect the model performance. When the performance is not satisfactory, it is usually difficult to understand the underlying causes and make improvements. To tackle this issue, we propose a visual analysis method, FSLDiagnotor. 
Given a set of base learners and a collection of samples with a few shots, we consider two problems: 1) finding a subset of base learners that well predict the sample collections; and 2) replacing the low-quality shots with more representative ones to adequately represent the sample collections. We formulate both problems as sparse subset selection and develop two selection algorithms to recommend appropriate learners and shots, respectively. A matrix visualization and a scatterplot are combined to explain the recommended learners and shots in context and facilitate users in adjusting them. Based on the adjustment, the algorithm updates the recommendation results for another round of improvement. Two case studies are conducted to demonstrate that FSLDiagnotor helps build a few-shot classifier efficiently and increases the accuracy by 12&#x0025; and 21&#x0025;, respectively.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "The base learners and labeled samples (shots) in an ensemble few-shot classifier greatly affect the model performance. When the performance is not satisfactory, it is usually difficult to understand the underlying causes and make improvements. To tackle this issue, we propose a visual analysis method, FSLDiagnotor. Given a set of base learners and a collection of samples with a few shots, we consider two problems: 1) finding a subset of base learners that well predict the sample collections; and 2) replacing the low-quality shots with more representative ones to adequately represent the sample collections. We formulate both problems as sparse subset selection and develop two selection algorithms to recommend appropriate learners and shots, respectively. A matrix visualization and a scatterplot are combined to explain the recommended learners and shots in context and facilitate users in adjusting them. Based on the adjustment, the algorithm updates the recommendation results for another round of improvement. 
Two case studies are conducted to demonstrate that FSLDiagnotor helps build a few-shot classifier efficiently and increases the accuracy by 12% and 21%, respectively.", "title": "Diagnosing Ensemble Few-Shot Classifiers", "normalizedTitle": "Diagnosing Ensemble Few-Shot Classifiers", "fno": "09795241", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Data Analysis", "Data Visualisation", "Image Classification", "Learning Artificial Intelligence", "Pattern Classification", "Few Shot Classifier", "Base Learners", "Visual Analysis Method", "Sample Collections", "Low Quality Shots", "Sparse Subset Selection", "Appropriate Learners", "Recommended Learners", "Visualization", "Feature Extraction", "Predictive Models", "Training", "Analytical Models", "Data Models", "Behavioral Sciences", "Few Shot Learning", "Ensemble Model", "Subset Selection", "Matrix Visualization", "Scatterplot" ], "authors": [ { "givenName": "Weikai", "surname": "Yang", "fullName": "Weikai Yang", "affiliation": "School of Software, BNRist, Tsinghua University, Beijing, China", "__typename": "ArticleAuthorType" }, { "givenName": "Xi", "surname": "Ye", "fullName": "Xi Ye", "affiliation": "University of Texas at Austin, Austin, TX, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Xingxing", "surname": "Zhang", "fullName": "Xingxing Zhang", "affiliation": "Department of Computer Science and Technology, BNRist, Tsinghua University, Beijing, China", "__typename": "ArticleAuthorType" }, { "givenName": "Lanxi", "surname": "Xiao", "fullName": "Lanxi Xiao", "affiliation": "Academy of Arts & Design, Tsinghua University, Beijing, China", "__typename": "ArticleAuthorType" }, { "givenName": "Jiazhi", "surname": "Xia", "fullName": "Jiazhi Xia", "affiliation": "Central South University, Changsha, Hunan, China", "__typename": "ArticleAuthorType" }, { "givenName": "Zhongyuan", "surname": "Wang", "fullName": "Zhongyuan Wang", "affiliation": "Kuaishou Technology Company Ltd., Beijing, China", "__typename": 
"ArticleAuthorType" }, { "givenName": "Jun", "surname": "Zhu", "fullName": "Jun Zhu", "affiliation": "Department of Computer Science and Technology, BNRist, Tsinghua University, Beijing, China", "__typename": "ArticleAuthorType" }, { "givenName": "Hanspeter", "surname": "Pfister", "fullName": "Hanspeter Pfister", "affiliation": "Harvard University, Cambridge, MA, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Shixia", "surname": "Liu", "fullName": "Shixia Liu", "affiliation": "School of Software, BNRist, Tsinghua University, Beijing, China", "__typename": "ArticleAuthorType" } ], "replicability": { "isEnabled": true, "codeDownloadUrl": "https://github.com/thu-vis/FSLDiagnotor.git", "codeRepositoryUrl": "https://github.com/thu-vis/FSLDiagnotor", "__typename": "ArticleReplicabilityType" }, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "09", "pubDate": "2022-09-01 00:00:00", "pubType": "trans", "pages": "3292-3306", "year": "2022", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/wacv/2022/0915/0/091500c040", "title": "Ortho-Shot: Low Displacement Rank Regularization with Data Augmentation for Few-Shot Learning", "doi": null, "abstractUrl": "/proceedings-article/wacv/2022/091500c040/1B13PJ5gGRi", "parentPublication": { "id": "proceedings/wacv/2022/0915/0", "title": "2022 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/5555/01/09737396", "title": "Few-Shot Learning with a Strong Teacher", "doi": null, "abstractUrl": "/journal/tp/5555/01/09737396/1BQibna3gm4", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2023/01/09720733", "title": "Dataset 
Bias in Few-Shot Image Recognition", "doi": null, "abstractUrl": "/journal/tp/2023/01/09720733/1BfU59nrjwY", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/hpcc-dss-smartcity-dependsys/2021/9457/0/945700c404", "title": "MetaProfiling: Inferring User Profiles with Few-Shot Data", "doi": null, "abstractUrl": "/proceedings-article/hpcc-dss-smartcity-dependsys/2021/945700c404/1DNCuIhFBM4", "parentPublication": { "id": "proceedings/hpcc-dss-smartcity-dependsys/2021/9457/0", "title": "2021 IEEE 23rd Int Conf on High Performance Computing & Communications; 7th Int Conf on Data Science & Systems; 19th Int Conf on Smart City; 7th Int Conf on Dependability in Sensor, Cloud & Big Data Systems & Application (HPCC/DSS/SmartCity/DependSys)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2022/8739/0/873900d687", "title": "Variable Few Shot Class Incremental and Open World Learning", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2022/873900d687/1G57q1hiERG", "parentPublication": { "id": "proceedings/cvprw/2022/8739/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600i047", "title": "Learning What Not to Segment: A New Perspective on Few-Shot Segmentation", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600i047/1H1ks6Wxccg", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600o4217", "title": "Label, Verify, Correct: A Simple Few Shot Object 
Detection Method", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600o4217/1H1m6naU4XC", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2019/4803/0/480300a331", "title": "Few-Shot Learning With Embedded Class Models and Shot-Free Meta Training", "doi": null, "abstractUrl": "/proceedings-article/iccv/2019/480300a331/1hVlSHT3MpW", "parentPublication": { "id": "proceedings/iccv/2019/4803/0", "title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2021/3864/0/09428122", "title": "GSS: Graph-Based Subspace Learning with Shots Initialization for few-shot Recognition", "doi": null, "abstractUrl": "/proceedings-article/icme/2021/09428122/1uim3R9kMWA", "parentPublication": { "id": "proceedings/icme/2021/3864/0", "title": "2021 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ictai/2021/0898/0/089800a528", "title": "Few-Shot Adaptive Detection of Objects of Concern Using Generative Models with Negative Retraining", "doi": null, "abstractUrl": "/proceedings-article/ictai/2021/089800a528/1zw6iYI1GM0", "parentPublication": { "id": "proceedings/ictai/2021/0898/0", "title": "2021 IEEE 33rd International Conference on Tools with Artificial Intelligence (ICTAI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "09676662", "articleId": "1A4SvuEiTSw", "__typename": "AdjacentArticleType" }, "next": { "fno": "09321557", "articleId": "1qmbp8bk4FO", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": 
"1Fp638dcvGE", "name": "ttg202209-09795241s1-supp1-3182488.mp4", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202209-09795241s1-supp1-3182488.mp4", "extension": "mp4", "size": "78.4 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "12OmNvsDHDY", "title": "Jan.", "year": "2020", "issueNum": "01", "idPrefix": "tg", "pubType": "journal", "volume": "26", "label": "Jan.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1cG6piAXFwQ", "doi": "10.1109/TVCG.2019.2934619", "abstract": "A key challenge in developing and deploying Machine Learning (ML) systems is understanding their performance across a wide range of inputs. To address this challenge, we created the What-If Tool, an open-source application that allows practitioners to probe, visualize, and analyze ML systems, with minimal coding. The What-If Tool lets practitioners test performance in hypothetical situations, analyze the importance of different data features, and visualize model behavior across multiple models and subsets of input data. It also lets practitioners measure systems according to multiple ML fairness metrics. We describe the design of the tool, and report on real-life usage at different organizations.", "abstracts": [ { "abstractType": "Regular", "content": "A key challenge in developing and deploying Machine Learning (ML) systems is understanding their performance across a wide range of inputs. To address this challenge, we created the What-If Tool, an open-source application that allows practitioners to probe, visualize, and analyze ML systems, with minimal coding. The What-If Tool lets practitioners test performance in hypothetical situations, analyze the importance of different data features, and visualize model behavior across multiple models and subsets of input data. It also lets practitioners measure systems according to multiple ML fairness metrics. 
We describe the design of the tool, and report on real-life usage at different organizations.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "A key challenge in developing and deploying Machine Learning (ML) systems is understanding their performance across a wide range of inputs. To address this challenge, we created the What-If Tool, an open-source application that allows practitioners to probe, visualize, and analyze ML systems, with minimal coding. The What-If Tool lets practitioners test performance in hypothetical situations, analyze the importance of different data features, and visualize model behavior across multiple models and subsets of input data. It also lets practitioners measure systems according to multiple ML fairness metrics. We describe the design of the tool, and report on real-life usage at different organizations.", "title": "The What-If Tool: Interactive Probing of Machine Learning Models", "normalizedTitle": "The What-If Tool: Interactive Probing of Machine Learning Models", "fno": "08807255", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Data Analysis", "Data Visualisation", "Interactive Systems", "Learning Artificial Intelligence", "Public Domain Software", "Interactive Probing", "Machine Learning Models", "Open Source Application", "ML Systems", "Data Features", "Model Behavior Visualization", "What If Tool", "ML Fairness Metrics", "Tools", "Data Models", "Data Visualization", "Analytical Models", "Predictive Models", "Machine Learning", "Computational Modeling", "Interactive Machine Learning", "Model Debugging", "Model Comparison" ], "authors": [ { "givenName": "James", "surname": "Wexler", "fullName": "James Wexler", "affiliation": "Google Research", "__typename": "ArticleAuthorType" }, { "givenName": "Mahima", "surname": "Pushkarna", "fullName": "Mahima Pushkarna", "affiliation": "Google Research", "__typename": "ArticleAuthorType" }, { "givenName": "Tolga", "surname": "Bolukbasi", "fullName": "Tolga Bolukbasi", 
"affiliation": "Google Research", "__typename": "ArticleAuthorType" }, { "givenName": "Martin", "surname": "Wattenberg", "fullName": "Martin Wattenberg", "affiliation": "Google Research", "__typename": "ArticleAuthorType" }, { "givenName": "Fernanda", "surname": "Viégas", "fullName": "Fernanda Viégas", "affiliation": "Google Research", "__typename": "ArticleAuthorType" }, { "givenName": "Jimbo", "surname": "Wilson", "fullName": "Jimbo Wilson", "affiliation": "Google Research", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": false, "showRecommendedArticles": true, "isOpenAccess": true, "issueNum": "01", "pubDate": "2020-01-01 00:00:00", "pubType": "trans", "pages": "56-65", "year": "2020", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/apsec/2017/3681/0/3681a505", "title": "What-If Model Construction and Validation of Web Systems Based on Log Mining", "doi": null, "abstractUrl": "/proceedings-article/apsec/2017/3681a505/12OmNroijj0", "parentPublication": { "id": "proceedings/apsec/2017/3681/0", "title": "2017 24th Asia-Pacific Software Engineering Conference (APSEC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vlhcc/2016/0252/0/07739669", "title": "Diagnostic visualization for non-expert machine learning practitioners: A design study", "doi": null, "abstractUrl": "/proceedings-article/vlhcc/2016/07739669/12OmNwKoZix", "parentPublication": { "id": "proceedings/vlhcc/2016/0252/0", "title": "2016 IEEE Symposium on Visual Languages and Human-Centric Computing (VL/HCC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/chase/2017/4722/0/4722a280", "title": "VRvisu: A Tool for Virtual Reality Based Visualization of Medical Data", "doi": null, "abstractUrl": "/proceedings-article/chase/2017/4722a280/12OmNyRg4D9", "parentPublication": { "id": 
"proceedings/chase/2017/4722/0", "title": "2017 IEEE/ACM International Conference on Connected Health: Applications, Systems and Engineering Technologies (CHASE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2017/01/07539404", "title": "Squares: Supporting Interactive Performance Analysis for Multiclass Classifiers", "doi": null, "abstractUrl": "/journal/tg/2017/01/07539404/13rRUxAASVX", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/csci/2021/5841/0/584100b574", "title": "Application Note: &#x03BC;Polar - An Interactive 2D Visualization Tool for Microscopic Time-Series Images", "doi": null, "abstractUrl": "/proceedings-article/csci/2021/584100b574/1EpLtUgQpuU", "parentPublication": { "id": "proceedings/csci/2021/5841/0", "title": "2021 International Conference on Computational Science and Computational Intelligence (CSCI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vis/2022/8812/0/881200a060", "title": "TimberTrek: Exploring and Curating Sparse Decision Trees with Interactive Visualization", "doi": null, "abstractUrl": "/proceedings-article/vis/2022/881200a060/1J6halInwS4", "parentPublication": { "id": "proceedings/vis/2022/8812/0", "title": "2022 IEEE Visualization and Visual Analytics (VIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/springsim/2020/370/0/09185463", "title": "The Mitre Maternal Mortality Interactive Dashboard (3mid): A Tool For Assessing The Effectiveness and Equity of Quality Improvement Toolkits on Maternal Care", "doi": null, "abstractUrl": "/proceedings-article/springsim/2020/09185463/1mP5ZJDpCHS", "parentPublication": { "id": "proceedings/springsim/2020/370/0", "title": "2020 Spring Simulation Conference (SpringSim)", 
"__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/02/09222284", "title": "HypoML: Visual Analysis for Hypothesis-based Evaluation of Machine Learning Models", "doi": null, "abstractUrl": "/journal/tg/2021/02/09222284/1nTqeahejo4", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/deeptest/2021/4565/0/456500a025", "title": "TF-DM: Tool for Studying ML Model Resilience to Data Faults", "doi": null, "abstractUrl": "/proceedings-article/deeptest/2021/456500a025/1vcNFasA3MQ", "parentPublication": { "id": "proceedings/deeptest/2021/4565/0", "title": "2021 IEEE/ACM Third International Workshop on Deep Learning for Testing and Testing for Deep Learning (DeepTest)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iri/2021/3875/0/387500a240", "title": "A Methodology and Tool for the Predictive Analysis of Cost Growth in Construction Projects", "doi": null, "abstractUrl": "/proceedings-article/iri/2021/387500a240/1yBG9LvLSuc", "parentPublication": { "id": "proceedings/iri/2021/3875/0", "title": "2021 IEEE 22nd International Conference on Information Reuse and Integration for Data Science (IRI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "08805461", "articleId": "1cG4ulCK5S8", "__typename": "AdjacentArticleType" }, "next": { "fno": "08805460", "articleId": "1cG4LZKJm4o", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "1Fp5V6TYCEE", "title": "Sept.", "year": "2022", "issueNum": "09", "idPrefix": "tg", "pubType": "journal", "volume": "28", "label": "Sept.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1qmbp8bk4FO", "doi": "10.1109/TVCG.2020.3045560", "abstract": "Visual analytics enables the coupling of machine learning models and humans in a tightly integrated workflow, addressing various analysis tasks. Each task poses distinct demands to analysts and decision-makers. In this survey, we focus on one canonical technique for rule-based classification, namely decision tree classifiers. We provide an overview of available visualizations for decision trees with a focus on how visualizations differ with respect to 16 tasks. Further, we investigate the types of visual designs employed, and the quality measures presented. We find that (i) interactive visual analytics systems for classifier development offer a variety of visual designs, (ii) utilization tasks are sparsely covered, (iii) beyond classifier development, node-link diagrams are omnipresent, (iv) even systems designed for machine learning experts rarely feature visual representations of quality measures other than accuracy. In conclusion, we see a potential for integrating algorithmic techniques, mathematical quality measures, and tailored interactive visualizations to enable human experts to utilize their knowledge more effectively.", "abstracts": [ { "abstractType": "Regular", "content": "Visual analytics enables the coupling of machine learning models and humans in a tightly integrated workflow, addressing various analysis tasks. Each task poses distinct demands to analysts and decision-makers. In this survey, we focus on one canonical technique for rule-based classification, namely decision tree classifiers. 
We provide an overview of available visualizations for decision trees with a focus on how visualizations differ with respect to 16 tasks. Further, we investigate the types of visual designs employed, and the quality measures presented. We find that (i) interactive visual analytics systems for classifier development offer a variety of visual designs, (ii) utilization tasks are sparsely covered, (iii) beyond classifier development, node-link diagrams are omnipresent, (iv) even systems designed for machine learning experts rarely feature visual representations of quality measures other than accuracy. In conclusion, we see a potential for integrating algorithmic techniques, mathematical quality measures, and tailored interactive visualizations to enable human experts to utilize their knowledge more effectively.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Visual analytics enables the coupling of machine learning models and humans in a tightly integrated workflow, addressing various analysis tasks. Each task poses distinct demands to analysts and decision-makers. In this survey, we focus on one canonical technique for rule-based classification, namely decision tree classifiers. We provide an overview of available visualizations for decision trees with a focus on how visualizations differ with respect to 16 tasks. Further, we investigate the types of visual designs employed, and the quality measures presented. We find that (i) interactive visual analytics systems for classifier development offer a variety of visual designs, (ii) utilization tasks are sparsely covered, (iii) beyond classifier development, node-link diagrams are omnipresent, (iv) even systems designed for machine learning experts rarely feature visual representations of quality measures other than accuracy. 
In conclusion, we see a potential for integrating algorithmic techniques, mathematical quality measures, and tailored interactive visualizations to enable human experts to utilize their knowledge more effectively.", "title": "Task-Based Visual Interactive Modeling: Decision Trees and Rule-Based Classifiers", "normalizedTitle": "Task-Based Visual Interactive Modeling: Decision Trees and Rule-Based Classifiers", "fno": "09321557", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Data Analysis", "Data Visualisation", "Decision Trees", "Interactive Systems", "Learning Artificial Intelligence", "Pattern Classification", "Decision Tree Classifiers", "Available Visualizations", "Decision Trees", "Visual Designs", "Interactive Visual Analytics Systems", "Classifier Development", "Utilization Tasks", "Machine Learning Experts", "Visual Representations", "Mathematical Quality Measures", "Interactive Visualizations", "Task Based Visual Interactive Modeling", "Rule Based Classifiers", "Machine Learning Models", "Tightly Integrated Workflow", "Analysis Tasks", "Distinct Demands", "Decision Makers", "Canonical Technique", "Rule Based Classification", "Decision Trees", "Task Analysis", "Visual Analytics", "Machine Learning", "Analytical Models", "Data Visualization", "Libraries", "Decision Trees", "Rule Based Classification", "Visual Analytics", "Interactive Machine Learning", "Interactive Model Analysis", "Survey", "Visualization" ], "authors": [ { "givenName": "Dirk", "surname": "Streeb", "fullName": "Dirk Streeb", "affiliation": "Universität Konstanz, Konstanz, Germany", "__typename": "ArticleAuthorType" }, { "givenName": "Yannick", "surname": "Metz", "fullName": "Yannick Metz", "affiliation": "Universität Konstanz, Konstanz, Germany", "__typename": "ArticleAuthorType" }, { "givenName": "Udo", "surname": "Schlegel", "fullName": "Udo Schlegel", "affiliation": "Universität Konstanz, Konstanz, Germany", "__typename": "ArticleAuthorType" }, { "givenName": "Bruno", "surname": 
"Schneider", "fullName": "Bruno Schneider", "affiliation": "Universität Konstanz, Konstanz, Germany", "__typename": "ArticleAuthorType" }, { "givenName": "Mennatallah", "surname": "El-Assady", "fullName": "Mennatallah El-Assady", "affiliation": "Universität Konstanz, Konstanz, Germany", "__typename": "ArticleAuthorType" }, { "givenName": "Hansjörg", "surname": "Neth", "fullName": "Hansjörg Neth", "affiliation": "Universität Konstanz, Konstanz, Germany", "__typename": "ArticleAuthorType" }, { "givenName": "Min", "surname": "Chen", "fullName": "Min Chen", "affiliation": "University of Oxford, Oxford, U.K.", "__typename": "ArticleAuthorType" }, { "givenName": "Daniel A.", "surname": "Keim", "fullName": "Daniel A. Keim", "affiliation": "Universität Konstanz, Konstanz, Germany", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "09", "pubDate": "2022-09-01 00:00:00", "pubType": "trans", "pages": "3307-3323", "year": "2022", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/edocw/2016/9933/0/07584388", "title": "Social Set Visualizer (SoSeVi) II: Interactive Computational Set Analysis of Big Social Data", "doi": null, "abstractUrl": "/proceedings-article/edocw/2016/07584388/12OmNAqkSGE", "parentPublication": { "id": "proceedings/edocw/2016/9933/0", "title": "2016 IEEE 20th International Enterprise Distributed Object Computing Workshop (EDOCW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vizsec/2017/2693/0/08062199", "title": "The goods, the bads and the uglies: Supporting decisions in malware detection through visual analytics", "doi": null, "abstractUrl": "/proceedings-article/vizsec/2017/08062199/12OmNCdk2G7", "parentPublication": { "id": "proceedings/vizsec/2017/2693/0", "title": "2017 IEEE Symposium on Visualization for 
Cyber Security (VizSec)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vast/2014/6227/0/07042482", "title": "An insight- and task-based methodology for evaluating spatiotemporal visual analytics", "doi": null, "abstractUrl": "/proceedings-article/vast/2014/07042482/12OmNwp74wP", "parentPublication": { "id": "proceedings/vast/2014/6227/0", "title": "2014 IEEE Conference on Visual Analytics Science and Technology (VAST)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/hicss/2016/5670/0/5670b426", "title": "Introduction to the Minitrack on Interactive Visual Decision Analytics", "doi": null, "abstractUrl": "/proceedings-article/hicss/2016/5670b426/12OmNzWfoUn", "parentPublication": { "id": "proceedings/hicss/2016/5670/0", "title": "2016 49th Hawaii International Conference on System Sciences (HICSS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/isspit/2013/4796/0/06781845", "title": "KnowYourColors: Visual dashboards for blood metrics and healthcare analytics", "doi": null, "abstractUrl": "/proceedings-article/isspit/2013/06781845/12OmNzlly1J", "parentPublication": { "id": "proceedings/isspit/2013/4796/0", "title": "2013 IEEE International Symposium on Signal Processing and Information Technology (ISSPIT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2014/12/06875941", "title": "Run Watchers: Automatic Simulation-Based Decision Support in Flood Management", "doi": null, "abstractUrl": "/journal/tg/2014/12/06875941/13rRUxjQybU", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vis/2022/8812/0/881200a055", "title": "Visualizing Rule-based Classifiers for Clinical Risk Prognosis", "doi": null, 
"abstractUrl": "/proceedings-article/vis/2022/881200a055/1J6h7XhAuMo", "parentPublication": { "id": "proceedings/vis/2022/8812/0", "title": "2022 IEEE Visualization and Visual Analytics (VIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vis/2022/8812/0/881200a060", "title": "TimberTrek: Exploring and Curating Sparse Decision Trees with Interactive Visualization", "doi": null, "abstractUrl": "/proceedings-article/vis/2022/881200a060/1J6halInwS4", "parentPublication": { "id": "proceedings/vis/2022/8812/0", "title": "2022 IEEE Visualization and Visual Analytics (VIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/trex/2022/9356/0/935600a016", "title": "Kicking Analysts Out of the Meeting Room: Supporting Future Data-driven Decision Making with Intelligent Interactive Visualization Systems", "doi": null, "abstractUrl": "/proceedings-article/trex/2022/935600a016/1J9BlQvVmdq", "parentPublication": { "id": "proceedings/trex/2022/9356/0", "title": "2022 IEEE Workshop on TRust and EXpertise in Visual Analytics (TREX)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/12/09492011", "title": "A Survey of Perception-Based Visualization Studies by Task", "doi": null, "abstractUrl": "/journal/tg/2022/12/09492011/1volPuHGMdW", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "09795241", "articleId": "1Eb192ypuzS", "__typename": "AdjacentArticleType" }, "next": null, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "1Fp6h8XY5MY", "name": "ttg202209-09321557s1-supp1-3045560.pdf", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202209-09321557s1-supp1-3045560.pdf", "extension": "pdf", "size": "241 kB", "__typename": 
"WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "12OmNwpGgK8", "title": "Dec.", "year": "2014", "issueNum": "12", "idPrefix": "tg", "pubType": "journal", "volume": "20", "label": "Dec.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUILLkvt", "doi": "10.1109/TVCG.2014.2346481", "abstract": "Visual analytics enables us to analyze huge information spaces in order to support complex decision making and data exploration. Humans play a central role in generating knowledge from the snippets of evidence emerging from visual data analysis. Although prior research provides frameworks that generalize this process, their scope is often narrowly focused so they do not encompass different perspectives at different levels. This paper proposes a knowledge generation model for visual analytics that ties together these diverse frameworks, yet retains previously developed models (e.g., KDD process) to describe individual segments of the overall visual analytic processes. To test its utility, a real world visual analytics system is compared against the model, demonstrating that the knowledge generation process model provides a useful guideline when developing and evaluating such systems. The model is used to effectively compare different data analysis systems. Furthermore, the model provides a common language and description of visual analytic processes, which can be used for communication between researchers. At the end, our model reflects areas of research that future researchers can embark on.", "abstracts": [ { "abstractType": "Regular", "content": "Visual analytics enables us to analyze huge information spaces in order to support complex decision making and data exploration. Humans play a central role in generating knowledge from the snippets of evidence emerging from visual data analysis. 
Although prior research provides frameworks that generalize this process, their scope is often narrowly focused so they do not encompass different perspectives at different levels. This paper proposes a knowledge generation model for visual analytics that ties together these diverse frameworks, yet retains previously developed models (e.g., KDD process) to describe individual segments of the overall visual analytic processes. To test its utility, a real world visual analytics system is compared against the model, demonstrating that the knowledge generation process model provides a useful guideline when developing and evaluating such systems. The model is used to effectively compare different data analysis systems. Furthermore, the model provides a common language and description of visual analytic processes, which can be used for communication between researchers. At the end, our model reflects areas of research that future researchers can embark on.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Visual analytics enables us to analyze huge information spaces in order to support complex decision making and data exploration. Humans play a central role in generating knowledge from the snippets of evidence emerging from visual data analysis. Although prior research provides frameworks that generalize this process, their scope is often narrowly focused so they do not encompass different perspectives at different levels. This paper proposes a knowledge generation model for visual analytics that ties together these diverse frameworks, yet retains previously developed models (e.g., KDD process) to describe individual segments of the overall visual analytic processes. To test its utility, a real world visual analytics system is compared against the model, demonstrating that the knowledge generation process model provides a useful guideline when developing and evaluating such systems. 
The model is used to effectively compare different data analysis systems. Furthermore, the model provides a common language and description of visual analytic processes, which can be used for communication between researchers. At the end, our model reflects areas of research that future researchers can embark on.", "title": "Knowledge Generation Model for Visual Analytics", "normalizedTitle": "Knowledge Generation Model for Visual Analytics", "fno": "06875967", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Data Visualization", "Visual Analytics", "Analytical Models", "Data Models", "Computational Modeling", "Interaction", "Visual Analytics", "Knowledge Generation", "Reasoning", "Visualization Taxonomies And Models" ], "authors": [ { "givenName": "Dominik", "surname": "Sacha", "fullName": "Dominik Sacha", "affiliation": "Data Analysis and Visualization Group, University of Konstanz", "__typename": "ArticleAuthorType" }, { "givenName": "Andreas", "surname": "Stoffel", "fullName": "Andreas Stoffel", "affiliation": "Data Analysis and Visualization Group, University of Konstanz", "__typename": "ArticleAuthorType" }, { "givenName": "Florian", "surname": "Stoffel", "fullName": "Florian Stoffel", "affiliation": "Data Analysis and Visualization Group, University of Konstanz", "__typename": "ArticleAuthorType" }, { "givenName": "Bum Chul", "surname": "Kwon", "fullName": "Bum Chul Kwon", "affiliation": "Data Analysis and Visualization Group, University of Konstanz", "__typename": "ArticleAuthorType" }, { "givenName": "Geoffrey", "surname": "Ellis", "fullName": "Geoffrey Ellis", "affiliation": "Data Analysis and Visualization Group, University of Konstanz", "__typename": "ArticleAuthorType" }, { "givenName": "Daniel A.", "surname": "Keim", "fullName": "Daniel A. 
Keim", "affiliation": "Data Analysis and Visualization Group, University of Konstanz", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "12", "pubDate": "2014-12-01 00:00:00", "pubType": "trans", "pages": "1604-1613", "year": "2014", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/ldav/2014/5215/0/07013208", "title": "Visual analytics of large-scale climate model data", "doi": null, "abstractUrl": "/proceedings-article/ldav/2014/07013208/12OmNA14Aga", "parentPublication": { "id": "proceedings/ldav/2014/5215/0", "title": "2014 IEEE 4th Symposium on Large Data Analysis and Visualization (LDAV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/hicss/2011/9618/0/05718616", "title": "Pair Analytics: Capturing Reasoning Processes in Collaborative Visual Analytics", "doi": null, "abstractUrl": "/proceedings-article/hicss/2011/05718616/12OmNvAiShB", "parentPublication": { "id": "proceedings/hicss/2011/9618/0", "title": "2011 44th Hawaii International Conference on System Sciences", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/hicss/2016/5670/0/5670b426", "title": "Introduction to the Minitrack on Interactive Visual Decision Analytics", "doi": null, "abstractUrl": "/proceedings-article/hicss/2016/5670b426/12OmNzWfoUn", "parentPublication": { "id": "proceedings/hicss/2016/5670/0", "title": "2016 49th Hawaii International Conference on System Sciences (HICSS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2014/04/mcg2014040008", "title": "Semantic Interaction for Visual Analytics: Toward Coupling Cognition and Computation", "doi": null, "abstractUrl": "/magazine/cg/2014/04/mcg2014040008/13rRUwwslv3", "parentPublication": { 
"id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2009/02/mcg2009020084", "title": "Demystifying Visual Analytics", "doi": null, "abstractUrl": "/magazine/cg/2009/02/mcg2009020084/13rRUy3gn3z", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2014/12/06876049", "title": "Progressive Visual Analytics: User-Driven Visual Exploration of In-Progress Analytics", "doi": null, "abstractUrl": "/journal/tg/2014/12/06876049/13rRUyogGAd", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vast/2017/3163/0/08585665", "title": "The Anchoring Effect in Decision-Making with Visual Analytics", "doi": null, "abstractUrl": "/proceedings-article/vast/2017/08585665/17D45WZZ7CL", "parentPublication": { "id": "proceedings/vast/2017/3163/0", "title": "2017 IEEE Conference on Visual Analytics Science and Technology (VAST)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vast/2017/3163/0/08585498", "title": "The Role of Explicit Knowledge: A Conceptual Model of Knowledge-Assisted Visual Analytics", "doi": null, "abstractUrl": "/proceedings-article/vast/2017/08585498/17D45XvMccM", "parentPublication": { "id": "proceedings/vast/2017/3163/0", "title": "2017 IEEE Conference on Visual Analytics Science and Technology (VAST)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/fie/2018/1174/0/08659299", "title": "Evaluation of Cyber Defense Exercises Using Visual Analytics Process", "doi": null, "abstractUrl": "/proceedings-article/fie/2018/08659299/18j9ko6L8uQ", "parentPublication": { "id": 
"proceedings/fie/2018/1174/0", "title": "2018 IEEE Frontiers in Education Conference (FIE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/08/09018081", "title": "Conceptual Model of Visual Analytics for Hands-on Cybersecurity Training", "doi": null, "abstractUrl": "/journal/tg/2021/08/09018081/1hN4BNhncqc", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "06935102", "articleId": "13rRUy3xY8b", "__typename": "AdjacentArticleType" }, "next": { "fno": "06876047", "articleId": "13rRUxASuAw", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNvGPE8n", "title": "Jan.", "year": "2016", "issueNum": "01", "idPrefix": "tg", "pubType": "journal", "volume": "22", "label": "Jan.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUwghd9b", "doi": "10.1109/TVCG.2015.2467411", "abstract": "An eddy is a feature associated with a rotating body of fluid, surrounded by a ring of shearing fluid. In the ocean, eddies are 10 to 150 km in diameter, are spawned by boundary currents and baroclinic instabilities, may live for hundreds of days, and travel for hundreds of kilometers. Eddies are important in climate studies because they transport heat, salt, and nutrients through the world's oceans and are vessels of biological productivity. The study of eddies in global ocean-climate models requires large-scale, high-resolution simulations. This poses a problem for feasible (timely) eddy analysis, as ocean simulations generate massive amounts of data, causing a bottleneck for traditional analysis workflows. To enable eddy studies, we have developed an in situ workflow for the quantitative and qualitative analysis of MPAS-Ocean, a high-resolution ocean climate model, in collaboration with the ocean model research and development process. Planned eddy analysis at high spatial and temporal resolutions will not be possible with a postprocessing workflow due to various constraints, such as storage size and I/O time, but the in situ workflow enables it and scales well to ten-thousand processing elements.", "abstracts": [ { "abstractType": "Regular", "content": "An eddy is a feature associated with a rotating body of fluid, surrounded by a ring of shearing fluid. In the ocean, eddies are 10 to 150 km in diameter, are spawned by boundary currents and baroclinic instabilities, may live for hundreds of days, and travel for hundreds of kilometers. 
Eddies are important in climate studies because they transport heat, salt, and nutrients through the world's oceans and are vessels of biological productivity. The study of eddies in global ocean-climate models requires large-scale, high-resolution simulations. This poses a problem for feasible (timely) eddy analysis, as ocean simulations generate massive amounts of data, causing a bottleneck for traditional analysis workflows. To enable eddy studies, we have developed an in situ workflow for the quantitative and qualitative analysis of MPAS-Ocean, a high-resolution ocean climate model, in collaboration with the ocean model research and development process. Planned eddy analysis at high spatial and temporal resolutions will not be possible with a postprocessing workflow due to various constraints, such as storage size and I/O time, but the in situ workflow enables it and scales well to ten-thousand processing elements.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "An eddy is a feature associated with a rotating body of fluid, surrounded by a ring of shearing fluid. In the ocean, eddies are 10 to 150 km in diameter, are spawned by boundary currents and baroclinic instabilities, may live for hundreds of days, and travel for hundreds of kilometers. Eddies are important in climate studies because they transport heat, salt, and nutrients through the world's oceans and are vessels of biological productivity. The study of eddies in global ocean-climate models requires large-scale, high-resolution simulations. This poses a problem for feasible (timely) eddy analysis, as ocean simulations generate massive amounts of data, causing a bottleneck for traditional analysis workflows. To enable eddy studies, we have developed an in situ workflow for the quantitative and qualitative analysis of MPAS-Ocean, a high-resolution ocean climate model, in collaboration with the ocean model research and development process. 
Planned eddy analysis at high spatial and temporal resolutions will not be possible with a postprocessing workflow due to various constraints, such as storage size and I/O time, but the in situ workflow enables it and scales well to ten-thousand processing elements.", "title": "In Situ Eddy Analysis in a High-Resolution Ocean Climate Model", "normalizedTitle": "In Situ Eddy Analysis in a High-Resolution Ocean Climate Model", "fno": "07192723", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Climatology", "Oceanography", "Rotational Flow", "In Situ Eddy Analysis", "High Resolution Ocean Climate Model", "Fluid Rotation", "Shearing Fluid Ring", "Boundary Current", "Baroclinic Instability", "Climate Study", "Heat Transport", "Salt Transport", "Nutrient Transport", "Biological Productivity", "Global Ocean Climate Model", "High Resolution Simulation", "Ocean Simulation", "MPAS Ocean Analysis", "Model For Prediction Across Scales Ocean", "Ocean Model Research", "High Spatiotemporal Resolution", "Oceans", "Analytical Models", "Computational Modeling", "Meteorology", "Computer Architecture", "Atmospheric Modeling", "Microprocessors", "In Situ Analysis", "Online Analysis", "Mesoscale Eddies", "Ocean Modeling", "Climate Modeling", "Simulation", "Feature Extraction", "Feature Analysis", "High Performance Computing", "Supercomputing", "Software Engineering", "Collaborative Development", "Revision Control", "In Situ Analysis", "Online Analysis", "Mesoscale Eddies", "Ocean Modeling", "Climate Modeling", "Simulation", "Feature Extraction", "Feature Analysis", "High Performance Computing", "Supercomputing", "Software Engineering", "Collaborative Development", "Revision Control" ], "authors": [ { "givenName": "Jonathan", "surname": "Woodring", "fullName": "Jonathan Woodring", "affiliation": ", Los Alamos National Laboratory, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Mark", "surname": "Petersen", "fullName": "Mark Petersen", "affiliation": ", Los Alamos National 
Laboratory, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Andre", "surname": "Schmeiβer", "fullName": "Andre Schmeiβer", "affiliation": ", Computer Graphics and HCI Group, Germany", "__typename": "ArticleAuthorType" }, { "givenName": "John", "surname": "Patchett", "fullName": "John Patchett", "affiliation": ", Los Alamos National Laboratory, USA", "__typename": "ArticleAuthorType" }, { "givenName": "James", "surname": "Ahrens", "fullName": "James Ahrens", "affiliation": ", Los Alamos National Laboratory, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Hans", "surname": "Hagen", "fullName": "Hans Hagen", "affiliation": ", Computer Graphics and HCI Group, Germany", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "01", "pubDate": "2016-01-01 00:00:00", "pubType": "trans", "pages": "857-866", "year": "2016", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/icdm/2013/5108/0/5108a151", "title": "A Parameter-Free Spatio-Temporal Pattern Mining Model to Catalog Global Ocean Dynamics", "doi": null, "abstractUrl": "/proceedings-article/icdm/2013/5108a151/12OmNB9KHts", "parentPublication": { "id": "proceedings/icdm/2013/5108/0", "title": "2013 IEEE International Conference on Data Mining (ICDM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sc/2015/3723/0/2807596", "title": "Improving the scalability of the ocean barotropic solver in the community earth system model", "doi": null, "abstractUrl": "/proceedings-article/sc/2015/2807596/12OmNBbsihQ", "parentPublication": { "id": "proceedings/sc/2015/3723/0", "title": "SC15: International Conference for High-Performance Computing, Networking, Storage and Analysis", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/ettandgrs/2008/3563/1/3563a373", "title": "A Dissymmetrical Ocean Mesoscale Eddy Model for Remote Sensing Data Assimilation", "doi": null, "abstractUrl": "/proceedings-article/ettandgrs/2008/3563a373/12OmNBsLPcJ", "parentPublication": { "id": "proceedings/ettandgrs/2008/3563/1", "title": "Education Technology and Training &amp; Geoscience and Remote Sensing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sc/1988/8923/0/00074148", "title": "Breakthroughs in ocean and climate modeling made possible by supercomputers of today and tomorrow", "doi": null, "abstractUrl": "/proceedings-article/sc/1988/00074148/12OmNqzu6Pu", "parentPublication": { "id": "proceedings/sc/1988/8923/0", "title": "Proceedings of the 1988 ACM/IEEE conference on Supercomputing vol. 2", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2022/03/09787979", "title": "Narrative <italic>In Situ</italic> Visual Analysis for Large-Scale Ocean Eddy Evolution", "doi": null, "abstractUrl": "/magazine/cg/2022/03/09787979/1DUa6nnWNsQ", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2023/9346/0/934600g384", "title": "Computer Vision for Ocean Eddy Detection in Infrared Imagery", "doi": null, "abstractUrl": "/proceedings-article/wacv/2023/934600g384/1KxVBWTVXZC", "parentPublication": { "id": "proceedings/wacv/2023/9346/0", "title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icftic/2022/2195/0/10075163", "title": "Research on ocean eddy detection method based on SAR images", "doi": null, "abstractUrl": "/proceedings-article/icftic/2022/10075163/1LRl4GJnm9y", "parentPublication": { "id": "proceedings/icftic/2022/2195/0", 
"title": "2022 4th International Conference on Frontiers Technology of Information and Computer (ICFTIC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/scivis/2018/6882/0/08823600", "title": "A Lagrangian Method for Extracting Eddy Boundaries in the Red Sea and the Gulf of Aden", "doi": null, "abstractUrl": "/proceedings-article/scivis/2018/08823600/1d5kxJo29nW", "parentPublication": { "id": "proceedings/scivis/2018/6882/0", "title": "2018 IEEE Scientific Visualization Conference (SciVis)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cs/2021/03/09384262", "title": "Bringing the Future Into Focus: Benefits and Challenges of High-Resolution Global Climate Change Simulations", "doi": null, "abstractUrl": "/magazine/cs/2021/03/09384262/1scDqsJ2diM", "parentPublication": { "id": "mags/cs", "title": "Computing in Science & Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icicta/2020/8666/0/866600a127", "title": "Mesoscale Eddy Detection Based on the Deep Learning Method", "doi": null, "abstractUrl": "/proceedings-article/icicta/2020/866600a127/1wRIyoFRJN6", "parentPublication": { "id": "proceedings/icicta/2020/8666/0", "title": "2020 13th International Conference on Intelligent Computation Technology and Automation (ICICTA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "07192672", "articleId": "13rRUxASuhB", "__typename": "AdjacentArticleType" }, "next": { "fno": "07192663", "articleId": "13rRUxASupB", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNrMZprc", "title": "March", "year": "2019", "issueNum": "03", "idPrefix": "tg", "pubType": "journal", "volume": "25", "label": "March", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "17D45WWzW55", "doi": "10.1109/TVCG.2018.2810919", "abstract": "Ensemble simulations are used in climate research to account for natural variability. For medium-term decadal predictions, each simulation run is initialized with real observations from a different day resulting in a set of possible climatic futures. Understanding the variability and the predictive power in this wealth of data is still a challenging task. In this paper, we introduce a visual analytics system to explore variability within ensembles of decadal climate predictions. We propose a new interactive visualization technique (clustering timeline) based on the Sankey diagram, which conveys a concise summary of data similarity and its changes over time. We augment the system with two additional visualizations, filled contour maps and heatmaps, to provide analysts with additional information relating the new diagram to raw data and automatic clustering results. The usefulness of the technique is demonstrated by case studies and user interviews.", "abstracts": [ { "abstractType": "Regular", "content": "Ensemble simulations are used in climate research to account for natural variability. For medium-term decadal predictions, each simulation run is initialized with real observations from a different day resulting in a set of possible climatic futures. Understanding the variability and the predictive power in this wealth of data is still a challenging task. In this paper, we introduce a visual analytics system to explore variability within ensembles of decadal climate predictions. 
We propose a new interactive visualization technique (clustering timeline) based on the Sankey diagram, which conveys a concise summary of data similarity and its changes over time. We augment the system with two additional visualizations, filled contour maps and heatmaps, to provide analysts with additional information relating the new diagram to raw data and automatic clustering results. The usefulness of the technique is demonstrated by case studies and user interviews.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Ensemble simulations are used in climate research to account for natural variability. For medium-term decadal predictions, each simulation run is initialized with real observations from a different day resulting in a set of possible climatic futures. Understanding the variability and the predictive power in this wealth of data is still a challenging task. In this paper, we introduce a visual analytics system to explore variability within ensembles of decadal climate predictions. We propose a new interactive visualization technique (clustering timeline) based on the Sankey diagram, which conveys a concise summary of data similarity and its changes over time. We augment the system with two additional visualizations, filled contour maps and heatmaps, to provide analysts with additional information relating the new diagram to raw data and automatic clustering results. 
The usefulness of the technique is demonstrated by case studies and user interviews.", "title": "Exploring Variability within Ensembles of Decadal Climate Predictions", "normalizedTitle": "Exploring Variability within Ensembles of Decadal Climate Predictions", "fno": "08305502", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Data Visualization", "Meteorology", "Atmospheric Modeling", "Data Models", "Computational Modeling", "Analytical Models", "Predictive Models", "Clustering", "Ensemble Simulations", "Climate Research", "Visual Analysis" ], "authors": [ { "givenName": "Christopher P.", "surname": "Kappe", "fullName": "Christopher P. Kappe", "affiliation": "Department of Computer Science, TU Kaiserslautern, Kaiserslautern, Germany", "__typename": "ArticleAuthorType" }, { "givenName": "Michael", "surname": "Böttinger", "fullName": "Michael Böttinger", "affiliation": "Deutsches Klimarechenzentrum GmbH, Hamburg, Germany", "__typename": "ArticleAuthorType" }, { "givenName": "Heike", "surname": "Leitte", "fullName": "Heike Leitte", "affiliation": "Department of Computer Science, TU Kaiserslautern, Kaiserslautern, Germany", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "03", "pubDate": "2019-03-01 00:00:00", "pubType": "trans", "pages": "1499-1512", "year": "2019", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/e-science/2015/9325/0/9325a108", "title": "From HPC Performance to Climate Modeling: Transforming Methods for HPC Predictions into Models of Extreme Climate Conditions", "doi": null, "abstractUrl": "/proceedings-article/e-science/2015/9325a108/12OmNB06l60", "parentPublication": { "id": "proceedings/e-science/2015/9325/0", "title": "2015 IEEE 11th International Conference on e-Science (e-Science)", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "proceedings/big-data/2016/9005/0/07841098", "title": "HPC infrastructure to support the next-generation ARM facility data operations", "doi": null, "abstractUrl": "/proceedings-article/big-data/2016/07841098/12OmNvk7JMZ", "parentPublication": { "id": "proceedings/big-data/2016/9005/0", "title": "2016 IEEE International Conference on Big Data (Big Data)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/so/2011/06/mso2011060043", "title": "Managing Software Complexity and Variability in Coupled Climate Models", "doi": null, "abstractUrl": "/magazine/so/2011/06/mso2011060043/13rRUwghd7n", "parentPublication": { "id": "mags/so", "title": "IEEE Software", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cs/2015/06/mcs2015060027", "title": "Identifying Physical Interactions from Climate Data: Challenges and Opportunities", "doi": null, "abstractUrl": "/magazine/cs/2015/06/mcs2015060027/13rRUxjyWZB", "parentPublication": { "id": "mags/cs", "title": "Computing in Science & Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cs/2015/05/mcs2015050049", "title": "Putting Regional Climate Prediction in Reach", "doi": null, "abstractUrl": "/magazine/cs/2015/05/mcs2015050049/13rRUy0ZzWh", "parentPublication": { "id": "mags/cs", "title": "Computing in Science & Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cs/2015/06/mcs2015060043", "title": "Can Topic Modeling Shed Light on Climate Extremes?", "doi": null, "abstractUrl": "/magazine/cs/2015/06/mcs2015060043/13rRUyYBlcf", "parentPublication": { "id": "mags/cs", "title": "Computing in Science & Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cs/2015/06/mcs2015060009", "title": "Climate Computing: The State of Play", "doi": null, 
"abstractUrl": "/magazine/cs/2015/06/mcs2015060009/13rRUyZaxu4", "parentPublication": { "id": "mags/cs", "title": "Computing in Science & Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdmw/2018/9288/0/928800a758", "title": "Extreme Values from Spatiotemporal Chaos: Precipitation Extremes and Climate Variability", "doi": null, "abstractUrl": "/proceedings-article/icdmw/2018/928800a758/18jXFBH7KI8", "parentPublication": { "id": "proceedings/icdmw/2018/9288/0", "title": "2018 IEEE International Conference on Data Mining Workshops (ICDMW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/escience/2019/2451/0/245100a498", "title": "ESiWACE: On European Infrastructure Efforts for Weather and Climate Modeling at Exascale", "doi": null, "abstractUrl": "/proceedings-article/escience/2019/245100a498/1ike1oTsoZa", "parentPublication": { "id": "proceedings/escience/2019/2451/0", "title": "2019 15th International Conference on eScience (eScience)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cs/2021/06/09573307", "title": "On Preserving Scientific Integrity for Climate Model Data in the HPC Era", "doi": null, "abstractUrl": "/magazine/cs/2021/06/09573307/1xH5FqO6mtO", "parentPublication": { "id": "mags/cs", "title": "Computing in Science & Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "08314702", "articleId": "17D45VUZMUW", "__typename": "AdjacentArticleType" }, "next": { "fno": "08283576", "articleId": "17D45XcttjZ", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "1qnQQML0BmE", "title": "Jan.-Feb.", "year": "2021", "issueNum": "01", "idPrefix": "cg", "pubType": "magazine", "volume": "41", "label": "Jan.-Feb.", "downloadables": { "hasCover": true, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1qnQT22F5zq", "doi": "10.1109/MCG.2020.3043987", "abstract": "Climate simulations belong to the most data-intensive scientific disciplines and are—in relation to one of humankind's largest challenges, i.e., facing anthropogenic climate change—ever more important. Not only are the outputs generated by current models increasing in size, due to an increase in resolution and the use of ensembles, but the complexity is also rising as a result of maturing models that are able to better describe the intricacies of our climate system. This article focuses on developments and trends in the scientific workflow for the analysis and visualization of climate simulation data, as well as on changes in the visualization techniques and tools that are available.", "abstracts": [ { "abstractType": "Regular", "content": "Climate simulations belong to the most data-intensive scientific disciplines and are—in relation to one of humankind's largest challenges, i.e., facing anthropogenic climate change—ever more important. Not only are the outputs generated by current models increasing in size, due to an increase in resolution and the use of ensembles, but the complexity is also rising as a result of maturing models that are able to better describe the intricacies of our climate system. 
This article focuses on developments and trends in the scientific workflow for the analysis and visualization of climate simulation data, as well as on changes in the visualization techniques and tools that are available.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Climate simulations belong to the most data-intensive scientific disciplines and are—in relation to one of humankind's largest challenges, i.e., facing anthropogenic climate change—ever more important. Not only are the outputs generated by current models increasing in size, due to an increase in resolution and the use of ensembles, but the complexity is also rising as a result of maturing models that are able to better describe the intricacies of our climate system. This article focuses on developments and trends in the scientific workflow for the analysis and visualization of climate simulation data, as well as on changes in the visualization techniques and tools that are available.", "title": "Visualization of Climate Science Simulation Data", "normalizedTitle": "Visualization of Climate Science Simulation Data", "fno": "09325132", "hasPdf": true, "idPrefix": "cg", "keywords": [ "Climatology", "Data Visualisation", "Geophysics Computing", "Visualization Techniques", "Climate Science Simulation Data", "Climate Simulations", "Data Intensive Scientific Disciplines", "Anthropogenic Climate Change", "Scientific Workflow", "Climate Simulation Data", "Computational Modeling", "Data Visualization", "Climate Change", "Market Research", "Data Models", "Complexity Theory", "Meteorology", "Simulation" ], "authors": [ { "givenName": "Niklas", "surname": "Röber", "fullName": "Niklas Röber", "affiliation": "German Climate Computing Center, Hamburg, Germany", "__typename": "ArticleAuthorType" }, { "givenName": "Michael", "surname": "Böttinger", "fullName": "Michael Böttinger", "affiliation": "German Climate Computing Center, Hamburg, Germany", "__typename": "ArticleAuthorType" }, { "givenName": 
"Bjorn", "surname": "Stevens", "fullName": "Bjorn Stevens", "affiliation": "Max Planck Institute for Meteorology, Hamburg, Germany", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": false, "showRecommendedArticles": true, "isOpenAccess": true, "issueNum": "01", "pubDate": "2021-01-01 00:00:00", "pubType": "mags", "pages": "42-48", "year": "2021", "issn": "0272-1716", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/icdcs/2017/1792/0/1792b639", "title": "A Multi-agent Parallel Approach to Analyzing Large Climate Data Sets", "doi": null, "abstractUrl": "/proceedings-article/icdcs/2017/1792b639/12OmNAYGlxi", "parentPublication": { "id": "proceedings/icdcs/2017/1792/0", "title": "2017 IEEE 37th International Conference on Distributed Computing Systems (ICDCS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2010/7846/0/05571316", "title": "Evaluating Climate Visualization: An Information Visualization Approach", "doi": null, "abstractUrl": "/proceedings-article/iv/2010/05571316/12OmNwbukeD", "parentPublication": { "id": "proceedings/iv/2010/7846/0", "title": "2010 14th International Conference Information Visualisation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2011/0868/0/06004058", "title": "Information Visualization in Climate Research", "doi": null, "abstractUrl": "/proceedings-article/iv/2011/06004058/12OmNyO8tVC", "parentPublication": { "id": "proceedings/iv/2011/0868/0", "title": "2011 15th International Conference on Information Visualisation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/co/2014/11/mco2014110074", "title": "Theory-Guided Data Science for Climate Change", "doi": null, "abstractUrl": "/magazine/co/2014/11/mco2014110074/13rRUxC0Srh", "parentPublication": { "id": "mags/co", "title": 
"Computer", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cs/2013/05/mcs2013050032", "title": "Climate Informatics: Accelerating Discovering in Climate Science with Machine Learning", "doi": null, "abstractUrl": "/magazine/cs/2013/05/mcs2013050032/13rRUy2YLOR", "parentPublication": { "id": "mags/cs", "title": "Computing in Science & Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdmw/2019/4896/0/489600a296", "title": "Climate Data Analytics Applied to Sugar Cane Crop in the French West Indies", "doi": null, "abstractUrl": "/proceedings-article/icdmw/2019/489600a296/1gAx0WpNpm0", "parentPublication": { "id": "proceedings/icdmw/2019/4896/0", "title": "2019 International Conference on Data Mining Workshops (ICDMW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2021/01/09281098", "title": "Dynamic 3-D Visualization of Climate Model Development and Results", "doi": null, "abstractUrl": "/magazine/cg/2021/01/09281098/1phO0N1Fhte", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2021/01/09325141", "title": "Visualization of Climate Change", "doi": null, "abstractUrl": "/magazine/cg/2021/01/09325141/1qnQSeB3gME", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cs/2021/03/09384262", "title": "Bringing the Future Into Focus: Benefits and Challenges of High-Resolution Global Climate Change Simulations", "doi": null, "abstractUrl": "/magazine/cs/2021/03/09384262/1scDqsJ2diM", "parentPublication": { "id": "mags/cs", "title": "Computing in Science & Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { 
"id": "proceedings/asonam/2020/1056/0/09381419", "title": "Affective Polarization in Online Climate Change Discourse on Twitter", "doi": null, "abstractUrl": "/proceedings-article/asonam/2020/09381419/1semx89mBhK", "parentPublication": { "id": "proceedings/asonam/2020/1056/0", "title": "2020 IEEE/ACM International Conference on Advances in Social Networks Analysis and Mining (ASONAM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "09325139", "articleId": "1qnQTGtVVvi", "__typename": "AdjacentArticleType" }, "next": { "fno": "09325131", "articleId": "1qnR80v9yX6", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "1uvzW506lLq", "title": "May-June", "year": "2021", "issueNum": "03", "idPrefix": "cs", "pubType": "magazine", "volume": "23", "label": "May-June", "downloadables": { "hasCover": true, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1scDqsJ2diM", "doi": "10.1109/MCSE.2021.3068244", "abstract": "Future climate projections made with high-resolution climate models carry the promise of bridging the gap between weather and climate scales, but the scientific and technical hurdles remain daunting. New simulations carried out on Frontera by a team of climate scientists and software engineers are yielding novel insights into how anthropogenic climate change will impact weather extremes at regional scales over the course of the twenty-first century.", "abstracts": [ { "abstractType": "Regular", "content": "Future climate projections made with high-resolution climate models carry the promise of bridging the gap between weather and climate scales, but the scientific and technical hurdles remain daunting. New simulations carried out on Frontera by a team of climate scientists and software engineers are yielding novel insights into how anthropogenic climate change will impact weather extremes at regional scales over the course of the twenty-first century.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Future climate projections made with high-resolution climate models carry the promise of bridging the gap between weather and climate scales, but the scientific and technical hurdles remain daunting. 
New simulations carried out on Frontera by a team of climate scientists and software engineers are yielding novel insights into how anthropogenic climate change will impact weather extremes at regional scales over the course of the twenty-first century.", "title": "Bringing the Future Into Focus: Benefits and Challenges of High-Resolution Global Climate Change Simulations", "normalizedTitle": "Bringing the Future Into Focus: Benefits and Challenges of High-Resolution Global Climate Change Simulations", "fno": "09384262", "hasPdf": true, "idPrefix": "cs", "keywords": [ "Atmospheric Temperature", "Climatology", "High Resolution Global Climate Change Simulations", "Future Climate Projections", "High Resolution Climate Models", "Weather", "Climate Scales", "Scientific Hurdles", "Technical Hurdles", "Climate Scientists", "Anthropogenic Climate Change", "Atmospheric Modeling", "Meteorology", "Computational Modeling", "Oceans", "Mathematical Model", "Climate Change", "Earth" ], "authors": [ { "givenName": "Stephen G.", "surname": "Yeager", "fullName": "Stephen G. 
Yeager", "affiliation": "National Center for Atmospheric Research (NCAR), Boulder, CO, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Ping", "surname": "Chang", "fullName": "Ping Chang", "affiliation": "Texas A&M University, College Station, TX, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Gokhan", "surname": "Danabasoglu", "fullName": "Gokhan Danabasoglu", "affiliation": "National Center for Atmospheric Research (NCAR), Boulder, CO, USA", "__typename": "ArticleAuthorType" }, { "givenName": "James", "surname": "Edwards", "fullName": "James Edwards", "affiliation": "National Center for Atmospheric Research (NCAR), Boulder, CO, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Nan", "surname": "Rosenbloom", "fullName": "Nan Rosenbloom", "affiliation": "National Center for Atmospheric Research (NCAR), Boulder, CO, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Qiuying", "surname": "Zhang", "fullName": "Qiuying Zhang", "affiliation": "Texas A&M University, College Station, TX, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Dan", "surname": "Fu", "fullName": "Dan Fu", "affiliation": "Texas A&M University, College Station, TX, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Xue", "surname": "Liu", "fullName": "Xue Liu", "affiliation": "Texas A&M University, College Station, TX, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Fred", "surname": "Castruccio", "fullName": "Fred Castruccio", "affiliation": "National Center for Atmospheric Research (NCAR), Boulder, CO, USA", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "03", "pubDate": "2021-05-01 00:00:00", "pubType": "mags", "pages": "34-41", "year": "2021", "issn": "1521-9615", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/ccgrid/2010/4039/0/4039a797", "title": 
"Accelerating Climate and Weather Simulations Through Hybrid Computing", "doi": null, "abstractUrl": "/proceedings-article/ccgrid/2010/4039a797/12OmNAS9zKI", "parentPublication": { "id": "proceedings/ccgrid/2010/4039/0", "title": "Cluster Computing and the Grid, IEEE International Symposium on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/arith/2017/1965/0/1965a122", "title": "Large Scale Numerical Simulations of the Climate", "doi": null, "abstractUrl": "/proceedings-article/arith/2017/1965a122/12OmNAkWvoH", "parentPublication": { "id": "proceedings/arith/2017/1965/0", "title": "2017 IEEE 24th Symposium on Computer Arithmetic (ARITH)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/so/2011/06/mso2011060032", "title": "Guest Editors' Introduction: Climate Change - Science and Software", "doi": null, "abstractUrl": "/magazine/so/2011/06/mso2011060032/13rRUwgQpBi", "parentPublication": { "id": "mags/so", "title": "IEEE Software", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/co/2014/11/mco2014110074", "title": "Theory-Guided Data Science for Climate Change", "doi": null, "abstractUrl": "/magazine/co/2014/11/mco2014110074/13rRUxC0Srh", "parentPublication": { "id": "mags/co", "title": "Computer", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cs/2013/05/mcs2013050032", "title": "Climate Informatics: Accelerating Discovering in Climate Science with Machine Learning", "doi": null, "abstractUrl": "/magazine/cs/2013/05/mcs2013050032/13rRUy2YLOR", "parentPublication": { "id": "mags/cs", "title": "Computing in Science & Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cs/2015/06/mcs2015060009", "title": "Climate Computing: The State of Play", "doi": null, "abstractUrl": "/magazine/cs/2015/06/mcs2015060009/13rRUyZaxu4", 
"parentPublication": { "id": "mags/cs", "title": "Computing in Science & Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/escience/2019/2451/0/245100a586", "title": "Ease Access to Climate Simulations for Researchers: IS-ENES Climate4Impact", "doi": null, "abstractUrl": "/proceedings-article/escience/2019/245100a586/1ike4skqW0E", "parentPublication": { "id": "proceedings/escience/2019/2451/0", "title": "2019 15th International Conference on eScience (eScience)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2021/01/09325141", "title": "Visualization of Climate Change", "doi": null, "abstractUrl": "/magazine/cg/2021/01/09325141/1qnQSeB3gME", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2021/01/09325132", "title": "Visualization of Climate Science Simulation Data", "doi": null, "abstractUrl": "/magazine/cg/2021/01/09325132/1qnQT22F5zq", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/asonam/2020/1056/0/09381419", "title": "Affective Polarization in Online Climate Change Discourse on Twitter", "doi": null, "abstractUrl": "/proceedings-article/asonam/2020/09381419/1semx89mBhK", "parentPublication": { "id": "proceedings/asonam/2020/1056/0", "title": "2020 IEEE/ACM International Conference on Advances in Social Networks Analysis and Mining (ASONAM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "09388894", "articleId": "1smZR2zRFyo", "__typename": "AdjacentArticleType" }, "next": { "fno": "09380938", "articleId": "1s2G9ZgJ07m", "__typename": "AdjacentArticleType" }, "__typename": 
"AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNBCZnUr", "title": "July", "year": "2019", "issueNum": "07", "idPrefix": "tp", "pubType": "journal", "volume": "41", "label": "July", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUwdIOTs", "doi": "10.1109/TPAMI.2018.2843329", "abstract": "Feedback is a fundamental mechanism existing in the human visual system, but has not been explored deeply in designing computer vision algorithms. In this paper, we claim that feedback plays a critical role in understanding convolutional neural networks (CNNs), e.g., how a neuron in CNNs describes an object's pattern, and how a collection of neurons form comprehensive perception to an object. To model the feedback in CNNs, we propose a novel model named Feedback CNN and develop two new processing algorithms, i.e., neural pathway pruning and pattern recovering. We mathematically prove that the proposed method can reach local optimum. Note that Feedback CNN belongs to weakly supervised methods and can be trained only using category-level labels. But it possesses a powerful capability to accurately localize and segment category-specific objects. We conduct extensive visualization analysis, and the results reveal the close relationship between neurons and object parts in Feedback CNN. Finally, we evaluate the proposed Feedback CNN over the tasks of weakly supervised object localization and segmentation, and the experimental results on ImageNet and Pascal VOC show that our method remarkably outperforms the state-of-the-art ones.", "abstracts": [ { "abstractType": "Regular", "content": "Feedback is a fundamental mechanism existing in the human visual system, but has not been explored deeply in designing computer vision algorithms. 
In this paper, we claim that feedback plays a critical role in understanding convolutional neural networks (CNNs), e.g., how a neuron in CNNs describes an object's pattern, and how a collection of neurons form comprehensive perception to an object. To model the feedback in CNNs, we propose a novel model named Feedback CNN and develop two new processing algorithms, i.e., neural pathway pruning and pattern recovering. We mathematically prove that the proposed method can reach local optimum. Note that Feedback CNN belongs to weakly supervised methods and can be trained only using category-level labels. But it possesses a powerful capability to accurately localize and segment category-specific objects. We conduct extensive visualization analysis, and the results reveal the close relationship between neurons and object parts in Feedback CNN. Finally, we evaluate the proposed Feedback CNN over the tasks of weakly supervised object localization and segmentation, and the experimental results on ImageNet and Pascal VOC show that our method remarkably outperforms the state-of-the-art ones.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Feedback is a fundamental mechanism existing in the human visual system, but has not been explored deeply in designing computer vision algorithms. In this paper, we claim that feedback plays a critical role in understanding convolutional neural networks (CNNs), e.g., how a neuron in CNNs describes an object's pattern, and how a collection of neurons form comprehensive perception to an object. To model the feedback in CNNs, we propose a novel model named Feedback CNN and develop two new processing algorithms, i.e., neural pathway pruning and pattern recovering. We mathematically prove that the proposed method can reach local optimum. Note that Feedback CNN belongs to weakly supervised methods and can be trained only using category-level labels. 
But it possesses a powerful capability to accurately localize and segment category-specific objects. We conduct extensive visualization analysis, and the results reveal the close relationship between neurons and object parts in Feedback CNN. Finally, we evaluate the proposed Feedback CNN over the tasks of weakly supervised object localization and segmentation, and the experimental results on ImageNet and Pascal VOC show that our method remarkably outperforms the state-of-the-art ones.", "title": "Feedback Convolutional Neural Network for Visual Localization and Segmentation", "normalizedTitle": "Feedback Convolutional Neural Network for Visual Localization and Segmentation", "fno": "08370896", "hasPdf": true, "idPrefix": "tp", "keywords": [ "Computer Vision", "Convolutional Neural Nets", "Image Representation", "Image Segmentation", "Learning Artificial Intelligence", "Object Detection", "Object Recognition", "Feedback Convolutional Neural Network", "Visual Localization", "Human Visual System", "Computer Vision Algorithms", "Neuron", "Feedback CNN", "Neural Pathway", "Segment Category Specific Objects", "Extensive Visualization Analysis", "Weakly Supervised Object Localization", "Neurons", "Visualization", "Image Segmentation", "Semantics", "Convolutional Neural Networks", "Task Analysis", "Computational Modeling", "Feedback", "Convolutional Neural Networks CN Ns", "Weakly Supervised", "Object Localization", "Object Segmentation" ], "authors": [ { "givenName": "Chunshui", "surname": "Cao", "fullName": "Chunshui Cao", "affiliation": "University of Science and Technology of China, Hefei Shi, China", "__typename": "ArticleAuthorType" }, { "givenName": "Yongzhen", "surname": "Huang", "fullName": "Yongzhen Huang", "affiliation": "University of Chinese Academy of Sciences, Huairou, China", "__typename": "ArticleAuthorType" }, { "givenName": "Yi", "surname": "Yang", "fullName": "Yi Yang", "affiliation": "Baidu Research, Sunnyvale, CA, USA", "__typename": 
"ArticleAuthorType" }, { "givenName": "Liang", "surname": "Wang", "fullName": "Liang Wang", "affiliation": "University of Chinese Academy of Sciences, Huairou, China", "__typename": "ArticleAuthorType" }, { "givenName": "Zilei", "surname": "Wang", "fullName": "Zilei Wang", "affiliation": "University of Science and Technology of China, Hefei Shi, China", "__typename": "ArticleAuthorType" }, { "givenName": "Tieniu", "surname": "Tan", "fullName": "Tieniu Tan", "affiliation": "University of Chinese Academy of Sciences, Huairou, China", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "07", "pubDate": "2019-07-01 00:00:00", "pubType": "trans", "pages": "1627-1640", "year": "2019", "issn": "0162-8828", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/iccv/2017/1032/0/1032f048", "title": "Segmentation-Aware Convolutional Networks Using Local Attention Masks", "doi": null, "abstractUrl": "/proceedings-article/iccv/2017/1032f048/12OmNAoUT7J", "parentPublication": { "id": "proceedings/iccv/2017/1032/0", "title": "2017 IEEE International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/das/2012/4661/0/06195348", "title": "Offline handwritten English character recognition based on convolutional neural network", "doi": null, "abstractUrl": "/proceedings-article/das/2012/06195348/12OmNqH9hmf", "parentPublication": { "id": "proceedings/das/2012/4661/0", "title": "Document Analysis Systems, IAPR International Workshop on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2017/0457/0/0457h282", "title": "Combining Bottom-Up, Top-Down, and Smoothness Cues for Weakly Supervised Image Segmentation", "doi": null, "abstractUrl": 
"/proceedings-article/cvpr/2017/0457h282/12OmNrIae8x", "parentPublication": { "id": "proceedings/cvpr/2017/0457/0", "title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2015/8391/0/8391c956", "title": "Look and Think Twice: Capturing Top-Down Visual Attention with Feedback Convolutional Neural Networks", "doi": null, "abstractUrl": "/proceedings-article/iccv/2015/8391c956/12OmNwswg2b", "parentPublication": { "id": "proceedings/iccv/2015/8391/0", "title": "2015 IEEE International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2015/8391/0/8391b796", "title": "Constrained Convolutional Neural Networks for Weakly Supervised Segmentation", "doi": null, "abstractUrl": "/proceedings-article/iccv/2015/8391b796/12OmNyL0TEK", "parentPublication": { "id": "proceedings/iccv/2015/8391/0", "title": "2015 IEEE International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2018/6420/0/642000i827", "title": "Interpretable Convolutional Neural Networks", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2018/642000i827/17D45Wc1IJL", "parentPublication": { "id": "proceedings/cvpr/2018/6420/0", "title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iiai-aai/2021/2420/0/242000a536", "title": "Synchronous Dropout for Convolutional Neural Network", "doi": null, "abstractUrl": "/proceedings-article/iiai-aai/2021/242000a536/1Eb2u3OH408", "parentPublication": { "id": "proceedings/iiai-aai/2021/2420/0", "title": "2021 10th International Congress on Advanced Applied Informatics (IIAI-AAI)", "__typename": "ParentPublication" }, 
"__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2019/4803/0/480300d404", "title": "Information Entropy Based Feature Pooling for Convolutional Neural Networks", "doi": null, "abstractUrl": "/proceedings-article/iccv/2019/480300d404/1hQqvtj3Eha", "parentPublication": { "id": "proceedings/iccv/2019/4803/0", "title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2020/9360/0/09150812", "title": "Feedback U-net for Cell Image Segmentation", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2020/09150812/1lPHbVRO3NS", "parentPublication": { "id": "proceedings/cvprw/2020/9360/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/micro/2020/7383/0/738300a229", "title": "Fast-BCNN: Massive Neuron Skipping in Bayesian Convolutional Neural Networks", "doi": null, "abstractUrl": "/proceedings-article/micro/2020/738300a229/1oFGE53o78A", "parentPublication": { "id": "proceedings/micro/2020/7383/0", "title": "2020 53rd Annual IEEE/ACM International Symposium on Microarchitecture (MICRO)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "08371271", "articleId": "13rRUwghd6h", "__typename": "AdjacentArticleType" }, "next": { "fno": "08374906", "articleId": "13rRUx0xPok", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "1aAFQeRBQu4", "name": "ttp201907-08370896s1.zip", "location": "https://www.computer.org/csdl/api/v1/extra/ttp201907-08370896s1.zip", "extension": "zip", "size": "128 kB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "12OmNCaLEju", "title": "Jan.", "year": "2018", "issueNum": "01", "idPrefix": "tg", "pubType": "journal", "volume": "24", "label": "Jan.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUxAATgA", "doi": "10.1109/TVCG.2017.2744938", "abstract": "Among the many types of deep models, deep generative models (DGMs) provide a solution to the important problem of unsupervised and semi-supervised learning. However, training DGMs requires more skill, experience, and know-how because their training is more complex than other types of deep models such as convolutional neural networks (CNNs). We develop a visual analytics approach for better understanding and diagnosing the training process of a DGM. To help experts understand the overall training process, we first extract a large amount of time series data that represents training dynamics (e.g., activation changes over time). A blue-noise polyline sampling scheme is then introduced to select time series samples, which can both preserve outliers and reduce visual clutter. To further investigate the root cause of a failed training process, we propose a credit assignment algorithm that indicates how other neurons contribute to the output of the neuron causing the training failure. Two case studies are conducted with machine learning experts to demonstrate how our approach helps understand and diagnose the training processes of DGMs. We also show how our approach can be directly used to analyze other types of deep models, such as CNNs.", "abstracts": [ { "abstractType": "Regular", "content": "Among the many types of deep models, deep generative models (DGMs) provide a solution to the important problem of unsupervised and semi-supervised learning. 
However, training DGMs requires more skill, experience, and know-how because their training is more complex than other types of deep models such as convolutional neural networks (CNNs). We develop a visual analytics approach for better understanding and diagnosing the training process of a DGM. To help experts understand the overall training process, we first extract a large amount of time series data that represents training dynamics (e.g., activation changes over time). A blue-noise polyline sampling scheme is then introduced to select time series samples, which can both preserve outliers and reduce visual clutter. To further investigate the root cause of a failed training process, we propose a credit assignment algorithm that indicates how other neurons contribute to the output of the neuron causing the training failure. Two case studies are conducted with machine learning experts to demonstrate how our approach helps understand and diagnose the training processes of DGMs. We also show how our approach can be directly used to analyze other types of deep models, such as CNNs.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Among the many types of deep models, deep generative models (DGMs) provide a solution to the important problem of unsupervised and semi-supervised learning. However, training DGMs requires more skill, experience, and know-how because their training is more complex than other types of deep models such as convolutional neural networks (CNNs). We develop a visual analytics approach for better understanding and diagnosing the training process of a DGM. To help experts understand the overall training process, we first extract a large amount of time series data that represents training dynamics (e.g., activation changes over time). A blue-noise polyline sampling scheme is then introduced to select time series samples, which can both preserve outliers and reduce visual clutter. 
To further investigate the root cause of a failed training process, we propose a credit assignment algorithm that indicates how other neurons contribute to the output of the neuron causing the training failure. Two case studies are conducted with machine learning experts to demonstrate how our approach helps understand and diagnose the training processes of DGMs. We also show how our approach can be directly used to analyze other types of deep models, such as CNNs.", "title": "Analyzing the Training Processes of Deep Generative Models", "normalizedTitle": "Analyzing the Training Processes of Deep Generative Models", "fno": "08019879", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Training", "Neurons", "Time Series Analysis", "Tools", "Visual Analytics", "Analytical Models", "Deep Learning", "Deep Generative Models", "Blue Noise Sampling", "Credit Assignment" ], "authors": [ { "givenName": "Mengchen", "surname": "Liu", "fullName": "Mengchen Liu", "affiliation": "Tsinghua UniversityNational Engineering Lab for Big Data Software", "__typename": "ArticleAuthorType" }, { "givenName": "Jiaxin", "surname": "Shi", "fullName": "Jiaxin Shi", "affiliation": "Tsinghua University", "__typename": "ArticleAuthorType" }, { "givenName": "Kelei", "surname": "Cao", "fullName": "Kelei Cao", "affiliation": "Tsinghua UniversityNational Engineering Lab for Big Data Software", "__typename": "ArticleAuthorType" }, { "givenName": "Jun", "surname": "Zhu", "fullName": "Jun Zhu", "affiliation": "Tsinghua University", "__typename": "ArticleAuthorType" }, { "givenName": "Shixia", "surname": "Liu", "fullName": "Shixia Liu", "affiliation": "Tsinghua UniversityNational Engineering Lab for Big Data Software", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "01", "pubDate": "2018-01-01 00:00:00", "pubType": "trans", "pages": "77-87", "year": "2018", "issn": "1077-2626", "isbn": null, "notes": null, 
"notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/ipdpsw/2015/7684/0/7684b172", "title": "Scaling Up the Training of Deep CNNs for Human Action Recognition", "doi": null, "abstractUrl": "/proceedings-article/ipdpsw/2015/7684b172/12OmNAle6p4", "parentPublication": { "id": "proceedings/ipdpsw/2015/7684/0", "title": "2015 IEEE International Parallel and Distributed Processing Symposium Workshop (IPDPSW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2017/0457/0/08099918", "title": "Deep Quantization: Encoding Convolutional Activations with Deep Generative Model", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2017/08099918/12OmNBC8At2", "parentPublication": { "id": "proceedings/cvpr/2017/0457/0", "title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2018/01/08019872", "title": "DeepEyes: Progressive Visual Analytics for Designing Deep Neural Networks", "doi": null, "abstractUrl": "/journal/tg/2018/01/08019872/13rRUxlgxTs", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2017/01/07536654", "title": "Towards Better Analysis of Deep Convolutional Neural Networks", "doi": null, "abstractUrl": "/journal/tg/2017/01/07536654/13rRUygT7sJ", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2018/11/08081757", "title": "Max-Margin Deep Generative Models for (Semi-)Supervised Learning", "doi": null, "abstractUrl": "/journal/tp/2018/11/08081757/143fgZJUyze", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern 
Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iiswc/2018/6780/0/08573476", "title": "Benchmarking and Analyzing Deep Neural Network Training", "doi": null, "abstractUrl": "/proceedings-article/iiswc/2018/08573476/17D45WwsQ7r", "parentPublication": { "id": "proceedings/iiswc/2018/6780/0", "title": "2018 IEEE International Symposium on Workload Characterization (IISWC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vast/2018/6861/0/08802509", "title": "Analyzing the Noise Robustness of Deep Neural Networks", "doi": null, "abstractUrl": "/proceedings-article/vast/2018/08802509/1cJ6WWAb0wo", "parentPublication": { "id": "proceedings/vast/2018/6861/0", "title": "2018 IEEE Conference on Visual Analytics Science and Technology (VAST)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdm/2019/4604/0/460400a658", "title": "Discriminative Regularized Deep Generative Models for Semi-Supervised Learning", "doi": null, "abstractUrl": "/proceedings-article/icdm/2019/460400a658/1h5XHy7IHu0", "parentPublication": { "id": "proceedings/icdm/2019/4604/0", "title": "2019 IEEE International Conference on Data Mining (ICDM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/02/09219240", "title": "A Visual Analytics Framework for Explaining and Diagnosing Transfer Learning Processes", "doi": null, "abstractUrl": "/journal/tg/2021/02/09219240/1nMMmribStW", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/tase/2020/4086/0/408600a073", "title": "Feature-oriented Design of Visual Analytics System for Interpretable Deep Learning based Intrusion Detection", "doi": null, "abstractUrl": 
"/proceedings-article/tase/2020/408600a073/1t0HAB8lCE0", "parentPublication": { "id": "proceedings/tase/2020/4086/0", "title": "2020 International Symposium on Theoretical Aspects of Software Engineering (TASE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "08022969", "articleId": "13rRUygBw7g", "__typename": "AdjacentArticleType" }, "next": { "fno": "08022871", "articleId": "13rRUyogGAh", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "17ShDTXWRL5", "name": "ttg201801-08019879s1.zip", "location": "https://www.computer.org/csdl/api/v1/extra/ttg201801-08019879s1.zip", "extension": "zip", "size": "21.4 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "1qL5hsvvVkc", "title": "Feb.", "year": "2021", "issueNum": "02", "idPrefix": "tg", "pubType": "journal", "volume": "27", "label": "Feb.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1nTrIuXAPRe", "doi": "10.1109/TVCG.2020.3030461", "abstract": "Convolutional neural networks (CNNs) have demonstrated extraordinarily good performance in many computer vision tasks. The increasing size of CNN models, however, prevents them from being widely deployed to devices with limited computational resources, e.g., mobile/embedded devices. The emerging topic of model pruning strives to address this problem by removing less important neurons and fine-tuning the pruned networks to minimize the accuracy loss. Nevertheless, existing automated pruning solutions often rely on a numerical threshold of the pruning criteria, lacking the flexibility to optimally balance the trade-off between efficiency and accuracy. Moreover, the complicated interplay between the stages of neuron pruning and model fine-tuning makes this process opaque, and therefore becomes difficult to optimize. In this paper, we address these challenges through a visual analytics approach, named CNNPruner. It considers the importance of convolutional filters through both instability and sensitivity, and allows users to interactively create pruning plans according to a desired goal on model size or accuracy. Also, CNNPruner integrates state-of-the-art filter visualization techniques to help users understand the roles that different filters played and refine their pruning plans. Through comprehensive case studies on CNNs with real-world sizes, we validate the effectiveness of CNNPruner.", "abstracts": [ { "abstractType": "Regular", "content": "Convolutional neural networks (CNNs) have demonstrated extraordinarily good performance in many computer vision tasks. 
The increasing size of CNN models, however, prevents them from being widely deployed to devices with limited computational resources, e.g., mobile/embedded devices. The emerging topic of model pruning strives to address this problem by removing less important neurons and fine-tuning the pruned networks to minimize the accuracy loss. Nevertheless, existing automated pruning solutions often rely on a numerical threshold of the pruning criteria, lacking the flexibility to optimally balance the trade-off between efficiency and accuracy. Moreover, the complicated interplay between the stages of neuron pruning and model fine-tuning makes this process opaque, and therefore becomes difficult to optimize. In this paper, we address these challenges through a visual analytics approach, named CNNPruner. It considers the importance of convolutional filters through both instability and sensitivity, and allows users to interactively create pruning plans according to a desired goal on model size or accuracy. Also, CNNPruner integrates state-of-the-art filter visualization techniques to help users understand the roles that different filters played and refine their pruning plans. Through comprehensive case studies on CNNs with real-world sizes, we validate the effectiveness of CNNPruner.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Convolutional neural networks (CNNs) have demonstrated extraordinarily good performance in many computer vision tasks. The increasing size of CNN models, however, prevents them from being widely deployed to devices with limited computational resources, e.g., mobile/embedded devices. The emerging topic of model pruning strives to address this problem by removing less important neurons and fine-tuning the pruned networks to minimize the accuracy loss. 
Nevertheless, existing automated pruning solutions often rely on a numerical threshold of the pruning criteria, lacking the flexibility to optimally balance the trade-off between efficiency and accuracy. Moreover, the complicated interplay between the stages of neuron pruning and model fine-tuning makes this process opaque, and therefore becomes difficult to optimize. In this paper, we address these challenges through a visual analytics approach, named CNNPruner. It considers the importance of convolutional filters through both instability and sensitivity, and allows users to interactively create pruning plans according to a desired goal on model size or accuracy. Also, CNNPruner integrates state-of-the-art filter visualization techniques to help users understand the roles that different filters played and refine their pruning plans. Through comprehensive case studies on CNNs with real-world sizes, we validate the effectiveness of CNNPruner.", "title": "<italic>CNN</italic>Pruner: Pruning Convolutional Neural Networks with Visual Analytics", "normalizedTitle": "CNNPruner: Pruning Convolutional Neural Networks with Visual Analytics", "fno": "09222510", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Computer Vision", "Convolutional Neural Nets", "Data Analysis", "Data Visualisation", "Learning Artificial Intelligence", "Mobile Computing", "Neuron Pruning", "Visual Analytics Approach", "Convolutional Filters", "Pruning Plans", "CN Ns", "Convolutional Neural Networks", "Computer Vision Tasks", "CNN Models", "Pruned Networks", "Pruning Criteria", "Filter Visualization Techniques", "CNN Pruner", "Computational Modeling", "Numerical Models", "Analytical Models", "Visual Analytics", "Predictive Models", "Deep Learning", "Visualization", "Model Pruning", "Convolutional Neural Network", "Explainable Artificial Intelligence" ], "authors": [ { "givenName": "Guan", "surname": "Li", "fullName": "Guan Li", "affiliation": "Computer Network Information CenterChinese Academy of 
Sciences", "__typename": "ArticleAuthorType" }, { "givenName": "Junpeng", "surname": "Wang", "fullName": "Junpeng Wang", "affiliation": "Visa Research", "__typename": "ArticleAuthorType" }, { "givenName": "Han-Wei", "surname": "Shen", "fullName": "Han-Wei Shen", "affiliation": "Ohio State University", "__typename": "ArticleAuthorType" }, { "givenName": "Kaixin", "surname": "Chen", "fullName": "Kaixin Chen", "affiliation": "Computer Network Information CenterChinese Academy of Sciences", "__typename": "ArticleAuthorType" }, { "givenName": "Guihua", "surname": "Shan", "fullName": "Guihua Shan", "affiliation": "Computer Network Information CenterChinese Academy of Sciences", "__typename": "ArticleAuthorType" }, { "givenName": "Zhonghua", "surname": "Lu", "fullName": "Zhonghua Lu", "affiliation": "Computer Network Information CenterChinese Academy of Sciences", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "02", "pubDate": "2021-02-01 00:00:00", "pubType": "trans", "pages": "1364-1373", "year": "2021", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/ismvl/2018/4464/0/446401a180", "title": "Efficient Hardware Realization of Convolutional Neural Networks Using Intra-Kernel Regular Pruning", "doi": null, "abstractUrl": "/proceedings-article/ismvl/2018/446401a180/12OmNyeWdDq", "parentPublication": { "id": "proceedings/ismvl/2018/4464/0", "title": "2018 IEEE 48th International Symposium on Multiple-Valued Logic (ISMVL)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2023/06/09705076", "title": "<italic>GNNLens</italic>: A Visual Analytics Approach for Prediction Error Diagnosis of Graph Neural Networks", "doi": null, "abstractUrl": "/journal/tg/2023/06/09705076/1AIIbJW1goU", "parentPublication": { "id": "trans/tg", "title": 
"IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/06/09751204", "title": "VAC-CNN: A Visual Analytics System for Comparative Studies of Deep Convolutional Neural Networks", "doi": null, "abstractUrl": "/journal/tg/2022/06/09751204/1CnxO1uqCuQ", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/si/2022/12/09923417", "title": "A Sparse CNN Accelerator for Eliminating Redundant Computations in Intra- and Inter-Convolutional/Pooling Layers", "doi": null, "abstractUrl": "/journal/si/2022/12/09923417/1HzxdAtmVUI", "parentPublication": { "id": "trans/si", "title": "IEEE Transactions on Very Large Scale Integration (VLSI) Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/hpcc-smartcity-dss/2019/2058/0/205800b533", "title": "Structure Characteristic-Aware Pruning Strategy for Convolutional Neural Networks", "doi": null, "abstractUrl": "/proceedings-article/hpcc-smartcity-dss/2019/205800b533/1dPok4Ssd3i", "parentPublication": { "id": "proceedings/hpcc-smartcity-dss/2019/2058/0", "title": "2019 IEEE 21st International Conference on High Performance Computing and Communications; IEEE 17th International Conference on Smart City; IEEE 5th International Conference on Data Science and Systems (HPCC/SmartCity/DSS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2019/3293/0/329300e938", "title": "Centripetal SGD for Pruning Very Deep Convolutional Networks With Complicated Structure", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2019/329300e938/1gys3J2OnJe", "parentPublication": { "id": "proceedings/cvpr/2019/3293/0", "title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition 
(CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/02/09222338", "title": "HyperTendril: Visual Analytics for User-Driven Hyperparameter Optimization of Deep Neural Networks", "doi": null, "abstractUrl": "/journal/tg/2021/02/09222338/1nTrGnbsuYg", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/02/09233993", "title": "<italic>VATLD</italic>: A <italic>V</italic>isual <italic>A</italic>nalytics System to Assess, Understand and Improve <italic>T</italic>raffic <italic>L</italic>ight <italic>D</italic>etection", "doi": null, "abstractUrl": "/journal/tg/2021/02/09233993/1o53W7V42CQ", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2021/8808/0/09412294", "title": "HFP: Hardware-Aware Filter Pruning for Deep Convolutional Neural Networks Acceleration", "doi": null, "abstractUrl": "/proceedings-article/icpr/2021/09412294/1tmi8dkUdEY", "parentPublication": { "id": "proceedings/icpr/2021/8808/0", "title": "2020 25th International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/candar/2021/4246/0/424600a001", "title": "Accelerate CNN Models via Filter Pruning and Sparse Tensor Core", "doi": null, "abstractUrl": "/proceedings-article/candar/2021/424600a001/1zzqv8rMDeg", "parentPublication": { "id": "proceedings/candar/2021/4246/0", "title": "2021 Ninth International Symposium on Computing and Networking (CANDAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "09231271", "articleId": "1o3nzyjYTbG", "__typename": 
"AdjacentArticleType" }, "next": { "fno": "09222060", "articleId": "1nTquHN7hbq", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "1qLh68y0Oxq", "name": "ttg202102-09222510s1-supp1-3030461.mp4", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202102-09222510s1-supp1-3030461.mp4", "extension": "mp4", "size": "94.5 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "1qL5hsvvVkc", "title": "Feb.", "year": "2021", "issueNum": "02", "idPrefix": "tg", "pubType": "journal", "volume": "27", "label": "Feb.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1nTrMkbZAQg", "doi": "10.1109/TVCG.2020.3030418", "abstract": "Deep learning's great success motivates many practitioners and students to learn about this exciting technology. However, it is often challenging for beginners to take their first step due to the complexity of understanding and applying deep learning. We present CNN Explainer, an interactive visualization tool designed for non-experts to learn and examine convolutional neural networks (CNNs), a foundational deep learning model architecture. Our tool addresses key challenges that novices face while learning about CNNs, which we identify from interviews with instructors and a survey with past students. CNN Explainer tightly integrates a model overview that summarizes a CNN's structure, and on-demand, dynamic visual explanation views that help users understand the underlying components of CNNs. Through smooth transitions across levels of abstraction, our tool enables users to inspect the interplay between low-level mathematical operations and high-level model structures. A qualitative user study shows that CNN Explainer helps users more easily understand the inner workings of CNNs, and is engaging and enjoyable to use. We also derive design lessons from our study. Developed using modern web technologies, CNN Explainer runs locally in users' web browsers without the need for installation or specialized hardware, broadening the public's education access to modern deep learning techniques.", "abstracts": [ { "abstractType": "Regular", "content": "Deep learning's great success motivates many practitioners and students to learn about this exciting technology. 
However, it is often challenging for beginners to take their first step due to the complexity of understanding and applying deep learning. We present CNN Explainer, an interactive visualization tool designed for non-experts to learn and examine convolutional neural networks (CNNs), a foundational deep learning model architecture. Our tool addresses key challenges that novices face while learning about CNNs, which we identify from interviews with instructors and a survey with past students. CNN Explainer tightly integrates a model overview that summarizes a CNN's structure, and on-demand, dynamic visual explanation views that help users understand the underlying components of CNNs. Through smooth transitions across levels of abstraction, our tool enables users to inspect the interplay between low-level mathematical operations and high-level model structures. A qualitative user study shows that CNN Explainer helps users more easily understand the inner workings of CNNs, and is engaging and enjoyable to use. We also derive design lessons from our study. Developed using modern web technologies, CNN Explainer runs locally in users' web browsers without the need for installation or specialized hardware, broadening the public's education access to modern deep learning techniques.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Deep learning's great success motivates many practitioners and students to learn about this exciting technology. However, it is often challenging for beginners to take their first step due to the complexity of understanding and applying deep learning. We present CNN Explainer, an interactive visualization tool designed for non-experts to learn and examine convolutional neural networks (CNNs), a foundational deep learning model architecture. Our tool addresses key challenges that novices face while learning about CNNs, which we identify from interviews with instructors and a survey with past students. 
CNN Explainer tightly integrates a model overview that summarizes a CNN's structure, and on-demand, dynamic visual explanation views that help users understand the underlying components of CNNs. Through smooth transitions across levels of abstraction, our tool enables users to inspect the interplay between low-level mathematical operations and high-level model structures. A qualitative user study shows that CNN Explainer helps users more easily understand the inner workings of CNNs, and is engaging and enjoyable to use. We also derive design lessons from our study. Developed using modern web technologies, CNN Explainer runs locally in users' web browsers without the need for installation or specialized hardware, broadening the public's education access to modern deep learning techniques.", "title": "CNN Explainer: Learning Convolutional Neural Networks with Interactive Visualization", "normalizedTitle": "CNN Explainer: Learning Convolutional Neural Networks with Interactive Visualization", "fno": "09222325", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Computer Aided Instruction", "Convolutional Neural Nets", "Data Visualisation", "Interactive Systems", "Internet", "Online Front Ends", "Convolutional Neural Networks", "Foundational Deep Learning Model Architecture", "CNN Explainer", "Dynamic Visual Explanation Views", "High Level Model Structures", "Interactive Visualization Tool", "Deep Learning", "Tools", "Visualization", "Mathematical Model", "Neurons", "Convolutional Neural Networks", "Deep Learning", "Machine Learning", "Convolutional Neural Networks", "Visual Analytics" ], "authors": [ { "givenName": "Zijie J.", "surname": "Wang", "fullName": "Zijie J. 
Wang", "affiliation": "Georgia Tech.", "__typename": "ArticleAuthorType" }, { "givenName": "Robert", "surname": "Turko", "fullName": "Robert Turko", "affiliation": "Georgia Tech.", "__typename": "ArticleAuthorType" }, { "givenName": "Omar", "surname": "Shaikh", "fullName": "Omar Shaikh", "affiliation": "Georgia Tech.", "__typename": "ArticleAuthorType" }, { "givenName": "Haekyu", "surname": "Park", "fullName": "Haekyu Park", "affiliation": "Georgia Tech.", "__typename": "ArticleAuthorType" }, { "givenName": "Nilaksh", "surname": "Das", "fullName": "Nilaksh Das", "affiliation": "Georgia Tech.", "__typename": "ArticleAuthorType" }, { "givenName": "Fred", "surname": "Hohman", "fullName": "Fred Hohman", "affiliation": "Georgia Tech.", "__typename": "ArticleAuthorType" }, { "givenName": "Minsuk", "surname": "Kahng", "fullName": "Minsuk Kahng", "affiliation": "Oregon State University", "__typename": "ArticleAuthorType" }, { "givenName": "Duen Horng", "surname": "Polo Chau", "fullName": "Duen Horng Polo Chau", "affiliation": "Georgia Tech.", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "02", "pubDate": "2021-02-01 00:00:00", "pubType": "trans", "pages": "1396-1406", "year": "2021", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/iccvw/2015/9711/0/5720a384", "title": "When Face Recognition Meets with Deep Learning: An Evaluation of Convolutional Neural Networks for Face Recognition", "doi": null, "abstractUrl": "/proceedings-article/iccvw/2015/5720a384/12OmNxEBz4G", "parentPublication": { "id": "proceedings/iccvw/2015/9711/0", "title": "2015 IEEE International Conference on Computer Vision Workshop (ICCVW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2019/07/08370896", "title": "Feedback Convolutional Neural Network for 
Visual Localization and Segmentation", "doi": null, "abstractUrl": "/journal/tp/2019/07/08370896/13rRUwdIOTs", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2017/01/07536654", "title": "Towards Better Analysis of Deep Convolutional Neural Networks", "doi": null, "abstractUrl": "/journal/tg/2017/01/07536654/13rRUygT7sJ", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2018/6420/0/642000i827", "title": "Interpretable Convolutional Neural Networks", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2018/642000i827/17D45Wc1IJL", "parentPublication": { "id": "proceedings/cvpr/2018/6420/0", "title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/saner/2019/0591/0/08668002", "title": "CNN-FL: An Effective Approach for Localizing Faults using Convolutional Neural Networks", "doi": null, "abstractUrl": "/proceedings-article/saner/2019/08668002/18uSuIgw0sU", "parentPublication": { "id": "proceedings/saner/2019/0591/0", "title": "2019 IEEE 26th International Conference on Software Analysis, Evolution and Reengineering (SANER)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccabs/2017/2594/0/08114310", "title": "Survey on deep convolutional neural networks in mammography", "doi": null, "abstractUrl": "/proceedings-article/iccabs/2017/08114310/1DICe5bFQJ2", "parentPublication": { "id": "proceedings/iccabs/2017/2594/0", "title": "2017 IEEE 7th International Conference on Computational Advances in Bio- and Medical Sciences (ICCABS)", "__typename": "ParentPublication" 
}, "__typename": "RecommendedArticleType" }, { "id": "proceedings/big-data/2019/0858/0/09005707", "title": "Hierarchical Transfer Convolutional Neural Networks for Image Classification", "doi": null, "abstractUrl": "/proceedings-article/big-data/2019/09005707/1hJrXxdgW2I", "parentPublication": { "id": "proceedings/big-data/2019/0858/0", "title": "2019 IEEE International Conference on Big Data (Big Data)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2019/4803/0/480300d868", "title": "Visualization of Convolutional Neural Networks for Monocular Depth Estimation", "doi": null, "abstractUrl": "/proceedings-article/iccv/2019/480300d868/1hVla27O6WY", "parentPublication": { "id": "proceedings/iccv/2019/4803/0", "title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/dsd/2020/9535/0/09217880", "title": "Evaluating Convolutional Neural Networks Reliability depending on their Data Representation", "doi": null, "abstractUrl": "/proceedings-article/dsd/2020/09217880/1nLbLumUSVW", "parentPublication": { "id": "proceedings/dsd/2020/9535/0", "title": "2020 23rd Euromicro Conference on Digital System Design (DSD)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/micro/2020/7383/0/738300a229", "title": "Fast-BCNN: Massive Neuron Skipping in Bayesian Convolutional Neural Networks", "doi": null, "abstractUrl": "/proceedings-article/micro/2020/738300a229/1oFGE53o78A", "parentPublication": { "id": "proceedings/micro/2020/7383/0", "title": "2020 53rd Annual IEEE/ACM International Symposium on Microarchitecture (MICRO)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "09219240", "articleId": "1nMMmribStW", "__typename": "AdjacentArticleType" }, "next": { "fno": "09222338", 
"articleId": "1nTrGnbsuYg", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "1qRITbA97aw", "name": "ttg202102-09222325s1-tvcg-3030418-mm.zip", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202102-09222325s1-tvcg-3030418-mm.zip", "extension": "zip", "size": "144 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "12OmNCwUmt2", "title": "Oct.-Dec.", "year": "2016", "issueNum": "04", "idPrefix": "mc", "pubType": "journal", "volume": "2", "label": "Oct.-Dec.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUwdIOWW", "doi": "10.1109/TMSCS.2016.2555303", "abstract": "Artificial Neural Networks (ANNs) have been widely used to deal with various classification problems for decades. Different algorithms for synthesizing ANNs have been proposed as well. The number of neurons in an ANN usually controls the tradeoff between classification ability and computational efficiency. That is, more neurons tend to yield better results but are less efficient in either the training or recalling phase. Furthermore, if the neurons are implemented by physical devices, the implementation cost can be effectively reduced with fewer number of neurons in an ANN. In this paper, we propose a method to minimize the number of neurons used in an ANN that is built by using Voronoi diagrams without suffering any capability loss. We have conducted experiments on a set of benchmarks. The experimental results show that the resultant ANNs reduce the number of neurons by up to 94 percent.", "abstracts": [ { "abstractType": "Regular", "content": "Artificial Neural Networks (ANNs) have been widely used to deal with various classification problems for decades. Different algorithms for synthesizing ANNs have been proposed as well. The number of neurons in an ANN usually controls the tradeoff between classification ability and computational efficiency. That is, more neurons tend to yield better results but are less efficient in either the training or recalling phase. Furthermore, if the neurons are implemented by physical devices, the implementation cost can be effectively reduced with fewer number of neurons in an ANN. 
In this paper, we propose a method to minimize the number of neurons used in an ANN that is built by using Voronoi diagrams without suffering any capability loss. We have conducted experiments on a set of benchmarks. The experimental results show that the resultant ANNs reduce the number of neurons by up to 94 percent.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Artificial Neural Networks (ANNs) have been widely used to deal with various classification problems for decades. Different algorithms for synthesizing ANNs have been proposed as well. The number of neurons in an ANN usually controls the tradeoff between classification ability and computational efficiency. That is, more neurons tend to yield better results but are less efficient in either the training or recalling phase. Furthermore, if the neurons are implemented by physical devices, the implementation cost can be effectively reduced with fewer number of neurons in an ANN. In this paper, we propose a method to minimize the number of neurons used in an ANN that is built by using Voronoi diagrams without suffering any capability loss. We have conducted experiments on a set of benchmarks. 
The experimental results show that the resultant ANNs reduce the number of neurons by up to 94 percent.", "title": "Minimization of Number of Neurons in Voronoi Diagram-Based Artificial Neural Networks", "normalizedTitle": "Minimization of Number of Neurons in Voronoi Diagram-Based Artificial Neural Networks", "fno": "07454719", "hasPdf": true, "idPrefix": "mc", "keywords": [ "Neurons", "Artificial Neural Networks", "Training", "Training Data", "Biological Neural Networks", "Topology", "Network Topology", "Voronoi Diagram", "Minimization", "Artificial Neural Networks" ], "authors": [ { "givenName": "Chen-Yu", "surname": "Lin", "fullName": "Chen-Yu Lin", "affiliation": "Department of Computer Science, National Tsing Hua University, Hsinchu, R.O.C, Taiwan", "__typename": "ArticleAuthorType" }, { "givenName": "Yung-Chih", "surname": "Chen", "fullName": "Yung-Chih Chen", "affiliation": "Department of Computer Science and Engineering, Yuan Ze University, Taoyuan, R.O.C, Taiwan", "__typename": "ArticleAuthorType" }, { "givenName": "Chun-Yao", "surname": "Wang", "fullName": "Chun-Yao Wang", "affiliation": "Department of Computer Science, National Tsing Hua University, Hsinchu, R.O.C, Taiwan", "__typename": "ArticleAuthorType" }, { "givenName": "Ching-Yi", "surname": "Huang", "fullName": "Ching-Yi Huang", "affiliation": "Department of Computer Science, National Tsing Hua University, Hsinchu, R.O.C, Taiwan", "__typename": "ArticleAuthorType" }, { "givenName": "Chiou-Ting", "surname": "Hsu", "fullName": "Chiou-Ting Hsu", "affiliation": "Department of Computer Science, National Tsing Hua University, Hsinchu, R.O.C, Taiwan", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "04", "pubDate": "2016-10-01 00:00:00", "pubType": "trans", "pages": "225-233", "year": "2016", "issn": "2332-7766", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, 
"recommendedArticles": [ { "id": "proceedings/icat/2017/3337/0/08171610", "title": "Distinguishing physical actions using an artificial neural network", "doi": null, "abstractUrl": "/proceedings-article/icat/2017/08171610/12OmNAWH9z5", "parentPublication": { "id": "proceedings/icat/2017/3337/0", "title": "2017 XXVI International Conference on Information, Communication and Automation Technologies (ICAT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cbms/1992/2742/0/00245037", "title": "Application of artificial neural networks (ANNs) to complex oscillations in the human pupil light reflex", "doi": null, "abstractUrl": "/proceedings-article/cbms/1992/00245037/12OmNB836Nl", "parentPublication": { "id": "proceedings/cbms/1992/2742/0", "title": "Proceedings Fifth Annual IEEE Symposium on Computer-Based Medical Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/dmia/2015/8111/0/8111a044", "title": "Investigating the Role of Individual Neurons as Outlier Detectors", "doi": null, "abstractUrl": "/proceedings-article/dmia/2015/8111a044/12OmNrJAdTC", "parentPublication": { "id": "proceedings/dmia/2015/8111/0", "title": "2015 International Workshop on Data Mining with Industrial Applications (DMIA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/csci/2016/5510/0/07881508", "title": "Artificial Intelligent Models for New Product Design: An Application Study", "doi": null, "abstractUrl": "/proceedings-article/csci/2016/07881508/12OmNvAiSiU", "parentPublication": { "id": "proceedings/csci/2016/5510/0", "title": "2016 International Conference on Computational Science and Computational Intelligence (CSCI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ci/2013/3194/0/06855890", "title": "A Comparative Analysis of Methodologies for Automatic Design of Artificial 
Neural Networks from the Beginnings until Today", "doi": null, "abstractUrl": "/proceedings-article/ci/2013/06855890/12OmNxZTtIA", "parentPublication": { "id": "proceedings/ci/2013/3194/0", "title": "2013 BRICS Congress on Computational Intelligence & 11th Brazilian Congress on Computational Intelligence (BRICS-CCI & CBIC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iita/2008/3497/1/04739544", "title": "The Lower Bound on the Number of Hidden Neurons in Multi-Valued Multi-Threshold Neural Networks", "doi": null, "abstractUrl": "/proceedings-article/iita/2008/04739544/12OmNyqRndD", "parentPublication": { "id": "proceedings/iita/2008/3497/3", "title": "2008 Second International Symposium on Intelligent Information Technology Application", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ipdps/2017/3914/0/07967193", "title": "When Neurons Fail", "doi": null, "abstractUrl": "/proceedings-article/ipdps/2017/07967193/12OmNzBOict", "parentPublication": { "id": "proceedings/ipdps/2017/3914/0", "title": "2017 IEEE International Parallel and Distributed Processing Symposium (IPDPS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ises/2021/8753/0/875300a153", "title": "Efficient Design of Artificial Neural Networks using Approximate Compressors and Multipliers", "doi": null, "abstractUrl": "/proceedings-article/ises/2021/875300a153/1APpQzhMOas", "parentPublication": { "id": "proceedings/ises/2021/8753/0", "title": "2021 IEEE International Symposium on Smart Electronic Systems (iSES)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bibm/2022/6819/0/09995647", "title": "Determination of Neuron Activation States Facilitated by Artificial Intelligence", "doi": null, "abstractUrl": "/proceedings-article/bibm/2022/09995647/1JC2pJdL93G", "parentPublication": { "id": 
"proceedings/bibm/2022/6819/0", "title": "2022 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icci*cc/2022/9084/0/10101500", "title": "Adaptive Chaotic Injection to Reduce Overfitting in Artificial Neural Networks", "doi": null, "abstractUrl": "/proceedings-article/icci*cc/2022/10101500/1MwEzy9rZsc", "parentPublication": { "id": "proceedings/icci*cc/2022/9084/0", "title": "2022 IEEE 21st International Conference on Cognitive Informatics & Cognitive Computing (ICCI*CC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "07792817", "articleId": "13rRUynZ5sL", "__typename": "AdjacentArticleType" }, "next": { "fno": "07604135", "articleId": "13rRUwh80DC", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNvsDHDY", "title": "Jan.", "year": "2020", "issueNum": "01", "idPrefix": "tg", "pubType": "journal", "volume": "26", "label": "Jan.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1cG6pGxvLOg", "doi": "10.1109/TVCG.2019.2934659", "abstract": "Deep learning is increasingly used in decision-making tasks. However, understanding how neural networks produce final predictions remains a fundamental challenge. Existing work on interpreting neural network predictions for images often focuses on explaining predictions for single images or neurons. As predictions are often computed from millions of weights that are optimized over millions of images, such explanations can easily miss a bigger picture. We present Summit, an interactive system that scalably and systematically summarizes and visualizes what features a deep learning model has learned and how those features interact to make predictions. Summit introduces two new scalable summarization techniques: (1) activation aggregation discovers important neurons, and (2) neuron-influence aggregation identifies relationships among such neurons. Summit combines these techniques to create the novel attribution graph that reveals and summarizes crucial neuron associations and substructures that contribute to a model's outcomes. Summit scales to large data, such as the ImageNet dataset with 1.2M images, and leverages neural network feature visualization and dataset examples to help users distill large, complex neural network models into compact, interactive visualizations. We present neural network exploration scenarios where Summit helps us discover multiple surprising insights into a prevalent, large-scale image classifier's learned representations and informs future neural network architecture design. 
The Summit visualization runs in modern web browsers and is open-sourced.", "abstracts": [ { "abstractType": "Regular", "content": "Deep learning is increasingly used in decision-making tasks. However, understanding how neural networks produce final predictions remains a fundamental challenge. Existing work on interpreting neural network predictions for images often focuses on explaining predictions for single images or neurons. As predictions are often computed from millions of weights that are optimized over millions of images, such explanations can easily miss a bigger picture. We present Summit, an interactive system that scalably and systematically summarizes and visualizes what features a deep learning model has learned and how those features interact to make predictions. Summit introduces two new scalable summarization techniques: (1) activation aggregation discovers important neurons, and (2) neuron-influence aggregation identifies relationships among such neurons. Summit combines these techniques to create the novel attribution graph that reveals and summarizes crucial neuron associations and substructures that contribute to a model's outcomes. Summit scales to large data, such as the ImageNet dataset with 1.2M images, and leverages neural network feature visualization and dataset examples to help users distill large, complex neural network models into compact, interactive visualizations. We present neural network exploration scenarios where Summit helps us discover multiple surprising insights into a prevalent, large-scale image classifier's learned representations and informs future neural network architecture design. The Summit visualization runs in modern web browsers and is open-sourced.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Deep learning is increasingly used in decision-making tasks. However, understanding how neural networks produce final predictions remains a fundamental challenge. 
Existing work on interpreting neural network predictions for images often focuses on explaining predictions for single images or neurons. As predictions are often computed from millions of weights that are optimized over millions of images, such explanations can easily miss a bigger picture. We present Summit, an interactive system that scalably and systematically summarizes and visualizes what features a deep learning model has learned and how those features interact to make predictions. Summit introduces two new scalable summarization techniques: (1) activation aggregation discovers important neurons, and (2) neuron-influence aggregation identifies relationships among such neurons. Summit combines these techniques to create the novel attribution graph that reveals and summarizes crucial neuron associations and substructures that contribute to a model's outcomes. Summit scales to large data, such as the ImageNet dataset with 1.2M images, and leverages neural network feature visualization and dataset examples to help users distill large, complex neural network models into compact, interactive visualizations. We present neural network exploration scenarios where Summit helps us discover multiple surprising insights into a prevalent, large-scale image classifier's learned representations and informs future neural network architecture design. 
The Summit visualization runs in modern web browsers and is open-sourced.", "title": "S<sc>ummit</sc>: Scaling Deep Learning Interpretability by Visualizing Activation and Attribution Summarizations", "normalizedTitle": "Summit: Scaling Deep Learning Interpretability by Visualizing Activation and Attribution Summarizations", "fno": "08807294", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Data Visualisation", "Feature Extraction", "Graph Theory", "Image Classification", "Image Representation", "Interactive Systems", "Internet", "Learning Artificial Intelligence", "Neural Net Architecture", "Public Domain Software", "Web Browsers", "Computer Vision Model", "Representation Learning", "Summit Visualization", "Image Classifier", "Interactive Visualizations", "Neural Network Feature Visualization", "Deep Learning Model", "Interactive System", "Neural Network Predictions", "Decision Making Tasks", "Attribution Summarizations", "Deep Learning Interpretability", "Neural Network Architecture", "Neurons", "Biological Neural Networks", "Feature Extraction", "Data Visualization", "Computational Modeling", "Predictive Models", "Visualization", "Deep Learning Interpretability", "Visual Analytics", "Scalable Summarization", "Attribution Graph" ], "authors": [ { "givenName": "Fred", "surname": "Hohman", "fullName": "Fred Hohman", "affiliation": "Georgia Tech.", "__typename": "ArticleAuthorType" }, { "givenName": "Haekyu", "surname": "Park", "fullName": "Haekyu Park", "affiliation": "Georgia Tech.", "__typename": "ArticleAuthorType" }, { "givenName": "Caleb", "surname": "Robinson", "fullName": "Caleb Robinson", "affiliation": "Georgia Tech.", "__typename": "ArticleAuthorType" }, { "givenName": "Duen Horng", "surname": "Polo Chau", "fullName": "Duen Horng Polo Chau", "affiliation": "Georgia Tech.", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "01", "pubDate": "2020-01-01 
00:00:00", "pubType": "trans", "pages": "1096-1106", "year": "2020", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/cvpr/2016/8851/0/8851a270", "title": "InterActive: Inter-Layer Activeness Propagation", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2016/8851a270/12OmNAZx8SG", "parentPublication": { "id": "proceedings/cvpr/2016/8851/0", "title": "2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/pacificvis/2011/935/0/05742370", "title": "The Neuron Navigator: Exploring the information pathway through the neural maze", "doi": null, "abstractUrl": "/proceedings-article/pacificvis/2011/05742370/12OmNzxgHEL", "parentPublication": { "id": "proceedings/pacificvis/2011/935/0", "title": "2011 IEEE Pacific Visualization Symposium (PacificVis)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2018/6420/0/642000j194", "title": "NISP: Pruning Networks Using Neuron Importance Score Propagation", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2018/642000j194/17D45WwsQ4y", "parentPublication": { "id": "proceedings/cvpr/2018/6420/0", "title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600o4973", "title": "Improving Adversarial Transferability via Neuron Attribution-based Attacks", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600o4973/1H1hUnBlWbC", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/td/2023/02/09983514", "title": 
"Spiking Neural P Systems With Communication on Request and Mute Rules", "doi": null, "abstractUrl": "/journal/td/2023/02/09983514/1J4y4ueBeQU", "parentPublication": { "id": "trans/td", "title": "IEEE Transactions on Parallel & Distributed Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bibm/2022/6819/0/09995647", "title": "Determination of Neuron Activation States Facilitated by Artificial Intelligence", "doi": null, "abstractUrl": "/proceedings-article/bibm/2022/09995647/1JC2pJdL93G", "parentPublication": { "id": "proceedings/bibm/2022/6819/0", "title": "2022 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icstw/2019/0888/0/088800a281", "title": "Variable Strength Combinatorial Testing for Deep Neural Networks", "doi": null, "abstractUrl": "/proceedings-article/icstw/2019/088800a281/1aDTat6htKM", "parentPublication": { "id": "proceedings/icstw/2019/0888/0", "title": "2019 IEEE International Conference on Software Testing, Verification and Validation Workshops (ICSTW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cse-euc/2019/1664/0/166400a016", "title": "Test4Deep: an Effective White-Box Testing for Deep Neural Networks", "doi": null, "abstractUrl": "/proceedings-article/cse-euc/2019/166400a016/1fHkwAfOQKI", "parentPublication": { "id": "proceedings/cse-euc/2019/1664/0", "title": "2019 IEEE International Conference on Computational Science and Engineering (CSE) and IEEE International Conference on Embedded and Ubiquitous Computing (EUC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/07/08967166", "title": "Analyzing the Noise Robustness of Deep Neural Networks", "doi": null, "abstractUrl": "/journal/tg/2021/07/08967166/1gPjyNWFSgg", "parentPublication": { "id": 
"trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/01/09552879", "title": "NeuroCartography: Scalable Automatic Visual Summarization of Concepts in Deep Neural Networks", "doi": null, "abstractUrl": "/journal/tg/2022/01/09552879/1xibY2EaE80", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "08805420", "articleId": "1cG4psmkNQA", "__typename": "AdjacentArticleType" }, "next": { "fno": "08807232", "articleId": "1cG6oZu9YBy", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNBfIhaQ", "title": "July-Aug.", "year": "2012", "issueNum": "04", "idPrefix": "cg", "pubType": "magazine", "volume": "32", "label": "July-Aug.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUxjQyxF", "doi": "10.1109/MCG.2012.73", "abstract": "Extreme-scale visual analytics (VA) is about applying VA to extreme-scale data. The articles in this special issue examine advances related to extreme-scale VA problems, their analytical and computational challenges, and their real-world applications.", "abstracts": [ { "abstractType": "Regular", "content": "Extreme-scale visual analytics (VA) is about applying VA to extreme-scale data. The articles in this special issue examine advances related to extreme-scale VA problems, their analytical and computational challenges, and their real-world applications.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Extreme-scale visual analytics (VA) is about applying VA to extreme-scale data. 
The articles in this special issue examine advances related to extreme-scale VA problems, their analytical and computational challenges, and their real-world applications.", "title": "Extreme-Scale Visual Analytics", "normalizedTitle": "Extreme-Scale Visual Analytics", "fno": "mcg2012040023", "hasPdf": true, "idPrefix": "cg", "keywords": [ "Special Issues And Sections", "Visual Analytics", "Large Scale Systems", "Turbulent Flow", "Special Issues And Sections", "Visual Analytics", "Large Scale Systems", "Flow Fields", "Extreme Scale Visual Analytics", "Visual Analytics", "Computer Graphics", "Scientific Discovery Through Advanced Computing", "Sci DAC", "Graph Algebra" ], "authors": [ { "givenName": "Pak Chung", "surname": "Wong", "fullName": "Pak Chung Wong", "affiliation": "Pacific Northwest National Laboratory", "__typename": "ArticleAuthorType" }, { "givenName": "Han-Wei", "surname": "Shen", "fullName": "Han-Wei Shen", "affiliation": "Ohio State University", "__typename": "ArticleAuthorType" }, { "givenName": "Valerio", "surname": "Pascucci", "fullName": "Valerio Pascucci", "affiliation": "University of Utah", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": false, "showRecommendedArticles": true, "isOpenAccess": true, "issueNum": "04", "pubDate": "2012-07-01 00:00:00", "pubType": "mags", "pages": "23-25", "year": "2012", "issn": "0272-1716", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/vast/2006/0591/0/04035767", "title": "Visual Analytics Education", "doi": null, "abstractUrl": "/proceedings-article/vast/2006/04035767/12OmNA14Aii", "parentPublication": { "id": "proceedings/vast/2006/0591/0", "title": "2006 IEEE Symposium On Visual Analytics Science And Technology", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/re/2013/5765/0/06636762", "title": "Visual analytics for software requirements engineering", 
"doi": null, "abstractUrl": "/proceedings-article/re/2013/06636762/12OmNrJ11yp", "parentPublication": { "id": "proceedings/re/2013/5765/0", "title": "2013 IEEE 21st International Requirements Engineering Conference (RE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/hicss/2013/4892/0/4892c416", "title": "Visual Analytics for Public Health: Supporting Knowledge Construction and Decision-Making", "doi": null, "abstractUrl": "/proceedings-article/hicss/2013/4892c416/12OmNrJiCNq", "parentPublication": { "id": "proceedings/hicss/2013/4892/0", "title": "2013 46th Hawaii International Conference on System Sciences", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/big-data/2014/5666/0/07004255", "title": "Web-based visual analytics for extreme scale climate science", "doi": null, "abstractUrl": "/proceedings-article/big-data/2014/07004255/12OmNyFU7bR", "parentPublication": { "id": "proceedings/big-data/2014/5666/0", "title": "2014 IEEE International Conference on Big Data (Big Data)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/so/2013/05/mso2013050028", "title": "The Many Faces of Software Analytics", "doi": null, "abstractUrl": "/magazine/so/2013/05/mso2013050028/13rRUwfZC4h", "parentPublication": { "id": "mags/so", "title": "IEEE Software", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2012/05/ttg2012050660", "title": "Guest Editors' Introduction: Special Section on the IEEE Conference on Visual Analytics Science and Technology (VAST)", "doi": null, "abstractUrl": "/journal/tg/2012/05/ttg2012050660/13rRUxBa5bV", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2012/04/mcg2012040063", "title": "The Top 10 Challenges 
in Extreme-Scale Visual Analytics", "doi": null, "abstractUrl": "/magazine/cg/2012/04/mcg2012040063/13rRUxC0SGA", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2013/07/ttg2013071076", "title": "Guest Editors' Introduction: Special Section on the IEEE Conference on Visual Analytics Science and Technology (VAST)", "doi": null, "abstractUrl": "/journal/tg/2013/07/ttg2013071076/13rRUxOdD2D", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2015/02/mcg2015020016", "title": "Preparing Undergraduates for Visual Analytics", "doi": null, "abstractUrl": "/magazine/cg/2015/02/mcg2015020016/13rRUxjQyjN", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2014/02/mcg2014020024", "title": "Visual Analytics for Biological Data", "doi": null, "abstractUrl": "/magazine/cg/2014/02/mcg2014020024/13rRUygT7hB", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "mcg2012040020", "articleId": "13rRUwwslv0", "__typename": "AdjacentArticleType" }, "next": { "fno": "mcg2012040026", "articleId": "13rRUILLkpN", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "1J9y2mtpt3a", "title": "Jan.", "year": "2023", "issueNum": "01", "idPrefix": "tg", "pubType": "journal", "volume": "29", "label": "Jan.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1HmgceWU2bK", "doi": "10.1109/TVCG.2022.3209393", "abstract": "Co-adaptive guidance aims to enable efficient human-machine collaboration in visual analytics, as proposed by multiple theoretical frameworks. This paper bridges the gap between such conceptual frameworks and practical implementation by introducing an accessible model of guidance and an accompanying guidance library, mapping theory into practice. We contribute a model of system-provided guidance based on design templates and derived strategies. We instantiate the model in a library called Lotse that allows specifying guidance strategies in definition files and generates running code from them. Lotse is the first guidance library using such an approach. It supports the creation of reusable guidance strategies to retrofit existing applications with guidance and fosters the creation of general guidance strategy patterns. We demonstrate its effectiveness through first-use case studies with VA researchers of varying guidance design expertise and find that they are able to effectively and quickly implement guidance with Lotse. Further, we analyze our framework&#x0027;s cognitive dimensions to evaluate its expressiveness and outline a summary of open research questions for aligning guidance practice with its intricate theory.", "abstracts": [ { "abstractType": "Regular", "content": "Co-adaptive guidance aims to enable efficient human-machine collaboration in visual analytics, as proposed by multiple theoretical frameworks. 
This paper bridges the gap between such conceptual frameworks and practical implementation by introducing an accessible model of guidance and an accompanying guidance library, mapping theory into practice. We contribute a model of system-provided guidance based on design templates and derived strategies. We instantiate the model in a library called Lotse that allows specifying guidance strategies in definition files and generates running code from them. Lotse is the first guidance library using such an approach. It supports the creation of reusable guidance strategies to retrofit existing applications with guidance and fosters the creation of general guidance strategy patterns. We demonstrate its effectiveness through first-use case studies with VA researchers of varying guidance design expertise and find that they are able to effectively and quickly implement guidance with Lotse. Further, we analyze our framework&#x0027;s cognitive dimensions to evaluate its expressiveness and outline a summary of open research questions for aligning guidance practice with its intricate theory.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Co-adaptive guidance aims to enable efficient human-machine collaboration in visual analytics, as proposed by multiple theoretical frameworks. This paper bridges the gap between such conceptual frameworks and practical implementation by introducing an accessible model of guidance and an accompanying guidance library, mapping theory into practice. We contribute a model of system-provided guidance based on design templates and derived strategies. We instantiate the model in a library called Lotse that allows specifying guidance strategies in definition files and generates running code from them. Lotse is the first guidance library using such an approach. It supports the creation of reusable guidance strategies to retrofit existing applications with guidance and fosters the creation of general guidance strategy patterns. 
We demonstrate its effectiveness through first-use case studies with VA researchers of varying guidance design expertise and find that they are able to effectively and quickly implement guidance with Lotse. Further, we analyze our framework's cognitive dimensions to evaluate its expressiveness and outline a summary of open research questions for aligning guidance practice with its intricate theory.", "title": "Lotse: A Practical Framework for Guidance in Visual Analytics", "normalizedTitle": "Lotse: A Practical Framework for Guidance in Visual Analytics", "fno": "09915529", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Cognition", "Data Analysis", "Data Visualisation", "Human Computer Interaction", "Interactive Systems", "Man Machine Systems", "Program Testing", "Accessible Model", "Accompanying Guidance Library", "Aligning Guidance Practice", "Co Adaptive Guidance", "Conceptual Frameworks", "Efficient Human Machine Collaboration", "General Guidance Strategy Patterns", "Guidance Design Expertise", "Lotse", "Multiple Theoretical Frameworks", "Practical Framework", "Reusable Guidance Strategies", "System Provided Guidance", "Visual Analytics", "Analytical Models", "Codes", "Visual Analytics", "Collaboration", "Libraries", "Man Machine Systems", "Guidance Theory", "Guidance Implementation" ], "authors": [ { "givenName": "Fabian", "surname": "Sperrle", "fullName": "Fabian Sperrle", "affiliation": "University of Konstanz, Germany", "__typename": "ArticleAuthorType" }, { "givenName": "Davide", "surname": "Ceneda", "fullName": "Davide Ceneda", "affiliation": "TU Wien, Austria", "__typename": "ArticleAuthorType" }, { "givenName": "Mennatallah", "surname": "El-Assady", "fullName": "Mennatallah El-Assady", "affiliation": "ETH AI Center, Switzerland", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "01", "pubDate": "2023-01-01 00:00:00", "pubType": "trans", "pages": 
"1124-1134", "year": "2023", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/vast/2006/0591/0/04035767", "title": "Visual Analytics Education", "doi": null, "abstractUrl": "/proceedings-article/vast/2006/04035767/12OmNA14Aii", "parentPublication": { "id": "proceedings/vast/2006/0591/0", "title": "2006 IEEE Symposium On Visual Analytics Science And Technology", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vast/2006/0591/0/04035757", "title": "Toward a Multi-Analyst, Collaborative Framework for Visual Analytics", "doi": null, "abstractUrl": "/proceedings-article/vast/2006/04035757/12OmNqAU6pq", "parentPublication": { "id": "proceedings/vast/2006/0591/0", "title": "2006 IEEE Symposium On Visual Analytics Science And Technology", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2016/8942/0/8942a261", "title": "A Need for Exploratory Visual Analytics in Big Data Research and for Open Science", "doi": null, "abstractUrl": "/proceedings-article/iv/2016/8942a261/12OmNz4SOrY", "parentPublication": { "id": "proceedings/iv/2016/8942/0", "title": "2016 20th International Conference Information Visualisation (IV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2017/0831/0/0831a281", "title": "Visual Analytics Solutions as ‘off-the-Shelf’ Libraries", "doi": null, "abstractUrl": "/proceedings-article/iv/2017/0831a281/12OmNzmLxRg", "parentPublication": { "id": "proceedings/iv/2017/0831/0", "title": "2017 21st International Conference Information Visualisation (IV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2012/04/mcg2012040026", "title": "A Graph Algebra for Scalable Visual Analytics", "doi": null, "abstractUrl": "/magazine/cg/2012/04/mcg2012040026/13rRUILLkpN", 
"parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2014/12/06875967", "title": "Knowledge Generation Model for Visual Analytics", "doi": null, "abstractUrl": "/journal/tg/2014/12/06875967/13rRUILLkvt", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/co/2013/08/mco2013080090", "title": "Bixplorer: Visual Analytics with Biclusters", "doi": null, "abstractUrl": "/magazine/co/2013/08/mco2013080090/13rRUwcAqvs", "parentPublication": { "id": "mags/co", "title": "Computer", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2017/01/07534883", "title": "Characterizing Guidance in Visual Analytics", "doi": null, "abstractUrl": "/journal/tg/2017/01/07534883/13rRUxBa568", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2020/9134/0/913400a368", "title": "A Characterization of Data Exchange between Visual Analytics Tools", "doi": null, "abstractUrl": "/proceedings-article/iv/2020/913400a368/1rSRaA2LJBK", "parentPublication": { "id": "proceedings/iv/2020/9134/0", "title": "2020 24th International Conference Information Visualisation (IV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/12/09477191", "title": "Show Me Your Face: Towards an Automated Method to Provide Timely Guidance in Visual Analytics", "doi": null, "abstractUrl": "/journal/tg/2022/12/09477191/1v2MaNxe9k4", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "09903572", "articleId": "1GZonS2SkKs", "__typename": "AdjacentArticleType" }, "next": { "fno": "09911200", "articleId": "1Hcjm0PMkgw", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "1J9yVPqhSKc", "name": "ttg202301-09915529s1-supp1-3209393.pdf", "location": "https://www.computer.org/csdl/api/v1/extra/ttg202301-09915529s1-supp1-3209393.pdf", "extension": "pdf", "size": "146 kB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "1HMOit1lSk8", "title": "Dec.", "year": "2022", "issueNum": "12", "idPrefix": "tg", "pubType": "journal", "volume": "28", "label": "Dec.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1v2MaNxe9k4", "doi": "10.1109/TVCG.2021.3094870", "abstract": "Providing guidance during a Visual Analytics session can support analysts in pursuing their goals more efficiently. However, the effectiveness of guidance depends on many factors: Determining the right timing to provide it is one of them. Although in complex analysis scenarios choosing the right timing could make the difference between a dependable and a superfluous guidance, an analysis of the literature suggests that this problem did not receive enough attention. In this paper, we describe a methodology to determine moments in which guidance is needed. Our assumption is that the need of guidance would influence the user state-of-mind, as in distress situations during the analytical process, and we hypothesize that such moments could be identified by analyzing the user&#x0027;s facial expressions. We propose a framework composed by a facial recognition software and a machine learning model trained to detect when to provide guidance according to changes of the user facial expressions. We trained the model by interviewing eight analysts during their work and ranked multiple facial features based on their relative importance in determining the need of guidance. Finally, we show that by applying only minor modifications to its architecture, our prototype was able to detect a need of guidance on the fly and made our methodology well suited also for real-time analysis sessions. 
The results of our evaluations show that our methodology is indeed effective in determining when a need of guidance is present, which constitutes a prerequisite to providing timely and effective guidance in VA.", "abstracts": [ { "abstractType": "Regular", "content": "Providing guidance during a Visual Analytics session can support analysts in pursuing their goals more efficiently. However, the effectiveness of guidance depends on many factors: Determining the right timing to provide it is one of them. Although in complex analysis scenarios choosing the right timing could make the difference between a dependable and a superfluous guidance, an analysis of the literature suggests that this problem did not receive enough attention. In this paper, we describe a methodology to determine moments in which guidance is needed. Our assumption is that the need of guidance would influence the user state-of-mind, as in distress situations during the analytical process, and we hypothesize that such moments could be identified by analyzing the user&#x0027;s facial expressions. We propose a framework composed by a facial recognition software and a machine learning model trained to detect when to provide guidance according to changes of the user facial expressions. We trained the model by interviewing eight analysts during their work and ranked multiple facial features based on their relative importance in determining the need of guidance. Finally, we show that by applying only minor modifications to its architecture, our prototype was able to detect a need of guidance on the fly and made our methodology well suited also for real-time analysis sessions. 
The results of our evaluations show that our methodology is indeed effective in determining when a need of guidance is present, which constitutes a prerequisite to providing timely and effective guidance in VA.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Providing guidance during a Visual Analytics session can support analysts in pursuing their goals more efficiently. However, the effectiveness of guidance depends on many factors: Determining the right timing to provide it is one of them. Although in complex analysis scenarios choosing the right timing could make the difference between a dependable and a superfluous guidance, an analysis of the literature suggests that this problem did not receive enough attention. In this paper, we describe a methodology to determine moments in which guidance is needed. Our assumption is that the need of guidance would influence the user state-of-mind, as in distress situations during the analytical process, and we hypothesize that such moments could be identified by analyzing the user's facial expressions. We propose a framework composed by a facial recognition software and a machine learning model trained to detect when to provide guidance according to changes of the user facial expressions. We trained the model by interviewing eight analysts during their work and ranked multiple facial features based on their relative importance in determining the need of guidance. Finally, we show that by applying only minor modifications to its architecture, our prototype was able to detect a need of guidance on the fly and made our methodology well suited also for real-time analysis sessions. 
The results of our evaluations show that our methodology is indeed effective in determining when a need of guidance is present, which constitutes a prerequisite to providing timely and effective guidance in VA.", "title": "Show Me Your Face: Towards an Automated Method to Provide Timely Guidance in Visual Analytics", "normalizedTitle": "Show Me Your Face: Towards an Automated Method to Provide Timely Guidance in Visual Analytics", "fno": "09477191", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Data Analysis", "Data Visualisation", "Emotion Recognition", "Face Recognition", "Feature Extraction", "Learning Artificial Intelligence", "Effective Guidance", "Provide Timely Guidance", "Providing Guidance", "Real Time Analysis Sessions", "Superfluous Guidance", "Visual Analytics Session", "Timing", "Machine Learning", "Visual Analytics", "Emotion Recognition", "Data Visualization", "Real Time Systems", "Guidance", "Visual Analytics", "Emotions", "Facial Analysis", "Machine Learning" ], "authors": [ { "givenName": "Davide", "surname": "Ceneda", "fullName": "Davide Ceneda", "affiliation": "TU Wien, Vienna, Austria", "__typename": "ArticleAuthorType" }, { "givenName": "Alessio", "surname": "Arleo", "fullName": "Alessio Arleo", "affiliation": "TU Wien, Vienna, Austria", "__typename": "ArticleAuthorType" }, { "givenName": "Theresia", "surname": "Gschwandtner", "fullName": "Theresia Gschwandtner", "affiliation": "TU Wien, Vienna, Austria", "__typename": "ArticleAuthorType" }, { "givenName": "Silvia", "surname": "Miksch", "fullName": "Silvia Miksch", "affiliation": "TU Wien, Vienna, Austria", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": false, "showRecommendedArticles": true, "isOpenAccess": true, "issueNum": "12", "pubDate": "2022-12-01 00:00:00", "pubType": "trans", "pages": "4570-4581", "year": "2022", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": 
"proceedings/hicss/2014/2504/0/2504b364", "title": "Studying Animation for Real-Time Visual Analytics: A Design Study of Social Media Analytics in Emergency Management", "doi": null, "abstractUrl": "/proceedings-article/hicss/2014/2504b364/12OmNBrlPzK", "parentPublication": { "id": "proceedings/hicss/2014/2504/0", "title": "2014 47th Hawaii International Conference on System Sciences (HICSS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2017/01/07534883", "title": "Characterizing Guidance in Visual Analytics", "doi": null, "abstractUrl": "/journal/tg/2017/01/07534883/13rRUxBa568", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2023/01/09915529", "title": "Lotse: A Practical Framework for Guidance in Visual Analytics", "doi": null, "abstractUrl": "/journal/tg/2023/01/09915529/1HmgceWU2bK", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vis/2022/8812/0/881200a070", "title": "Guided Data Discovery in Interactive Visualizations via Active Search", "doi": null, "abstractUrl": "/proceedings-article/vis/2022/881200a070/1J6h5R5WY1O", "parentPublication": { "id": "proceedings/vis/2022/8812/0", "title": "2022 IEEE Visualization and Visual Analytics (VIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vis/2022/8812/0/881200a105", "title": "Toward Systematic Design Considerations of Organizing Multiple Views", "doi": null, "abstractUrl": "/proceedings-article/vis/2022/881200a105/1J6hb8c5Zde", "parentPublication": { "id": "proceedings/vis/2022/8812/0", "title": "2022 IEEE Visualization and Visual Analytics (VIS)", "__typename": "ParentPublication" }, 
"__typename": "RecommendedArticleType" }, { "id": "proceedings/mlui/2018/4063/0/10075561", "title": "Providing Contextual Assistance in Response to Frustration in Visual Analytics Tasks", "doi": null, "abstractUrl": "/proceedings-article/mlui/2018/10075561/1LIRyHjgHJu", "parentPublication": { "id": "proceedings/mlui/2018/4063/0", "title": "2018 IEEE Workshop on Machine Learning from User Interaction for Visualization and Analytics (MLUI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/01/08807235", "title": "<italic>EmoCo</italic>: Visual Analysis of Emotion Coherence in Presentation Videos", "doi": null, "abstractUrl": "/journal/tg/2020/01/08807235/1cG6m1AVG6c", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/01/08807283", "title": "Interactive Learning for Identifying Relevant Tweets to Support Real-time Situational Awareness", "doi": null, "abstractUrl": "/journal/tg/2020/01/08807283/1cG6meGsQYE", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ase/2020/6768/0/676800b287", "title": "Edge4Real: A Cost-Effective Edge Computing based Human Behaviour Recognition System for Human-Centric Software Engineering", "doi": null, "abstractUrl": "/proceedings-article/ase/2020/676800b287/1pP3KqLx4dy", "parentPublication": { "id": "proceedings/ase/2020/6768/0", "title": "2020 35th IEEE/ACM International Conference on Automated Software Engineering (ASE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/candarw/2020/9919/0/991900a008", "title": "Evacuation Route Guidance Scheme for Building Evacuation Using Wireless Mesh Network Systems", "doi": null, 
"abstractUrl": "/proceedings-article/candarw/2020/991900a008/1rqEEEuIXPW", "parentPublication": { "id": "proceedings/candarw/2020/9919/0", "title": "2020 Eighth International Symposium on Computing and Networking Workshops (CANDARW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "09468903", "articleId": "1uR9KNPeety", "__typename": "AdjacentArticleType" }, "next": { "fno": "09477202", "articleId": "1v2MaDZG6fC", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNvsDHDL", "title": "January/February", "year": "2012", "issueNum": "01", "idPrefix": "tb", "pubType": "journal", "volume": "9", "label": "January/February", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUwIF6cw", "doi": "10.1109/TCBB.2011.44", "abstract": "Genome-wide association studies (GWA) try to identify the genetic polymorphisms associated with variation in phenotypes. However, the most significant genetic variants may have a small predictive power to forecast the future development of common diseases. We study the prediction of the risk of developing a disease given genome-wide genotypic data using classifiers with a reject option, which only make a prediction when they are sufficiently certain, but in doubtful situations may reject making a classification. To test the reliability of our proposal, we used the Wellcome Trust Case Control Consortium (WTCCC) data set, comprising 14,000 cases of seven common human diseases and 3,000 shared controls.", "abstracts": [ { "abstractType": "Regular", "content": "Genome-wide association studies (GWA) try to identify the genetic polymorphisms associated with variation in phenotypes. However, the most significant genetic variants may have a small predictive power to forecast the future development of common diseases. We study the prediction of the risk of developing a disease given genome-wide genotypic data using classifiers with a reject option, which only make a prediction when they are sufficiently certain, but in doubtful situations may reject making a classification. 
To test the reliability of our proposal, we used the Wellcome Trust Case Control Consortium (WTCCC) data set, comprising 14,000 cases of seven common human diseases and 3,000 shared controls.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Genome-wide association studies (GWA) try to identify the genetic polymorphisms associated with variation in phenotypes. However, the most significant genetic variants may have a small predictive power to forecast the future development of common diseases. We study the prediction of the risk of developing a disease given genome-wide genotypic data using classifiers with a reject option, which only make a prediction when they are sufficiently certain, but in doubtful situations may reject making a classification. To test the reliability of our proposal, we used the Wellcome Trust Case Control Consortium (WTCCC) data set, comprising 14,000 cases of seven common human diseases and 3,000 shared controls.", "title": "Disease Liability Prediction from Large Scale Genotyping Data Using Classifiers with a Reject Option", "normalizedTitle": "Disease Liability Prediction from Large Scale Genotyping Data Using Classifiers with a Reject Option", "fno": "ttb2012010088", "hasPdf": true, "idPrefix": "tb", "keywords": [ "Diseases", "Bioinformatics", "Diabetes", "Biological Cells", "Input Variables", "Genomics", "Risk Of Common Human Diseases", "Genome Wide Analysis", "Classification With A Reject Option" ], "authors": [ { "givenName": "J. R.", "surname": "Quevedo", "fullName": "J. R. Quevedo", "affiliation": "Dept. de Inf., Univ. de Oviedo en Gijon, Gijon, Spain", "__typename": "ArticleAuthorType" }, { "givenName": "A.", "surname": "Bahamonde", "fullName": "A. Bahamonde", "affiliation": "Centro de Intel. Artificial, Univ. de Oviedo en Gijon, Gijon, Spain", "__typename": "ArticleAuthorType" }, { "givenName": "M.", "surname": "Perez-Enciso", "fullName": "M. Perez-Enciso", "affiliation": "Dept. de Cienc. 
Animal i dels Aliments, Univ. Autonoma de Barcelona, Bellaterra, Spain", "__typename": "ArticleAuthorType" }, { "givenName": "O.", "surname": "Luaces", "fullName": "O. Luaces", "affiliation": "Centro de Intel. Artificial, Univ. de Oviedo en Gijon, Gijon, Spain", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "01", "pubDate": "2012-01-01 00:00:00", "pubType": "trans", "pages": "88-97", "year": "2012", "issn": "1545-5963", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/iacc/2017/1560/0/07976867", "title": "DNA Based Disease Prediction Using Pathway Analysis", "doi": null, "abstractUrl": "/proceedings-article/iacc/2017/07976867/12OmNBqv2mF", "parentPublication": { "id": "proceedings/iacc/2017/1560/0", "title": "2017 IEEE 7th International Advance Computing Conference (IACC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bibm/2014/5669/0/06999379", "title": "Genome-wide association analysis with matched samples discloses additional novel risk loci", "doi": null, "abstractUrl": "/proceedings-article/bibm/2014/06999379/12OmNqGA5bj", "parentPublication": { "id": "proceedings/bibm/2014/5669/0", "title": "2014 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdmw/2014/4274/0/4274a127", "title": "Text Mining for Hypotheses and Results in Translational Medicine Studies", "doi": null, "abstractUrl": "/proceedings-article/icdmw/2014/4274a127/12OmNwNwzCo", "parentPublication": { "id": "proceedings/icdmw/2014/4274/0", "title": "2014 IEEE International Conference on Data Mining Workshop (ICDMW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/1992/2915/0/00201729", 
"title": "Multistage pattern recognition with reject option", "doi": null, "abstractUrl": "/proceedings-article/icpr/1992/00201729/12OmNwdL7kG", "parentPublication": { "id": "proceedings/icpr/1992/2915/0", "title": "11th IAPR International Conference on Pattern Recognition. Vol.II. Conference B: Pattern Recognition Methodology and Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bibm/2017/3050/0/08217674", "title": "Down syndrome prediction/screening model based on deep learning and illumina genotyping array", "doi": null, "abstractUrl": "/proceedings-article/bibm/2017/08217674/12OmNwlHT0S", "parentPublication": { "id": "proceedings/bibm/2017/3050/0", "title": "2017 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bibmw/2010/8303/0/05703926", "title": "Genome-wide linkage and association analyses for the fasting glucose level in Korean population including monozygotic twins", "doi": null, "abstractUrl": "/proceedings-article/bibmw/2010/05703926/12OmNzayNkT", "parentPublication": { "id": "proceedings/bibmw/2010/8303/0", "title": "2010 IEEE International Conference on Bioinformatics and Biomedicine Workshops (BIBMW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tb/2017/03/07403975", "title": "Searching Genome-Wide Multi-Locus Associations for Multiple Diseases Based on Bayesian Inference", "doi": null, "abstractUrl": "/journal/tb/2017/03/07403975/13rRUxBa54H", "parentPublication": { "id": "trans/tb", "title": "IEEE/ACM Transactions on Computational Biology and Bioinformatics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tb/5555/01/08514810", "title": "Backpropagation Approach Supported by Image Compression Algorithm for the Classification of Chronic Condition Diseases", "doi": null, 
"abstractUrl": "/journal/tb/5555/01/08514810/14NIz5mrVeg", "parentPublication": { "id": "trans/tb", "title": "IEEE/ACM Transactions on Computational Biology and Bioinformatics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bibm/2021/0126/0/09669639", "title": "prePathCluster: An novel deep-learning based method for endocrine disease pathway analysis", "doi": null, "abstractUrl": "/proceedings-article/bibm/2021/09669639/1A9VPw6t80E", "parentPublication": { "id": "proceedings/bibm/2021/0126/0", "title": "2021 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bibm/2020/6215/0/09313337", "title": "Epistasis Detection using Heterogeneous Bio-molecular Network", "doi": null, "abstractUrl": "/proceedings-article/bibm/2020/09313337/1qmfSQyokjS", "parentPublication": { "id": "proceedings/bibm/2020/6215/0", "title": "2020 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "ttb2012010079", "articleId": "13rRUxASuWv", "__typename": "AdjacentArticleType" }, "next": { "fno": "ttb2012010098", "articleId": "13rRUxASuo7", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNwGqBql", "title": "July-Aug.", "year": "2013", "issueNum": "04", "idPrefix": "cg", "pubType": "magazine", "volume": "33", "label": "July-Aug.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUB7a1ij", "doi": "10.1109/MCG.2013.39", "abstract": "The volume of available data has been growing exponentially, increasing data problem's complexity and obscurity. In response, visual analytics (VA) has gained attention, yet its solutions haven't scaled well for big data. Computational methods can improve VA's scalability by giving users compact, meaningful information about the input data. However, the significant computation time these methods require hinders real-time interactive visualization of big data. By addressing crucial discrepancies between these methods and VA regarding precision and convergence, researchers have proposed ways to customize them for VA. These approaches, which include low-precision computation and iteration-level interactive visualization, ensure real-time interactive VA for big data.", "abstracts": [ { "abstractType": "Regular", "content": "The volume of available data has been growing exponentially, increasing data problem's complexity and obscurity. In response, visual analytics (VA) has gained attention, yet its solutions haven't scaled well for big data. Computational methods can improve VA's scalability by giving users compact, meaningful information about the input data. However, the significant computation time these methods require hinders real-time interactive visualization of big data. By addressing crucial discrepancies between these methods and VA regarding precision and convergence, researchers have proposed ways to customize them for VA. 
These approaches, which include low-precision computation and iteration-level interactive visualization, ensure real-time interactive VA for big data.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "The volume of available data has been growing exponentially, increasing data problem's complexity and obscurity. In response, visual analytics (VA) has gained attention, yet its solutions haven't scaled well for big data. Computational methods can improve VA's scalability by giving users compact, meaningful information about the input data. However, the significant computation time these methods require hinders real-time interactive visualization of big data. By addressing crucial discrepancies between these methods and VA regarding precision and convergence, researchers have proposed ways to customize them for VA. These approaches, which include low-precision computation and iteration-level interactive visualization, ensure real-time interactive VA for big data.", "title": "Customizing Computational Methods for Visual Analytics with Big Data", "normalizedTitle": "Customizing Computational Methods for Visual Analytics with Big Data", "fno": "mcg2013040022", "hasPdf": true, "idPrefix": "cg", "keywords": [ "Visual Analytics", "Data Visualization", "Real Time Systems", "Principal Component Analysis", "Clustering Algorithms", "Algorithm Design And Analysis", "Iteration Level Visualization", "Visual Analytics", "Data Visualization", "Convergence", "Real Time Systems", "Principal Component Analysis", "Clustering Algorithms", "Algorithm Design And Analysis", "Computer Graphics", "Large Scale Data", "Big Data", "Visual Analytics", "Dimension Reduction", "Clustering", "Low Precision Computation" ], "authors": [ { "givenName": null, "surname": "Jaegul Choo", "fullName": "Jaegul Choo", "affiliation": null, "__typename": "ArticleAuthorType" }, { "givenName": null, "surname": "Haesun Park", "fullName": "Haesun Park", "affiliation": null, "__typename": 
"ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "04", "pubDate": "2013-07-01 00:00:00", "pubType": "mags", "pages": "22-28", "year": "2013", "issn": "0272-1716", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/big-data/2014/5666/0/07004262", "title": "Evaluating density-based motion for big data visual analytics", "doi": null, "abstractUrl": "/proceedings-article/big-data/2014/07004262/12OmNB0nWeq", "parentPublication": { "id": "proceedings/big-data/2014/5666/0", "title": "2014 IEEE International Conference on Big Data (Big Data)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vast/2012/4752/0/06400554", "title": "Visual analytics for the big data era — A comparative review of state-of-the-art commercial systems", "doi": null, "abstractUrl": "/proceedings-article/vast/2012/06400554/12OmNBPc8qK", "parentPublication": { "id": "proceedings/vast/2012/4752/0", "title": "2012 IEEE Conference on Visual Analytics Science and Technology (VAST 2012)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/hicss/2013/4892/0/4892c416", "title": "Visual Analytics for Public Health: Supporting Knowledge Construction and Decision-Making", "doi": null, "abstractUrl": "/proceedings-article/hicss/2013/4892c416/12OmNrJiCNq", "parentPublication": { "id": "proceedings/hicss/2013/4892/0", "title": "2013 46th Hawaii International Conference on System Sciences", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bdva/2015/7343/0/07314286", "title": "Big Data Visual Analytics with Parallel Coordinates", "doi": null, "abstractUrl": "/proceedings-article/bdva/2015/07314286/12OmNybfqVl", "parentPublication": { "id": "proceedings/bdva/2015/7343/0", "title": "2015 Big Data Visual 
Analytics (BDVA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2012/04/mcg2012040026", "title": "A Graph Algebra for Scalable Visual Analytics", "doi": null, "abstractUrl": "/magazine/cg/2012/04/mcg2012040026/13rRUILLkpN", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2012/04/mcg2012040063", "title": "The Top 10 Challenges in Extreme-Scale Visual Analytics", "doi": null, "abstractUrl": "/magazine/cg/2012/04/mcg2012040063/13rRUxC0SGA", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2012/04/mcg2012040023", "title": "Extreme-Scale Visual Analytics", "doi": null, "abstractUrl": "/magazine/cg/2012/04/mcg2012040023/13rRUxjQyxF", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/01/08805439", "title": "The Validity, Generalizability and Feasibility of Summative Evaluation Methods in Visual Analytics", "doi": null, "abstractUrl": "/journal/tg/2020/01/08805439/1cG4DVd6FcQ", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/10/08423105", "title": "Commercial Visual Analytics Systems&#x2013;Advances in the Big Data Analytics Field", "doi": null, "abstractUrl": "/journal/tg/2019/10/08423105/1cYd7bZMLp6", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2020/9134/0/913400a368", "title": "A 
Characterization of Data Exchange between Visual Analytics Tools", "doi": null, "abstractUrl": "/proceedings-article/iv/2020/913400a368/1rSRaA2LJBK", "parentPublication": { "id": "proceedings/iv/2020/9134/0", "title": "2020 24th International Conference Information Visualisation (IV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "mcg2013040020", "articleId": "13rRUxcsYRj", "__typename": "AdjacentArticleType" }, "next": { "fno": "mcg2013040029", "articleId": "13rRUzphDsv", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNwFid7w", "title": "Jan.", "year": "2019", "issueNum": "01", "idPrefix": "tg", "pubType": "journal", "volume": "25", "label": "Jan.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "17D45XeKgns", "doi": "10.1109/TVCG.2018.2865047", "abstract": "Much research has been done regarding how to visualize and interact with observations and attributes of high-dimensional data for exploratory data analysis. From the analyst's perceptual and cognitive perspective, current visualization approaches typically treat the observations of the high-dimensional dataset very differently from the attributes. Often, the attributes are treated as inputs (e.g., sliders), and observations as outputs (e.g., projection plots), thus emphasizing investigation of the observations. However, there are many cases in which analysts wish to investigate both the observations and the attributes of the dataset, suggesting a symmetry between how analysts think about attributes and observations. To address this, we define SIRIUS (Symmetric Interactive Representations In a Unified System), a symmetric, dual projection technique to support exploratory data analysis of high-dimensional data. We provide an example implementation of SIRIUS and demonstrate how this symmetry affords additional insights.", "abstracts": [ { "abstractType": "Regular", "content": "Much research has been done regarding how to visualize and interact with observations and attributes of high-dimensional data for exploratory data analysis. From the analyst's perceptual and cognitive perspective, current visualization approaches typically treat the observations of the high-dimensional dataset very differently from the attributes. Often, the attributes are treated as inputs (e.g., sliders), and observations as outputs (e.g., projection plots), thus emphasizing investigation of the observations. 
However, there are many cases in which analysts wish to investigate both the observations and the attributes of the dataset, suggesting a symmetry between how analysts think about attributes and observations. To address this, we define SIRIUS (Symmetric Interactive Representations In a Unified System), a symmetric, dual projection technique to support exploratory data analysis of high-dimensional data. We provide an example implementation of SIRIUS and demonstrate how this symmetry affords additional insights.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Much research has been done regarding how to visualize and interact with observations and attributes of high-dimensional data for exploratory data analysis. From the analyst's perceptual and cognitive perspective, current visualization approaches typically treat the observations of the high-dimensional dataset very differently from the attributes. Often, the attributes are treated as inputs (e.g., sliders), and observations as outputs (e.g., projection plots), thus emphasizing investigation of the observations. However, there are many cases in which analysts wish to investigate both the observations and the attributes of the dataset, suggesting a symmetry between how analysts think about attributes and observations. To address this, we define SIRIUS (Symmetric Interactive Representations In a Unified System), a symmetric, dual projection technique to support exploratory data analysis of high-dimensional data. 
We provide an example implementation of SIRIUS and demonstrate how this symmetry affords additional insights.", "title": "SIRIUS: Dual, Symmetric, Interactive Dimension Reductions", "normalizedTitle": "SIRIUS: Dual, Symmetric, Interactive Dimension Reductions", "fno": "08440814", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Data Analysis", "Data Mining", "Data Visualisation", "SIRIUS", "High Dimensional Dataset", "Symmetric Projection Technique", "Dual Projection Technique", "Data Analysis", "Visualization Approaches", "Cognitive Perspective", "Analysts Perceptual", "Symmetric Interactive Representations In A Unified System", "Symmetric Interactive Dimension Reductions", "Dual Interactive Dimension Reductions", "Data Visualization", "Task Analysis", "Data Analysis", "Animals", "Principal Component Analysis", "Visual Analytics", "Image Color Analysis", "Dimension Reduction", "Semantic Interaction", "Exploratory Data Analysis", "Observation Projection", "Attribute Projection" ], "authors": [ { "givenName": "Michelle", "surname": "Dowling", "fullName": "Michelle Dowling", "affiliation": "Virginia Tech Department of Computer Science", "__typename": "ArticleAuthorType" }, { "givenName": "John", "surname": "Wenskovitch", "fullName": "John Wenskovitch", "affiliation": "Virginia Tech Department of Computer Science", "__typename": "ArticleAuthorType" }, { "givenName": "J.T.", "surname": "Fry", "fullName": "J.T. 
Fry", "affiliation": "Virginia Tech Department of Statistics", "__typename": "ArticleAuthorType" }, { "givenName": "Scotland", "surname": "Leman", "fullName": "Scotland Leman", "affiliation": "Virginia Tech Department of Statistics", "__typename": "ArticleAuthorType" }, { "givenName": "Leanna", "surname": "House", "fullName": "Leanna House", "affiliation": "Virginia Tech Department of Statistics", "__typename": "ArticleAuthorType" }, { "givenName": "Chris", "surname": "North", "fullName": "Chris North", "affiliation": "Virginia Tech Department of Computer Science", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "01", "pubDate": "2019-01-01 00:00:00", "pubType": "trans", "pages": "172-182", "year": "2019", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "trans/tg/2013/12/ttg2013122634", "title": "Empirical Guidance on Scatterplot and Dimension Reduction Technique Choices", "doi": null, "abstractUrl": "/journal/tg/2013/12/ttg2013122634/13rRUEgs2BW", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2016/01/07192671", "title": "InterAxis: Steering Scatterplot Axes via Observation-Level Interaction", "doi": null, "abstractUrl": "/journal/tg/2016/01/07192671/13rRUILLkDT", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2017/01/07534760", "title": "Designing Progressive and Interactive Analytics Processes for High-Dimensional Data Analysis", "doi": null, "abstractUrl": "/journal/tg/2017/01/07534760/13rRUwbaqLy", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions 
on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2017/01/07536110", "title": "Annotation Graphs: A Graph-Based Visualization for Meta-Analysis of Data Based on User-Authored Annotations", "doi": null, "abstractUrl": "/journal/tg/2017/01/07536110/13rRUxZ0o1F", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/beliv/2018/6884/0/08634103", "title": "The Garden of Forking Paths in Visualization: A Design Space for Reliable Exploratory Visual Analytics : Position Paper", "doi": null, "abstractUrl": "/proceedings-article/beliv/2018/08634103/17D45VsBTXJ", "parentPublication": { "id": "proceedings/beliv/2018/6884/0", "title": "2018 IEEE Evaluation and Beyond - Methodological Approaches for Visualization (BELIV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bdva/2018/9194/0/08534019", "title": "Multiple Workspaces in Visual Analytics", "doi": null, "abstractUrl": "/proceedings-article/bdva/2018/08534019/17D45W9KVIu", "parentPublication": { "id": "proceedings/bdva/2018/9194/0", "title": "2018 International Symposium on Big Data Visual and Immersive Analytics (BDVA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/mc/2018/04/08576666", "title": "SIRIUS: Enabling Progressive Data Exploration for Extreme-Scale Scientific Data", "doi": null, "abstractUrl": "/journal/mc/2018/04/08576666/17D45WaTkdy", "parentPublication": { "id": "trans/mc", "title": "IEEE Transactions on Multi-Scale Computing Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2023/01/09903343", "title": "RankAxis: Towards a Systematic Combination of Projection and Ranking in Multi-Attribute Data Exploration", "doi": 
null, "abstractUrl": "/journal/tg/2023/01/09903343/1GZooOkjYzK", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/10068257", "title": "Interactive Subspace Cluster Analysis Guided by Semantic Attribute Associations", "doi": null, "abstractUrl": "/journal/tg/5555/01/10068257/1LtR7CeyeHe", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/01/08807354", "title": "VASABI: Hierarchical User Profiles for Interactive Visual User Behaviour Analytics", "doi": null, "abstractUrl": "/journal/tg/2020/01/08807354/1cG6pt9f4ly", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "08440044", "articleId": "17D45VTRovg", "__typename": "AdjacentArticleType" }, "next": { "fno": "08443131", "articleId": "17D45WIXbOh", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "17ShDTYesNL", "name": "ttg201901-08440814s1.zip", "location": "https://www.computer.org/csdl/api/v1/extra/ttg201901-08440814s1.zip", "extension": "zip", "size": "258 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "1CdACzpvTPi", "title": "May", "year": "2022", "issueNum": "05", "idPrefix": "tk", "pubType": "journal", "volume": "34", "label": "May", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "1lIYCwlAKLS", "doi": "10.1109/TKDE.2020.3011674", "abstract": "Scholar name disambiguation remains a hard and unsolved problem, which brings various troubles for bibliography data analytics. Most existing methods handle name disambiguation separately that tackles one name at a time, and neglect the fact that disambiguation of one name affects the others. Further, it is typically common that only limited information is available for bibliography data, e.g., only basic paper and citation information is available in DBLP. In this study, we propose a collective approach to name disambiguation, which takes the connection of different ambiguous names into consideration. We reformulate bibliography data as a heterogeneous multipartite network, which initially treats each author reference as a unique author entity, and disambiguation results of one name propagate to the others of the network. To further deal with the sparsity problem caused by limited available information, we also introduce word-word and venue-venue similarities, and we finally measure author similarities by assembling similarities from four perspectives. Using real-life data, we experimentally demonstrate that our approach is both effective and efficient.", "abstracts": [ { "abstractType": "Regular", "content": "Scholar name disambiguation remains a hard and unsolved problem, which brings various troubles for bibliography data analytics. Most existing methods handle name disambiguation separately that tackles one name at a time, and neglect the fact that disambiguation of one name affects the others. 
Further, it is typically common that only limited information is available for bibliography data, e.g., only basic paper and citation information is available in DBLP. In this study, we propose a collective approach to name disambiguation, which takes the connection of different ambiguous names into consideration. We reformulate bibliography data as a heterogeneous multipartite network, which initially treats each author reference as a unique author entity, and disambiguation results of one name propagate to the others of the network. To further deal with the sparsity problem caused by limited available information, we also introduce word-word and venue-venue similarities, and we finally measure author similarities by assembling similarities from four perspectives. Using real-life data, we experimentally demonstrate that our approach is both effective and efficient.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Scholar name disambiguation remains a hard and unsolved problem, which brings various troubles for bibliography data analytics. Most existing methods handle name disambiguation separately that tackles one name at a time, and neglect the fact that disambiguation of one name affects the others. Further, it is typically common that only limited information is available for bibliography data, e.g., only basic paper and citation information is available in DBLP. In this study, we propose a collective approach to name disambiguation, which takes the connection of different ambiguous names into consideration. We reformulate bibliography data as a heterogeneous multipartite network, which initially treats each author reference as a unique author entity, and disambiguation results of one name propagate to the others of the network. 
To further deal with the sparsity problem caused by limited available information, we also introduce word-word and venue-venue similarities, and we finally measure author similarities by assembling similarities from four perspectives. Using real-life data, we experimentally demonstrate that our approach is both effective and efficient.", "title": "A Collective Approach to Scholar Name Disambiguation", "normalizedTitle": "A Collective Approach to Scholar Name Disambiguation", "fno": "09147044", "hasPdf": true, "idPrefix": "tk", "keywords": [ "Bibliographies", "Wide Area Networks", "Search Problems", "Merging", "Data Analysis", "Indexes", "Data Integrity", "Name Disambiguation", "Collective Clustering", "Information Network" ], "authors": [ { "givenName": "Dongsheng", "surname": "Luo", "fullName": "Dongsheng Luo", "affiliation": "College of Information Sciences and Technology, Pennsylvania State University, State College, PA, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Shuai", "surname": "Ma", "fullName": "Shuai Ma", "affiliation": "SKLSDE Lab, Beihang University and Beijing Advanced Innovation Center for Big Data and Brain Computing, Beijing, China", "__typename": "ArticleAuthorType" }, { "givenName": "Yaowei", "surname": "Yan", "fullName": "Yaowei Yan", "affiliation": "College of Information Sciences and Technology, Pennsylvania State University, State College, PA, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Chunming", "surname": "Hu", "fullName": "Chunming Hu", "affiliation": "SKLSDE Lab, Beihang University and Beijing Advanced Innovation Center for Big Data and Brain Computing, Beijing, China", "__typename": "ArticleAuthorType" }, { "givenName": "Xiang", "surname": "Zhang", "fullName": "Xiang Zhang", "affiliation": "College of Information Sciences and Technology, Pennsylvania State University, State College, PA, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Jinpeng", "surname": "Huai", "fullName": "Jinpeng Huai", 
"affiliation": "SKLSDE Lab, Beihang University and Beijing Advanced Innovation Center for Big Data and Brain Computing, Beijing, China", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "05", "pubDate": "2022-05-01 00:00:00", "pubType": "trans", "pages": "2020-2032", "year": "2022", "issn": "1041-4347", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/skg/2012/4794/0/4794a197", "title": "A Novel Ontology-Based Approach of Chinese Person Name Disambiguation", "doi": null, "abstractUrl": "/proceedings-article/skg/2012/4794a197/12OmNApcuem", "parentPublication": { "id": "proceedings/skg/2012/4794/0", "title": "Semantics, Knowledge and Grid, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/isme/2010/7669/2/05573917", "title": "Cloud-Based Name Disambiguation Algorithm", "doi": null, "abstractUrl": "/proceedings-article/isme/2010/05573917/12OmNCd2rBF", "parentPublication": { "id": "proceedings/isme/2010/7669/2", "title": "2010 International Conference of Information Science and Management Engineering. 
ISME 2010", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/das/2012/4661/0/4661a440", "title": "Evaluation of Features for Author Name Disambiguation Using Linear Support Vector Machines", "doi": null, "abstractUrl": "/proceedings-article/das/2012/4661a440/12OmNrkT7AI", "parentPublication": { "id": "proceedings/das/2012/4661/0", "title": "Document Analysis Systems, IAPR International Workshop on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/isme/2010/4132/2/4132b155", "title": "Cloud-Based Name Disambiguation Algorithm", "doi": null, "abstractUrl": "/proceedings-article/isme/2010/4132b155/12OmNvSbBnL", "parentPublication": { "id": "proceedings/isme/2010/4132/2", "title": "Information Science and Management Engineering, International Conference of", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/jcdl/2005/876/0/04118563", "title": "Name disambiguation in author citations using a K-way spectral clustering method", "doi": null, "abstractUrl": "/proceedings-article/jcdl/2005/04118563/12OmNy2ah16", "parentPublication": { "id": "proceedings/jcdl/2005/876/0", "title": "Proceedings of the 5th ACM/IEEE Joint Conference on Digital Libraries", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdm/2011/4408/0/4408a794", "title": "ADANA: Active Name Disambiguation", "doi": null, "abstractUrl": "/proceedings-article/icdm/2011/4408a794/12OmNz2C1lL", "parentPublication": { "id": "proceedings/icdm/2011/4408/0", "title": "2011 IEEE 11th International Conference on Data Mining", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/jcdl/2016/4229/0/07559614", "title": "Using co-authorship networks for author name disambiguation", "doi": null, "abstractUrl": "/proceedings-article/jcdl/2016/07559614/12OmNzBOhLQ", 
"parentPublication": { "id": "proceedings/jcdl/2016/4229/0", "title": "2016 IEEE/ACM Joint Conference on Digital Libraries (JCDL)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wi/2006/2747/0/274700378", "title": "Name Disambiguation in Person Information Mining", "doi": null, "abstractUrl": "/proceedings-article/wi/2006/274700378/12OmNzaQotb", "parentPublication": { "id": "proceedings/wi/2006/2747/0", "title": "2006 IEEE/WIC/ACM International Conference on Web Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icde/2021/9184/0/918400c317", "title": "A Collective Approach to Scholar Name Disambiguation (Extended Abstract)", "doi": null, "abstractUrl": "/proceedings-article/icde/2021/918400c317/1uGXsnGDirC", "parentPublication": { "id": "proceedings/icde/2021/9184/0", "title": "2021 IEEE 37th International Conference on Data Engineering (ICDE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icws/2021/1681/0/168100a506", "title": "Multiple Features Driven Author Name Disambiguation", "doi": null, "abstractUrl": "/proceedings-article/icws/2021/168100a506/1yrHD5SqC9a", "parentPublication": { "id": "proceedings/icws/2021/1681/0", "title": "2021 IEEE International Conference on Web Services (ICWS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": null, "next": { "fno": "09139346", "articleId": "1ls8NKJ0Qww", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }
{ "issue": { "id": "12OmNCbCrUN", "title": "Dec.", "year": "2013", "issueNum": "12", "idPrefix": "tg", "pubType": "journal", "volume": "19", "label": "Dec.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUIIVlcI", "doi": "10.1109/TVCG.2013.212", "abstract": "Topic modeling has been widely used for analyzing text document collections. Recently, there have been significant advancements in various topic modeling techniques, particularly in the form of probabilistic graphical modeling. State-of-the-art techniques such as Latent Dirichlet Allocation (LDA) have been successfully applied in visual text analytics. However, most of the widely-used methods based on probabilistic modeling have drawbacks in terms of consistency from multiple runs and empirical convergence. Furthermore, due to the complicatedness in the formulation and the algorithm, LDA cannot easily incorporate various types of user feedback. To tackle this problem, we propose a reliable and flexible visual analytics system for topic modeling called UTOPIAN (User-driven Topic modeling based on Interactive Nonnegative Matrix Factorization). Centered around its semi-supervised formulation, UTOPIAN enables users to interact with the topic modeling method and steer the result in a user-driven manner. We demonstrate the capability of UTOPIAN via several usage scenarios with real-world document corpuses such as InfoVis/VAST paper data set and product review data sets.", "abstracts": [ { "abstractType": "Regular", "content": "Topic modeling has been widely used for analyzing text document collections. Recently, there have been significant advancements in various topic modeling techniques, particularly in the form of probabilistic graphical modeling. State-of-the-art techniques such as Latent Dirichlet Allocation (LDA) have been successfully applied in visual text analytics. 
However, most of the widely-used methods based on probabilistic modeling have drawbacks in terms of consistency from multiple runs and empirical convergence. Furthermore, due to the complicatedness in the formulation and the algorithm, LDA cannot easily incorporate various types of user feedback. To tackle this problem, we propose a reliable and flexible visual analytics system for topic modeling called UTOPIAN (User-driven Topic modeling based on Interactive Nonnegative Matrix Factorization). Centered around its semi-supervised formulation, UTOPIAN enables users to interact with the topic modeling method and steer the result in a user-driven manner. We demonstrate the capability of UTOPIAN via several usage scenarios with real-world document corpuses such as InfoVis/VAST paper data set and product review data sets.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Topic modeling has been widely used for analyzing text document collections. Recently, there have been significant advancements in various topic modeling techniques, particularly in the form of probabilistic graphical modeling. State-of-the-art techniques such as Latent Dirichlet Allocation (LDA) have been successfully applied in visual text analytics. However, most of the widely-used methods based on probabilistic modeling have drawbacks in terms of consistency from multiple runs and empirical convergence. Furthermore, due to the complicatedness in the formulation and the algorithm, LDA cannot easily incorporate various types of user feedback. To tackle this problem, we propose a reliable and flexible visual analytics system for topic modeling called UTOPIAN (User-driven Topic modeling based on Interactive Nonnegative Matrix Factorization). Centered around its semi-supervised formulation, UTOPIAN enables users to interact with the topic modeling method and steer the result in a user-driven manner. 
We demonstrate the capability of UTOPIAN via several usage scenarios with real-world document corpuses such as InfoVis/VAST paper data set and product review data sets.", "title": "UTOPIAN: User-Driven Topic Modeling Based on Interactive Nonnegative Matrix Factorization", "normalizedTitle": "UTOPIAN: User-Driven Topic Modeling Based on Interactive Nonnegative Matrix Factorization", "fno": "ttg2013121992", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Analytical Models", "Visual Analytics", "Computational Modeling", "Interactive States", "Context Modeling", "Interactive Clustering", "Analytical Models", "Visual Analytics", "Computational Modeling", "Interactive States", "Context Modeling", "Text Analytics", "Latent Dirichlet Allocation", "Nonnegative Matrix Factorization", "Topic Modeling", "Visual Analytics" ], "authors": [ { "givenName": null, "surname": "Jaegul Choo", "fullName": "Jaegul Choo", "affiliation": "Georgia Inst. of Technol., Atlanta, GA, USA", "__typename": "ArticleAuthorType" }, { "givenName": null, "surname": "Changhyun Lee", "fullName": "Changhyun Lee", "affiliation": "Georgia Inst. of Technol., Atlanta, GA, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Chandan K.", "surname": "Reddy", "fullName": "Chandan K. Reddy", "affiliation": "Wayne State Univ., Detroit, MI, USA", "__typename": "ArticleAuthorType" }, { "givenName": "Haesun", "surname": "Park", "fullName": "Haesun Park", "affiliation": "Georgia Inst. 
of Technol., Atlanta, GA, USA", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "12", "pubDate": "2013-12-01 00:00:00", "pubType": "trans", "pages": "1992-2001", "year": "2013", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/icdm/2016/5473/0/07837872", "title": "L-EnsNMF: Boosted Local Topic Discovery via Ensemble of Nonnegative Matrix Factorization", "doi": null, "abstractUrl": "/proceedings-article/icdm/2016/07837872/12OmNAWH9xf", "parentPublication": { "id": "proceedings/icdm/2016/5473/0", "title": "2016 IEEE 16th International Conference on Data Mining (ICDM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2018/01/08019825", "title": "Progressive Learning of Topic Modeling Parameters: A Visual Analytics Framework", "doi": null, "abstractUrl": "/journal/tg/2018/01/08019825/13rRUwghd9c", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2013/12/ttg2013122119", "title": "Interactive Exploration of Surveillance Video through Action Shot Summarization and Trajectory Visualization", "doi": null, "abstractUrl": "/journal/tg/2013/12/ttg2013122119/13rRUxC0SOX", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2017/01/07539597", "title": "TopicLens: Efficient Multi-Level Visual Topic Exploration of Large-Scale Document Collections", "doi": null, "abstractUrl": "/journal/tg/2017/01/07539597/13rRUy0qnLK", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", 
"__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/01/08467535", "title": "Visual Analytics for Topic Model Optimization based on User-Steerable Speculative Execution", "doi": null, "abstractUrl": "/journal/tg/2019/01/08467535/17D45XeKgtW", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tk/2020/07/08666058", "title": "Affinity Regularized Non-Negative Matrix Factorization for Lifelong Topic Modeling", "doi": null, "abstractUrl": "/journal/tk/2020/07/08666058/18l6FUrOG88", "parentPublication": { "id": "trans/tk", "title": "IEEE Transactions on Knowledge & Data Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/01/08807224", "title": "Semantic Concept Spaces: Guided Topic Model Refinement using Word-Embedding Projections", "doi": null, "abstractUrl": "/journal/tg/2020/01/08807224/1cG6twVJ2HC", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/pacificvis/2019/9226/0/922600a148", "title": "An Interactive Visual Analytics System for Incremental Classification Based on Semi-supervised Topic Modeling", "doi": null, "abstractUrl": "/proceedings-article/pacificvis/2019/922600a148/1cMF8cnyXfi", "parentPublication": { "id": "proceedings/pacificvis/2019/9226/0", "title": "2019 IEEE Pacific Visualization Symposium (PacificVis)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vast/2019/2284/0/08986922", "title": "TopicSifter: Interactive Search Space Reduction through Targeted Topic Modeling", "doi": null, "abstractUrl": "/proceedings-article/vast/2019/08986922/1hrMz9LdbzO", "parentPublication": { "id": 
"proceedings/vast/2019/2284/0", "title": "2019 IEEE Conference on Visual Analytics Science and Technology (VAST)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/09/09039699", "title": "ArchiText: Interactive Hierarchical Topic Modeling", "doi": null, "abstractUrl": "/journal/tg/2021/09/09039699/1igS4Rezjr2", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "ttg2013121982", "articleId": "13rRUy2YLT1", "__typename": "AdjacentArticleType" }, "next": { "fno": "ttg2013122002", "articleId": "13rRUxNW1Zo", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [ { "id": "17ShDTXWRPa", "name": "ttg2013121992s.mp4", "location": "https://www.computer.org/csdl/api/v1/extra/ttg2013121992s.mp4", "extension": "mp4", "size": "11.9 MB", "__typename": "WebExtraType" } ], "articleVideos": [] }
{ "issue": { "id": "12OmNCbCrUN", "title": "Dec.", "year": "2013", "issueNum": "12", "idPrefix": "tg", "pubType": "journal", "volume": "19", "label": "Dec.", "downloadables": { "hasCover": false, "__typename": "PeriodicalIssueDownloadablesType" }, "__typename": "PeriodicalIssue" }, "article": { "id": "13rRUxNW1Zo", "doi": "10.1109/TVCG.2013.162", "abstract": "Analyzing large textual collections has become increasingly challenging given the size of the data available and the rate that more data is being generated. Topic-based text summarization methods coupled with interactive visualizations have presented promising approaches to address the challenge of analyzing large text corpora. As the text corpora and vocabulary grow larger, more topics need to be generated in order to capture the meaningful latent themes and nuances in the corpora. However, it is difficult for most of current topic-based visualizations to represent large number of topics without being cluttered or illegible. To facilitate the representation and navigation of a large number of topics, we propose a visual analytics system - HierarchicalTopic (HT). HT integrates a computational algorithm, Topic Rose Tree, with an interactive visual interface. The Topic Rose Tree constructs a topic hierarchy based on a list of topics. The interactive visual interface is designed to present the topic content as well as temporal evolution of topics in a hierarchical fashion. User interactions are provided for users to make changes to the topic hierarchy based on their mental model of the topic space. To qualitatively evaluate HT, we present a case study that showcases how HierarchicalTopics aid expert users in making sense of a large number of topics and discovering interesting patterns of topic groups. We have also conducted a user study to quantitatively evaluate the effect of hierarchical topic structure. The study results reveal that the HT leads to faster identification of large number of relevant topics. 
We have also solicited user feedback during the experiments and incorporated some suggestions into the current version of HierarchicalTopics.", "abstracts": [ { "abstractType": "Regular", "content": "Analyzing large textual collections has become increasingly challenging given the size of the data available and the rate that more data is being generated. Topic-based text summarization methods coupled with interactive visualizations have presented promising approaches to address the challenge of analyzing large text corpora. As the text corpora and vocabulary grow larger, more topics need to be generated in order to capture the meaningful latent themes and nuances in the corpora. However, it is difficult for most of current topic-based visualizations to represent large number of topics without being cluttered or illegible. To facilitate the representation and navigation of a large number of topics, we propose a visual analytics system - HierarchicalTopic (HT). HT integrates a computational algorithm, Topic Rose Tree, with an interactive visual interface. The Topic Rose Tree constructs a topic hierarchy based on a list of topics. The interactive visual interface is designed to present the topic content as well as temporal evolution of topics in a hierarchical fashion. User interactions are provided for users to make changes to the topic hierarchy based on their mental model of the topic space. To qualitatively evaluate HT, we present a case study that showcases how HierarchicalTopics aid expert users in making sense of a large number of topics and discovering interesting patterns of topic groups. We have also conducted a user study to quantitatively evaluate the effect of hierarchical topic structure. The study results reveal that the HT leads to faster identification of large number of relevant topics. 
We have also solicited user feedback during the experiments and incorporated some suggestions into the current version of HierarchicalTopics.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Analyzing large textual collections has become increasingly challenging given the size of the data available and the rate that more data is being generated. Topic-based text summarization methods coupled with interactive visualizations have presented promising approaches to address the challenge of analyzing large text corpora. As the text corpora and vocabulary grow larger, more topics need to be generated in order to capture the meaningful latent themes and nuances in the corpora. However, it is difficult for most of current topic-based visualizations to represent large number of topics without being cluttered or illegible. To facilitate the representation and navigation of a large number of topics, we propose a visual analytics system - HierarchicalTopic (HT). HT integrates a computational algorithm, Topic Rose Tree, with an interactive visual interface. The Topic Rose Tree constructs a topic hierarchy based on a list of topics. The interactive visual interface is designed to present the topic content as well as temporal evolution of topics in a hierarchical fashion. User interactions are provided for users to make changes to the topic hierarchy based on their mental model of the topic space. To qualitatively evaluate HT, we present a case study that showcases how HierarchicalTopics aid expert users in making sense of a large number of topics and discovering interesting patterns of topic groups. We have also conducted a user study to quantitatively evaluate the effect of hierarchical topic structure. The study results reveal that the HT leads to faster identification of large number of relevant topics. 
We have also solicited user feedback during the experiments and incorporated some suggestions into the current version of HierarchicalTopics.", "title": "HierarchicalTopics: Visually Exploring Large Text Collections Using Topic Hierarchies", "normalizedTitle": "HierarchicalTopics: Visually Exploring Large Text Collections Using Topic Hierarchies", "fno": "ttg2013122002", "hasPdf": true, "idPrefix": "tg", "keywords": [ "Visual Analytics", "Vocabulary", "Analytical Models", "Text Mining", "Computational Modeling", "Algorithm Design And Analysis", "Visual Analytics", "Vocabulary", "Analytical Models", "Text Mining", "Computational Modeling", "Algorithm Design And Analysis", "Rose Tree", "Hierarchical Topic Representation", "Topic Modeling" ], "authors": [ { "givenName": null, "surname": "Wenwen Dou", "fullName": "Wenwen Dou", "affiliation": "Univ. of North Carolina at Charlotte, Charlotte, NC, USA", "__typename": "ArticleAuthorType" }, { "givenName": null, "surname": "Li Yu", "fullName": "Li Yu", "affiliation": "Univ. of North Carolina at Charlotte, Charlotte, NC, USA", "__typename": "ArticleAuthorType" }, { "givenName": null, "surname": "Xiaoyu Wang", "fullName": "Xiaoyu Wang", "affiliation": "Univ. of North Carolina at Charlotte, Charlotte, NC, USA", "__typename": "ArticleAuthorType" }, { "givenName": null, "surname": "Zhiqiang Ma", "fullName": "Zhiqiang Ma", "affiliation": "Univ. of North Carolina at Charlotte, Charlotte, NC, USA", "__typename": "ArticleAuthorType" }, { "givenName": "William", "surname": "Ribarsky", "fullName": "William Ribarsky", "affiliation": "Univ. 
of North Carolina at Charlotte, Charlotte, NC, USA", "__typename": "ArticleAuthorType" } ], "replicability": null, "showBuyMe": true, "showRecommendedArticles": true, "isOpenAccess": false, "issueNum": "12", "pubDate": "2013-12-01 00:00:00", "pubType": "trans", "pages": "2002-2011", "year": "2013", "issn": "1077-2626", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "recommendedArticles": [ { "id": "proceedings/spire/2000/0746/0/07460055", "title": "Experiment Analysis in Newspaper Topic Detection", "doi": null, "abstractUrl": "/proceedings-article/spire/2000/07460055/12OmNAndin8", "parentPublication": { "id": "proceedings/spire/2000/0746/0", "title": "String Processing and Information Retrieval, International Symposium on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ictai/2011/4596/0/4596a936", "title": "TopicView: Visually Comparing Topic Models of Text Collections", "doi": null, "abstractUrl": "/proceedings-article/ictai/2011/4596a936/12OmNArKSif", "parentPublication": { "id": "proceedings/ictai/2011/4596/0", "title": "2011 IEEE 23rd International Conference on Tools with Artificial Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icbk/2017/3120/0/3120a254", "title": "Incorporating Entity Correlation Knowledge into Topic Modeling", "doi": null, "abstractUrl": "/proceedings-article/icbk/2017/3120a254/12OmNzdoMCc", "parentPublication": { "id": "proceedings/icbk/2017/3120/0", "title": "2017 IEEE International Conference on Big Knowledge (ICBK)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdm/2011/4408/0/4408a101", "title": "SolarMap: Multifaceted Visual Analytics for Topic Exploration", "doi": null, "abstractUrl": "/proceedings-article/icdm/2011/4408a101/12OmNzw8j1t", "parentPublication": { "id": "proceedings/icdm/2011/4408/0", "title": "2011 IEEE 11th 
International Conference on Data Mining", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2018/01/08019825", "title": "Progressive Learning of Topic Modeling Parameters: A Visual Analytics Framework", "doi": null, "abstractUrl": "/journal/tg/2018/01/08019825/13rRUwghd9c", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2012/01/ttg2012010093", "title": "EventRiver: Visually Exploring Text Collections with Temporal References", "doi": null, "abstractUrl": "/journal/tg/2012/01/ttg2012010093/13rRUxly8SS", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2017/01/07539597", "title": "TopicLens: Efficient Multi-Level Visual Topic Exploration of Large-Scale Document Collections", "doi": null, "abstractUrl": "/journal/tg/2017/01/07539597/13rRUy0qnLK", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bibm/2021/0126/0/09669550", "title": "Text Fingerprinting and Topic Mining in the Prescription Opioid Use Literature", "doi": null, "abstractUrl": "/proceedings-article/bibm/2021/09669550/1A9VGSMpNYY", "parentPublication": { "id": "proceedings/bibm/2021/0126/0", "title": "2021 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/09/09039699", "title": "ArchiText: Interactive Hierarchical Topic Modeling", "doi": null, "abstractUrl": "/journal/tg/2021/09/09039699/1igS4Rezjr2", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on 
Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vis4dh/2021/1370/0/137000a012", "title": "Uncertainty-aware Topic Modeling Visualization", "doi": null, "abstractUrl": "/proceedings-article/vis4dh/2021/137000a012/1yNiG9yU9JS", "parentPublication": { "id": "proceedings/vis4dh/2021/1370/0", "title": "2021 IEEE 6th Workshop on Visualization for the Digital Humanities (VIS4DH)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "adjacentArticles": { "previous": { "fno": "ttg2013121992", "articleId": "13rRUIIVlcI", "__typename": "AdjacentArticleType" }, "next": { "fno": "ttg2013122012", "articleId": "13rRUyogGAa", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "webExtras": [], "articleVideos": [] }