data
dict |
|---|
{
"issue": {
"id": "12OmNviZlCL",
"title": "January",
"year": "2011",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": "17",
"label": "January",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUNvgziB",
"doi": "10.1109/TVCG.2010.28",
"abstract": "Euler diagrams have a wide variety of uses, from information visualization to logical reasoning. In all of their application areas, the ability to automatically layout Euler diagrams brings considerable benefits. In this paper, we present a novel approach to Euler diagram generation. We develop certain graphs associated with Euler diagrams in order to allow curves to be added by finding cycles in these graphs. This permits us to build Euler diagrams inductively, adding one curve at a time. Our technique is adaptable, allowing the easy specification, and enforcement, of sets of well-formedness conditions; we present a series of results that identify properties of cycles that correspond to the well-formedness conditions. This improves upon other contributions toward the automated generation of Euler diagrams which implicitly assume some fixed set of well-formedness conditions must hold. In addition, unlike most of these other generation methods, our technique allows any abstract description to be drawn as an Euler diagram. To establish the utility of the approach, a prototype implementation has been developed.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Euler diagrams have a wide variety of uses, from information visualization to logical reasoning. In all of their application areas, the ability to automatically layout Euler diagrams brings considerable benefits. In this paper, we present a novel approach to Euler diagram generation. We develop certain graphs associated with Euler diagrams in order to allow curves to be added by finding cycles in these graphs. This permits us to build Euler diagrams inductively, adding one curve at a time. Our technique is adaptable, allowing the easy specification, and enforcement, of sets of well-formedness conditions; we present a series of results that identify properties of cycles that correspond to the well-formedness conditions. This improves upon other contributions toward the automated generation of Euler diagrams which implicitly assume some fixed set of well-formedness conditions must hold. In addition, unlike most of these other generation methods, our technique allows any abstract description to be drawn as an Euler diagram. To establish the utility of the approach, a prototype implementation has been developed.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Euler diagrams have a wide variety of uses, from information visualization to logical reasoning. In all of their application areas, the ability to automatically layout Euler diagrams brings considerable benefits. In this paper, we present a novel approach to Euler diagram generation. We develop certain graphs associated with Euler diagrams in order to allow curves to be added by finding cycles in these graphs. This permits us to build Euler diagrams inductively, adding one curve at a time. Our technique is adaptable, allowing the easy specification, and enforcement, of sets of well-formedness conditions; we present a series of results that identify properties of cycles that correspond to the well-formedness conditions. This improves upon other contributions toward the automated generation of Euler diagrams which implicitly assume some fixed set of well-formedness conditions must hold. In addition, unlike most of these other generation methods, our technique allows any abstract description to be drawn as an Euler diagram. To establish the utility of the approach, a prototype implementation has been developed.",
"title": "Inductively Generating Euler Diagrams",
"normalizedTitle": "Inductively Generating Euler Diagrams",
"fno": "ttg2011010088",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Information Visualization",
"Diagram Layout",
"Diagram Generation",
"Euler Diagrams",
"Venn Diagrams"
],
"authors": [
{
"givenName": "Gem",
"surname": "Stapleton",
"fullName": "Gem Stapleton",
"affiliation": "University of Brighton, Brighton, UK",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Peter",
"surname": "Rodgers",
"fullName": "Peter Rodgers",
"affiliation": "University of Kent, Canterbury, UK",
"__typename": "ArticleAuthorType"
},
{
"givenName": "John",
"surname": "Howse",
"fullName": "John Howse",
"affiliation": "University of Brighton, Brighton, UK",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Leishi",
"surname": "Zhang",
"fullName": "Leishi Zhang",
"affiliation": "University of Kent, Canterbury, UK",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2011-01-01 00:00:00",
"pubType": "trans",
"pages": "88-100",
"year": "2011",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/iv/2008/3268/0/3268a594",
"title": "Visualise Undrawable Euler Diagrams",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2008/3268a594/12OmNBOllkb",
"parentPublication": {
"id": "proceedings/iv/2008/3268/0",
"title": "2008 12th International Conference Information Visualisation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2003/1988/0/19880272",
"title": "Layout Metrics for Euler Diagrams",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2003/19880272/12OmNvD8RBs",
"parentPublication": {
"id": "proceedings/iv/2003/1988/0",
"title": "Proceedings on Seventh International Conference on Information Visualization, 2003. IV 2003.",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vlhcc/2011/1246/0/06070401",
"title": "Drawing Euler diagrams with circles and ellipses",
"doi": null,
"abstractUrl": "/proceedings-article/vlhcc/2011/06070401/12OmNvpew49",
"parentPublication": {
"id": "proceedings/vlhcc/2011/1246/0",
"title": "2011 IEEE Symposium on Visual Languages and Human-Centric Computing (VL/HCC 2011)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vlhcc/2011/1246/0/06070382",
"title": "SketchSet: Creating Euler diagrams using pen or mouse",
"doi": null,
"abstractUrl": "/proceedings-article/vlhcc/2011/06070382/12OmNx965CA",
"parentPublication": {
"id": "proceedings/vlhcc/2011/1246/0",
"title": "2011 IEEE Symposium on Visual Languages and Human-Centric Computing (VL/HCC 2011)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2007/2900/0/29000771",
"title": "Evaluating the Comprehension of Euler Diagrams",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2007/29000771/12OmNxjjEhx",
"parentPublication": {
"id": "proceedings/iv/2007/2900/0",
"title": "2007 11th International Conference Information Visualization (IV '07)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vlhcc/2013/0369/0/06645262",
"title": "Improving user comprehension of Euler diagrams",
"doi": null,
"abstractUrl": "/proceedings-article/vlhcc/2013/06645262/12OmNxveNOL",
"parentPublication": {
"id": "proceedings/vlhcc/2013/0369/0",
"title": "2013 IEEE Symposium on Visual Languages and Human-Centric Computing (VL/HCC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2008/3268/0/3268a585",
"title": "Embedding Wellformed Euler Diagrams",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2008/3268a585/12OmNyuya3M",
"parentPublication": {
"id": "proceedings/iv/2008/3268/0",
"title": "2008 12th International Conference Information Visualisation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2011/07/ttg2011071020",
"title": "Drawing Euler Diagrams with Circles: The Theory of Piercings",
"doi": null,
"abstractUrl": "/journal/tg/2011/07/ttg2011071020/13rRUEgarBq",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/07/05999665",
"title": "Wellformedness Properties in Euler Diagrams: Which Should Be Used?",
"doi": null,
"abstractUrl": "/journal/tg/2012/07/05999665/13rRUILLkvo",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2010/06/ttg2010061090",
"title": "Untangling Euler Diagrams",
"doi": null,
"abstractUrl": "/journal/tg/2010/06/ttg2010061090/13rRUILtJm3",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2011010074",
"articleId": "13rRUxAAT0N",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2011010101",
"articleId": "13rRUx0xPIA",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "17ShDTXWRFe",
"name": "ttg2011010088s.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg2011010088s.pdf",
"extension": "pdf",
"size": "286 kB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNyPQ4Dx",
"title": "Dec.",
"year": "2012",
"issueNum": "12",
"idPrefix": "tg",
"pubType": "journal",
"volume": "18",
"label": "Dec.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUyY28Yv",
"doi": "10.1109/TVCG.2012.285",
"abstract": "Uncertainty can arise in any stage of a visual analytics process, especially in data-intensive applications with a sequence of data transformations. Additionally, throughout the process of multidimensional, multivariate data analysis, uncertainty due to data transformation and integration may split, merge, increase, or decrease. This dynamic characteristic along with other features of uncertainty pose a great challenge to effective uncertainty-aware visualization. This paper presents a new framework for modeling uncertainty and characterizing the evolution of the uncertainty information through analytical processes. Based on the framework, we have designed a visual metaphor called uncertainty flow to visually and intuitively summarize how uncertainty information propagates over the whole analysis pipeline. Our system allows analysts to interact with and analyze the uncertainty information at different levels of detail. Three experiments were conducted to demonstrate the effectiveness and intuitiveness of our design.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Uncertainty can arise in any stage of a visual analytics process, especially in data-intensive applications with a sequence of data transformations. Additionally, throughout the process of multidimensional, multivariate data analysis, uncertainty due to data transformation and integration may split, merge, increase, or decrease. This dynamic characteristic along with other features of uncertainty pose a great challenge to effective uncertainty-aware visualization. This paper presents a new framework for modeling uncertainty and characterizing the evolution of the uncertainty information through analytical processes. Based on the framework, we have designed a visual metaphor called uncertainty flow to visually and intuitively summarize how uncertainty information propagates over the whole analysis pipeline. Our system allows analysts to interact with and analyze the uncertainty information at different levels of detail. Three experiments were conducted to demonstrate the effectiveness and intuitiveness of our design.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Uncertainty can arise in any stage of a visual analytics process, especially in data-intensive applications with a sequence of data transformations. Additionally, throughout the process of multidimensional, multivariate data analysis, uncertainty due to data transformation and integration may split, merge, increase, or decrease. This dynamic characteristic along with other features of uncertainty pose a great challenge to effective uncertainty-aware visualization. This paper presents a new framework for modeling uncertainty and characterizing the evolution of the uncertainty information through analytical processes. Based on the framework, we have designed a visual metaphor called uncertainty flow to visually and intuitively summarize how uncertainty information propagates over the whole analysis pipeline. Our system allows analysts to interact with and analyze the uncertainty information at different levels of detail. Three experiments were conducted to demonstrate the effectiveness and intuitiveness of our design.",
"title": "Visualizing Flow of Uncertainty through Analytical Processes",
"normalizedTitle": "Visualizing Flow of Uncertainty through Analytical Processes",
"fno": "ttg2012122526",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Uncertainty",
"Data Visualization",
"Ellipsoids",
"Visual Analytics",
"Covariance Matrix",
"Uncertainty Fusion",
"Uncertainty Visualization",
"Uncertainty Quantification",
"Uncertainty Propagation",
"Error Ellipsoids"
],
"authors": [
{
"givenName": "Yingcai",
"surname": "Wu",
"fullName": "Yingcai Wu",
"affiliation": "University of California, Davis",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Guo-Xun",
"surname": "Yuan",
"fullName": "Guo-Xun Yuan",
"affiliation": "University of California, Davis",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Kwan-Liu",
"surname": "Ma",
"fullName": "Kwan-Liu Ma",
"affiliation": "University of California, Davis",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "12",
"pubDate": "2012-12-01 00:00:00",
"pubType": "trans",
"pages": "2526-2535",
"year": "2012",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/iccvw/2017/1034/0/1034c183",
"title": "Propagation of Orientation Uncertainty of 3D Rigid Object to Its Points",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2017/1034c183/12OmNC9lEFa",
"parentPublication": {
"id": "proceedings/iccvw/2017/1034/0",
"title": "2017 IEEE International Conference on Computer Vision Workshop (ICCVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-infovis/2002/1751/0/17510037",
"title": "Visualizing Data with Bounded Uncertainty",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-infovis/2002/17510037/12OmNrFkeWk",
"parentPublication": {
"id": "proceedings/ieee-infovis/2002/1751/0",
"title": "Information Visualization, IEEE Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-vis/1996/3673/0/36730249",
"title": "UFLOW: Visualizing Uncertainty in Fluid Flow",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/1996/36730249/12OmNs59JIG",
"parentPublication": {
"id": "proceedings/ieee-vis/1996/3673/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/12/ttg2012122496",
"title": "Visual Semiotics & Uncertainty Visualization: An Empirical Study",
"doi": null,
"abstractUrl": "/journal/tg/2012/12/ttg2012122496/13rRUNvyaeZ",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2007/06/04376197",
"title": "Visualizing Large-Scale Uncertainty in Astrophysical Data",
"doi": null,
"abstractUrl": "/journal/tg/2007/06/04376197/13rRUy0qnGe",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/01/07192694",
"title": "An Uncertainty-Aware Approach for Exploratory Microblog Retrieval",
"doi": null,
"abstractUrl": "/journal/tg/2016/01/07192694/13rRUy2YLYy",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2007/06/v1640",
"title": "Visualizing Large-Scale Uncertainty in Astrophysical Data",
"doi": null,
"abstractUrl": "/journal/tg/2007/06/v1640/13rRUy3gn7p",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/01/08017613",
"title": "Bubble Treemaps for Uncertainty Visualization",
"doi": null,
"abstractUrl": "/journal/tg/2018/01/08017613/13rRUyuvRoQ",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pacificvis/2020/5697/0/09086235",
"title": "Uncertainty Treemaps",
"doi": null,
"abstractUrl": "/proceedings-article/pacificvis/2020/09086235/1kuHly3FEwU",
"parentPublication": {
"id": "proceedings/pacificvis/2020/5697/0",
"title": "2020 IEEE Pacific Visualization Symposium (PacificVis)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2020/8128/0/812800a663",
"title": "Visualizing Spectral Bundle Adjustment Uncertainty",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2020/812800a663/1qyxkDucGpG",
"parentPublication": {
"id": "proceedings/3dv/2020/8128/0",
"title": "2020 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2012122516",
"articleId": "13rRUwwJWFM",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2012122536",
"articleId": "13rRUIJuxvi",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNxvwoOe",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "ts",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1LbFrPLyW1q",
"doi": "10.1109/TSE.2023.3250835",
"abstract": "Modelling the behaviour of state-based systems can be challenging, especially when the modeller is not entirely certain about its intended interactions with the user or the environment. Currently, it is possible to associate a stated level of uncertainty with a given event by attaching probabilities to transitions (producing ‘Probabilistic State Machines’). This captures the ‘First-order uncertainty’ - the (un-)certainty that a given event will occur. However, this does not permit the modeller to capture their own uncertainty (or lack thereof) about that stated probability - also known as ‘Second-order uncertainty’. In this paper we introduce a generalisation of probabilistic finite state machines that makes it possible to incorporate this important additional dimension of uncertainty. For this we adopt a formalism for reasoning about uncertainty called Subjective Logic. We present an algorithm to create these enhanced state machines automatically from a conventional state machine and a set of observed sequences. We show how this approach can be used for reverse-engineering predictive state machines from traces.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Modelling the behaviour of state-based systems can be challenging, especially when the modeller is not entirely certain about its intended interactions with the user or the environment. Currently, it is possible to associate a stated level of uncertainty with a given event by attaching probabilities to transitions (producing ‘Probabilistic State Machines’). This captures the ‘First-order uncertainty’ - the (un-)certainty that a given event will occur. However, this does not permit the modeller to capture their own uncertainty (or lack thereof) about that stated probability - also known as ‘Second-order uncertainty’. In this paper we introduce a generalisation of probabilistic finite state machines that makes it possible to incorporate this important additional dimension of uncertainty. For this we adopt a formalism for reasoning about uncertainty called Subjective Logic. We present an algorithm to create these enhanced state machines automatically from a conventional state machine and a set of observed sequences. We show how this approach can be used for reverse-engineering predictive state machines from traces.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Modelling the behaviour of state-based systems can be challenging, especially when the modeller is not entirely certain about its intended interactions with the user or the environment. Currently, it is possible to associate a stated level of uncertainty with a given event by attaching probabilities to transitions (producing ‘Probabilistic State Machines’). This captures the ‘First-order uncertainty’ - the (un-)certainty that a given event will occur. However, this does not permit the modeller to capture their own uncertainty (or lack thereof) about that stated probability - also known as ‘Second-order uncertainty’. In this paper we introduce a generalisation of probabilistic finite state machines that makes it possible to incorporate this important additional dimension of uncertainty. For this we adopt a formalism for reasoning about uncertainty called Subjective Logic. We present an algorithm to create these enhanced state machines automatically from a conventional state machine and a set of observed sequences. We show how this approach can be used for reverse-engineering predictive state machines from traces.",
"title": "Modelling Second-Order Uncertainty in State Machines",
"normalizedTitle": "Modelling Second-Order Uncertainty in State Machines",
"fno": "10057480",
"hasPdf": true,
"idPrefix": "ts",
"keywords": [
"Uncertainty",
"Probabilistic Logic",
"Testing",
"Protocols",
"Predictive Models",
"IEEE 1394 Standard",
"Finite Element Analysis",
"State Machine",
"Second Order Uncertainty",
"Subjective Logic",
"Inference",
"Test Prioritization"
],
"authors": [
{
"givenName": "Neil",
"surname": "Walkinshaw",
"fullName": "Neil Walkinshaw",
"affiliation": "Department of Computer Science, The University of Sheffield, Sheffield, U.K.",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Robert M.",
"surname": "Hierons",
"fullName": "Robert M. Hierons",
"affiliation": "Department of Computer Science, The University of Sheffield, Sheffield, U.K.",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2023-03-01 00:00:00",
"pubType": "trans",
"pages": "1-16",
"year": "5555",
"issn": "0098-5589",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/async/1997/7922/0/79220228",
"title": "Improved State Assignment for Burst Mode Finite State Machines",
"doi": null,
"abstractUrl": "/proceedings-article/async/1997/79220228/12OmNAGepUU",
"parentPublication": {
"id": "proceedings/async/1997/7922/0",
"title": "Proceedings Third International Symposium on Advanced Research in Asynchronous Circuits and Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/euromicro/1996/7487/0/74870202",
"title": "Efficient Simulation of Multiprocessors through Finite State Machines",
"doi": null,
"abstractUrl": "/proceedings-article/euromicro/1996/74870202/12OmNAle6PC",
"parentPublication": {
"id": "proceedings/euromicro/1996/7487/0",
"title": "EUROMICRO Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icassp/2009/2353/0/04960465",
"title": "Joint uncertainty decoding with the second order approximation for noise robust speech recognition",
"doi": null,
"abstractUrl": "/proceedings-article/icassp/2009/04960465/12OmNvEyR76",
"parentPublication": {
"id": "proceedings/icassp/2009/2353/0",
"title": "Acoustics, Speech, and Signal Processing, IEEE International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icassp/1999/5041/4/00758392",
"title": "Control-oriented identification and uncertainty estimation for paper machines",
"doi": null,
"abstractUrl": "/proceedings-article/icassp/1999/00758392/12OmNwDSdvb",
"parentPublication": {
"id": "proceedings/icassp/1999/5041/4",
"title": "Acoustics, Speech, and Signal Processing, IEEE International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cca/2000/6562/0/00897429",
"title": "Robust, near time-optimal control of nonlinear second order systems with model uncertainty",
"doi": null,
"abstractUrl": "/proceedings-article/cca/2000/00897429/12OmNwcCIXf",
"parentPublication": {
"id": "proceedings/cca/2000/6562/0",
"title": "Proceedings of the 2000 IEEE International Conference on Control Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmtma/2016/2312/0/2312a584",
"title": "Robust Topology Optimization under Loading Uncertainty with Proportional Topology Optimization Method",
"doi": null,
"abstractUrl": "/proceedings-article/icmtma/2016/2312a584/12OmNx3ZjoO",
"parentPublication": {
"id": "proceedings/icmtma/2016/2312/0",
"title": "2016 Eighth International Conference on Measuring Technology and Mechatronics Automation (ICMTMA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wcecs/2008/3555/0/3555a123",
"title": "Simulation Workflow for the Overall Design of Squirrel Cage Induction Machines",
"doi": null,
"abstractUrl": "/proceedings-article/wcecs/2008/3555a123/12OmNxj238O",
"parentPublication": {
"id": "proceedings/wcecs/2008/3555/0",
"title": "World Congress on Engineering and Computer Science, Advances in Electrical and Electronics Engineering - IAENG Special Edition of the",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tc/1970/03/01671498",
"title": "On Finite-Memory Sequential Machines",
"doi": null,
"abstractUrl": "/journal/tc/1970/03/01671498/13rRUwcS1Bw",
"parentPublication": {
"id": "trans/tc",
"title": "IEEE Transactions on Computers",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ta/2022/04/09826787",
"title": "Dimensional Affect Uncertainty Modelling for Apparent Personality Recognition",
"doi": null,
"abstractUrl": "/journal/ta/2022/04/09826787/1EWSpbXdtqo",
"parentPublication": {
"id": "trans/ta",
"title": "IEEE Transactions on Affective Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800m2011",
"title": "Uncertainty-Aware CNNs for Depth Completion: Uncertainty from Beginning to End",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800m2011/1m3nqBO2klG",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "10057998",
"articleId": "1LbFrIwvwfm",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "10058578",
"articleId": "1Ldkkv2Zt4Y",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNyq0zFA",
"title": "Sept.",
"year": "2015",
"issueNum": "09",
"idPrefix": "tk",
"pubType": "journal",
"volume": "27",
"label": "Sept.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUxASuGK",
"doi": "10.1109/TKDE.2015.2411278",
"abstract": "We consider the problem of adaptively routing a fleet of cooperative vehicles within a road network in the presence of uncertain and dynamic congestion conditions. To tackle this problem, we first propose a Gaussian process dynamic congestion model that can effectively characterize both the dynamics and the uncertainty of congestion conditions. Our model is efficient and thus facilitates real-time adaptive routing in the face of uncertainty. Using this congestion model, we develop efficient algorithms for non-myopic adaptive routing to minimize the collective travel time of all vehicles in the system. A key property of our approach is the ability to efficiently reason about the long-term value of exploration, which enables collectively balancing the exploration/exploitation trade-off for entire fleets of vehicles. Our approach is validated by traffic data from two large Asian cities. Our congestion model is shown to be effective in modeling dynamic congestion conditions. Our routing algorithms also generate significantly faster routes compared to standard baselines, and achieve near-optimal performance compared to an omniscient routing algorithm. We also present the results from a preliminary field study, which showcases the efficacy of our approach.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We consider the problem of adaptively routing a fleet of cooperative vehicles within a road network in the presence of uncertain and dynamic congestion conditions. To tackle this problem, we first propose a Gaussian process dynamic congestion model that can effectively characterize both the dynamics and the uncertainty of congestion conditions. Our model is efficient and thus facilitates real-time adaptive routing in the face of uncertainty. Using this congestion model, we develop efficient algorithms for non-myopic adaptive routing to minimize the collective travel time of all vehicles in the system. A key property of our approach is the ability to efficiently reason about the long-term value of exploration, which enables collectively balancing the exploration/exploitation trade-off for entire fleets of vehicles. Our approach is validated by traffic data from two large Asian cities. Our congestion model is shown to be effective in modeling dynamic congestion conditions. Our routing algorithms also generate significantly faster routes compared to standard baselines, and achieve near-optimal performance compared to an omniscient routing algorithm. We also present the results from a preliminary field study, which showcases the efficacy of our approach.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We consider the problem of adaptively routing a fleet of cooperative vehicles within a road network in the presence of uncertain and dynamic congestion conditions. To tackle this problem, we first propose a Gaussian process dynamic congestion model that can effectively characterize both the dynamics and the uncertainty of congestion conditions. Our model is efficient and thus facilitates real-time adaptive routing in the face of uncertainty. Using this congestion model, we develop efficient algorithms for non-myopic adaptive routing to minimize the collective travel time of all vehicles in the system. A key property of our approach is the ability to efficiently reason about the long-term value of exploration, which enables collectively balancing the exploration/exploitation trade-off for entire fleets of vehicles. Our approach is validated by traffic data from two large Asian cities. Our congestion model is shown to be effective in modeling dynamic congestion conditions. Our routing algorithms also generate significantly faster routes compared to standard baselines, and achieve near-optimal performance compared to an omniscient routing algorithm. We also present the results from a preliminary field study, which showcases the efficacy of our approach.",
"title": "Non-Myopic Adaptive Route Planning in Uncertain Congestion Environments",
"normalizedTitle": "Non-Myopic Adaptive Route Planning in Uncertain Congestion Environments",
"fno": "07056447",
"hasPdf": true,
"idPrefix": "tk",
"keywords": [
"Mobile Robots",
"Path Planning",
"Road Traffic Control",
"Nonmyopic Adaptive Route Planning",
"Uncertain Congestion Environments",
"Cooperative Vehicle Adaptive Routing",
"Dynamic Congestion Conditions",
"Gaussian Process Dynamic Congestion Model",
"Real Time Adaptive Routing",
"Traffic Data",
"Asian Cities",
"Vehicles",
"Routing",
"Roads",
"Gaussian Processes",
"Uncertainty",
"Context",
"Planning",
"Gaussian Process Dynamics",
"Adaptive Routing",
"Planning Under Uncertainty",
"Gaussian Process Dynamics",
"Adaptive Routing",
"Planning Under Uncertainty"
],
"authors": [
{
"givenName": "Siyuan",
"surname": "Liu",
"fullName": "Siyuan Liu",
"affiliation": "Heinz College, Carnegie Mellon University, Pittsburgh, PA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Yisong",
"surname": "Yue",
"fullName": "Yisong Yue",
"affiliation": "California Institute of Technology, Pasadena, CA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Ramayya",
"surname": "Krishnan",
"fullName": "Ramayya Krishnan",
"affiliation": "Heinz College, Carnegie Mellon University, Pittsburgh, PA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "09",
"pubDate": "2015-09-01 00:00:00",
"pubType": "trans",
"pages": "2438-2451",
"year": "2015",
"issn": "1041-4347",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/iccis/2013/5004/0/5004b855",
"title": "Study on Reasons and Countermeasures of Traffic Congestion of Urban Road Based on the Queuing Models",
"doi": null,
"abstractUrl": "/proceedings-article/iccis/2013/5004b855/12OmNANBZsK",
"parentPublication": {
"id": "proceedings/iccis/2013/5004/0",
"title": "2013 International Conference on Computational and Information Sciences",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cit-iucc-dasc-picom/2015/0154/0/07363112",
"title": "Scorpion: A Solution Using Cooperative Rerouting to Prevent Congestion and Improve Traffic Condition",
"doi": null,
"abstractUrl": "/proceedings-article/cit-iucc-dasc-picom/2015/07363112/12OmNBK5m8c",
"parentPublication": {
"id": "proceedings/cit-iucc-dasc-picom/2015/0154/0",
"title": "2015 IEEE International Conference on Computer and Information Technology; Ubiquitous Computing and Communications; Dependable, Autonomic and Secure Computing; Pervasive Intelligence and Computing (CIT/IUCC/DASC/PICOM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdcs/2017/1792/0/1792a383",
"title": "SunChase: Energy-Efficient Route Planning for Solar-Powered EVs",
"doi": null,
"abstractUrl": "/proceedings-article/icdcs/2017/1792a383/12OmNqHItw3",
"parentPublication": {
"id": "proceedings/icdcs/2017/1792/0",
"title": "2017 IEEE 37th International Conference on Distributed Computing Systems (ICDCS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/soca/2013/2702/0/2701a351",
"title": "Evaluation of Automated Negotiation System for Changing Route Assignment to Acquire Efficient Traffic Flow",
"doi": null,
"abstractUrl": "/proceedings-article/soca/2013/2701a351/12OmNyvY9uy",
"parentPublication": {
"id": "proceedings/soca/2013/2702/0",
"title": "2013 IEEE 6th International Conference on Service-Oriented Computing and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/nca/2014/5393/0/5393a197",
"title": "Media Streams Planning with Uncertain Link Capacities",
"doi": null,
"abstractUrl": "/proceedings-article/nca/2014/5393a197/12OmNz5JCgQ",
"parentPublication": {
"id": "proceedings/nca/2014/5393/0",
"title": "2014 IEEE 13th International Symposium on Network Computing and Applications (NCA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tm/2017/01/07425257",
"title": "DIVERT: A Distributed Vehicular Traffic Re-Routing System for Congestion Avoidance",
"doi": null,
"abstractUrl": "/journal/tm/2017/01/07425257/13rRUyv53G7",
"parentPublication": {
"id": "trans/tm",
"title": "IEEE Transactions on Mobile Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cnsm/2018/7614/0/08584989",
"title": "Congestion-Constrained Virtual Link Embedding with Uncertain Demands",
"doi": null,
"abstractUrl": "/proceedings-article/cnsm/2018/08584989/17D45VTRovE",
"parentPublication": {
"id": "proceedings/cnsm/2018/7614/0",
"title": "2018 14th International Conference on Network and Service Management (CNSM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/big-data/2021/3902/0/09671357",
"title": "City-Scale System-Optimal Route Planning with Route Replanning",
"doi": null,
"abstractUrl": "/proceedings-article/big-data/2021/09671357/1A8hutjd57i",
"parentPublication": {
"id": "proceedings/big-data/2021/3902/0",
"title": "2021 IEEE International Conference on Big Data (Big Data)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ispcem/2022/9271/0/927100a043",
"title": "Connected Vehicle Path Planning and Resource Allocation in Edge Computing Environment",
"doi": null,
"abstractUrl": "/proceedings-article/ispcem/2022/927100a043/1LHd1q0Z79u",
"parentPublication": {
"id": "proceedings/ispcem/2022/9271/0",
"title": "2022 2nd International Signal Processing, Communications and Engineering Management Conference (ISPCEM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mass/2020/9866/0/986600a220",
"title": "Distributed and Privacy Preserving Routing of Connected Vehicles to Minimize Congestion",
"doi": null,
"abstractUrl": "/proceedings-article/mass/2020/986600a220/1rsiZgAyo92",
"parentPublication": {
"id": "proceedings/mass/2020/9866/0",
"title": "2020 IEEE 17th International Conference on Mobile Ad Hoc and Sensor Systems (MASS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "07052326",
"articleId": "13rRUILc8fy",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07058431",
"articleId": "13rRUIIVlkR",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNALlciO",
"title": "January/February",
"year": "2010",
"issueNum": "01",
"idPrefix": "sp",
"pubType": "magazine",
"volume": "8",
"label": "January/February",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUxNEqU3",
"doi": "10.1109/MSP.2010.51",
"abstract": "We outline a model for security investment that reflects dynamic interaction between a defender, who faces uncertainty, and an attacker, who repeatedly targets the weakest link. Using the model, we derive and compare optimal security investment over multiple periods, exploring the delicate balance between proactive and reactive security investment.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We outline a model for security investment that reflects dynamic interaction between a defender, who faces uncertainty, and an attacker, who repeatedly targets the weakest link. Using the model, we derive and compare optimal security investment over multiple periods, exploring the delicate balance between proactive and reactive security investment.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We outline a model for security investment that reflects dynamic interaction between a defender, who faces uncertainty, and an attacker, who repeatedly targets the weakest link. Using the model, we derive and compare optimal security investment over multiple periods, exploring the delicate balance between proactive and reactive security investment.",
"title": "The Iterated Weakest Link",
"normalizedTitle": "The Iterated Weakest Link",
"fno": "msp2010010053",
"hasPdf": true,
"idPrefix": "sp",
"keywords": [
"Economics",
"Security",
"Optimal Security Investment Under Uncertainty",
"ROSI"
],
"authors": [
{
"givenName": "Rainer",
"surname": "Böhme",
"fullName": "Rainer Böhme",
"affiliation": "International Computer Science Institute",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Tyler",
"surname": "Moore",
"fullName": "Tyler Moore",
"affiliation": "Harvard University",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2010-01-01 00:00:00",
"pubType": "mags",
"pages": "53-55",
"year": "2010",
"issn": "1540-7993",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/secdev/2016/5589/0/07839789",
"title": "Software Security Investment: The Right Amount of a Good Thing",
"doi": null,
"abstractUrl": "/proceedings-article/secdev/2016/07839789/12OmNB7LvEW",
"parentPublication": {
"id": "proceedings/secdev/2016/5589/0",
"title": "2016 IEEE Cybersecurity Development (SecDev)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cloudcom/2013/5095/2/5095b132",
"title": "Return on Security Investment for Cloud Platforms",
"doi": null,
"abstractUrl": "/proceedings-article/cloudcom/2013/5095b132/12OmNrFkeV9",
"parentPublication": {
"id": "proceedings/cloudcom/2013/5095/2",
"title": "2013 IEEE 5th International Conference on Cloud Computing Technology and Science",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2011/0394/0/05995450",
"title": "Finding the weakest link in person detectors",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2011/05995450/12OmNrkBwje",
"parentPublication": {
"id": "proceedings/cvpr/2011/0394/0",
"title": "CVPR 2011",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icds/2008/3087/0/3087a007",
"title": "Standard Approach for Quantification of the ICT Security Investment for Cybercrime Prevention",
"doi": null,
"abstractUrl": "/proceedings-article/icds/2008/3087a007/12OmNvlPkzl",
"parentPublication": {
"id": "proceedings/icds/2008/3087/0",
"title": "International Conference on the Digital Society",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cloudcom/2013/5095/2/5095b039",
"title": "Defining \"The Weakest Link\" Comparative Security in Complex Systems of Systems",
"doi": null,
"abstractUrl": "/proceedings-article/cloudcom/2013/5095b039/12OmNyv7m3Z",
"parentPublication": {
"id": "proceedings/cloudcom/2013/5095/2",
"title": "2013 IEEE 5th International Conference on Cloud Computing Technology and Science",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bife/2012/4750/0/4750a274",
"title": "A jump-diffusion approach to modelling software security investment",
"doi": null,
"abstractUrl": "/proceedings-article/bife/2012/4750a274/12OmNz2TCDN",
"parentPublication": {
"id": "proceedings/bife/2012/4750/0",
"title": "2012 Fifth International Conference on Business Intelligence and Financial Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ems/2010/4308/0/4308a200",
"title": "Firm's Strategic Decision Process Modeling in Security Investment",
"doi": null,
"abstractUrl": "/proceedings-article/ems/2010/4308a200/12OmNzYeAYZ",
"parentPublication": {
"id": "proceedings/ems/2010/4308/0",
"title": "Computer Modeling and Simulation, UKSIM European Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/sp/2003/02/j2072",
"title": "The Weakest Link Revisited",
"doi": null,
"abstractUrl": "/magazine/sp/2003/02/j2072/13rRUB6SpYH",
"parentPublication": {
"id": "mags/sp",
"title": "IEEE Security & Privacy",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/so/2016/04/mso2016040018",
"title": "The Weakest Link",
"doi": null,
"abstractUrl": "/magazine/so/2016/04/mso2016040018/13rRUNvya7y",
"parentPublication": {
"id": "mags/so",
"title": "IEEE Software",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/it/2008/05/mit2008050046",
"title": "Choosing a Security Option: The InfoSecure Methodology",
"doi": null,
"abstractUrl": "/magazine/it/2008/05/mit2008050046/13rRUyXKxNx",
"parentPublication": {
"id": "mags/it",
"title": "IT Professional",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "msp2010010050",
"articleId": "13rRUxbTMx3",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "msp2010010056",
"articleId": "13rRUIJcWv7",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
|
{
"issue": {
"id": "1zBamVZHyne",
"title": "Jan.",
"year": "2022",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": "28",
"label": "Jan.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1xic4zmtlra",
"doi": "10.1109/TVCG.2021.3114842",
"abstract": "In many real-world strategic settings, people use information displays to make decisions. In these settings, an information provider chooses which information to provide to strategic agents and how to present it, and agents formulate a best response based on the information and their anticipation of how others will behave. We contribute the results of a controlled online experiment to examine how the provision and presentation of information impacts people's decisions in a congestion game. Our experiment compares how different visualization approaches for displaying this information, including bar charts and hypothetical outcome plots, and different information conditions, including where the visualized information is private versus public (i.e., available to all agents), affect decision making and welfare. We characterize the effects of visualization anticipation, referring to changes to behavior when an agent goes from alone having access to a visualization to knowing that others also have access to the visualization to guide their decisions. We also empirically identify the visualization equilibrium, i.e., the visualization for which the visualized outcome of agents' decisions matches the realized decisions of the agents who view it. We reflect on the implications of visualization equilibria and visualization anticipation for designing information displays for real-world strategic settings.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In many real-world strategic settings, people use information displays to make decisions. In these settings, an information provider chooses which information to provide to strategic agents and how to present it, and agents formulate a best response based on the information and their anticipation of how others will behave. We contribute the results of a controlled online experiment to examine how the provision and presentation of information impacts people's decisions in a congestion game. Our experiment compares how different visualization approaches for displaying this information, including bar charts and hypothetical outcome plots, and different information conditions, including where the visualized information is private versus public (i.e., available to all agents), affect decision making and welfare. We characterize the effects of visualization anticipation, referring to changes to behavior when an agent goes from alone having access to a visualization to knowing that others also have access to the visualization to guide their decisions. We also empirically identify the visualization equilibrium, i.e., the visualization for which the visualized outcome of agents' decisions matches the realized decisions of the agents who view it. We reflect on the implications of visualization equilibria and visualization anticipation for designing information displays for real-world strategic settings.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In many real-world strategic settings, people use information displays to make decisions. In these settings, an information provider chooses which information to provide to strategic agents and how to present it, and agents formulate a best response based on the information and their anticipation of how others will behave. We contribute the results of a controlled online experiment to examine how the provision and presentation of information impacts people's decisions in a congestion game. Our experiment compares how different visualization approaches for displaying this information, including bar charts and hypothetical outcome plots, and different information conditions, including where the visualized information is private versus public (i.e., available to all agents), affect decision making and welfare. We characterize the effects of visualization anticipation, referring to changes to behavior when an agent goes from alone having access to a visualization to knowing that others also have access to the visualization to guide their decisions. We also empirically identify the visualization equilibrium, i.e., the visualization for which the visualized outcome of agents' decisions matches the realized decisions of the agents who view it. We reflect on the implications of visualization equilibria and visualization anticipation for designing information displays for real-world strategic settings.",
"title": "Visualization Equilibrium",
"normalizedTitle": "Visualization Equilibrium",
"fno": "09552205",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Data Visualisation",
"Decision Making",
"Game Theory",
"Internet",
"Different Visualization Approaches",
"Hypothetical Outcome Plots",
"Different Information Conditions",
"Visualized Information",
"Decision Making",
"Welfare",
"Visualization Anticipation",
"Visualization Equilibrium",
"Visualized Outcome",
"Realized Decisions",
"Visualization Equilibria",
"Information Displays",
"Real World Strategic Settings",
"Information Provider",
"Strategic Agents",
"Controlled Online Experiment",
"Data Visualization",
"Uncertainty",
"Games",
"Visualization",
"Nash Equilibrium",
"Bars",
"Economics",
"Visualization Equilibrium",
"Uncertainty Visualization",
"Strategic Communication",
"Nash Equilibrium"
],
"authors": [
{
"givenName": "Paula",
"surname": "Kayongo",
"fullName": "Paula Kayongo",
"affiliation": "Northwestern University, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Glenn",
"surname": "Sun",
"fullName": "Glenn Sun",
"affiliation": "University of California, Los Angeles, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jason",
"surname": "Hartline",
"fullName": "Jason Hartline",
"affiliation": "Northwestern University, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jessica",
"surname": "Hullman",
"fullName": "Jessica Hullman",
"affiliation": "Northwestern University, USA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-01-01 00:00:00",
"pubType": "trans",
"pages": "465-474",
"year": "2022",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/apscc/2008/3473/0/3473a795",
"title": "Nash Equilibrium: Better Strategy for Agents Coordination",
"doi": null,
"abstractUrl": "/proceedings-article/apscc/2008/3473a795/12OmNCdTeON",
"parentPublication": {
"id": "proceedings/apscc/2008/3473/0",
"title": "2008 IEEE Asia-Pacific Services Computing Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/waina/2009/3639/0/3639a001",
"title": "Fuzzy Communication Leading to Equilibrium through Possibility Messages",
"doi": null,
"abstractUrl": "/proceedings-article/waina/2009/3639a001/12OmNvIfDR8",
"parentPublication": {
"id": "proceedings/waina/2009/3639/0",
"title": "2009 International Conference on Advanced Information Networking and Applications Workshops",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/esiat/2009/3682/3/3682c598",
"title": "An Analysis of Manufacturers' R&D Decision-Making in Defense Procurement with Incomplete Information",
"doi": null,
"abstractUrl": "/proceedings-article/esiat/2009/3682c598/12OmNx5piZc",
"parentPublication": {
"id": "proceedings/esiat/2009/3682/3",
"title": "Environmental Science and Information Application Technology, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ictai/2012/0227/1/06495062",
"title": "Incorporating Fairness into Infinitely Repeated Games with Conflicting Interests for Conflicts Elimination",
"doi": null,
"abstractUrl": "/proceedings-article/ictai/2012/06495062/12OmNx5pj2V",
"parentPublication": {
"id": "proceedings/ictai/2012/0227/1",
"title": "2012 IEEE 24th International Conference on Tools with Artificial Intelligence (ICTAI 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ictai/2017/3876/0/387601a404",
"title": "Midgame Solving: A New Weapon for Efficient Large-Scale Equilibrium Approximation",
"doi": null,
"abstractUrl": "/proceedings-article/ictai/2017/387601a404/12OmNxvO05p",
"parentPublication": {
"id": "proceedings/ictai/2017/3876/0",
"title": "2017 IEEE 29th International Conference on Tools with Artificial Intelligence (ICTAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dcc/2011/279/0/05749489",
"title": "Conflict in Distributed Hypothesis Testing with Quantized Prior Probabilities",
"doi": null,
"abstractUrl": "/proceedings-article/dcc/2011/05749489/12OmNzwpU6m",
"parentPublication": {
"id": "proceedings/dcc/2011/279/0",
"title": "2011 Data Compression Conference (DCC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/ex/2017/01/mex2017010017",
"title": "Cooperation and Competition When Bidding for Complex Projects: Centralized and Decentralized Perspectives",
"doi": null,
"abstractUrl": "/magazine/ex/2017/01/mex2017010017/13rRUwh80yN",
"parentPublication": {
"id": "mags/ex",
"title": "IEEE Intelligent Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/01/07539634",
"title": "VLAT: Development of a Visualization Literacy Assessment Test",
"doi": null,
"abstractUrl": "/journal/tg/2017/01/07539634/13rRUxASuhE",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/01/08440854",
"title": "Graphicle: Exploring Units, Networks, and Context in a Blended Visualization Approach",
"doi": null,
"abstractUrl": "/journal/tg/2019/01/08440854/17D45WHONqi",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/01/09904456",
"title": "Measuring Effects of Spatial Visualization and Domain on Visualization Task Performance: A Comparative Study",
"doi": null,
"abstractUrl": "/journal/tg/2023/01/09904456/1H1gmktPnLa",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09548797",
"articleId": "1xeSlZqOf8A",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09552870",
"articleId": "1xic90zZWDu",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1zBaOKelAuQ",
"name": "ttg202201-09552205s1-tvcg-3114842-mm.zip",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg202201-09552205s1-tvcg-3114842-mm.zip",
"extension": "zip",
"size": "5.81 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNqESuiq",
"title": "Jan.-Feb.",
"year": "2017",
"issueNum": "01",
"idPrefix": "ex",
"pubType": "magazine",
"volume": "32",
"label": "Jan.-Feb.",
"downloadables": {
"hasCover": true,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUwh80yN",
"doi": "10.1109/MIS.2017.4",
"abstract": "To successfully complete a complex project, agents (companies or individuals) must form a team with the required competencies and resources. A team can be formed either by the project issuer based on individual agents' offers (centralized formation) or by the agents themselves (decentralized formation) bidding for a project as a consortium. The authors investigate rational strategies for agents, propose concepts to characterize the stability of winning teams and study computational complexity of finding these concepts of stability.",
"abstracts": [
{
"abstractType": "Regular",
"content": "To successfully complete a complex project, agents (companies or individuals) must form a team with the required competencies and resources. A team can be formed either by the project issuer based on individual agents' offers (centralized formation) or by the agents themselves (decentralized formation) bidding for a project as a consortium. The authors investigate rational strategies for agents, propose concepts to characterize the stability of winning teams and study computational complexity of finding these concepts of stability.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "To successfully complete a complex project, agents (companies or individuals) must form a team with the required competencies and resources. A team can be formed either by the project issuer based on individual agents' offers (centralized formation) or by the agents themselves (decentralized formation) bidding for a project as a consortium. The authors investigate rational strategies for agents, propose concepts to characterize the stability of winning teams and study computational complexity of finding these concepts of stability.",
"title": "Cooperation and Competition When Bidding for Complex Projects: Centralized and Decentralized Perspectives",
"normalizedTitle": "Cooperation and Competition When Bidding for Complex Projects: Centralized and Decentralized Perspectives",
"fno": "mex2017010017",
"hasPdf": true,
"idPrefix": "ex",
"keywords": [
"Remuneration",
"Games",
"Schedules",
"Complexity Theory",
"Agricultural Machinery",
"Economics",
"Nash Equilibrium",
"Intelligent Systems",
"Game Theory",
"Cooperative Game Theory",
"Coalition Formation",
"Equilibria",
"Skill Games",
"Scheduling",
"Coopetition"
],
"authors": [
{
"givenName": "Piotr",
"surname": "Skowron",
"fullName": "Piotr Skowron",
"affiliation": "University of Oxford",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Krzysztof",
"surname": "Rzadca",
"fullName": "Krzysztof Rzadca",
"affiliation": "University of Warsaw",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Anwitaman",
"surname": "Datta",
"fullName": "Anwitaman Datta",
"affiliation": "Nanyang Technological University",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2017-01-01 00:00:00",
"pubType": "mags",
"pages": "17-23",
"year": "2017",
"issn": "1541-1672",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/wi-iat/2014/4143/3/4143c143",
"title": "Finding Better Teammates in a Semi-cooperative Multi-agent System",
"doi": null,
"abstractUrl": "/proceedings-article/wi-iat/2014/4143c143/12OmNqJHFLi",
"parentPublication": {
"id": "proceedings/wi-iat/2014/4143/3",
"title": "2014 IEEE/WIC/ACM International Joint Conferences on Web Intelligence (WI) and Intelligent Agent Technologies (IAT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iciii/2010/4279/4/4279d003",
"title": "Transactive Memory Systems in Multi-teams Cooperation",
"doi": null,
"abstractUrl": "/proceedings-article/iciii/2010/4279d003/12OmNrJRP5K",
"parentPublication": {
"id": "proceedings/iciii/2010/4279/4",
"title": "International Conference on Information Management, Innovation Management and Industrial Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mass/2008/2574/0/04660013",
"title": "Enforcing cooperation in ad hoc networks with unreliable channel",
"doi": null,
"abstractUrl": "/proceedings-article/mass/2008/04660013/12OmNvHY2IC",
"parentPublication": {
"id": "proceedings/mass/2008/2574/0",
"title": "2008 5th IEEE International Conference on Mobile Ad Hoc and Sensor Systems (MASS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iacsit-sc/2009/3653/0/3653a428",
"title": "Price-Service Competition and Cooperation in a Supply Chain Distribution System",
"doi": null,
"abstractUrl": "/proceedings-article/iacsit-sc/2009/3653a428/12OmNyTfg5m",
"parentPublication": {
"id": "proceedings/iacsit-sc/2009/3653/0",
"title": "Computer Science and Information Technology, International Association of",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bcgin/2012/4854/0/4854a871",
"title": "Network Game and Network Formation with Synergy Effect",
"doi": null,
"abstractUrl": "/proceedings-article/bcgin/2012/4854a871/12OmNyrIaJn",
"parentPublication": {
"id": "proceedings/bcgin/2012/4854/0",
"title": "2012 Second International Conference on Business Computing and Global Informatization",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdcsw/2022/8879/0/887900a113",
"title": "On Learning Stable Cooperation in the Iterated Prisoner's Dilemma with Paid Incentives",
"doi": null,
"abstractUrl": "/proceedings-article/icdcsw/2022/887900a113/1IFJEmBsWVq",
"parentPublication": {
"id": "proceedings/icdcsw/2022/8879/0",
"title": "2022 IEEE 42nd International Conference on Distributed Computing Systems Workshops (ICDCSW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/ex/2021/04/09069271",
"title": "Enhancing the Quality in Crowdsourcing E-Markets Through Team Formation Games",
"doi": null,
"abstractUrl": "/magazine/ex/2021/04/09069271/1j4Gv7YKBy0",
"parentPublication": {
"id": "mags/ex",
"title": "IEEE Intelligent Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/asonam/2019/6868/0/09073227",
"title": "Optimal Influence Strategies in an Oligopolistic Competition Network Environment",
"doi": null,
"abstractUrl": "/proceedings-article/asonam/2019/09073227/1jjAaqi9aw0",
"parentPublication": {
"id": "proceedings/asonam/2019/6868/0",
"title": "2019 IEEE/ACM International Conference on Advances in Social Networks Analysis and Mining (ASONAM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/euros&p/2020/5087/0/508700a256",
"title": "Reward Sharing Schemes for Stake Pools",
"doi": null,
"abstractUrl": "/proceedings-article/euros&p/2020/508700a256/1oqKy4tCI6Y",
"parentPublication": {
"id": "proceedings/euros&p/2020/5087/0",
"title": "2020 IEEE European Symposium on Security and Privacy (EuroS&P)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "mex2017010008",
"articleId": "13rRUxlgxVr",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "mex2017010024",
"articleId": "13rRUx0xPxj",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNxvO04Q",
"title": "Jan.",
"year": "2017",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": "23",
"label": "Jan.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUxASuhE",
"doi": "10.1109/TVCG.2016.2598920",
"abstract": "The Information Visualization community has begun to pay attention to visualization literacy; however, researchers still lack instruments for measuring the visualization literacy of users. In order to address this gap, we systematically developed a visualization literacy assessment test (VLAT), especially for non-expert users in data visualization, by following the established procedure of test development in Psychological and Educational Measurement: (1) Test Blueprint Construction, (2) Test Item Generation, (3) Content Validity Evaluation, (4) Test Tryout and Item Analysis, (5) Test Item Selection, and (6) Reliability Evaluation. The VLAT consists of 12 data visualizations and 53 multiple-choice test items that cover eight data visualization tasks. The test items in the VLAT were evaluated with respect to their essentialness by five domain experts in Information Visualization and Visual Analytics (average content validity ratio = 0.66). The VLAT was also tried out on a sample of 191 test takers and showed high reliability (reliability coefficient omega = 0.76). In addition, we demonstrated the relationship between users' visualization literacy and aptitude for learning an unfamiliar visualization and showed that they had a fairly high positive relationship (correlation coefficient = 0.64). Finally, we discuss evidence for the validity of the VLAT and potential research areas that are related to the instrument.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The Information Visualization community has begun to pay attention to visualization literacy; however, researchers still lack instruments for measuring the visualization literacy of users. In order to address this gap, we systematically developed a visualization literacy assessment test (VLAT), especially for non-expert users in data visualization, by following the established procedure of test development in Psychological and Educational Measurement: (1) Test Blueprint Construction, (2) Test Item Generation, (3) Content Validity Evaluation, (4) Test Tryout and Item Analysis, (5) Test Item Selection, and (6) Reliability Evaluation. The VLAT consists of 12 data visualizations and 53 multiple-choice test items that cover eight data visualization tasks. The test items in the VLAT were evaluated with respect to their essentialness by five domain experts in Information Visualization and Visual Analytics (average content validity ratio = 0.66). The VLAT was also tried out on a sample of 191 test takers and showed high reliability (reliability coefficient omega = 0.76). In addition, we demonstrated the relationship between users' visualization literacy and aptitude for learning an unfamiliar visualization and showed that they had a fairly high positive relationship (correlation coefficient = 0.64). Finally, we discuss evidence for the validity of the VLAT and potential research areas that are related to the instrument.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The Information Visualization community has begun to pay attention to visualization literacy; however, researchers still lack instruments for measuring the visualization literacy of users. In order to address this gap, we systematically developed a visualization literacy assessment test (VLAT), especially for non-expert users in data visualization, by following the established procedure of test development in Psychological and Educational Measurement: (1) Test Blueprint Construction, (2) Test Item Generation, (3) Content Validity Evaluation, (4) Test Tryout and Item Analysis, (5) Test Item Selection, and (6) Reliability Evaluation. The VLAT consists of 12 data visualizations and 53 multiple-choice test items that cover eight data visualization tasks. The test items in the VLAT were evaluated with respect to their essentialness by five domain experts in Information Visualization and Visual Analytics (average content validity ratio = 0.66). The VLAT was also tried out on a sample of 191 test takers and showed high reliability (reliability coefficient omega = 0.76). In addition, we demonstrated the relationship between users' visualization literacy and aptitude for learning an unfamiliar visualization and showed that they had a fairly high positive relationship (correlation coefficient = 0.64). Finally, we discuss evidence for the validity of the VLAT and potential research areas that are related to the instrument.",
"title": "VLAT: Development of a Visualization Literacy Assessment Test",
"normalizedTitle": "VLAT: Development of a Visualization Literacy Assessment Test",
"fno": "07539634",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Data Visualization",
"Reliability",
"Instruments",
"Bars",
"Conferences",
"Market Research",
"Psychology",
"Education",
"Visualization Literacy",
"Assessment Test",
"Instrument",
"Measurement",
"Aptitude"
],
"authors": [
{
"givenName": "Sukwon",
"surname": "Lee",
"fullName": "Sukwon Lee",
"affiliation": "School of Industrial Engineering, Purdue University, West Lafayette, IN, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Sung-Hee",
"surname": "Kim",
"fullName": "Sung-Hee Kim",
"affiliation": "Samsung Electronics Co., Ltd., Seoul, South Korea",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Bum Chul",
"surname": "Kwon",
"fullName": "Bum Chul Kwon",
"affiliation": "IBM T.J. Watson Research Center, Yorktown Heights, NY, USA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2017-01-01 00:00:00",
"pubType": "trans",
"pages": "551-560",
"year": "2017",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/fie/2015/8454/0/07344250",
"title": "A self-assessment instrument to assess engineering students' self-directedness in information literacy",
"doi": null,
"abstractUrl": "/proceedings-article/fie/2015/07344250/12OmNqI04MW",
"parentPublication": {
"id": "proceedings/fie/2015/8454/0",
"title": "2015 IEEE Frontiers in Education Conference (FIE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iiai-aai/2017/0621/0/0621a711",
"title": "Research on Statistical Literacy Using Japanese Textbooks",
"doi": null,
"abstractUrl": "/proceedings-article/iiai-aai/2017/0621a711/12OmNzcxYWJ",
"parentPublication": {
"id": "proceedings/iiai-aai/2017/0621/0",
"title": "2017 6th IIAI International Congress on Advanced Applied Informatics (IIAI-AAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/12/06876023",
"title": "The Persuasive Power of Data Visualization",
"doi": null,
"abstractUrl": "/journal/tg/2014/12/06876023/13rRUIIVlcL",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2018/03/mcg2018030021",
"title": "Observations and Reflections on Visualization Literacy in Elementary School",
"doi": null,
"abstractUrl": "/magazine/cg/2018/03/mcg2018030021/13rRUwwJWBp",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/12/06875906",
"title": "A Principled Way of Assessing Visualization Literacy",
"doi": null,
"abstractUrl": "/journal/tg/2014/12/06875906/13rRUyYjK5i",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icde/2018/5520/0/552000a101",
"title": "DeepEye: Towards Automatic Data Visualization",
"doi": null,
"abstractUrl": "/proceedings-article/icde/2018/552000a101/14Fq0VI6tcV",
"parentPublication": {
"id": "proceedings/icde/2018/5520/0",
"title": "2018 IEEE 34th International Conference on Data Engineering (ICDE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2022/03/09790006",
"title": "VisLitE: Visualization Literacy and Evaluation",
"doi": null,
"abstractUrl": "/magazine/cg/2022/03/09790006/1E0NfTJ2oak",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/01/09903547",
"title": "Cultivating Visualization Literacy for Children Through Curiosity and Play",
"doi": null,
"abstractUrl": "/journal/tg/2023/01/09903547/1GZookEFGzC",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/eitt/2020/9171/0/917100a075",
"title": "Development and Application of Information Literacy Assessment Tool for Primary and Secondary School Teachers",
"doi": null,
"abstractUrl": "/proceedings-article/eitt/2020/917100a075/1qyxpHyMUxy",
"parentPublication": {
"id": "proceedings/eitt/2020/9171/0",
"title": "2020 Ninth International Conference of Educational Innovation through Technology (EITT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isaiee/2020/5668/0/566800a182",
"title": "Research Hot Spots of Teachers’ Information Literacy and Visualization Analysis of Theme Evolution in China",
"doi": null,
"abstractUrl": "/proceedings-article/isaiee/2020/566800a182/1sQKlCg6NNe",
"parentPublication": {
"id": "proceedings/isaiee/2020/5668/0",
"title": "2020 International Symposium on Advances in Informatics, Electronics and Education (ISAIEE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "07539373",
"articleId": "13rRUwcS1CZ",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07539629",
"articleId": "13rRUxOdD8l",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNzFdtce",
"title": "Mar.-Apr.",
"year": "2016",
"issueNum": "02",
"idPrefix": "cs",
"pubType": "magazine",
"volume": "18",
"label": "Mar.-Apr.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUygT7e4",
"doi": "10.1109/MCSE.2016.38",
"abstract": "Engineering faculty members play a multifaceted role in the profession in that they help discover, promote, and disseminate advancements in technology, and they engage in capacity-building by training a future workforce of engineers. However, many potential faculty members are dissuaded from academia. A study of Black engineering PhD students and postdoctoral scholars investigates their career decision-making processes concerning the professoriate. The racial and gendered experiences of these students and scholars have impacted their desires and choices to pursue an academic career. Programmatic innovation is needed within graduate mentoring programs to address racial, gender, and other identity-based biases within engineering and academia, in addition to traditional content that focuses on presentation skills, networking, and other professional development areas. A new approach develops a mentoring curriculum that raises racial and gender consciousness by utilizing the expertise of scholars from various social science disciplines.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Engineering faculty members play a multifaceted role in the profession in that they help discover, promote, and disseminate advancements in technology, and they engage in capacity-building by training a future workforce of engineers. However, many potential faculty members are dissuaded from academia. A study of Black engineering PhD students and postdoctoral scholars investigates their career decision-making processes concerning the professoriate. The racial and gendered experiences of these students and scholars have impacted their desires and choices to pursue an academic career. Programmatic innovation is needed within graduate mentoring programs to address racial, gender, and other identity-based biases within engineering and academia, in addition to traditional content that focuses on presentation skills, networking, and other professional development areas. A new approach develops a mentoring curriculum that raises racial and gender consciousness by utilizing the expertise of scholars from various social science disciplines.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Engineering faculty members play a multifaceted role in the profession in that they help discover, promote, and disseminate advancements in technology, and they engage in capacity-building by training a future workforce of engineers. However, many potential faculty members are dissuaded from academia. A study of Black engineering PhD students and postdoctoral scholars investigates their career decision-making processes concerning the professoriate. The racial and gendered experiences of these students and scholars have impacted their desires and choices to pursue an academic career. Programmatic innovation is needed within graduate mentoring programs to address racial, gender, and other identity-based biases within engineering and academia, in addition to traditional content that focuses on presentation skills, networking, and other professional development areas. A new approach develops a mentoring curriculum that raises racial and gender consciousness by utilizing the expertise of scholars from various social science disciplines.",
"title": "Addressing Negative Racial and Gendered Experiences That Discourage Academic Careers in Engineering",
"normalizedTitle": "Addressing Negative Racial and Gendered Experiences That Discourage Academic Careers in Engineering",
"fno": "mcs2016020029",
"hasPdf": true,
"idPrefix": "cs",
"keywords": [
"Education",
"Engineering Profession",
"Mentoring",
"Computer Science Education",
"Decision Making",
"Encoding",
"Scientific Computing",
"Impostor Phenomenon",
"Micro Aggressions",
"Faculty Diversity"
],
"authors": [
{
"givenName": "William H.",
"surname": "Robinson",
"fullName": "William H. Robinson",
"affiliation": "Vanderbilt University",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Ebony O.",
"surname": "McGee",
"fullName": "Ebony O. McGee",
"affiliation": "Vanderbilt University",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Lydia C.",
"surname": "Bentley",
"fullName": "Lydia C. Bentley",
"affiliation": "Vanderbilt University",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Stacey L.",
"surname": "Houston",
"fullName": "Stacey L. Houston",
"affiliation": "Vanderbilt University",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Portia K.",
"surname": "Botchway",
"fullName": "Portia K. Botchway",
"affiliation": "Vanderbilt University",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "02",
"pubDate": "2016-03-01 00:00:00",
"pubType": "mags",
"pages": "29-39",
"year": "2016",
"issn": "1521-9615",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/respect/2016/3419/0/07836172",
"title": "Lessons learned: Engaging students with disabilities on a national scale",
"doi": null,
"abstractUrl": "/proceedings-article/respect/2016/07836172/12OmNC3FG4e",
"parentPublication": {
"id": "proceedings/respect/2016/3419/0",
"title": "2016 Research on Equity and Sustained Participation in Engineering, Computing, and Technology (RESPECT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/respect/2015/0151/0/07296500",
"title": "Racial and gendered experiences that dissuade a career in the professoriate",
"doi": null,
"abstractUrl": "/proceedings-article/respect/2015/07296500/12OmNzUPptE",
"parentPublication": {
"id": "proceedings/respect/2015/0151/0",
"title": "2015 Research in Equity and Sustained Participation in Engineering, Computing, and Technology (RESPECT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fie/2018/1174/0/08659253",
"title": "Developing Postdoctoral Scholar and Graduate Student Mentorship Ability",
"doi": null,
"abstractUrl": "/proceedings-article/fie/2018/08659253/18j9bWiSzJu",
"parentPublication": {
"id": "proceedings/fie/2018/1174/0",
"title": "2018 IEEE Frontiers in Education Conference (FIE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fie/2018/1174/0/08658858",
"title": "Faculty Apprentice as a Mentorship Model for Engineering Graduate Students interested in Teaching",
"doi": null,
"abstractUrl": "/proceedings-article/fie/2018/08658858/18j9eSWkMBa",
"parentPublication": {
"id": "proceedings/fie/2018/1174/0",
"title": "2018 IEEE Frontiers in Education Conference (FIE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fie/2018/1174/0/08658860",
"title": "Examining the Value of Mentoring in Youth Engineering Programs: What Motivates a Mentor to Mentor?",
"doi": null,
"abstractUrl": "/proceedings-article/fie/2018/08658860/18j9m6GzhsI",
"parentPublication": {
"id": "proceedings/fie/2018/1174/0",
"title": "2018 IEEE Frontiers in Education Conference (FIE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fie/2018/1174/0/08658625",
"title": "Topical Concerns and Critical Questions Engineering Students Want/Need Answers To: Dependence on Key Groups",
"doi": null,
"abstractUrl": "/proceedings-article/fie/2018/08658625/18j9uST0OHu",
"parentPublication": {
"id": "proceedings/fie/2018/1174/0",
"title": "2018 IEEE Frontiers in Education Conference (FIE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fie/2022/6244/0/09962687",
"title": "Gender differences in early careers of Finnish engineers",
"doi": null,
"abstractUrl": "/proceedings-article/fie/2022/09962687/1IHnLpE7zcA",
"parentPublication": {
"id": "proceedings/fie/2022/6244/0",
"title": "2022 IEEE Frontiers in Education Conference (FIE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fie/2022/6244/0/09962473",
"title": "The Role of Community in Promoting Engineering Identity Formation in Historically Marginalized Communities",
"doi": null,
"abstractUrl": "/proceedings-article/fie/2022/09962473/1IHnQw7a0mc",
"parentPublication": {
"id": "proceedings/fie/2022/6244/0",
"title": "2022 IEEE Frontiers in Education Conference (FIE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/respect/2021/4905/0/09620627",
"title": "Collaborative Problem-Solving Workshops for Improving Confidence and Paving Pathways for Research Careers in Computer Science",
"doi": null,
"abstractUrl": "/proceedings-article/respect/2021/09620627/1yXuLv1GNLW",
"parentPublication": {
"id": "proceedings/respect/2021/4905/0",
"title": "2021 Conference on Research in Equitable and Sustained Participation in Engineering, Computing, and Technology (RESPECT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fie/2021/3851/0/09637333",
"title": "Understanding Career Identity Development and Preparedness of Freshmen Students to Leverage Convergence in Engineering Education",
"doi": null,
"abstractUrl": "/proceedings-article/fie/2021/09637333/1zuwchqpqRa",
"parentPublication": {
"id": "proceedings/fie/2021/3851/0",
"title": "2021 IEEE Frontiers in Education Conference (FIE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "mcs2016020018",
"articleId": "13rRUytWFcT",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "mcs2016020040",
"articleId": "13rRUx0xPAj",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
|
{
"issue": {
"id": "1GjwQktLcB2",
"title": "Oct.",
"year": "2022",
"issueNum": "10",
"idPrefix": "tg",
"pubType": "journal",
"volume": "28",
"label": "Oct.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1saZr0JHX5C",
"doi": "10.1109/TVCG.2021.3067200",
"abstract": "The increased availability of quantitative historical datasets has provided new research opportunities for multiple disciplines in social science. In this article, we work closely with the constructors of a new dataset, CGED-Q (China Government Employee Database-Qing), that records the career trajectories of over 340,000 government officials in the Qing bureaucracy in China from 1760 to 1912. We use these data to study career mobility from a historical perspective and understand social mobility and inequality. However, existing statistical approaches are inadequate for analyzing career mobility in this historical dataset with its fine-grained attributes and long time span, since they are mostly hypothesis-driven and require substantial effort. We propose <italic>CareerLens</italic>, an interactive visual analytics system for assisting experts in exploring, understanding, and reasoning from historical career data. With <italic>CareerLens</italic>, experts examine mobility patterns in three levels-of-detail, namely, the macro-level providing a summary of overall mobility, the meso-level extracting latent group mobility patterns, and the micro-level revealing social relationships of individuals. We demonstrate the effectiveness and usability of <italic>CareerLens</italic> through two case studies and receive encouraging feedback from follow-up interviews with domain experts.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The increased availability of quantitative historical datasets has provided new research opportunities for multiple disciplines in social science. In this article, we work closely with the constructors of a new dataset, CGED-Q (China Government Employee Database-Qing), that records the career trajectories of over 340,000 government officials in the Qing bureaucracy in China from 1760 to 1912. We use these data to study career mobility from a historical perspective and understand social mobility and inequality. However, existing statistical approaches are inadequate for analyzing career mobility in this historical dataset with its fine-grained attributes and long time span, since they are mostly hypothesis-driven and require substantial effort. We propose <italic>CareerLens</italic>, an interactive visual analytics system for assisting experts in exploring, understanding, and reasoning from historical career data. With <italic>CareerLens</italic>, experts examine mobility patterns in three levels-of-detail, namely, the macro-level providing a summary of overall mobility, the meso-level extracting latent group mobility patterns, and the micro-level revealing social relationships of individuals. We demonstrate the effectiveness and usability of <italic>CareerLens</italic> through two case studies and receive encouraging feedback from follow-up interviews with domain experts.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The increased availability of quantitative historical datasets has provided new research opportunities for multiple disciplines in social science. In this article, we work closely with the constructors of a new dataset, CGED-Q (China Government Employee Database-Qing), that records the career trajectories of over 340,000 government officials in the Qing bureaucracy in China from 1760 to 1912. We use these data to study career mobility from a historical perspective and understand social mobility and inequality. However, existing statistical approaches are inadequate for analyzing career mobility in this historical dataset with its fine-grained attributes and long time span, since they are mostly hypothesis-driven and require substantial effort. We propose CareerLens, an interactive visual analytics system for assisting experts in exploring, understanding, and reasoning from historical career data. With CareerLens, experts examine mobility patterns in three levels-of-detail, namely, the macro-level providing a summary of overall mobility, the meso-level extracting latent group mobility patterns, and the micro-level revealing social relationships of individuals. We demonstrate the effectiveness and usability of CareerLens through two case studies and receive encouraging feedback from follow-up interviews with domain experts.",
"title": "Interactive Visual Exploration of Longitudinal Historical Career Mobility Data",
"normalizedTitle": "Interactive Visual Exploration of Longitudinal Historical Career Mobility Data",
"fno": "09382844",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Engineering Profession",
"Data Visualization",
"History",
"Visual Analytics",
"Social Groups",
"Trajectory",
"Government",
"Digital Humanities",
"Quantitative History",
"Career Mobility",
"Visual Analytics"
],
"authors": [
{
"givenName": "Yifang",
"surname": "Wang",
"fullName": "Yifang Wang",
"affiliation": "State Key Lab of CAD&CG, Zhejiang University, Hangzhou, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Hongye",
"surname": "Liang",
"fullName": "Hongye Liang",
"affiliation": "State Key Lab of CAD&CG, Zhejiang University, Hangzhou, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Xinhuan",
"surname": "Shu",
"fullName": "Xinhuan Shu",
"affiliation": "Hong Kong University of Science and Technology, Hong Kong",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jiachen",
"surname": "Wang",
"fullName": "Jiachen Wang",
"affiliation": "State Key Lab of CAD&CG, Zhejiang University, Hangzhou, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Ke",
"surname": "Xu",
"fullName": "Ke Xu",
"affiliation": "New York University, New York, NY, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Zikun",
"surname": "Deng",
"fullName": "Zikun Deng",
"affiliation": "State Key Lab of CAD&CG, Zhejiang University, Hangzhou, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Cameron",
"surname": "Campbell",
"fullName": "Cameron Campbell",
"affiliation": "Hong Kong University of Science and Technology, Hong Kong",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Bijia",
"surname": "Chen",
"fullName": "Bijia Chen",
"affiliation": "Renmin University of China, Beijing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Yingcai",
"surname": "Wu",
"fullName": "Yingcai Wu",
"affiliation": "State Key Lab of CAD&CG, Zhejiang University, Hangzhou, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Huamin",
"surname": "Qu",
"fullName": "Huamin Qu",
"affiliation": "Hong Kong University of Science and Technology, Hong Kong",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": true,
"isOpenAccess": true,
"issueNum": "10",
"pubDate": "2022-10-01 00:00:00",
"pubType": "trans",
"pages": "3441-3455",
"year": "2022",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/fie/2015/8454/0/07344402",
"title": "Professional engineering pathways study: A longitudinal study of early career preparedness and decision-making",
"doi": null,
"abstractUrl": "/proceedings-article/fie/2015/07344402/12OmNAoUT1K",
"parentPublication": {
"id": "proceedings/fie/2015/8454/0",
"title": "2015 IEEE Frontiers in Education Conference (FIE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/asonam/2013/2240/0/06785791",
"title": "Visual exploration of academic career paths",
"doi": null,
"abstractUrl": "/proceedings-article/asonam/2013/06785791/12OmNAoUT2b",
"parentPublication": {
"id": "proceedings/asonam/2013/2240/0",
"title": "2013 International Conference on Advances in Social Networks Analysis and Mining (ASONAM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fie/2011/468/0/06142901",
"title": "Facing computer science misconceptions: An introductory course based on historical strands and career paths at a glance",
"doi": null,
"abstractUrl": "/proceedings-article/fie/2011/06142901/12OmNvDI3WM",
"parentPublication": {
"id": "proceedings/fie/2011/468/0",
"title": "2011 Frontiers in Education Conference (FIE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdm/2015/9504/0/9504b057",
"title": "Learning Career Mobility and Human Activity Patterns for Job Change Analysis",
"doi": null,
"abstractUrl": "/proceedings-article/icdm/2015/9504b057/12OmNx4Q6C3",
"parentPublication": {
"id": "proceedings/icdm/2015/9504/0",
"title": "2015 IEEE International Conference on Data Mining (ICDM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fie/2015/8454/0/07344258",
"title": "Campus to career, understanding how engineering student skill perceptions link to future career pathways",
"doi": null,
"abstractUrl": "/proceedings-article/fie/2015/07344258/12OmNyz5JPj",
"parentPublication": {
"id": "proceedings/fie/2015/8454/0",
"title": "2015 IEEE Frontiers in Education Conference (FIE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/01/07192730",
"title": "TelCoVis: Visual Exploration of Co-occurrence in Urban Human Mobility Based on Telco Data",
"doi": null,
"abstractUrl": "/journal/tg/2016/01/07192730/13rRUxbTMyU",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdmw/2018/9288/0/928800b411",
"title": "JobSense: A Data-Driven Career Knowledge Exploration Framework and System",
"doi": null,
"abstractUrl": "/proceedings-article/icdmw/2018/928800b411/18jXyjKB5zq",
"parentPublication": {
"id": "proceedings/icdmw/2018/9288/0",
"title": "2018 IEEE International Conference on Data Mining Workshops (ICDMW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fie/2019/1746/0/09028505",
"title": "Who are EEC NSF CAREER awardees?: Educational Backgrounds, Institutional Affiliations, and Public Award Abstracts",
"doi": null,
"abstractUrl": "/proceedings-article/fie/2019/09028505/1iffiYDk0nu",
"parentPublication": {
"id": "proceedings/fie/2019/1746/0",
"title": "2019 IEEE Frontiers in Education Conference (FIE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fie/2020/8961/0/09274164",
"title": "Co-constructing Engineering Doctoral Identities Through Career Prospects",
"doi": null,
"abstractUrl": "/proceedings-article/fie/2020/09274164/1phRPMHgH3G",
"parentPublication": {
"id": "proceedings/fie/2020/8961/0",
"title": "2020 IEEE Frontiers in Education Conference (FIE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/01/09552870",
"title": "Seek for Success: A Visualization Approach for Understanding the Dynamics of Academic Careers",
"doi": null,
"abstractUrl": "/journal/tg/2022/01/09552870/1xic90zZWDu",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09829321",
"articleId": "1EYxoEPe9eU",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09376675",
"articleId": "1rSN1VOCUHC",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1GjwTqAvtAc",
"name": "ttg202210-09382844s1-tvcg-3067200-mm.zip",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg202210-09382844s1-tvcg-3067200-mm.zip",
"extension": "zip",
"size": "37.8 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNzZEAys",
"title": "Sep./Oct.",
"year": "2018",
"issueNum": "05",
"idPrefix": "cg",
"pubType": "magazine",
"volume": "38",
"label": "Sep./Oct.",
"downloadables": {
"hasCover": true,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13WBGNxhc5X",
"doi": "10.1109/MCG.2018.053491730",
"abstract": "Creating lively places with high urban vitality is an ultimate goal for urban planning and design. The VitalVizor visual analytics system employs well-established visualization and interaction techniques to facilitate user exploration of spatial physical entities and non-spatial urban design metrics when studying urban vitality.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Creating lively places with high urban vitality is an ultimate goal for urban planning and design. The VitalVizor visual analytics system employs well-established visualization and interaction techniques to facilitate user exploration of spatial physical entities and non-spatial urban design metrics when studying urban vitality.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Creating lively places with high urban vitality is an ultimate goal for urban planning and design. The VitalVizor visual analytics system employs well-established visualization and interaction techniques to facilitate user exploration of spatial physical entities and non-spatial urban design metrics when studying urban vitality.",
"title": "VitalVizor: A Visual Analytics System for Studying Urban Vitality",
"normalizedTitle": "VitalVizor: A Visual Analytics System for Studying Urban Vitality",
"fno": "mcg2018050038",
"hasPdf": true,
"idPrefix": "cg",
"keywords": [
"Data Analysis",
"Data Visualisation",
"Town And Country Planning",
"Urban Planning",
"Vital Vizor Visual Analytics System",
"Visualization",
"Urban Vitality",
"Spatial Physical Entities",
"Non Spatial Urban Design Metrics",
"Smart Buildings",
"Smart Cities",
"Visual Analytics",
"Urban Areas",
"Data Visualization",
"Task Analysis",
"Visualization",
"Urban Vitality",
"Tree Diagram",
"Visual Analytics",
"Computer Graphics"
],
"authors": [
{
"givenName": "Wei",
"surname": "Zeng",
"fullName": "Wei Zeng",
"affiliation": "Shenzhen Institutes of Advanced Technology",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Yu",
"surname": "Ye",
"fullName": "Yu Ye",
"affiliation": "Tongji University",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "05",
"pubDate": "2018-09-01 00:00:00",
"pubType": "mags",
"pages": "38-53",
"year": "2018",
"issn": "0272-1716",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "mags/cg/2018/05/mcg2018050026",
"title": "Spatio-Temporal Urban Data Analysis: A Visual Analytics Perspective",
"doi": null,
"abstractUrl": "/magazine/cg/2018/05/mcg2018050026/13WBGTItFGV",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/bd/2016/03/07506246",
"title": "Visual Analytics in Urban Computing: An Overview",
"doi": null,
"abstractUrl": "/journal/bd/2016/03/07506246/13rRUB6SpUe",
"parentPublication": {
"id": "trans/bd",
"title": "IEEE Transactions on Big Data",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2017/05/mcg2017050050",
"title": "Urban Space Explorer: A Visual Analytics System for Urban Planning",
"doi": null,
"abstractUrl": "/magazine/cg/2017/05/mcg2017050050/13rRUEgarq3",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/01/07192687",
"title": "TrajGraph: A Graph-Based Visual Analytics Approach to Studying Urban Network Centralities Using Taxi Trajectory Data",
"doi": null,
"abstractUrl": "/journal/tg/2016/01/07192687/13rRUwInvBa",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/01/08017655",
"title": "StreetVizor: Visual Exploration of Human-Scale Urban Forms Based on Street Views",
"doi": null,
"abstractUrl": "/journal/tg/2018/01/08017655/13rRUwInvsW",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/09/08054703",
"title": "VAUD: A Visual Analysis Approach for Exploring Spatio-Temporal Urban Data",
"doi": null,
"abstractUrl": "/journal/tg/2018/09/08054703/13rRUxlgxOq",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cibda/2020/9837/0/983700a348",
"title": "Monitoring the Level of Regional Economic Vitality: A Case Study of Guangdong",
"doi": null,
"abstractUrl": "/proceedings-article/cibda/2020/983700a348/1lO1KmZewVi",
"parentPublication": {
"id": "proceedings/cibda/2020/9837/0",
"title": "2020 International Conference on Computer Information and Big Data Applications (CIBDA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icise/2020/2261/0/226100a095",
"title": "A Urban Economic Vitality Model Based on Nash Equilibrium to Analyze the Impact of Enterprises and Talents with Matlab",
"doi": null,
"abstractUrl": "/proceedings-article/icise/2020/226100a095/1tnY7ZMzvag",
"parentPublication": {
"id": "proceedings/icise/2020/2261/0",
"title": "2020 International Conference on Information Science and Education (ICISE-IE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icris/2020/1969/0/196900a446",
"title": "Quantification of Urban Economic Vitality Based on Data Mining",
"doi": null,
"abstractUrl": "/proceedings-article/icris/2020/196900a446/1wG662CvSak",
"parentPublication": {
"id": "proceedings/icris/2020/1969/0",
"title": "2020 International Conference on Robots & Intelligent System (ICRIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/01/09552447",
"title": "VITALITY: Promoting Serendipitous Discovery of Academic Literature with Transformers & Visual Analytics",
"doi": null,
"abstractUrl": "/journal/tg/2022/01/09552447/1xic0dHxM9a",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "mcg2018050036",
"articleId": "13WBGTItFGW",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "mcg2018050054",
"articleId": "13WBGQCAPLm",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNBhpS2A",
"title": "July",
"year": "2013",
"issueNum": "07",
"idPrefix": "tg",
"pubType": "journal",
"volume": "19",
"label": "July",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUxOdD2D",
"doi": "10.1109/TVCG.2013.78",
"abstract": "Visual Analytics (VA) is an evolving field that, at its core, is directed to the science of analytical reasoning supported by highly interactive visual interfaces. The IEEE Conference on Visual Analytics Science and Technology (IEEE VAST), founded in 2006 as the IEEE Symposium on Visual Analytics Science and Technology, is the first international conference dedicated to advances in Visual Analytics Science and Technology. The IEEE Transactions on Visualization and Computer Graphics (TVCG) has recognized and honored the importance of Visual Analytics from the beginning, and invites the authors of the best conference papers to submit substantively extended versions of VAST papers to the journal. For these papers, TVCG applies the usual standard in asking for more than 30% new material and insights compared to the conference paper. This special section presents the extended versions of the best papers of IEEE VAST 2011, which took place in October 2011 in Providence, Rhode Island, USA. These papers were selected together with the best paper award selection committee, which was composed of three members who reviewed the top papers and their peer reviews. The three selected papers went through the regular and standard reviewing process of TVCG. The papers presented here reflect the diversity of the growing field of visual analytics. Collectively, the set of papers exemplify three components that are central to visual analytics as a field. the Guest Editors then provide an overview of the technical articles and features presented in this issue.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Visual Analytics (VA) is an evolving field that, at its core, is directed to the science of analytical reasoning supported by highly interactive visual interfaces. The IEEE Conference on Visual Analytics Science and Technology (IEEE VAST), founded in 2006 as the IEEE Symposium on Visual Analytics Science and Technology, is the first international conference dedicated to advances in Visual Analytics Science and Technology. The IEEE Transactions on Visualization and Computer Graphics (TVCG) has recognized and honored the importance of Visual Analytics from the beginning, and invites the authors of the best conference papers to submit substantively extended versions of VAST papers to the journal. For these papers, TVCG applies the usual standard in asking for more than 30% new material and insights compared to the conference paper. This special section presents the extended versions of the best papers of IEEE VAST 2011, which took place in October 2011 in Providence, Rhode Island, USA. These papers were selected together with the best paper award selection committee, which was composed of three members who reviewed the top papers and their peer reviews. The three selected papers went through the regular and standard reviewing process of TVCG. The papers presented here reflect the diversity of the growing field of visual analytics. Collectively, the set of papers exemplify three components that are central to visual analytics as a field. the Guest Editors then provide an overview of the technical articles and features presented in this issue.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Visual Analytics (VA) is an evolving field that, at its core, is directed to the science of analytical reasoning supported by highly interactive visual interfaces. The IEEE Conference on Visual Analytics Science and Technology (IEEE VAST), founded in 2006 as the IEEE Symposium on Visual Analytics Science and Technology, is the first international conference dedicated to advances in Visual Analytics Science and Technology. The IEEE Transactions on Visualization and Computer Graphics (TVCG) has recognized and honored the importance of Visual Analytics from the beginning, and invites the authors of the best conference papers to submit substantively extended versions of VAST papers to the journal. For these papers, TVCG applies the usual standard in asking for more than 30% new material and insights compared to the conference paper. This special section presents the extended versions of the best papers of IEEE VAST 2011, which took place in October 2011 in Providence, Rhode Island, USA. These papers were selected together with the best paper award selection committee, which was composed of three members who reviewed the top papers and their peer reviews. The three selected papers went through the regular and standard reviewing process of TVCG. The papers presented here reflect the diversity of the growing field of visual analytics. Collectively, the set of papers exemplify three components that are central to visual analytics as a field. the Guest Editors then provide an overview of the technical articles and features presented in this issue.",
"title": "Guest Editors' Introduction: Special Section on the IEEE Conference on Visual Analytics Science and Technology (VAST)",
"normalizedTitle": "Guest Editors' Introduction: Special Section on the IEEE Conference on Visual Analytics Science and Technology (VAST)",
"fno": "ttg2013071076",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Special Issues And Sections",
"Visual Analytics",
"Meetings"
],
"authors": [
{
"givenName": "Silvia",
"surname": "Miksch",
"fullName": "Silvia Miksch",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "Matthew",
"surname": "Ward",
"fullName": "Matthew Ward",
"affiliation": null,
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": true,
"isOpenAccess": true,
"issueNum": "07",
"pubDate": "2013-07-01 00:00:00",
"pubType": "trans",
"pages": "1076-1077",
"year": "2013",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "trans/tg/2013/06/ttg2013060898",
"title": "Guest Editors' Introduction: Special Section on the IEEE Pacific Visualization Symposium 2012",
"doi": null,
"abstractUrl": "/journal/tg/2013/06/ttg2013060898/13rRUNvgziD",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/04/ttg2014040vi",
"title": "Message from the Paper Chairs and Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2014/04/ttg2014040vi/13rRUwI5Ug9",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2015/08/07138667",
"title": "Guest Editors’ Introduction: Special Section on the IEEE Pacific Visualization Symposium 2014",
"doi": null,
"abstractUrl": "/journal/tg/2015/08/07138667/13rRUwI5Ugf",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/05/ttg2012050660",
"title": "Guest Editors' Introduction: Special Section on the IEEE Conference on Visual Analytics Science and Technology (VAST)",
"doi": null,
"abstractUrl": "/journal/tg/2012/05/ttg2012050660/13rRUxBa5bV",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/06/08352605",
"title": "Guest Editors’ Introduction: Special Section on IEEE PacificVis 2018",
"doi": null,
"abstractUrl": "/journal/tg/2018/06/08352605/13rRUxlgxOp",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/06/08703194",
"title": "Guest Editors' Introduction: Special Section on IEEE PacificVis 2019",
"doi": null,
"abstractUrl": "/journal/tg/2019/06/08703194/19Er7j5Ad7a",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/06/09766260",
"title": "Guest Editors' Introduction: Special Section on IEEE PacificVis 2022",
"doi": null,
"abstractUrl": "/journal/tg/2022/06/09766260/1D34QjpFGyQ",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/06/09082802",
"title": "Guest Editors’ Introduction: Special Section on IEEE PacificVis 2020",
"doi": null,
"abstractUrl": "/journal/tg/2020/06/09082802/1jrTVLo1tpC",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2020/07/09108341",
"title": "Guest Editors' Introduction to the Special Section on Computational Photography",
"doi": null,
"abstractUrl": "/journal/tp/2020/07/09108341/1koL3gQqTHa",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/06/09430173",
"title": "Guest Editors' Introduction: Special Section on IEEE PacificVis 2021",
"doi": null,
"abstractUrl": "/journal/tg/2021/06/09430173/1tzuiF6azcs",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": null,
"next": {
"fno": "ttg2013071078",
"articleId": "13rRUxjQyhr",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
|
{
"issue": {
"id": "1MQvcIkoAko",
"title": "June",
"year": "2023",
"issueNum": "06",
"idPrefix": "tg",
"pubType": "journal",
"volume": "29",
"label": "June",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1LRbRtJhrG0",
"doi": "10.1109/TVCG.2023.3261935",
"abstract": "Vision transformer (ViT) expands the success of transformer models from sequential data to images. The model decomposes an image into many smaller patches and arranges them into a sequence. Multi-head self-attentions are then applied to the sequence to learn the attention between patches. Despite many successful interpretations of transformers on sequential data, little effort has been devoted to the interpretation of ViTs, and many questions remain unanswered. For example, among the numerous attention heads, which one is more important? How strong are individual patches attending to their spatial neighbors in different heads? What attention patterns have individual heads learned? In this work, we answer these questions through a visual analytics approach. Specifically, we first identify <italic>what</italic> heads are more important in ViTs by introducing multiple pruning-based metrics. Then, we profile the spatial distribution of attention strengths between patches inside individual heads, as well as the trend of attention strengths across attention layers. Third, using an autoencoder-based learning solution, we summarize all possible attention patterns that individual heads could learn. Examining the attention strengths and patterns of the important heads, we answer <italic>why</italic> they are important. Through concrete case studies with experienced deep learning experts on multiple ViTs, we validate the effectiveness of our solution that deepens the understanding of ViTs from <italic>head importance</italic>, <italic>head attention strength</italic>, and <italic>head attention pattern</italic>.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Vision transformer (ViT) expands the success of transformer models from sequential data to images. The model decomposes an image into many smaller patches and arranges them into a sequence. Multi-head self-attentions are then applied to the sequence to learn the attention between patches. Despite many successful interpretations of transformers on sequential data, little effort has been devoted to the interpretation of ViTs, and many questions remain unanswered. For example, among the numerous attention heads, which one is more important? How strong are individual patches attending to their spatial neighbors in different heads? What attention patterns have individual heads learned? In this work, we answer these questions through a visual analytics approach. Specifically, we first identify <italic>what</italic> heads are more important in ViTs by introducing multiple pruning-based metrics. Then, we profile the spatial distribution of attention strengths between patches inside individual heads, as well as the trend of attention strengths across attention layers. Third, using an autoencoder-based learning solution, we summarize all possible attention patterns that individual heads could learn. Examining the attention strengths and patterns of the important heads, we answer <italic>why</italic> they are important. Through concrete case studies with experienced deep learning experts on multiple ViTs, we validate the effectiveness of our solution that deepens the understanding of ViTs from <italic>head importance</italic>, <italic>head attention strength</italic>, and <italic>head attention pattern</italic>.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Vision transformer (ViT) expands the success of transformer models from sequential data to images. The model decomposes an image into many smaller patches and arranges them into a sequence. Multi-head self-attentions are then applied to the sequence to learn the attention between patches. Despite many successful interpretations of transformers on sequential data, little effort has been devoted to the interpretation of ViTs, and many questions remain unanswered. For example, among the numerous attention heads, which one is more important? How strong are individual patches attending to their spatial neighbors in different heads? What attention patterns have individual heads learned? In this work, we answer these questions through a visual analytics approach. Specifically, we first identify what heads are more important in ViTs by introducing multiple pruning-based metrics. Then, we profile the spatial distribution of attention strengths between patches inside individual heads, as well as the trend of attention strengths across attention layers. Third, using an autoencoder-based learning solution, we summarize all possible attention patterns that individual heads could learn. Examining the attention strengths and patterns of the important heads, we answer why they are important. Through concrete case studies with experienced deep learning experts on multiple ViTs, we validate the effectiveness of our solution that deepens the understanding of ViTs from head importance, head attention strength, and head attention pattern.",
"title": "How Does Attention Work in Vision Transformers? A Visual Analytics Attempt",
"normalizedTitle": "How Does Attention Work in Vision Transformers? A Visual Analytics Attempt",
"fno": "10081322",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Head",
"Transformers",
"Visual Analytics",
"Task Analysis",
"Measurement",
"Heating Systems",
"Deep Learning",
"Explainable Artificial Intelligence",
"Multi Head Self Attention",
"Vision Transformer",
"Visual Analytics"
],
"authors": [
{
"givenName": "Yiran",
"surname": "Li",
"fullName": "Yiran Li",
"affiliation": "University of California, Davis, CA, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Junpeng",
"surname": "Wang",
"fullName": "Junpeng Wang",
"affiliation": "Visa Research, Palo Alto, CA, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Xin",
"surname": "Dai",
"fullName": "Xin Dai",
"affiliation": "Visa Research, Palo Alto, CA, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Liang",
"surname": "Wang",
"fullName": "Liang Wang",
"affiliation": "Visa Research, Palo Alto, CA, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Chin-Chia Michael",
"surname": "Yeh",
"fullName": "Chin-Chia Michael Yeh",
"affiliation": "Visa Research, Palo Alto, CA, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Yan",
"surname": "Zheng",
"fullName": "Yan Zheng",
"affiliation": "Visa Research, Palo Alto, CA, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Wei",
"surname": "Zhang",
"fullName": "Wei Zhang",
"affiliation": "Visa Research, Palo Alto, CA, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Kwan-Liu",
"surname": "Ma",
"fullName": "Kwan-Liu Ma",
"affiliation": "University of California, Davis, CA, USA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "06",
"pubDate": "2023-06-01 00:00:00",
"pubType": "trans",
"pages": "2888-2900",
"year": "2023",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "trans/tb/2023/02/09772392",
"title": "MCWS-Transformers: Towards an Efficient Modeling of Protein Sequences via Multi Context-Window Based Scaled Self-Attention",
"doi": null,
"abstractUrl": "/journal/tb/2023/02/09772392/1DgjtIA9NkI",
"parentPublication": {
"id": "trans/tb",
"title": "IEEE/ACM Transactions on Computational Biology and Bioinformatics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2022/8739/0/873900d819",
"title": "Towards Exemplar-Free Continual Learning in Vision Transformers: an Account of Attention, Functional and Weight Regularization",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2022/873900d819/1G56zb8vJGU",
"parentPublication": {
"id": "proceedings/cvprw/2022/8739/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/5555/01/09903081",
"title": "Token Selection is a Simple Booster for Vision Transformers",
"doi": null,
"abstractUrl": "/journal/tp/5555/01/09903081/1GZog5DAM6s",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600m2299",
"title": "AdaViT: Adaptive Vision Transformers for Efficient Image Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600m2299/1H0LFD5xsCQ",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600i344",
"title": "Patch-level Representation Learning for Self-supervised Vision Transformers",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600i344/1H0Lf8H2RGw",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600p5213",
"title": "Give Me Your Attention: Dot-Product Attention Considered Harmful for Adversarial Patch Robustness",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600p5213/1H1ngce1tba",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2023/9346/0/934600c777",
"title": "Multi-level Contrastive Learning for Self-Supervised Vision Transformers",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2023/934600c777/1KxUTlCRbZS",
"parentPublication": {
"id": "proceedings/wacv/2023/9346/0",
"title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hpca/2023/7652/0/10071047",
"title": "HeatViT: Hardware-Efficient Adaptive Token Pruning for Vision Transformers",
"doi": null,
"abstractUrl": "/proceedings-article/hpca/2023/10071047/1LMbGetPrxu",
"parentPublication": {
"id": "proceedings/hpca/2023/7652/0",
"title": "2023 IEEE International Symposium on High-Performance Computer Architecture (HPCA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hpca/2023/7652/0/10071081",
"title": "ViTALiTy: Unifying Low-rank and Sparse Approximation for Vision Transformer Acceleration with a Linear Taylor Attention",
"doi": null,
"abstractUrl": "/proceedings-article/hpca/2023/10071081/1LMbHgjS7h6",
"parentPublication": {
"id": "proceedings/hpca/2023/7652/0",
"title": "2023 IEEE International Symposium on High-Performance Computer Architecture (HPCA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mipr/2021/1865/0/186500a267",
"title": "Multimodal Machine Translation Enhancement by Fusing Multimodal-attention and Fine-grained Image Features",
"doi": null,
"abstractUrl": "/proceedings-article/mipr/2021/186500a267/1xPsmkH0m40",
"parentPublication": {
"id": "proceedings/mipr/2021/1865/0",
"title": "2021 IEEE 4th International Conference on Multimedia Information Processing and Retrieval (MIPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "10081433",
"articleId": "1LRbR78bpDy",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "10083324",
"articleId": "1LSKC3EiUkU",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1MQvhVwHmGQ",
"name": "ttg202306-010081322s1-supp2-3261935.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg202306-010081322s1-supp2-3261935.mp4",
"extension": "mp4",
"size": "143 MB",
"__typename": "WebExtraType"
},
{
"id": "1MQvijn0i6Q",
"name": "ttg202306-010081322s1-supp1-3261935.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg202306-010081322s1-supp1-3261935.pdf",
"extension": "pdf",
"size": "51 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
}
|
{
"issue": {
"id": "1zBamVZHyne",
"title": "Jan.",
"year": "2022",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": "28",
"label": "Jan.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1xic77YygOk",
"doi": "10.1109/TVCG.2021.3114835",
"abstract": "Situated visualization is an emerging concept within visualization, in which data is visualized in situ, where it is relevant to people. The concept has gained interest from multiple research communities, including visualization, human-computer interaction (HCI) and augmented reality. This has led to a range of explorations and applications of the concept, however, this early work has focused on the operational aspect of situatedness leading to inconsistent adoption of the concept and terminology. First, we contribute a literature survey in which we analyze 44 papers that explicitly use the term “situated visualization” to provide an overview of the research area, how it defines situated visualization, common application areas and technology used, as well as type of data and type of visualizations. Our survey shows that research on situated visualization has focused on technology-centric approaches that foreground a spatial understanding of situatedness. Secondly, we contribute five perspectives on situatedness (space, time, place, activity, and community) that together expand on the prevalent notion of situatedness in the corpus. We draw from six case studies and prior theoretical developments in HCI. Each perspective develops a generative way of looking at and working with situatedness in design and research. We outline future directions, including considering technology, material and aesthetics, leveraging the perspectives for design, and methods for stronger engagement with target audiences. We conclude with opportunities to consolidate situated visualization research.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Situated visualization is an emerging concept within visualization, in which data is visualized in situ, where it is relevant to people. The concept has gained interest from multiple research communities, including visualization, human-computer interaction (HCI) and augmented reality. This has led to a range of explorations and applications of the concept, however, this early work has focused on the operational aspect of situatedness leading to inconsistent adoption of the concept and terminology. First, we contribute a literature survey in which we analyze 44 papers that explicitly use the term “situated visualization” to provide an overview of the research area, how it defines situated visualization, common application areas and technology used, as well as type of data and type of visualizations. Our survey shows that research on situated visualization has focused on technology-centric approaches that foreground a spatial understanding of situatedness. Secondly, we contribute five perspectives on situatedness (space, time, place, activity, and community) that together expand on the prevalent notion of situatedness in the corpus. We draw from six case studies and prior theoretical developments in HCI. Each perspective develops a generative way of looking at and working with situatedness in design and research. We outline future directions, including considering technology, material and aesthetics, leveraging the perspectives for design, and methods for stronger engagement with target audiences. We conclude with opportunities to consolidate situated visualization research.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Situated visualization is an emerging concept within visualization, in which data is visualized in situ, where it is relevant to people. The concept has gained interest from multiple research communities, including visualization, human-computer interaction (HCI) and augmented reality. This has led to a range of explorations and applications of the concept, however, this early work has focused on the operational aspect of situatedness leading to inconsistent adoption of the concept and terminology. First, we contribute a literature survey in which we analyze 44 papers that explicitly use the term “situated visualization” to provide an overview of the research area, how it defines situated visualization, common application areas and technology used, as well as type of data and type of visualizations. Our survey shows that research on situated visualization has focused on technology-centric approaches that foreground a spatial understanding of situatedness. Secondly, we contribute five perspectives on situatedness (space, time, place, activity, and community) that together expand on the prevalent notion of situatedness in the corpus. We draw from six case studies and prior theoretical developments in HCI. Each perspective develops a generative way of looking at and working with situatedness in design and research. We outline future directions, including considering technology, material and aesthetics, leveraging the perspectives for design, and methods for stronger engagement with target audiences. We conclude with opportunities to consolidate situated visualization research.",
"title": "What's the Situation with Situated Visualization? A Survey and Perspectives on Situatedness",
"normalizedTitle": "What's the Situation with Situated Visualization? A Survey and Perspectives on Situatedness",
"fno": "09552238",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Data Visualization",
"Human Computer Interaction",
"Encoding",
"Visual Analytics",
"Terminology",
"Keyword Search",
"Augmented Reality",
"Situated Visualization",
"Literature Survey",
"Situatedness"
],
"authors": [
{
"givenName": "Nathalie",
"surname": "Bressa",
"fullName": "Nathalie Bressa",
"affiliation": "Aarhus University, Denmark",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Henrik",
"surname": "Korsgaard",
"fullName": "Henrik Korsgaard",
"affiliation": "Aarhus University, Denmark",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Aurélien",
"surname": "Tabard",
"fullName": "Aurélien Tabard",
"affiliation": "Université Claude Bernard Lyon 1, LIRIS, CNRS UMR5205, France",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Steven",
"surname": "Houben",
"fullName": "Steven Houben",
"affiliation": "Eindhoven University of Technology, Netherlands",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jo",
"surname": "Vermeulen",
"fullName": "Jo Vermeulen",
"affiliation": "Autodesk Research, Canada",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-01-01 00:00:00",
"pubType": "trans",
"pages": "107-117",
"year": "2022",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/iv/2011/0868/0/06004062",
"title": "What is Knowledge Visualization? Perspectives on an Emerging Discipline",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2011/06004062/12OmNAsTgVr",
"parentPublication": {
"id": "proceedings/iv/2011/0868/0",
"title": "2011 15th International Conference on Information Visualisation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2010/7846/0/05571207",
"title": "Immersive Visualization Architectures and Situated Embodiments of Culture and Heritage",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2010/05571207/12OmNwFid56",
"parentPublication": {
"id": "proceedings/iv/2010/7846/0",
"title": "2010 14th International Conference Information Visualisation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2015/1727/0/07223352",
"title": "Using augmented reality to support situated analytics",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2015/07223352/12OmNzhELmY",
"parentPublication": {
"id": "proceedings/vr/2015/1727/0",
"title": "2015 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446212",
"title": "Towards Situated Knee Trajectory Visualization for Self Analysis in Cycling",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446212/13bd1fHrlRw",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/01/07536110",
"title": "Annotation Graphs: A Graph-Based Visualization for Meta-Analysis of Data Based on User-Authored Annotations",
"doi": null,
"abstractUrl": "/journal/tg/2017/01/07536110/13rRUxZ0o1F",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a472",
"title": "Situated Visualization of IIoT Data on the Hololens 2",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a472/1CJend8tNew",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/01/09904883",
"title": "Effects of View Layout on Situated Analytics for Multiple-View Representations in Immersive Visualization",
"doi": null,
"abstractUrl": "/journal/tg/2023/01/09904883/1H2lc7qemsg",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/01/08805424",
"title": "What is Interaction for Data Visualization?",
"doi": null,
"abstractUrl": "/journal/tg/2020/01/08805424/1cG4MsovTO0",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2019/2838/0/283800a013",
"title": "Situated Visualization in The Decision Process Through Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2019/283800a013/1cMF8HeJlW8",
"parentPublication": {
"id": "proceedings/iv/2019/2838/0",
"title": "2019 23rd International Conference Information Visualisation (IV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a721",
"title": "[DC] Situated augmented reality: beyond the egocentric viewpoint",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a721/1tnWQUIqzza",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09559731",
"articleId": "1xs9BhiH0HK",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09557225",
"articleId": "1xlvZlGiUsE",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1zBaX5fyCuA",
"name": "ttg202201-09552238s1-supp1-3114835.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg202201-09552238s1-supp1-3114835.pdf",
"extension": "pdf",
"size": "438 kB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNBTJIKb",
"title": "July-Aug.",
"year": "2016",
"issueNum": "04",
"idPrefix": "ex",
"pubType": "magazine",
"volume": "31",
"label": "July-Aug.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUxBJhj6",
"doi": "10.1109/MIS.2016.71",
"abstract": "The authors examine the problem of gender discrimination and attempt to move beyond the typical surface-level text classification approach by identifying differences between genders in the ways they use the same words. They present several experiments using data from a large collection of blogs authored by men and women, and they report results for a new task of \"gender-based word disambiguation\" for a set of over 350 words.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The authors examine the problem of gender discrimination and attempt to move beyond the typical surface-level text classification approach by identifying differences between genders in the ways they use the same words. They present several experiments using data from a large collection of blogs authored by men and women, and they report results for a new task of \"gender-based word disambiguation\" for a set of over 350 words.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The authors examine the problem of gender discrimination and attempt to move beyond the typical surface-level text classification approach by identifying differences between genders in the ways they use the same words. They present several experiments using data from a large collection of blogs authored by men and women, and they report results for a new task of \"gender-based word disambiguation\" for a set of over 350 words.",
"title": "What Men Say, What Women Hear: Finding Gender-Specific Meaning Shades",
"normalizedTitle": "What Men Say, What Women Hear: Finding Gender-Specific Meaning Shades",
"fno": "mex2016040062",
"hasPdf": true,
"idPrefix": "ex",
"keywords": [
"Classification",
"Gender Issues",
"Text Analysis",
"Web Sites",
"Gender Specific Meaning Shades",
"Gender Discrimination",
"Surface Level Text Classification",
"Blogs",
"Gender Based Word Disambiguation",
"Blogs",
"Speech",
"Writing",
"Context Awareness",
"Time Frequency Analysis",
"Computational Linguistics",
"Twitter",
"Gender Issues",
"Affective Computing",
"Sentiment Analysis",
"Gender Discrimination",
"Gender Based Word Disambiguation",
"Intelligent Systems"
],
"authors": [
{
"givenName": "Rada",
"surname": "Mihalcea",
"fullName": "Rada Mihalcea",
"affiliation": "University of Michigan",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Aparna",
"surname": "Garimella",
"fullName": "Aparna Garimella",
"affiliation": "University of Michigan",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "04",
"pubDate": "2016-07-01 00:00:00",
"pubType": "mags",
"pages": "62-67",
"year": "2016",
"issn": "1541-1672",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/asonam/2013/2240/0/06785899",
"title": "What is he/she like?: Estimating Twitter user attributes from contents and social neighbors",
"doi": null,
"abstractUrl": "/proceedings-article/asonam/2013/06785899/12OmNvkpliI",
"parentPublication": {
"id": "proceedings/asonam/2013/2240/0",
"title": "2013 International Conference on Advances in Social Networks Analysis and Mining (ASONAM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fie/2006/0256/0/04117062",
"title": "\"You're all a bunch of fucking feminists:\" Addressing the perceived conflict between gender and professional identities using the Montreal Massacre",
"doi": null,
"abstractUrl": "/proceedings-article/fie/2006/04117062/12OmNzVoBAd",
"parentPublication": {
"id": "proceedings/fie/2006/0256/0",
"title": "Proceedings. Frontiers in Education. 36th Annual Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/alpit/2008/3273/0/3273a121",
"title": "Determining Gender of Korean Names with Context",
"doi": null,
"abstractUrl": "/proceedings-article/alpit/2008/3273a121/12OmNzZEAoN",
"parentPublication": {
"id": "proceedings/alpit/2008/3273/0",
"title": "Advanced Language Processing and Web Information Technology, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/co/2011/09/mco2011090106",
"title": "What Are Soft Biometrics and How Can They Be Used?",
"doi": null,
"abstractUrl": "/magazine/co/2011/09/mco2011090106/13rRUxcKzRV",
"parentPublication": {
"id": "mags/co",
"title": "Computer",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/so/2016/04/mso2016040064",
"title": "Feminine Expertise in Architecting Teams",
"doi": null,
"abstractUrl": "/magazine/so/2016/04/mso2016040064/13rRUynpTbd",
"parentPublication": {
"id": "mags/so",
"title": "IEEE Software",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/mu/2019/03/08543625",
"title": "Gender Differences in Multimodal Contact-Free Deception Detection",
"doi": null,
"abstractUrl": "/magazine/mu/2019/03/08543625/17D45XeKgvH",
"parentPublication": {
"id": "mags/mu",
"title": "IEEE MultiMedia",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/geicse/2022/9294/0/929400a027",
"title": "Pros and Cons of running educational Hackathons in a gender-neutral fashion",
"doi": null,
"abstractUrl": "/proceedings-article/geicse/2022/929400a027/1FRKuRJAZ68",
"parentPublication": {
"id": "proceedings/geicse/2022/9294/0",
"title": "2022 IEEE/ACM 3rd International Workshop on Gender Equality, Diversity and Inclusion in Software Engineering (GEICSE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ge/2019/2245/0/224500a013",
"title": "Can an Online Service Predict Gender? On the State-of-the-Art in Gender Identification from Texts",
"doi": null,
"abstractUrl": "/proceedings-article/ge/2019/224500a013/1cTJemBqqR2",
"parentPublication": {
"id": "proceedings/ge/2019/2245/0",
"title": "2019 IEEE/ACM 2nd International Workshop on Gender Equality in Software Engineering (GE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/so/2020/06/09238650",
"title": "Gender Inclusivity as a Quality Requirement: Practices and Pitfalls",
"doi": null,
"abstractUrl": "/magazine/so/2020/06/09238650/1oa0Z4w4xoc",
"parentPublication": {
"id": "mags/so",
"title": "IEEE Software",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/respect/2021/4905/0/09620659",
"title": "CS1 Students' Perspectives on the Computer Science Gender Gap: Achieving Equity Requires Awareness",
"doi": null,
"abstractUrl": "/proceedings-article/respect/2021/09620659/1yXuIb7pwKk",
"parentPublication": {
"id": "proceedings/respect/2021/4905/0",
"title": "2021 Conference on Research in Equitable and Sustained Participation in Engineering, Computing, and Technology (RESPECT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "mex2016040056",
"articleId": "13rRUyg2jSg",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "mex2016040068",
"articleId": "13rRUxASuIB",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1BJIbG1OGqc",
"doi": "10.1109/TVCG.2022.3158236",
"abstract": "We present the results of a scientometric analysis of 30 years of IEEE VIS publications between 1990-2020, in which we conducted a multifaceted analysis of interdisciplinary collaboration and gender composition among authors. To this end, we curated BiblioVIS, a bibliometric dataset that contains rich metadata about IEEE VIS publications, including 3032 papers and 6113 authors. One of the main factors differentiating BiblioVIS from similar datasets is the authors' gender and discipline data, which we inferred through iterative rounds of computational and manual processes. Our analysis shows that, by and large, inter-institutional and interdisciplinary collaboration has been steadily growing over the past 30 years. However, interdisciplinary research was mainly between a few fields, including Computer Science, Engineering and Technology, and Medicine and Health disciplines. Our analysis of gender shows steady growth in women's authorship. Despite this growth, the gender distribution is still highly skewed, with men dominating (~75%) of this space. Our predictive analysis of gender balance shows that if the current trends continue, gender parity in the visualization field will not be reached before the third quarter of the century (~2070). Our primary goal in this work is to call the visualization community's attention to the critical topics of collaboration, diversity, and gender. Our research offers critical insights through the lens of diversity and gender to help accelerate progress towards a more diverse and representative research community.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We present the results of a scientometric analysis of 30 years of IEEE VIS publications between 1990-2020, in which we conducted a multifaceted analysis of interdisciplinary collaboration and gender composition among authors. To this end, we curated BiblioVIS, a bibliometric dataset that contains rich metadata about IEEE VIS publications, including 3032 papers and 6113 authors. One of the main factors differentiating BiblioVIS from similar datasets is the authors' gender and discipline data, which we inferred through iterative rounds of computational and manual processes. Our analysis shows that, by and large, inter-institutional and interdisciplinary collaboration has been steadily growing over the past 30 years. However, interdisciplinary research was mainly between a few fields, including Computer Science, Engineering and Technology, and Medicine and Health disciplines. Our analysis of gender shows steady growth in women's authorship. Despite this growth, the gender distribution is still highly skewed, with men dominating (~75%) of this space. Our predictive analysis of gender balance shows that if the current trends continue, gender parity in the visualization field will not be reached before the third quarter of the century (~2070). Our primary goal in this work is to call the visualization community's attention to the critical topics of collaboration, diversity, and gender. Our research offers critical insights through the lens of diversity and gender to help accelerate progress towards a more diverse and representative research community.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We present the results of a scientometric analysis of 30 years of IEEE VIS publications between 1990-2020, in which we conducted a multifaceted analysis of interdisciplinary collaboration and gender composition among authors. To this end, we curated BiblioVIS, a bibliometric dataset that contains rich metadata about IEEE VIS publications, including 3032 papers and 6113 authors. One of the main factors differentiating BiblioVIS from similar datasets is the authors' gender and discipline data, which we inferred through iterative rounds of computational and manual processes. Our analysis shows that, by and large, inter-institutional and interdisciplinary collaboration has been steadily growing over the past 30 years. However, interdisciplinary research was mainly between a few fields, including Computer Science, Engineering and Technology, and Medicine and Health disciplines. Our analysis of gender shows steady growth in women's authorship. Despite this growth, the gender distribution is still highly skewed, with men dominating (~75%) of this space. Our predictive analysis of gender balance shows that if the current trends continue, gender parity in the visualization field will not be reached before the third quarter of the century (~2070). Our primary goal in this work is to call the visualization community's attention to the critical topics of collaboration, diversity, and gender. Our research offers critical insights through the lens of diversity and gender to help accelerate progress towards a more diverse and representative research community.",
"title": "Scientometric Analysis of Interdisciplinary Collaboration and Gender Trends in 30 Years of IEEE VIS Publications",
"normalizedTitle": "Scientometric Analysis of Interdisciplinary Collaboration and Gender Trends in 30 Years of IEEE VIS Publications",
"fno": "09733942",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Collaboration",
"Data Visualization",
"Conferences",
"Bibliometrics",
"Market Research",
"Visual Analytics",
"Productivity",
"Scientometric",
"IEEE VIS Publications",
"Gender",
"Co Authorship",
"Collaboration",
"Interdisciplinary",
"Inter Institutional"
],
"authors": [
{
"givenName": "Ali",
"surname": "Sarvghad",
"fullName": "Ali Sarvghad",
"affiliation": "College of Information and Computer Sciences, University of Massachusetts Amherst, Amherst, Massachusetts, United States",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Rolando",
"surname": "Franqui-Nadal",
"fullName": "Rolando Franqui-Nadal",
"affiliation": "College of Information and Computer Sciences, University of Massachusetts Amherst, Amherst, Massachusetts, United States",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Rebecca",
"surname": "Reznik-Zellen",
"fullName": "Rebecca Reznik-Zellen",
"affiliation": "College of Information and Computer Sciences, University of Massachusetts Amherst, Amherst, Massachusetts, United States",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Ria",
"surname": "Chawla",
"fullName": "Ria Chawla",
"affiliation": "College of Information and Computer Sciences, University of Massachusetts Amherst, Amherst, Massachusetts, United States",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Narges",
"surname": "Mahyar",
"fullName": "Narges Mahyar",
"affiliation": "College of Information and Computer Sciences, University of Massachusetts Amherst, 14707 Amherst, Massachusetts, United States",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-03-01 00:00:00",
"pubType": "trans",
"pages": "1-1",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/hicss/2009/3450/0/01-05-01",
"title": "Digital Cross-Organizational and Cross-Border Collaboration: A Scientometric Study",
"doi": null,
"abstractUrl": "/proceedings-article/hicss/2009/01-05-01/12OmNy6Zs1C",
"parentPublication": {
"id": "proceedings/hicss/2009/3450/0",
"title": "2009 42nd Hawaii International Conference on System Sciences",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2004/2177/0/21770972",
"title": "Visualizing Interdisciplinary Citations to and from Information and Library Science Publications",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2004/21770972/12OmNyS6RKR",
"parentPublication": {
"id": "proceedings/iv/2004/2177/0",
"title": "Proceedings. Eighth International Conference on Information Visualisation, 2004. IV 2004.",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/asonam/2016/2846/0/07752307",
"title": "The scientometrics of successful women in science",
"doi": null,
"abstractUrl": "/proceedings-article/asonam/2016/07752307/12OmNyoiYVM",
"parentPublication": {
"id": "proceedings/asonam/2016/2846/0",
"title": "2016 IEEE/ACM International Conference on Advances in Social Networks Analysis and Mining (ASONAM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/cc/2014/03/06808484",
"title": "A Scientometric Analysis of Cloud Computing Literature",
"doi": null,
"abstractUrl": "/journal/cc/2014/03/06808484/13rRUwdIOYO",
"parentPublication": {
"id": "trans/cc",
"title": "IEEE Transactions on Cloud Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cs/2017/02/mcs2017020082",
"title": "A report from VIS 2016",
"doi": null,
"abstractUrl": "/magazine/cs/2017/02/mcs2017020082/13rRUwwaKmg",
"parentPublication": {
"id": "mags/cs",
"title": "Computing in Science & Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/big-data/2017/2715/0/08258573",
"title": "Discovering the interdisciplinary nature of big data research",
"doi": null,
"abstractUrl": "/proceedings-article/big-data/2017/08258573/17D45VsBTTI",
"parentPublication": {
"id": "proceedings/big-data/2017/2715/0",
"title": "2017 IEEE International Conference on Big Data (Big Data)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/01/08440852",
"title": "VIS Author Profiles: Interactive Descriptions of Publication Records Combining Text and Visualization",
"doi": null,
"abstractUrl": "/journal/tg/2019/01/08440852/17D45WXIkBi",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/01/09903512",
"title": "Thirty-Two Years of IEEE VIS: Authors, Fields of Study and Citations",
"doi": null,
"abstractUrl": "/journal/tg/2023/01/09903512/1GZol4dym8U",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/big-data/2020/6251/0/09377970",
"title": "A bibliometric network analysis of Deep Learning publications applied into legal documents",
"doi": null,
"abstractUrl": "/proceedings-article/big-data/2020/09377970/1s64Bs1mh6E",
"parentPublication": {
"id": "proceedings/big-data/2020/6251/0",
"title": "2020 IEEE International Conference on Big Data (Big Data)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/01/09552840",
"title": "Gender in 30 Years of IEEE Visualization",
"doi": null,
"abstractUrl": "/journal/tg/2022/01/09552840/1xic2GL1FC0",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09733261",
"articleId": "1BENJyPkx5S",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09735308",
"articleId": "1BLn9j3xNQs",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNwdL7lN",
"title": "July-Aug.",
"year": "2019",
"issueNum": "04",
"idPrefix": "sp",
"pubType": "magazine",
"volume": "17",
"label": "July-Aug.",
"downloadables": {
"hasCover": true,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1bojOWk4jMQ",
"doi": "10.1109/MSEC.2019.2912727",
"abstract": "South Asia has one of the world's largest populations&mdash;India, Pakistan, and Bangladesh alone are home to more than 20% of all people. The region is also one of the fastest-growing technology markets as a result of its increased infrastructure and growing affordability. Despite this progress, South Asia faces one of the largest online gender disparities in the world, i.e., women are 58% less likely to connect to the mobile Internet than men.1 We believe that, as the Internet becomes more globally accessible, it is imperative for technologists to intentionally examine the biases and inequities perpetuated in technology to truly enable gender equity online.",
"abstracts": [
{
"abstractType": "Regular",
"content": "South Asia has one of the world's largest populations&mdash;India, Pakistan, and Bangladesh alone are home to more than 20% of all people. The region is also one of the fastest-growing technology markets as a result of its increased infrastructure and growing affordability. Despite this progress, South Asia faces one of the largest online gender disparities in the world, i.e., women are 58% less likely to connect to the mobile Internet than men.1 We believe that, as the Internet becomes more globally accessible, it is imperative for technologists to intentionally examine the biases and inequities perpetuated in technology to truly enable gender equity online.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "South Asia has one of the world's largest populations—India, Pakistan, and Bangladesh alone are home to more than 20% of all people. The region is also one of the fastest-growing technology markets as a result of its increased infrastructure and growing affordability. Despite this progress, South Asia faces one of the largest online gender disparities in the world, i.e., women are 58% less likely to connect to the mobile Internet than men.1 We believe that, as the Internet becomes more globally accessible, it is imperative for technologists to intentionally examine the biases and inequities perpetuated in technology to truly enable gender equity online.",
"title": "Toward Gender-Equitable Privacy and Security in South Asia",
"normalizedTitle": "Toward Gender-Equitable Privacy and Security in South Asia",
"fno": "08755960",
"hasPdf": true,
"idPrefix": "sp",
"keywords": [
"Data Privacy",
"Gender Issues",
"Internet",
"Security Of Data",
"South Asia",
"Gender Equitable Privacy",
"Online Gender Disparities",
"Gender Equity",
"Mobile Internet",
"Gender Issues",
"Computer Security",
"Privacy",
"Internet"
],
"authors": [
{
"givenName": "Nithya",
"surname": "Sambasivan",
"fullName": "Nithya Sambasivan",
"affiliation": "Google, United States",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Nova",
"surname": "Ahmed",
"fullName": "Nova Ahmed",
"affiliation": "North South University, Bangladesh",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Amna",
"surname": "Batool",
"fullName": "Amna Batool",
"affiliation": "Information Technology University, Pakistan",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Elie",
"surname": "Bursztein",
"fullName": "Elie Bursztein",
"affiliation": "Google, United States",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Elizabeth",
"surname": "Churchill",
"fullName": "Elizabeth Churchill",
"affiliation": "Google, United States",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Laura",
"surname": "Sanely Gaytan-Lugo",
"fullName": "Laura Sanely Gaytan-Lugo",
"affiliation": "Universidad de Colima, Mexico",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Tara",
"surname": "Matthews",
"fullName": "Tara Matthews",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "David",
"surname": "Nemar",
"fullName": "David Nemar",
"affiliation": "University of Virginia, United States",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Kurt",
"surname": "Thomas",
"fullName": "Kurt Thomas",
"affiliation": "Google, United States",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Sunny",
"surname": "Consolvo",
"fullName": "Sunny Consolvo",
"affiliation": "Google, United States",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "04",
"pubDate": "2019-07-01 00:00:00",
"pubType": "mags",
"pages": "71-77",
"year": "2019",
"issn": "1540-7993",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/aspdac/1998/4425/0/00669521",
"title": "Reconfigurable systems: activities in Asia and South Pacific",
"doi": null,
"abstractUrl": "/proceedings-article/aspdac/1998/00669521/12OmNxvO096",
"parentPublication": {
"id": "proceedings/aspdac/1998/4425/0",
"title": "Proceedings of 1998 Asia and South Pacific Design Automation Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ge/2019/2245/0/224500a013",
"title": "Can an Online Service Predict Gender? On the State-of-the-Art in Gender Identification from Texts",
"doi": null,
"abstractUrl": "/proceedings-article/ge/2019/224500a013/1cTJemBqqR2",
"parentPublication": {
"id": "proceedings/ge/2019/2245/0",
"title": "2019 IEEE/ACM 2nd International Workshop on Gender Equality in Software Engineering (GE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/so/2019/06/08880051",
"title": "Gender in Software Engineering",
"doi": null,
"abstractUrl": "/magazine/so/2019/06/08880051/1ekTayi7kt2",
"parentPublication": {
"id": "mags/so",
"title": "IEEE Software",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/so/2020/01/08852638",
"title": "Gender Diversity and Community Smells: Insights From the Trenches",
"doi": null,
"abstractUrl": "/magazine/so/2020/01/08852638/1fUSNHBzzGM",
"parentPublication": {
"id": "mags/so",
"title": "IEEE Software",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/so/2021/02/09261329",
"title": "Gender Differences in Public Code Contributions: A 50-Year Perspective",
"doi": null,
"abstractUrl": "/magazine/so/2021/02/09261329/1oPzR4iA4nu",
"parentPublication": {
"id": "mags/so",
"title": "IEEE Software",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/so/2020/06/09238650",
"title": "Gender Inclusivity as a Quality Requirement: Practices and Pitfalls",
"doi": null,
"abstractUrl": "/magazine/so/2020/06/09238650/1oa0Z4w4xoc",
"parentPublication": {
"id": "mags/so",
"title": "IEEE Software",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdsba/2019/4644/0/464400a084",
"title": "Common Weight DEA-Based Methodology for Ranking APEC Countries by Considering Sustainable Development Goals Including Decent Work, Income Inequalities, and Gender Equality",
"doi": null,
"abstractUrl": "/proceedings-article/icdsba/2019/464400a084/1pbdQqhius0",
"parentPublication": {
"id": "proceedings/icdsba/2019/4644/0",
"title": "2019 3rd International Conference on Data Science and Business Analytics (ICDSBA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/csde/2020/1974/0/09411643",
"title": "Is Gender Associated with Proactivity: An Investigation in Higher Education",
"doi": null,
"abstractUrl": "/proceedings-article/csde/2020/09411643/1taF7hcKWQg",
"parentPublication": {
"id": "proceedings/csde/2020/1974/0",
"title": "2020 IEEE Asia-Pacific Conference on Computer Science and Data Engineering (CSDE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/01/09552840",
"title": "Gender in 30 Years of IEEE Visualization",
"doi": null,
"abstractUrl": "/journal/tg/2022/01/09552840/1xic2GL1FC0",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/respect/2021/4905/0/09620659",
"title": "CS1 Students' Perspectives on the Computer Science Gender Gap: Achieving Equity Requires Awareness",
"doi": null,
"abstractUrl": "/proceedings-article/respect/2021/09620659/1yXuIb7pwKk",
"parentPublication": {
"id": "proceedings/respect/2021/4905/0",
"title": "2021 Conference on Research in Equitable and Sustained Participation in Engineering, Computing, and Technology (RESPECT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "08755937",
"articleId": "1bojQvLj77W",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08755952",
"articleId": "1bojS7QTEMo",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNzWOBaD",
"title": "Nov.-Dec.",
"year": "2019",
"issueNum": "06",
"idPrefix": "so",
"pubType": "magazine",
"volume": "36",
"label": "Nov.-Dec.",
"downloadables": {
"hasCover": true,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1ekTayi7kt2",
"doi": "10.1109/MS.2019.2934584",
"abstract": "The topic of gender in software engineering received significant attention during the most recent International Conference on Software Engineering (ICSE). Papers related to gender appeared in the main research track, the Software Engineering in Society (SEIS) track, and the second Gender Equity (GE) workshop (https://sites.google.com/view/ge-icse2019). Three of the papers summarized in this column are coauthored by the column authors.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The topic of gender in software engineering received significant attention during the most recent International Conference on Software Engineering (ICSE). Papers related to gender appeared in the main research track, the Software Engineering in Society (SEIS) track, and the second Gender Equity (GE) workshop (https://sites.google.com/view/ge-icse2019). Three of the papers summarized in this column are coauthored by the column authors.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The topic of gender in software engineering received significant attention during the most recent International Conference on Software Engineering (ICSE). Papers related to gender appeared in the main research track, the Software Engineering in Society (SEIS) track, and the second Gender Equity (GE) workshop (https://sites.google.com/view/ge-icse2019). Three of the papers summarized in this column are coauthored by the column authors.",
"title": "Gender in Software Engineering",
"normalizedTitle": "Gender in Software Engineering",
"fno": "08880051",
"hasPdf": true,
"idPrefix": "so",
"keywords": [
"Engineering Profession",
"Software Engineering",
"Gender Issues"
],
"authors": [
{
"givenName": "Jeffrey C.",
"surname": "Carver",
"fullName": "Jeffrey C. Carver",
"affiliation": "Computer Science, University of Alabama, United States",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Alexander",
"surname": "Serebrenik",
"fullName": "Alexander Serebrenik",
"affiliation": "Mathematics and Computer Science, Eindhoven University of Technology",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "06",
"pubDate": "2019-11-01 00:00:00",
"pubType": "mags",
"pages": "76-78",
"year": "2019",
"issn": "0740-7459",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/fie/2009/4715/0/05350766",
"title": "Mapping international perspectives on gender in engineering education research",
"doi": null,
"abstractUrl": "/proceedings-article/fie/2009/05350766/12OmNvIfDMA",
"parentPublication": {
"id": "proceedings/fie/2009/4715/0",
"title": "2009 39th IEEE Frontiers in Education Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fie/2006/0256/0/04117062",
"title": "\"You're all a bunch of fucking feminists:\" Addressing the perceived conflict between gender and professional identities using the Montreal Massacre",
"doi": null,
"abstractUrl": "/proceedings-article/fie/2006/04117062/12OmNzVoBAd",
"parentPublication": {
"id": "proceedings/fie/2006/0256/0",
"title": "Proceedings. Frontiers in Education. 36th Annual Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/so/2019/01/08491276",
"title": "OpenStack Gender Diversity Report",
"doi": null,
"abstractUrl": "/magazine/so/2019/01/08491276/17D45WnnFYb",
"parentPublication": {
"id": "mags/so",
"title": "IEEE Software",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/geicse/2022/9294/0/929400a027",
"title": "Pros and Cons of running educational Hackathons in a gender-neutral fashion",
"doi": null,
"abstractUrl": "/proceedings-article/geicse/2022/929400a027/1FRKuRJAZ68",
"parentPublication": {
"id": "proceedings/geicse/2022/9294/0",
"title": "2022 IEEE/ACM 3rd International Workshop on Gender Equality, Diversity and Inclusion in Software Engineering (GEICSE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/geicse/2022/9294/0/929400a084",
"title": "An Analysis of Sex Differences in Computing Teaching Evaluations",
"doi": null,
"abstractUrl": "/proceedings-article/geicse/2022/929400a084/1FRKuZnD6jS",
"parentPublication": {
"id": "proceedings/geicse/2022/9294/0",
"title": "2022 IEEE/ACM 3rd International Workshop on Gender Equality, Diversity and Inclusion in Software Engineering (GEICSE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fie/2022/6244/0/09962687",
"title": "Gender differences in early careers of Finnish engineers",
"doi": null,
"abstractUrl": "/proceedings-article/fie/2022/09962687/1IHnLpE7zcA",
"parentPublication": {
"id": "proceedings/fie/2022/6244/0",
"title": "2022 IEEE Frontiers in Education Conference (FIE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ge/2019/2245/0/224500a021",
"title": "Gender Disparity in the Governance of Software Engineering Conferences",
"doi": null,
"abstractUrl": "/proceedings-article/ge/2019/224500a021/1cTJejsHBCM",
"parentPublication": {
"id": "proceedings/ge/2019/2245/0",
"title": "2019 IEEE/ACM 2nd International Workshop on Gender Equality in Software Engineering (GE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/so/2020/06/09238650",
"title": "Gender Inclusivity as a Quality Requirement: Practices and Pitfalls",
"doi": null,
"abstractUrl": "/magazine/so/2020/06/09238650/1oa0Z4w4xoc",
"parentPublication": {
"id": "mags/so",
"title": "IEEE Software",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/so/2021/02/09293108",
"title": "Impact of Affirmative Action on Female Computer Science/Software Engineering Undergraduate Enrollment",
"doi": null,
"abstractUrl": "/magazine/so/2021/02/09293108/1pyokQ4ZJ1S",
"parentPublication": {
"id": "mags/so",
"title": "IEEE Software",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/01/09552840",
"title": "Gender in 30 Years of IEEE Visualization",
"doi": null,
"abstractUrl": "/journal/tg/2022/01/09552840/1xic2GL1FC0",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "08880035",
"articleId": "1ekTdWVTPZm",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08880037",
"articleId": "1ekT9o1MJoI",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
|
{
"issue": {
"id": "1reXmMcxtMQ",
"title": "March-April",
"year": "2021",
"issueNum": "02",
"idPrefix": "so",
"pubType": "magazine",
"volume": "38",
"label": "March-April",
"downloadables": {
"hasCover": true,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1oPzR4iA4nu",
"doi": "10.1109/MS.2020.3038765",
"abstract": "We study the gender of commits authors over 120 million projects and a period of 50 years. Commits by female authors remain low overall but are growing steadily, providing hope of a more gender-balanced future for collaborative software development.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We study the gender of commits authors over 120 million projects and a period of 50 years. Commits by female authors remain low overall but are growing steadily, providing hope of a more gender-balanced future for collaborative software development.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We study the gender of commits authors over 120 million projects and a period of 50 years. Commits by female authors remain low overall but are growing steadily, providing hope of a more gender-balanced future for collaborative software development.",
"title": "Gender Differences in Public Code Contributions: A 50-Year Perspective",
"normalizedTitle": "Gender Differences in Public Code Contributions: A 50-Year Perspective",
"fno": "09261329",
"hasPdf": true,
"idPrefix": "so",
"keywords": [
"Gender Issues",
"Groupware",
"Software Engineering",
"Gender Differences",
"Public Code Contributions",
"Female Authors",
"Gender Balanced Future",
"Collaborative Software Development",
"Gender Issues",
"Market Research",
"Gender Equity",
"Statistics",
"Software Development Management",
"Social Factors"
],
"authors": [
{
"givenName": "Stefano",
"surname": "Zacchiroli",
"fullName": "Stefano Zacchiroli",
"affiliation": "Computer Science, Universite de Paris, Paris, 75013, France",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "02",
"pubDate": "2021-03-01 00:00:00",
"pubType": "mags",
"pages": "45-50",
"year": "2021",
"issn": "0740-7459",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/isdea/2010/4212/1/4212a784",
"title": "Gender Differences in Cognitive Style and Personalized Foreign Language Teaching Strategy",
"doi": null,
"abstractUrl": "/proceedings-article/isdea/2010/4212a784/12OmNyaoDDy",
"parentPublication": {
"id": null,
"title": null,
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icse-seis/2022/9594/0/959400a172",
"title": "Worldwide Gender Differences in Public Code Contributions and how they have been affected by the COVID-19 pandemic",
"doi": null,
"abstractUrl": "/proceedings-article/icse-seis/2022/959400a172/1Emrit3Q688",
"parentPublication": {
"id": "proceedings/icse-seis/2022/9594/0",
"title": "2022 IEEE/ACM 44th International Conference on Software Engineering: Software Engineering in Society (ICSE-SEIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fie/2022/6244/0/09962687",
"title": "Gender differences in early careers of Finnish engineers",
"doi": null,
"abstractUrl": "/proceedings-article/fie/2022/09962687/1IHnLpE7zcA",
"parentPublication": {
"id": "proceedings/fie/2022/6244/0",
"title": "2022 IEEE Frontiers in Education Conference (FIE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ge/2019/2245/0/224500a013",
"title": "Can an Online Service Predict Gender? On the State-of-the-Art in Gender Identification from Texts",
"doi": null,
"abstractUrl": "/proceedings-article/ge/2019/224500a013/1cTJemBqqR2",
"parentPublication": {
"id": "proceedings/ge/2019/2245/0",
"title": "2019 IEEE/ACM 2nd International Workshop on Gender Equality in Software Engineering (GE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/so/2019/06/08880051",
"title": "Gender in Software Engineering",
"doi": null,
"abstractUrl": "/magazine/so/2019/06/08880051/1ekTayi7kt2",
"parentPublication": {
"id": "mags/so",
"title": "IEEE Software",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/so/2020/01/08852638",
"title": "Gender Diversity and Community Smells: Insights From the Trenches",
"doi": null,
"abstractUrl": "/magazine/so/2020/01/08852638/1fUSNHBzzGM",
"parentPublication": {
"id": "mags/so",
"title": "IEEE Software",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/so/2020/06/09238650",
"title": "Gender Inclusivity as a Quality Requirement: Practices and Pitfalls",
"doi": null,
"abstractUrl": "/magazine/so/2020/06/09238650/1oa0Z4w4xoc",
"parentPublication": {
"id": "mags/so",
"title": "IEEE Software",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/so/2021/02/09268454",
"title": "Gendered Experiences of Software Engineers During the COVID-19 Crisis",
"doi": null,
"abstractUrl": "/magazine/so/2021/02/09268454/1p1cdN4NYKk",
"parentPublication": {
"id": "mags/so",
"title": "IEEE Software",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/so/2021/02/09293108",
"title": "Impact of Affirmative Action on Female Computer Science/Software Engineering Undergraduate Enrollment",
"doi": null,
"abstractUrl": "/magazine/so/2021/02/09293108/1pyokQ4ZJ1S",
"parentPublication": {
"id": "mags/so",
"title": "IEEE Software",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/01/09552840",
"title": "Gender in 30 Years of IEEE Visualization",
"doi": null,
"abstractUrl": "/journal/tg/2022/01/09552840/1xic2GL1FC0",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09268454",
"articleId": "1p1cdN4NYKk",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09250363",
"articleId": "1oxkjlJSimA",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1CKMkYw5fa0",
"doi": "10.1109/TVCG.2022.3169175",
"abstract": "Appropriate gestures can enhance message delivery and audience engagement in both daily communication and public presentations. In this paper, we contribute a visual analytic approach that assists professional public speaking coaches in improving their practice of gesture training through analyzing presentation videos. Manually checking and exploring gesture usage in the presentation videos is often tedious and time-consuming. There lacks an efficient method to help users conduct gesture exploration, which is challenging due to the intrinsically temporal evolution of gestures and their complex correlation to speech content. In this paper, we propose GestureLens, a visual analytics system to facilitate gesture-based and content-based exploration of gesture usage in presentation videos. Specifically, the exploration view enables users to obtain a quick overview of the spatial and temporal distributions of gestures. The dynamic hand movements are firstly aggregated through a heatmap in the gesture space for uncovering spatial patterns, and then decomposed into two mutually perpendicular timelines for revealing temporal patterns. The relation view allows users to explicitly explore the correlation between speech content and gestures by enabling linked analysis and intuitive glyph designs. The video view and dynamic view show the context and overall dynamic movement of the selected gestures, respectively. Two usage scenarios and expert interviews with professional presentation coaches demonstrate the effectiveness and usefulness of GestureLens in facilitating gesture exploration and analysis of presentation videos.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Appropriate gestures can enhance message delivery and audience engagement in both daily communication and public presentations. In this paper, we contribute a visual analytic approach that assists professional public speaking coaches in improving their practice of gesture training through analyzing presentation videos. Manually checking and exploring gesture usage in the presentation videos is often tedious and time-consuming. There lacks an efficient method to help users conduct gesture exploration, which is challenging due to the intrinsically temporal evolution of gestures and their complex correlation to speech content. In this paper, we propose GestureLens, a visual analytics system to facilitate gesture-based and content-based exploration of gesture usage in presentation videos. Specifically, the exploration view enables users to obtain a quick overview of the spatial and temporal distributions of gestures. The dynamic hand movements are firstly aggregated through a heatmap in the gesture space for uncovering spatial patterns, and then decomposed into two mutually perpendicular timelines for revealing temporal patterns. The relation view allows users to explicitly explore the correlation between speech content and gestures by enabling linked analysis and intuitive glyph designs. The video view and dynamic view show the context and overall dynamic movement of the selected gestures, respectively. Two usage scenarios and expert interviews with professional presentation coaches demonstrate the effectiveness and usefulness of GestureLens in facilitating gesture exploration and analysis of presentation videos.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Appropriate gestures can enhance message delivery and audience engagement in both daily communication and public presentations. In this paper, we contribute a visual analytic approach that assists professional public speaking coaches in improving their practice of gesture training through analyzing presentation videos. Manually checking and exploring gesture usage in the presentation videos is often tedious and time-consuming. There lacks an efficient method to help users conduct gesture exploration, which is challenging due to the intrinsically temporal evolution of gestures and their complex correlation to speech content. In this paper, we propose GestureLens, a visual analytics system to facilitate gesture-based and content-based exploration of gesture usage in presentation videos. Specifically, the exploration view enables users to obtain a quick overview of the spatial and temporal distributions of gestures. The dynamic hand movements are firstly aggregated through a heatmap in the gesture space for uncovering spatial patterns, and then decomposed into two mutually perpendicular timelines for revealing temporal patterns. The relation view allows users to explicitly explore the correlation between speech content and gestures by enabling linked analysis and intuitive glyph designs. The video view and dynamic view show the context and overall dynamic movement of the selected gestures, respectively. Two usage scenarios and expert interviews with professional presentation coaches demonstrate the effectiveness and usefulness of GestureLens in facilitating gesture exploration and analysis of presentation videos.",
"title": "GestureLens: Visual Analysis of Gestures in Presentation Videos",
"normalizedTitle": "GestureLens: Visual Analysis of Gestures in Presentation Videos",
"fno": "09761750",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Videos",
"Correlation",
"Training",
"Data Visualization",
"Public Speaking",
"Visual Analytics",
"Trajectory",
"Gesture",
"Hand Movements",
"Presentation Video Analysis",
"Visual Analysis"
],
"authors": [
{
"givenName": "Haipeng",
"surname": "Zeng",
"fullName": "Haipeng Zeng",
"affiliation": "School of Intelligent Systems Engineering, Sun Yat-Sen University, 26469 Shenzhen, Guangdong, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Xingbo",
"surname": "Wang",
"fullName": "Xingbo Wang",
"affiliation": "CSE, HKUST, 58207 Kowloon, Hong Kong, Hong Kong",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Yong",
"surname": "Wang",
"fullName": "Yong Wang",
"affiliation": "School of Information Systems, Singapore Management University, 54756 Singapore, Singapore, Singapore, 178902",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Aoyu",
"surname": "Wu",
"fullName": "Aoyu Wu",
"affiliation": "Computer Science, The Hong Kong University of Science and Technology, Hong Kong, Kowloon, Hong Kong",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Ting-Chuen",
"surname": "Pong",
"fullName": "Ting-Chuen Pong",
"affiliation": "Computer Science & Engineering, Hong Kong University of Science and Technology, 58207 Kowloon, Hong Kong, Hong Kong, HKG",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Huamin",
"surname": "Qu",
"fullName": "Huamin Qu",
"affiliation": "The Department of Computer Science and Engineering, The Hong Kong University of Science and Technology, 58207 Kowloon, Hong Kong, Hong Kong",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-04-01 00:00:00",
"pubType": "trans",
"pages": "1-1",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/icme/2003/7965/1/7965181",
"title": "Oscillatory gestures and discourse",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2003/7965181/12OmNAkWvem",
"parentPublication": {
"id": "proceedings/icme/2003/7965/1",
"title": "2003 International Conference on Multimedia and Expo. ICME '03. Proceedings (Cat. No.03TH8698)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2010/7029/0/05543253",
"title": "Annotation and taxonomy of gestures in lecture videos",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2010/05543253/12OmNBNM8OJ",
"parentPublication": {
"id": "proceedings/cvprw/2010/7029/0",
"title": "2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition - Workshops",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2006/2521/1/252111100",
"title": "Visual Recognition of Similar Gestures",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2006/252111100/12OmNBcShVa",
"parentPublication": {
"id": "proceedings/icpr/2006/2521/1",
"title": "Pattern Recognition, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dexa/2008/3299/0/3299a137",
"title": "Gestures, Shapes and Multitouch Interaction",
"doi": null,
"abstractUrl": "/proceedings-article/dexa/2008/3299a137/12OmNBhZ4ew",
"parentPublication": {
"id": "proceedings/dexa/2008/3299/0",
"title": "2008 19th International Workshop on Database and Expert Systems Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2006/0366/0/04036684",
"title": "Prediction-Based Gesture Detection in Lecture Videos by Combining Visual, Speech and Electronic Slides",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2006/04036684/12OmNCzsKG8",
"parentPublication": {
"id": "proceedings/icme/2006/0366/0",
"title": "2006 IEEE International Conference on Multimedia and Expo",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2018/4886/0/488601a476",
"title": "Multichannel Attention Network for Analyzing Visual Behavior in Public Speaking",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2018/488601a476/12OmNqEjhZu",
"parentPublication": {
"id": "proceedings/wacv/2018/4886/0",
"title": "2018 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isspit/2006/9753/0/04042260",
"title": "Hierarchical Segmentation of Presentation Videos through Visual and Text Analysis",
"doi": null,
"abstractUrl": "/proceedings-article/isspit/2006/04042260/12OmNzgwmSb",
"parentPublication": {
"id": "proceedings/isspit/2006/9753/0",
"title": "2006 IEEE International Symposium on Signal Processing and Information Technology",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/ex/2016/06/mex2016060082",
"title": "Multimodal Sentiment Intensity Analysis in Videos: Facial Gestures and Verbal Messages",
"doi": null,
"abstractUrl": "/magazine/ex/2016/06/mex2016060082/13rRUwgyOcc",
"parentPublication": {
"id": "mags/ex",
"title": "IEEE Intelligent Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2018/6100/0/610000c145",
"title": "Recognizing American Sign Language Gestures from Within Continuous Videos",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2018/610000c145/17D45WrVg1p",
"parentPublication": {
"id": "proceedings/cvprw/2018/6100/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/01/08807235",
"title": "<italic>EmoCo</italic>: Visual Analysis of Emotion Coherence in Presentation Videos",
"doi": null,
"abstractUrl": "/journal/tg/2020/01/08807235/1cG6m1AVG6c",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09761724",
"articleId": "1CKMkLCKOSk",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09762800",
"articleId": "1CRqVJ1OnTO",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
|
{
"issue": {
"id": "1BoctALNVRu",
"title": "Jan.-March",
"year": "2022",
"issueNum": "01",
"idPrefix": "ta",
"pubType": "journal",
"volume": "13",
"label": "Jan.-March",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1cRBn0C8oCc",
"doi": "10.1109/TAFFC.2019.2937028",
"abstract": "Public speaking is a critical skill in daily communication. While more practicing such as rehearsal is helpful to improve such a skill, lack of personalized feedback limits the effectiveness of practicing. Therefore, we formulate the task of personalized feedback as an affective audio annotation problem by learning knowledge from online public speech videos. Considering the great success of deep learning techniques such as convolutional neural networks in a wide range of applications including speech recognition and object recognition, we propose a novel convolutional clustering neural network (CCNN) to solve this multi-label classification problem. Instead of aggregating the features of different channels through pooling, we introduce a novel clustering layer to derive intermediate representation for improved annotation performance. In order to evaluate the performance of our proposed method, we purposely built an affective audio annotation dataset by collecting more than 2,000 video clips from the TED website. Experimental results on this dataset demonstrate that our proposed method outperforms traditional CNN-based approaches with a lower hamming loss for affective annotation.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Public speaking is a critical skill in daily communication. While more practicing such as rehearsal is helpful to improve such a skill, lack of personalized feedback limits the effectiveness of practicing. Therefore, we formulate the task of personalized feedback as an affective audio annotation problem by learning knowledge from online public speech videos. Considering the great success of deep learning techniques such as convolutional neural networks in a wide range of applications including speech recognition and object recognition, we propose a novel convolutional clustering neural network (CCNN) to solve this multi-label classification problem. Instead of aggregating the features of different channels through pooling, we introduce a novel clustering layer to derive intermediate representation for improved annotation performance. In order to evaluate the performance of our proposed method, we purposely built an affective audio annotation dataset by collecting more than 2,000 video clips from the TED website. Experimental results on this dataset demonstrate that our proposed method outperforms traditional CNN-based approaches with a lower hamming loss for affective annotation.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Public speaking is a critical skill in daily communication. While more practicing such as rehearsal is helpful to improve such a skill, lack of personalized feedback limits the effectiveness of practicing. Therefore, we formulate the task of personalized feedback as an affective audio annotation problem by learning knowledge from online public speech videos. Considering the great success of deep learning techniques such as convolutional neural networks in a wide range of applications including speech recognition and object recognition, we propose a novel convolutional clustering neural network (CCNN) to solve this multi-label classification problem. Instead of aggregating the features of different channels through pooling, we introduce a novel clustering layer to derive intermediate representation for improved annotation performance. In order to evaluate the performance of our proposed method, we purposely built an affective audio annotation dataset by collecting more than 2,000 video clips from the TED website. Experimental results on this dataset demonstrate that our proposed method outperforms traditional CNN-based approaches with a lower hamming loss for affective annotation.",
"title": "Affective Audio Annotation of Public Speeches with Convolutional Clustering Neural Network",
"normalizedTitle": "Affective Audio Annotation of Public Speeches with Convolutional Clustering Neural Network",
"fno": "08818609",
"hasPdf": true,
"idPrefix": "ta",
"keywords": [
"Convolutional Neural Nets",
"Learning Artificial Intelligence",
"Pattern Classification",
"Speech Recognition",
"Video Signal Processing",
"Public Speeches",
"Public Speaking",
"Daily Communication",
"Personalized Feedback",
"Affective Audio Annotation Problem",
"Online Public Speech Videos",
"Deep Learning Techniques",
"Convolutional Neural Networks",
"Speech Recognition",
"Multilabel Classification Problem",
"Novel Clustering Layer",
"Affective Audio Annotation Dataset",
"Video Clips",
"Affective Annotation",
"Convolutional Clustering Neural Network",
"CNN Based Approach",
"TED Website",
"Hamming Loss",
"Annotations",
"Tagging",
"Task Analysis",
"Deep Learning",
"Neural Networks",
"Public Speaking",
"Videos",
"Affective Annotation",
"Public Speech",
"Convolutional Neural Network",
"Intermediate Representation",
"Clustering"
],
"authors": [
{
"givenName": "Jiahao",
"surname": "Xu",
"fullName": "Jiahao Xu",
"affiliation": "School of Computer Science, The University of Sydney, NSW, Australia",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Boyan",
"surname": "Zhang",
"fullName": "Boyan Zhang",
"affiliation": "School of Computer Science, The University of Sydney, NSW, Australia",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Zhiyong",
"surname": "Wang",
"fullName": "Zhiyong Wang",
"affiliation": "School of Computer Science, The University of Sydney, NSW, Australia",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Yang",
"surname": "Wang",
"fullName": "Yang Wang",
"affiliation": "School of Computer Science, University of Technology Sydney, NSW, Australia",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Fang",
"surname": "Chen",
"fullName": "Fang Chen",
"affiliation": "School of Computer Science, University of Technology Sydney, NSW, Australia",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Junbin",
"surname": "Gao",
"fullName": "Junbin Gao",
"affiliation": "Discipline of Business Analytics, The University of Sydney Business School, NSW, Australia",
"__typename": "ArticleAuthorType"
},
{
"givenName": "David Dagan",
"surname": "Feng",
"fullName": "David Dagan Feng",
"affiliation": "School of Computer Science, The University of Sydney, NSW, Australia",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-01-01 00:00:00",
"pubType": "trans",
"pages": "238-249",
"year": "2022",
"issn": "1949-3045",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/fg/2018/2335/0/233501a607",
"title": "Analyzing the Impact of Gender on the Automation of Feedback for Public Speaking",
"doi": null,
"abstractUrl": "/proceedings-article/fg/2018/233501a607/12OmNBt3qna",
"parentPublication": {
"id": "proceedings/fg/2018/2335/0",
"title": "2018 13th IEEE International Conference on Automatic Face & Gesture Recognition (FG 2018)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wiamis/2009/3609/0/05031483",
"title": "Posture annotation for studying affective interaction in multimodal corpora",
"doi": null,
"abstractUrl": "/proceedings-article/wiamis/2009/05031483/12OmNvStcMn",
"parentPublication": {
"id": "proceedings/wiamis/2009/3609/0",
"title": "Image Analysis for Multimedia Interactive Services, International Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2013/0015/0/06607456",
"title": "Personalized automatic image annotation based on reinforcement learning",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2013/06607456/12OmNx7G61f",
"parentPublication": {
"id": "proceedings/icme/2013/0015/0",
"title": "2013 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wi-iatw/2007/3028/0/3028a087",
"title": "Affective Computation Driven Personalization Modeling in Game-Based Learning",
"doi": null,
"abstractUrl": "/proceedings-article/wi-iatw/2007/3028a087/12OmNyXMQbT",
"parentPublication": {
"id": "proceedings/wi-iatw/2007/3028/0",
"title": "Web Intelligence and Intelligent Agent Technology, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/acii/2013/5048/0/5048a417",
"title": "Multimodal Expressions of Stress during a Public Speaking Task: Collection, Annotation and Global Analyses",
"doi": null,
"abstractUrl": "/proceedings-article/acii/2013/5048a417/12OmNya72tj",
"parentPublication": {
"id": "proceedings/acii/2013/5048/0",
"title": "2013 Humaine Association Conference on Affective Computing and Intelligent Interaction (ACII)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/visapp/2014/8133/3/07295065",
"title": "Mastering the art of persuasion intelligent tutoring system for presenters",
"doi": null,
"abstractUrl": "/proceedings-article/visapp/2014/07295065/12OmNyvoXdu",
"parentPublication": {
"id": "proceedings/visapp/2014/8133/2",
"title": "2014 International Conference on Computer Vision Theory and Applications (VISAPP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ta/2022/04/09816018",
"title": "The Arousal Video Game AnnotatIoN (AGAIN) Dataset",
"doi": null,
"abstractUrl": "/journal/ta/2022/04/09816018/1EMUYPTHzIk",
"parentPublication": {
"id": "trans/ta",
"title": "IEEE Transactions on Affective Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bigcom/2022/7384/0/738400a389",
"title": "Crowdsourcing System for Multi-object Annotation in Surveillance Videos",
"doi": null,
"abstractUrl": "/proceedings-article/bigcom/2022/738400a389/1LFKF1wgQLK",
"parentPublication": {
"id": "proceedings/bigcom/2022/7384/0",
"title": "2022 8th International Conference on Big Data Computing and Communications (BigCom)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ta/5555/01/10120958",
"title": "Crowdsourcing Affective Annotations via fNIRS-BCI",
"doi": null,
"abstractUrl": "/journal/ta/5555/01/10120958/1MYNyNXJUwU",
"parentPublication": {
"id": "trans/ta",
"title": "IEEE Transactions on Affective Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fg/2020/3079/0/307900a794",
"title": "Analysing Affective Behavior in the First ABAW 2020 Competition",
"doi": null,
"abstractUrl": "/proceedings-article/fg/2020/307900a794/1kecIYu9wL6",
"parentPublication": {
"id": "proceedings/fg/2020/3079/0/",
"title": "2020 15th IEEE International Conference on Automatic Face and Gesture Recognition (FG 2020) (FG)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "08863974",
"articleId": "1e0YifONmyQ",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08738864",
"articleId": "1aXLeqQGaWs",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
|
{
"issue": {
"id": "1HMOit1lSk8",
"title": "Dec.",
"year": "2022",
"issueNum": "12",
"idPrefix": "tg",
"pubType": "journal",
"volume": "28",
"label": "Dec.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1vhIcg5WCZy",
"doi": "10.1109/TVCG.2021.3097709",
"abstract": "Despite being a critical communication skill, grasping humor is challenging—a successful use of humor requires a mixture of both engaging content build-up and an appropriate vocal delivery (e.g., pause). Prior studies on computational humor emphasize the textual and audio features immediately next to the punchline, yet overlooking longer-term context setup. Moreover, the theories are usually too abstract for understanding each concrete humor snippet. To fill in the gap, we develop <italic>DeHumor</italic>, a visual analytical system for analyzing humorous behaviors in public speaking. To intuitively reveal the building blocks of each concrete example, <italic>DeHumor</italic> decomposes each humorous video into multimodal features and provides inline annotations of them on the video script. In particular, to better capture the build-ups, we introduce content repetition as a complement to features introduced in theories of computational humor and visualize them in a context linking graph. To help users locate the punchlines that have the desired features to learn, we summarize the content (with keywords) and humor feature statistics on an augmented time matrix. With case studies on stand-up comedy shows and TED talks, we show that <italic>DeHumor</italic> is able to highlight various building blocks of humor examples. In addition, expert interviews with communication coaches and humor researchers demonstrate the effectiveness of <italic>DeHumor</italic> for multimodal humor analysis of speech content and vocal delivery.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Despite being a critical communication skill, grasping humor is challenging—a successful use of humor requires a mixture of both engaging content build-up and an appropriate vocal delivery (e.g., pause). Prior studies on computational humor emphasize the textual and audio features immediately next to the punchline, yet overlooking longer-term context setup. Moreover, the theories are usually too abstract for understanding each concrete humor snippet. To fill in the gap, we develop <italic>DeHumor</italic>, a visual analytical system for analyzing humorous behaviors in public speaking. To intuitively reveal the building blocks of each concrete example, <italic>DeHumor</italic> decomposes each humorous video into multimodal features and provides inline annotations of them on the video script. In particular, to better capture the build-ups, we introduce content repetition as a complement to features introduced in theories of computational humor and visualize them in a context linking graph. To help users locate the punchlines that have the desired features to learn, we summarize the content (with keywords) and humor feature statistics on an augmented time matrix. With case studies on stand-up comedy shows and TED talks, we show that <italic>DeHumor</italic> is able to highlight various building blocks of humor examples. In addition, expert interviews with communication coaches and humor researchers demonstrate the effectiveness of <italic>DeHumor</italic> for multimodal humor analysis of speech content and vocal delivery.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Despite being a critical communication skill, grasping humor is challenging—a successful use of humor requires a mixture of both engaging content build-up and an appropriate vocal delivery (e.g., pause). Prior studies on computational humor emphasize the textual and audio features immediately next to the punchline, yet overlooking longer-term context setup. Moreover, the theories are usually too abstract for understanding each concrete humor snippet. To fill in the gap, we develop DeHumor, a visual analytical system for analyzing humorous behaviors in public speaking. To intuitively reveal the building blocks of each concrete example, DeHumor decomposes each humorous video into multimodal features and provides inline annotations of them on the video script. In particular, to better capture the build-ups, we introduce content repetition as a complement to features introduced in theories of computational humor and visualize them in a context linking graph. To help users locate the punchlines that have the desired features to learn, we summarize the content (with keywords) and humor feature statistics on an augmented time matrix. With case studies on stand-up comedy shows and TED talks, we show that DeHumor is able to highlight various building blocks of humor examples. In addition, expert interviews with communication coaches and humor researchers demonstrate the effectiveness of DeHumor for multimodal humor analysis of speech content and vocal delivery.",
"title": "DeHumor: Visual Analytics for Decomposing Humor",
"normalizedTitle": "DeHumor: Visual Analytics for Decomposing Humor",
"fno": "09488285",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Behavioural Sciences Computing",
"Data Visualisation",
"Multimedia Computing",
"Public Speaking",
"Appropriate Vocal Delivery",
"Audio Features",
"Build Ups",
"Building Blocks",
"Challenging A Successful",
"Computational Humor",
"Concrete Example",
"Concrete Humor Snippet",
"Content Repetition",
"Context Linking Graph",
"Critical Communication Skill",
"Decomposing Humor",
"De Humor",
"Desired Features",
"Engaging Content",
"Humor Examples",
"Humor Feature Statistics",
"Humor Researchers",
"Humorous Behaviors",
"Humorous Video",
"Longer Term Context Setup",
"Multimodal Features",
"Multimodal Humor Analysis",
"Punchline",
"Speech Content",
"Textual Features",
"Visual Analytical System",
"Visual Analytics",
"Interviews",
"Public Speaking",
"Speech",
"Semantics",
"Phonetics",
"Feature Extraction",
"Visual Analytics",
"Humor",
"Context",
"Multimodal Features",
"Visualization"
],
"authors": [
{
"givenName": "Xingbo",
"surname": "Wang",
"fullName": "Xingbo Wang",
"affiliation": "Hong Kong University of Science and Technology, Hong Kong",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Yao",
"surname": "Ming",
"fullName": "Yao Ming",
"affiliation": "Bloomberg LP, New York, NY, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Tongshuang",
"surname": "Wu",
"fullName": "Tongshuang Wu",
"affiliation": "University of Washington, Seattle, WA, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Haipeng",
"surname": "Zeng",
"fullName": "Haipeng Zeng",
"affiliation": "Sun Yat-sen University, Guangzhou, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Yong",
"surname": "Wang",
"fullName": "Yong Wang",
"affiliation": "Singapore Management University, Singapore, Singapore",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Huamin",
"surname": "Qu",
"fullName": "Huamin Qu",
"affiliation": "Hong Kong University of Science and Technology, Hong Kong",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "12",
"pubDate": "2022-12-01 00:00:00",
"pubType": "trans",
"pages": "4609-4623",
"year": "2022",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/passat-socialcom/2012/5638/0/06406311",
"title": "Characterizing the Internet's Sense of Humor",
"doi": null,
"abstractUrl": "/proceedings-article/passat-socialcom/2012/06406311/12OmNAndida",
"parentPublication": {
"id": "proceedings/passat-socialcom/2012/5638/0",
"title": "2012 International Conference on Privacy, Security, Risk and Trust (PASSAT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2016/8851/0/8851e603",
"title": "We are Humor Beings: Understanding and Predicting Visual Humor",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2016/8851e603/12OmNBCqbF5",
"parentPublication": {
"id": "proceedings/cvpr/2016/8851/0",
"title": "2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fg/2018/2335/0/233501a607",
"title": "Analyzing the Impact of Gender on the Automation of Feedback for Public Speaking",
"doi": null,
"abstractUrl": "/proceedings-article/fg/2018/233501a607/12OmNBt3qna",
"parentPublication": {
"id": "proceedings/fg/2018/2335/0",
"title": "2018 13th IEEE International Conference on Automatic Face & Gesture Recognition (FG 2018)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bigcomp/2019/7789/0/08679261",
"title": "SpeechLens: A Visual Analytics Approach for Exploring Speech Strategies with Textural and Acoustic Features",
"doi": null,
"abstractUrl": "/proceedings-article/bigcomp/2019/08679261/18XknOkGKbK",
"parentPublication": {
"id": "proceedings/bigcomp/2019/7789/0",
"title": "2019 IEEE International Conference on Big Data and Smart Computing (BigComp)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09761750",
"title": "GestureLens: Visual Analysis of Gestures in Presentation Videos",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09761750/1CKMkYw5fa0",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/10/09829321",
"title": "Influence Maximization With Visual Analytics",
"doi": null,
"abstractUrl": "/journal/tg/2022/10/09829321/1EYxoEPe9eU",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/06/10081322",
"title": "How Does Attention Work in Vision Transformers? A Visual Analytics Attempt",
"doi": null,
"abstractUrl": "/journal/tg/2023/06/10081322/1LRbRtJhrG0",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/12/09420254",
"title": "Visual Analytics for RNN-Based Deep Reinforcement Learning",
"doi": null,
"abstractUrl": "/journal/tg/2022/12/09420254/1tdUMGe1DAk",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2021/0477/0/047700a576",
"title": "Multimodal Humor Dataset: Predicting Laughter tracks for Sitcoms",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2021/047700a576/1uqGJRGn9OU",
"parentPublication": {
"id": "proceedings/wacv/2021/0477/0",
"title": "2021 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/01/09555490",
"title": "E-ffective: A Visual Analytic System for Exploring the Emotion and Effectiveness of Inspirational Speeches",
"doi": null,
"abstractUrl": "/journal/tg/2022/01/09555490/1xjR3LSQrLi",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09483619",
"articleId": "1vcJrTJdq3m",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09490310",
"articleId": "1vmGThNh9jq",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1HMOjDHNzmo",
"name": "ttg202212-09488285s1-supp1-3097709.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg202212-09488285s1-supp1-3097709.mp4",
"extension": "mp4",
"size": "34.1 MB",
"__typename": "WebExtraType"
},
{
"id": "1HMOjpefW4o",
"name": "ttg202212-09488285s1-supp2-3097709.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg202212-09488285s1-supp2-3097709.pdf",
"extension": "pdf",
"size": "101 kB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNCaLEju",
"title": "Jan.",
"year": "2018",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": "24",
"label": "Jan.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUyY28YC",
"doi": "10.1109/TVCG.2017.2745878",
"abstract": "Visualizations are nowadays appearing in popular media and are used everyday in the workplace. This democratisation of visualization challenges educators to develop effective learning strategies, in order to train the next generation of creative visualization specialists. There is high demand for skilled individuals who can analyse a problem, consider alternative designs, develop new visualizations, and be creative and innovative. Our three-stage framework, leads the learner through a series of tasks, each designed to develop different skills necessary for coming up with creative, innovative, effective, and purposeful visualizations. For that, we get the learners to create an explanatory visualization of an algorithm of their choice. By making an algorithm choice, and by following an active-learning and project-based strategy, the learners take ownership of a particular visualization challenge. They become enthusiastic to develop good results and learn different creative skills on their learning journey.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Visualizations are nowadays appearing in popular media and are used everyday in the workplace. This democratisation of visualization challenges educators to develop effective learning strategies, in order to train the next generation of creative visualization specialists. There is high demand for skilled individuals who can analyse a problem, consider alternative designs, develop new visualizations, and be creative and innovative. Our three-stage framework, leads the learner through a series of tasks, each designed to develop different skills necessary for coming up with creative, innovative, effective, and purposeful visualizations. For that, we get the learners to create an explanatory visualization of an algorithm of their choice. By making an algorithm choice, and by following an active-learning and project-based strategy, the learners take ownership of a particular visualization challenge. They become enthusiastic to develop good results and learn different creative skills on their learning journey.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Visualizations are nowadays appearing in popular media and are used everyday in the workplace. This democratisation of visualization challenges educators to develop effective learning strategies, in order to train the next generation of creative visualization specialists. There is high demand for skilled individuals who can analyse a problem, consider alternative designs, develop new visualizations, and be creative and innovative. Our three-stage framework, leads the learner through a series of tasks, each designed to develop different skills necessary for coming up with creative, innovative, effective, and purposeful visualizations. For that, we get the learners to create an explanatory visualization of an algorithm of their choice. By making an algorithm choice, and by following an active-learning and project-based strategy, the learners take ownership of a particular visualization challenge. They become enthusiastic to develop good results and learn different creative skills on their learning journey.",
"title": "The Explanatory Visualization Framework: An Active Learning Framework for Teaching Creative Computing Using Explanatory Visualizations",
"normalizedTitle": "The Explanatory Visualization Framework: An Active Learning Framework for Teaching Creative Computing Using Explanatory Visualizations",
"fno": "08017594",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Data Visualization",
"Education",
"Visualization",
"Algorithm Design And Analysis",
"Creativity",
"Computational Modeling",
"Explanatory Visualization",
"Information Visualization",
"Teaching Visualization",
"Learning Support"
],
"authors": [
{
"givenName": "Jonathan C.",
"surname": "Roberts",
"fullName": "Jonathan C. Roberts",
"affiliation": "Bangor University",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Panagiotis D.",
"surname": "Ritsos",
"fullName": "Panagiotis D. Ritsos",
"affiliation": "Bangor University",
"__typename": "ArticleAuthorType"
},
{
"givenName": "James R.",
"surname": "Jackson",
"fullName": "James R. Jackson",
"affiliation": "Bangor University",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Christopher",
"surname": "Headleand",
"fullName": "Christopher Headleand",
"affiliation": "University of Lincoln",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2018-01-01 00:00:00",
"pubType": "trans",
"pages": "791-801",
"year": "2018",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/icalt/2011/4346/0/4346a171",
"title": "Constructivist and Constructionist Approaches to Constructing Algorithm Visualizations: A Proposal",
"doi": null,
"abstractUrl": "/proceedings-article/icalt/2011/4346a171/12OmNA14A3v",
"parentPublication": {
"id": "proceedings/icalt/2011/4346/0",
"title": "Advanced Learning Technologies, IEEE International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/la-web/2009/3856/0/3856a092",
"title": "Promoting Creative Design through Toolkits",
"doi": null,
"abstractUrl": "/proceedings-article/la-web/2009/3856a092/12OmNB8TUbP",
"parentPublication": {
"id": "proceedings/la-web/2009/3856/0",
"title": "Web Congress, Latin American",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icalt/2012/4702/0/4702a549",
"title": "The Effects of Cognitive-Affective Interaction Strategy on Novices' Creative Performance in Game Design Project",
"doi": null,
"abstractUrl": "/proceedings-article/icalt/2012/4702a549/12OmNzIUfL7",
"parentPublication": {
"id": "proceedings/icalt/2012/4702/0",
"title": "Advanced Learning Technologies, IEEE International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fie/2015/8454/0/07344263",
"title": "Vamonos: Embeddable visualizations of advanced algorithms",
"doi": null,
"abstractUrl": "/proceedings-article/fie/2015/07344263/12OmNzmLxDO",
"parentPublication": {
"id": "proceedings/fie/2015/8454/0",
"title": "2015 IEEE Frontiers in Education Conference (FIE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/01/07185456",
"title": "Visualization-by-Sketching: An Artist's Interface for Creating Multivariate Time-Varying Data Visualizations",
"doi": null,
"abstractUrl": "/journal/tg/2016/01/07185456/13rRUIIVlcO",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2015/06/07015617",
"title": "An Efficient Framework for Generating Storyline Visualizations from Streaming Data",
"doi": null,
"abstractUrl": "/journal/tg/2015/06/07015617/13rRUwwJWFO",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/mu/2017/01/mmu2017010048",
"title": "Open Symphony: Creative Participation for Audiences of Live Music Performances",
"doi": null,
"abstractUrl": "/magazine/mu/2017/01/mmu2017010048/13rRUxBa5kC",
"parentPublication": {
"id": "mags/mu",
"title": "IEEE MultiMedia",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/01/08017606",
"title": "Active Reading of Visualizations",
"doi": null,
"abstractUrl": "/journal/tg/2018/01/08017606/13rRUyYSWl5",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/01/08440830",
"title": "A Framework for Creative Visualization-Opportunities Workshops",
"doi": null,
"abstractUrl": "/journal/tg/2019/01/08440830/17D45WIXbOg",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/03/09615008",
"title": "Explaining With Examples: Lessons Learned From Crowdsourced Introductory Description of Information Visualizations",
"doi": null,
"abstractUrl": "/journal/tg/2023/03/09615008/1yyho082gEw",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "08017611",
"articleId": "13rRUxAAT7J",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08017579",
"articleId": "13rRUB7a116",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "17ShDTYesUj",
"name": "ttg201801-08017594s1.zip",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg201801-08017594s1.zip",
"extension": "zip",
"size": "2.14 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNCbCrUN",
"title": "Dec.",
"year": "2013",
"issueNum": "12",
"idPrefix": "tg",
"pubType": "journal",
"volume": "19",
"label": "Dec.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUEgs2BW",
"doi": "10.1109/TVCG.2013.153",
"abstract": "To verify cluster separation in high-dimensional data, analysts often reduce the data with a dimension reduction (DR) technique, and then visualize it with 2D Scatterplots, interactive 3D Scatterplots, or Scatterplot Matrices (SPLOMs). With the goal of providing guidance between these visual encoding choices, we conducted an empirical data study in which two human coders manually inspected a broad set of 816 scatterplots derived from 75 datasets, 4 DR techniques, and the 3 previously mentioned scatterplot techniques. Each coder scored all color-coded classes in each scatterplot in terms of their separability from other classes. We analyze the resulting quantitative data with a heatmap approach, and qualitatively discuss interesting scatterplot examples. Our findings reveal that 2D scatterplots are often 'good enough', that is, neither SPLOM nor interactive 3D adds notably more cluster separability with the chosen DR technique. If 2D is not good enough, the most promising approach is to use an alternative DR technique in 2D. Beyond that, SPLOM occasionally adds additional value, and interactive 3D rarely helps but often hurts in terms of poorer class separation and usability. We summarize these results as a workflow model and implications for design. Our results offer guidance to analysts during the DR exploration process.",
"abstracts": [
{
"abstractType": "Regular",
"content": "To verify cluster separation in high-dimensional data, analysts often reduce the data with a dimension reduction (DR) technique, and then visualize it with 2D Scatterplots, interactive 3D Scatterplots, or Scatterplot Matrices (SPLOMs). With the goal of providing guidance between these visual encoding choices, we conducted an empirical data study in which two human coders manually inspected a broad set of 816 scatterplots derived from 75 datasets, 4 DR techniques, and the 3 previously mentioned scatterplot techniques. Each coder scored all color-coded classes in each scatterplot in terms of their separability from other classes. We analyze the resulting quantitative data with a heatmap approach, and qualitatively discuss interesting scatterplot examples. Our findings reveal that 2D scatterplots are often 'good enough', that is, neither SPLOM nor interactive 3D adds notably more cluster separability with the chosen DR technique. If 2D is not good enough, the most promising approach is to use an alternative DR technique in 2D. Beyond that, SPLOM occasionally adds additional value, and interactive 3D rarely helps but often hurts in terms of poorer class separation and usability. We summarize these results as a workflow model and implications for design. Our results offer guidance to analysts during the DR exploration process.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "To verify cluster separation in high-dimensional data, analysts often reduce the data with a dimension reduction (DR) technique, and then visualize it with 2D Scatterplots, interactive 3D Scatterplots, or Scatterplot Matrices (SPLOMs). With the goal of providing guidance between these visual encoding choices, we conducted an empirical data study in which two human coders manually inspected a broad set of 816 scatterplots derived from 75 datasets, 4 DR techniques, and the 3 previously mentioned scatterplot techniques. Each coder scored all color-coded classes in each scatterplot in terms of their separability from other classes. We analyze the resulting quantitative data with a heatmap approach, and qualitatively discuss interesting scatterplot examples. Our findings reveal that 2D scatterplots are often 'good enough', that is, neither SPLOM nor interactive 3D adds notably more cluster separability with the chosen DR technique. If 2D is not good enough, the most promising approach is to use an alternative DR technique in 2D. Beyond that, SPLOM occasionally adds additional value, and interactive 3D rarely helps but often hurts in terms of poorer class separation and usability. We summarize these results as a workflow model and implications for design. Our results offer guidance to analysts during the DR exploration process.",
"title": "Empirical Guidance on Scatterplot and Dimension Reduction Technique Choices",
"normalizedTitle": "Empirical Guidance on Scatterplot and Dimension Reduction Technique Choices",
"fno": "ttg2013122634",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Three Dimensional Displays",
"Encoding",
"Principal Component Analysis",
"Data Visualization",
"Data Analysis",
"Scatterplots",
"Three Dimensional Displays",
"Encoding",
"Principal Component Analysis",
"Data Visualization",
"Data Analysis",
"Quantitative Study",
"Dimensionality Reduction"
],
"authors": [
{
"givenName": "Michael",
"surname": "Sedlmair",
"fullName": "Michael Sedlmair",
"affiliation": "Univ. of Vienna, Vienna, Austria",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Tamara",
"surname": "Munzner",
"fullName": "Tamara Munzner",
"affiliation": "Univ. of British Columbia, Vancouver, BC, Canada",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Melanie",
"surname": "Tory",
"fullName": "Melanie Tory",
"affiliation": "Univ. of Victoria, Victoria, BC, Canada",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "12",
"pubDate": "2013-12-01 00:00:00",
"pubType": "trans",
"pages": "2634-2643",
"year": "2013",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/vast/2014/6227/0/07042500",
"title": "A multidimensional brush for scatterplot data analytics",
"doi": null,
"abstractUrl": "/proceedings-article/vast/2014/07042500/12OmNxR5UJf",
"parentPublication": {
"id": "proceedings/vast/2014/6227/0",
"title": "2014 IEEE Conference on Visual Analytics Science and Technology (VAST)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/11/ttg2012111969",
"title": "3D Scatterplot Navigation",
"doi": null,
"abstractUrl": "/journal/tg/2012/11/ttg2012111969/13rRUB6Sq0y",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/01/07192671",
"title": "InterAxis: Steering Scatterplot Axes via Observation-Level Interaction",
"doi": null,
"abstractUrl": "/journal/tg/2016/01/07192671/13rRUILLkDT",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/01/08022891",
"title": "Priming and Anchoring Effects in Visualization",
"doi": null,
"abstractUrl": "/journal/tg/2018/01/08022891/13rRUwbaqLz",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/10/ttg2013101768",
"title": "The Generalized Sensitivity Scatterplot",
"doi": null,
"abstractUrl": "/journal/tg/2013/10/ttg2013101768/13rRUwbs2gs",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2010/06/mcg2010060003",
"title": "Improved Scatterplot Design",
"doi": null,
"abstractUrl": "/magazine/cg/2010/06/mcg2010060003/13rRUwjoNCc",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/09/07332976",
"title": "The Connected Scatterplot for Presenting Paired Time Series",
"doi": null,
"abstractUrl": "/journal/tg/2016/09/07332976/13rRUyY294F",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09826389",
"title": "Automatic Scatterplot Design Optimization for Clustering Identification",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09826389/1EVdDTX0i2I",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/01/09925049",
"title": "Dual Space Coupling Model Guided Overlap-Free Scatterplot",
"doi": null,
"abstractUrl": "/journal/tg/2023/01/09925049/1HBHYSHqD3a",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/01/08794768",
"title": "Evaluating Perceptual Bias During Geometric Scaling of Scatterplots",
"doi": null,
"abstractUrl": "/journal/tg/2020/01/08794768/1cr2ZlCC2xG",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2013122625",
"articleId": "13rRUx0xPi9",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2013122644",
"articleId": "13rRUEgarBt",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "17ShDTXFgLX",
"name": "ttg2013122634s.mov",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg2013122634s.mov",
"extension": "mov",
"size": "17.7 MB",
"__typename": "WebExtraType"
},
{
"id": "17ShDTXFgLY",
"name": "ttg2013122634s.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg2013122634s.pdf",
"extension": "pdf",
"size": "4.14 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
}
|
{
"issue": {
"id": "1J9y2mtpt3a",
"title": "Jan.",
"year": "2023",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": "29",
"label": "Jan.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1H0GkV5P1qo",
"doi": "10.1109/TVCG.2022.3209423",
"abstract": "We propose a contrastive dimensionality reduction approach (CDR) for interactive visual cluster analysis. Although dimensionality reduction of high-dimensional data is widely used in visual cluster analysis in conjunction with scatterplots, there are several limitations on effective visual cluster analysis. First, it is non-trivial for an embedding to present clear visual cluster separation when keeping neighborhood structures. Second, as cluster analysis is a subjective task, user steering is required. However, it is also non-trivial to enable interactions in dimensionality reduction. To tackle these problems, we introduce contrastive learning into dimensionality reduction for high-quality embedding. We then redefine the gradient of the loss function to the negative pairs to enhance the visual cluster separation of embedding results. Based on the contrastive learning scheme, we employ link-based interactions to steer embeddings. After that, we implement a prototype visual interface that integrates the proposed algorithms and a set of visualizations. Quantitative experiments demonstrate that CDR outperforms existing techniques in terms of preserving correct neighborhood structures and improving visual cluster separation. The ablation experiment demonstrates the effectiveness of gradient redefinition. The user study verifies that CDR outperforms t-SNE and UMAP in the task of cluster identification. We also showcase two use cases on real-world datasets to present the effectiveness of link-based interactions.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We propose a contrastive dimensionality reduction approach (CDR) for interactive visual cluster analysis. Although dimensionality reduction of high-dimensional data is widely used in visual cluster analysis in conjunction with scatterplots, there are several limitations on effective visual cluster analysis. First, it is non-trivial for an embedding to present clear visual cluster separation when keeping neighborhood structures. Second, as cluster analysis is a subjective task, user steering is required. However, it is also non-trivial to enable interactions in dimensionality reduction. To tackle these problems, we introduce contrastive learning into dimensionality reduction for high-quality embedding. We then redefine the gradient of the loss function to the negative pairs to enhance the visual cluster separation of embedding results. Based on the contrastive learning scheme, we employ link-based interactions to steer embeddings. After that, we implement a prototype visual interface that integrates the proposed algorithms and a set of visualizations. Quantitative experiments demonstrate that CDR outperforms existing techniques in terms of preserving correct neighborhood structures and improving visual cluster separation. The ablation experiment demonstrates the effectiveness of gradient redefinition. The user study verifies that CDR outperforms t-SNE and UMAP in the task of cluster identification. We also showcase two use cases on real-world datasets to present the effectiveness of link-based interactions.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We propose a contrastive dimensionality reduction approach (CDR) for interactive visual cluster analysis. Although dimensionality reduction of high-dimensional data is widely used in visual cluster analysis in conjunction with scatterplots, there are several limitations on effective visual cluster analysis. First, it is non-trivial for an embedding to present clear visual cluster separation when keeping neighborhood structures. Second, as cluster analysis is a subjective task, user steering is required. However, it is also non-trivial to enable interactions in dimensionality reduction. To tackle these problems, we introduce contrastive learning into dimensionality reduction for high-quality embedding. We then redefine the gradient of the loss function to the negative pairs to enhance the visual cluster separation of embedding results. Based on the contrastive learning scheme, we employ link-based interactions to steer embeddings. After that, we implement a prototype visual interface that integrates the proposed algorithms and a set of visualizations. Quantitative experiments demonstrate that CDR outperforms existing techniques in terms of preserving correct neighborhood structures and improving visual cluster separation. The ablation experiment demonstrates the effectiveness of gradient redefinition. The user study verifies that CDR outperforms t-SNE and UMAP in the task of cluster identification. We also showcase two use cases on real-world datasets to present the effectiveness of link-based interactions.",
"title": "Interactive Visual Cluster Analysis by Contrastive Dimensionality Reduction",
"normalizedTitle": "Interactive Visual Cluster Analysis by Contrastive Dimensionality Reduction",
"fno": "09904480",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Data Preparation",
"Data Visualisation",
"Learning Artificial Intelligence",
"Pattern Clustering",
"User Interfaces",
"CDR",
"Cluster Identification",
"Contrastive Dimensionality Reduction Approach",
"Contrastive Learning Scheme",
"Correct Neighborhood Structures",
"Gradient Redefinition",
"High Dimensional Data",
"High Quality Embedding",
"Interactive Visual Cluster Analysis",
"Link Based Interactions",
"Loss Function",
"Prototype Visual Interface",
"Scatterplots",
"Visual Cluster Separation",
"Visualization",
"Dimensionality Reduction",
"Task Analysis",
"Data Models",
"Space Heating",
"Clustering Algorithms",
"Prototypes",
"Dimensionality Reduction",
"Visual Cluster Analysis",
"Contrastive Learning"
],
"authors": [
{
"givenName": "Jiazhi",
"surname": "Xia",
"fullName": "Jiazhi Xia",
"affiliation": "School of Computer Science and Engineering, Central South University, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Linquan",
"surname": "Huang",
"fullName": "Linquan Huang",
"affiliation": "School of Computer Science and Engineering, Central South University, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Weixing",
"surname": "Lin",
"fullName": "Weixing Lin",
"affiliation": "School of Computer Science and Engineering, Central South University, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Xin",
"surname": "Zhao",
"fullName": "Xin Zhao",
"affiliation": "School of Computer Science and Engineering, Central South University, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jing",
"surname": "Wu",
"fullName": "Jing Wu",
"affiliation": "Cardiff University, UK",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Yang",
"surname": "Chen",
"fullName": "Yang Chen",
"affiliation": "School of Computer Science and Engineering, Central South University, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Ying",
"surname": "Zhao",
"fullName": "Ying Zhao",
"affiliation": "School of Computer Science and Engineering, Central South University, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Wei",
"surname": "Chen",
"fullName": "Wei Chen",
"affiliation": "State Key Lab of CAD&CG, Zhejiang University, China",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2023-01-01 00:00:00",
"pubType": "trans",
"pages": "734-744",
"year": "2023",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/dicta/2010/4271/0/4271a349",
"title": "Robust Dimensionality Reduction for Human Action Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/dicta/2010/4271a349/12OmNxwWoRF",
"parentPublication": {
"id": "proceedings/dicta/2010/4271/0",
"title": "2010 International Conference on Digital Image Computing: Techniques and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ictai/2011/4596/0/4596a865",
"title": "Transferable Discriminative Dimensionality Reduction",
"doi": null,
"abstractUrl": "/proceedings-article/ictai/2011/4596a865/12OmNy3iFuF",
"parentPublication": {
"id": "proceedings/ictai/2011/4596/0",
"title": "2011 IEEE 23rd International Conference on Tools with Artificial Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2009/06/ttg2009060993",
"title": "Interactive Dimensionality Reduction Through User-defined Combinations of Quality Metrics",
"doi": null,
"abstractUrl": "/journal/tg/2009/06/ttg2009060993/13rRUEgs2tm",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2011/06/ttp2011061147",
"title": "Multiple Kernel Learning for Dimensionality Reduction",
"doi": null,
"abstractUrl": "/journal/tp/2011/06/ttp2011061147/13rRUwgQprO",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2000/06/i0623",
"title": "Fractional-Step Dimensionality Reduction",
"doi": null,
"abstractUrl": "/journal/tp/2000/06/i0623/13rRUxNmPET",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/01/08805461",
"title": "Supporting Analysis of Dimensionality Reduction Results with Contrastive Learning",
"doi": null,
"abstractUrl": "/journal/tg/2020/01/08805461/1cG4ulCK5S8",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sitis/2019/5686/0/568600a577",
"title": "Autoencoder Based Dimensionality Reduction of Feature Vectors for Object Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/sitis/2019/568600a577/1j9xB188lAk",
"parentPublication": {
"id": "proceedings/sitis/2019/5686/0",
"title": "2019 15th International Conference on Signal-Image Technology & Internet-Based Systems (SITIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/bd/2022/06/09428524",
"title": "EvoSets: Tracking the Sensitivity of Dimensionality Reduction Results Across Subspaces",
"doi": null,
"abstractUrl": "/journal/bd/2022/06/09428524/1twasK9vXtS",
"parentPublication": {
"id": "trans/bd",
"title": "IEEE Transactions on Big Data",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/01/09555244",
"title": "Interactive Dimensionality Reduction for Comparative Analysis",
"doi": null,
"abstractUrl": "/journal/tg/2022/01/09555244/1xjR1QZtkTS",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vis/2021/3335/0/333500a026",
"title": "Semantic Explanation of Interactive Dimensionality Reduction",
"doi": null,
"abstractUrl": "/proceedings-article/vis/2021/333500a026/1yXuftZECbe",
"parentPublication": {
"id": "proceedings/vis/2021/3335/0",
"title": "2021 IEEE Visualization Conference (VIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09903342",
"articleId": "1GZolJg0E7e",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09904619",
"articleId": "1H1ggvuBvMc",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1J9ylZFE0JG",
"name": "ttg202301-09904480s1-supp2-3209423.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg202301-09904480s1-supp2-3209423.pdf",
"extension": "pdf",
"size": "30.3 MB",
"__typename": "WebExtraType"
},
{
"id": "1J9ymeFPvZ6",
"name": "ttg202301-09904480s1-supp1-3209423.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg202301-09904480s1-supp1-3209423.mp4",
"extension": "mp4",
"size": "19.4 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNvsDHDY",
"title": "Jan.",
"year": "2020",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": "26",
"label": "Jan.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1cG4ulCK5S8",
"doi": "10.1109/TVCG.2019.2934251",
"abstract": "Dimensionality reduction (DR) is frequently used for analyzing and visualizing high-dimensional data as it provides a good first glance of the data. However, to interpret the DR result for gaining useful insights from the data, it would take additional analysis effort such as identifying clusters and understanding their characteristics. While there are many automatic methods (e.g., density-based clustering methods) to identify clusters, effective methods for understanding a cluster's characteristics are still lacking. A cluster can be mostly characterized by its distribution of feature values. Reviewing the original feature values is not a straightforward task when the number of features is large. To address this challenge, we present a visual analytics method that effectively highlights the essential features of a cluster in a DR result. To extract the essential features, we introduce an enhanced usage of contrastive principal component analysis (cPCA). Our method, called ccPCA (contrasting clusters in PCA), can calculate each feature's relative contribution to the contrast between one cluster and other clusters. With ccPCA, we have created an interactive system including a scalable visualization of clusters' feature contributions. We demonstrate the effectiveness of our method and system with case studies using several publicly available datasets.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Dimensionality reduction (DR) is frequently used for analyzing and visualizing high-dimensional data as it provides a good first glance of the data. However, to interpret the DR result for gaining useful insights from the data, it would take additional analysis effort such as identifying clusters and understanding their characteristics. While there are many automatic methods (e.g., density-based clustering methods) to identify clusters, effective methods for understanding a cluster's characteristics are still lacking. A cluster can be mostly characterized by its distribution of feature values. Reviewing the original feature values is not a straightforward task when the number of features is large. To address this challenge, we present a visual analytics method that effectively highlights the essential features of a cluster in a DR result. To extract the essential features, we introduce an enhanced usage of contrastive principal component analysis (cPCA). Our method, called ccPCA (contrasting clusters in PCA), can calculate each feature's relative contribution to the contrast between one cluster and other clusters. With ccPCA, we have created an interactive system including a scalable visualization of clusters' feature contributions. We demonstrate the effectiveness of our method and system with case studies using several publicly available datasets.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Dimensionality reduction (DR) is frequently used for analyzing and visualizing high-dimensional data as it provides a good first glance of the data. However, to interpret the DR result for gaining useful insights from the data, it would take additional analysis effort such as identifying clusters and understanding their characteristics. While there are many automatic methods (e.g., density-based clustering methods) to identify clusters, effective methods for understanding a cluster's characteristics are still lacking. A cluster can be mostly characterized by its distribution of feature values. Reviewing the original feature values is not a straightforward task when the number of features is large. To address this challenge, we present a visual analytics method that effectively highlights the essential features of a cluster in a DR result. To extract the essential features, we introduce an enhanced usage of contrastive principal component analysis (cPCA). Our method, called ccPCA (contrasting clusters in PCA), can calculate each feature's relative contribution to the contrast between one cluster and other clusters. With ccPCA, we have created an interactive system including a scalable visualization of clusters' feature contributions. We demonstrate the effectiveness of our method and system with case studies using several publicly available datasets.",
"title": "Supporting Analysis of Dimensionality Reduction Results with Contrastive Learning",
"normalizedTitle": "Supporting Analysis of Dimensionality Reduction Results with Contrastive Learning",
"fno": "08805461",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Data Analysis",
"Data Reduction",
"Data Visualisation",
"Feature Extraction",
"Learning Artificial Intelligence",
"Pattern Clustering",
"Principal Component Analysis",
"Feature Extraction",
"Scalable Visualization",
"Contrastive Principal Component Analysis",
"Visual Analytics Method",
"Density Based Clustering Methods",
"High Dimensional Data Visualization",
"Contrastive Learning",
"Dimensionality Reduction",
"Principal Component Analysis",
"Task Analysis",
"Data Visualization",
"Visual Analytics",
"Dimensionality Reduction",
"Feature Extraction",
"Dimensionality Reduction",
"Contrastive Learning",
"Principal Component Analysis",
"High Dimensional Data",
"Visual Analytics"
],
"authors": [
{
"givenName": "Takanori",
"surname": "Fujiwara",
"fullName": "Takanori Fujiwara",
"affiliation": "University of California, Davis",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Oh-Hyun",
"surname": "Kwon",
"fullName": "Oh-Hyun Kwon",
"affiliation": "University of California, Davis",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Kwan-Liu",
"surname": "Ma",
"fullName": "Kwan-Liu Ma",
"affiliation": "University of California, Davis",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2020-01-01 00:00:00",
"pubType": "trans",
"pages": "45-55",
"year": "2020",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/ialp/2009/3904/0/3904a259",
"title": "Approaches of Dimensionality Reduction for Telugu Document Classification",
"doi": null,
"abstractUrl": "/proceedings-article/ialp/2009/3904a259/12OmNzayNcB",
"parentPublication": {
"id": "proceedings/ialp/2009/3904/0",
"title": "Asian Language Processing, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibm/2018/5488/0/08621556",
"title": "Comparison of dimensionality reduction methods for TCM symptom information",
"doi": null,
"abstractUrl": "/proceedings-article/bibm/2018/08621556/17D45Xbl4O3",
"parentPublication": {
"id": "proceedings/bibm/2018/5488/0",
"title": "2018 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/01/09904480",
"title": "Interactive Visual Cluster Analysis by Contrastive Dimensionality Reduction",
"doi": null,
"abstractUrl": "/journal/tg/2023/01/09904480/1H0GkV5P1qo",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/01/08809834",
"title": "An Incremental Dimensionality Reduction Method for Visualizing Streaming Multidimensional Data",
"doi": null,
"abstractUrl": "/journal/tg/2020/01/08809834/1cHEiLzaKw8",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2019/2838/0/283800a228",
"title": "User-guided Dimensionality Reduction Ensembles",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2019/283800a228/1cMF9VUpFgA",
"parentPublication": {
"id": "proceedings/iv/2019/2838/0",
"title": "2019 23rd International Conference Information Visualisation (IV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/02/09216630",
"title": "A Visual Analytics Framework for Reviewing Multivariate Time-Series Data with Dimensionality Reduction",
"doi": null,
"abstractUrl": "/journal/tg/2021/02/09216630/1nJsMUFa6f6",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vis/2020/8014/0/801400a111",
"title": "DRUID<inf>JS</inf> — A JavaScript Library for Dimensionality Reduction",
"doi": null,
"abstractUrl": "/proceedings-article/vis/2020/801400a111/1qRNP6eEG52",
"parentPublication": {
"id": "proceedings/vis/2020/8014/0",
"title": "2020 IEEE Visualization Conference (VIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tk/2023/03/09543512",
"title": "Unsupervised Dimensionality Reduction Based on Fusing Multiple Clustering Results",
"doi": null,
"abstractUrl": "/journal/tk/2023/03/09543512/1x4UGJ56Qpy",
"parentPublication": {
"id": "trans/tk",
"title": "IEEE Transactions on Knowledge & Data Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/01/09552226",
"title": "Revisiting Dimensionality Reduction Techniques for Visual Cluster Analysis: An Empirical Study",
"doi": null,
"abstractUrl": "/journal/tg/2022/01/09552226/1xicaXrIayI",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/01/09555244",
"title": "Interactive Dimensionality Reduction for Comparative Analysis",
"doi": null,
"abstractUrl": "/journal/tg/2022/01/09555244/1xjR1QZtkTS",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "08805421",
"articleId": "1cG4DbgmnNm",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08807255",
"articleId": "1cG6piAXFwQ",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
|
{
"issue": {
"id": "1BzPxbqLS6I",
"title": "April",
"year": "2022",
"issueNum": "04",
"idPrefix": "tk",
"pubType": "journal",
"volume": "34",
"label": "April",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1kmkvJGgz96",
"doi": "10.1109/TKDE.2020.2999504",
"abstract": "Manifold learning-based methods, such as LLE, capture the geometry of the data based on the assumption that the local structure of a manifold is linear. However, these methods may extract an inaccurate local structure when the nonlinearity of the data is obvious. In this paper, we propose a novel dimensionality reduction method with the ability to characterize the locally nonlinear geometry of the data by multilocal linearity. Specifically, we first construct a local area for each data point. And based on the overlapping of local areas, each data point will belong to and be linearly reconstructed from several local areas. Next, the set of linear coefficients used to reconstruct the data point constitutes the multilocal linear pattern (MLLP) which is used to characterize the local geometry of the data. Geometrically, the MLLP of a data point represents the hyperplanes in different directions passing through the current point. And the locally nonlinear surface where the data point is located is approximated by these hyperplanes, which is more accurate to reflect the geometry of the data. Then, MLLP is preserved to the embedding data space, and the dimension-reduced data can be obtained by minimizing the reconstruction errors. Finally, experiment results on various datasets demonstrate the effectiveness of the proposed method.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Manifold learning-based methods, such as LLE, capture the geometry of the data based on the assumption that the local structure of a manifold is linear. However, these methods may extract an inaccurate local structure when the nonlinearity of the data is obvious. In this paper, we propose a novel dimensionality reduction method with the ability to characterize the locally nonlinear geometry of the data by multilocal linearity. Specifically, we first construct a local area for each data point. And based on the overlapping of local areas, each data point will belong to and be linearly reconstructed from several local areas. Next, the set of linear coefficients used to reconstruct the data point constitutes the multilocal linear pattern (MLLP) which is used to characterize the local geometry of the data. Geometrically, the MLLP of a data point represents the hyperplanes in different directions passing through the current point. And the locally nonlinear surface where the data point is located is approximated by these hyperplanes, which is more accurate to reflect the geometry of the data. Then, MLLP is preserved to the embedding data space, and the dimension-reduced data can be obtained by minimizing the reconstruction errors. Finally, experiment results on various datasets demonstrate the effectiveness of the proposed method.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Manifold learning-based methods, such as LLE, capture the geometry of the data based on the assumption that the local structure of a manifold is linear. However, these methods may extract an inaccurate local structure when the nonlinearity of the data is obvious. In this paper, we propose a novel dimensionality reduction method with the ability to characterize the locally nonlinear geometry of the data by multilocal linearity. Specifically, we first construct a local area for each data point. And based on the overlapping of local areas, each data point will belong to and be linearly reconstructed from several local areas. Next, the set of linear coefficients used to reconstruct the data point constitutes the multilocal linear pattern (MLLP) which is used to characterize the local geometry of the data. Geometrically, the MLLP of a data point represents the hyperplanes in different directions passing through the current point. And the locally nonlinear surface where the data point is located is approximated by these hyperplanes, which is more accurate to reflect the geometry of the data. Then, MLLP is preserved to the embedding data space, and the dimension-reduced data can be obtained by minimizing the reconstruction errors. Finally, experiment results on various datasets demonstrate the effectiveness of the proposed method.",
"title": "Dimensionality Reduction Based on Multilocal Linear Pattern Preservation",
"normalizedTitle": "Dimensionality Reduction Based on Multilocal Linear Pattern Preservation",
"fno": "09107495",
"hasPdf": true,
"idPrefix": "tk",
"keywords": [
"Data Preparation",
"Data Reduction",
"Learning Artificial Intelligence",
"Multilocal Linearity",
"Data Point",
"Linear Coefficients",
"Local Geometry",
"Locally Nonlinear Surface",
"Embedding Data Space",
"Dimension Reduced Data",
"Multilocal Linear Pattern Preservation",
"Manifold Learning Based Methods",
"Dimensionality Reduction Method",
"Locally Nonlinear Geometry",
"Hyperplane Representation",
"Geometry",
"Manifolds",
"Feature Extraction",
"Linearity",
"Principal Component Analysis",
"Dimension Reduction",
"Manifold Learning",
"Linear Coefficients",
"Nonlinear Geometry",
"Multilocal Linear Pattern"
],
"authors": [
{
"givenName": "Shujie",
"surname": "Zhang",
"fullName": "Shujie Zhang",
"affiliation": "School of Electronics and Communication Engineering, Sun Yat-Sen University, Guangzhou, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Zhengming",
"surname": "Ma",
"fullName": "Zhengming Ma",
"affiliation": "School of Electronics and Communication Engineering, Sun Yat-Sen University, Guangzhou, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Guokai",
"surname": "Zhang",
"fullName": "Guokai Zhang",
"affiliation": "School of Electronics and Communication Engineering, Sun Yat-Sen University, Guangzhou, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Weichao",
"surname": "Gan",
"fullName": "Weichao Gan",
"affiliation": "School of Electronics and Communication Engineering, Sun Yat-Sen University, Guangzhou, China",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "04",
"pubDate": "2022-04-01 00:00:00",
"pubType": "trans",
"pages": "1696-1709",
"year": "2022",
"issn": "1041-4347",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/icme/2013/0015/0/06607550",
"title": "Nonlinear dimensionality reduction approaches applied to music and textural sounds",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2013/06607550/12OmNrAdsvq",
"parentPublication": {
"id": "proceedings/icme/2013/0015/0",
"title": "2013 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2008/2174/0/04761254",
"title": "Local Regularized Least-Square Dimensionality Reduction",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2008/04761254/12OmNviZlgj",
"parentPublication": {
"id": "proceedings/icpr/2008/2174/0",
"title": "ICPR 2008 19th International Conference on Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2016/07/07293680",
"title": "Nonlinear Dimensionality Reduction via Path-Based Isometric Mapping",
"doi": null,
"abstractUrl": "/journal/tp/2016/07/07293680/13rRUwfZC1K",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2004/04/v0459",
"title": "Robust Linear Dimensionality Reduction",
"doi": null,
"abstractUrl": "/journal/tg/2004/04/v0459/13rRUxBJhFl",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2007/01/04016549",
"title": "Graph Embedding and Extensions: A General Framework for Dimensionality Reduction",
"doi": null,
"abstractUrl": "/journal/tp/2007/01/04016549/13rRUxEhFtN",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2019/01/08226989",
"title": "Probabilistic Dimensionality Reduction via Structure Learning",
"doi": null,
"abstractUrl": "/journal/tp/2019/01/08226989/17D45XDIXQx",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2020/11/08723614",
"title": "MOSES: A Streaming Algorithm for Linear Dimensionality Reduction",
"doi": null,
"abstractUrl": "/journal/tp/2020/11/08723614/1aqKRzAZSow",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icnisc/2017/1618/0/161800a126",
"title": "Local Linear Dimensionality Reduction Algorithm Based on Nonlinear Manifolds Decomposition",
"doi": null,
"abstractUrl": "/proceedings-article/icnisc/2017/161800a126/1dUn9oRfDAk",
"parentPublication": {
"id": "proceedings/icnisc/2017/1618/0",
"title": "2017 International Conference on Network and Information Systems for Computers (ICNISC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tk/2023/03/09543512",
"title": "Unsupervised Dimensionality Reduction Based on Fusing Multiple Clustering Results",
"doi": null,
"abstractUrl": "/journal/tk/2023/03/09543512/1x4UGJ56Qpy",
"parentPublication": {
"id": "trans/tk",
"title": "IEEE Transactions on Knowledge & Data Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/01/09552226",
"title": "Revisiting Dimensionality Reduction Techniques for Visual Cluster Analysis: An Empirical Study",
"doi": null,
"abstractUrl": "/journal/tg/2022/01/09552226/1xicaXrIayI",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09110764",
"articleId": "1kuDg24PMm4",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09099996",
"articleId": "1k93iJckauk",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
|
{
"issue": {
"id": "1zBamVZHyne",
"title": "Jan.",
"year": "2022",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": "28",
"label": "Jan.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1xjR1QZtkTS",
"doi": "10.1109/TVCG.2021.3114807",
"abstract": "Finding the similarities and differences between groups of datasets is a fundamental analysis task. For high-dimensional data, dimensionality reduction (DR) methods are often used to find the characteristics of each group. However, existing DR methods provide limited capability and flexibility for such comparative analysis as each method is designed only for a narrow analysis target, such as identifying factors that most differentiate groups. This paper presents an interactive DR framework where we integrate our new DR method, called ULCA (unified linear comparative analysis), with an interactive visual interface. ULCA unifies two DR schemes, discriminant analysis and contrastive learning, to support various comparative analysis tasks. To provide flexibility for comparative analysis, we develop an optimization algorithm that enables analysts to interactively refine ULCA results. Additionally, the interactive visualization interface facilitates interpretation and refinement of the ULCA results. We evaluate ULCA and the optimization algorithm to show their efficiency as well as present multiple case studies using real-world datasets to demonstrate the usefulness of this framework.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Finding the similarities and differences between groups of datasets is a fundamental analysis task. For high-dimensional data, dimensionality reduction (DR) methods are often used to find the characteristics of each group. However, existing DR methods provide limited capability and flexibility for such comparative analysis as each method is designed only for a narrow analysis target, such as identifying factors that most differentiate groups. This paper presents an interactive DR framework where we integrate our new DR method, called ULCA (unified linear comparative analysis), with an interactive visual interface. ULCA unifies two DR schemes, discriminant analysis and contrastive learning, to support various comparative analysis tasks. To provide flexibility for comparative analysis, we develop an optimization algorithm that enables analysts to interactively refine ULCA results. Additionally, the interactive visualization interface facilitates interpretation and refinement of the ULCA results. We evaluate ULCA and the optimization algorithm to show their efficiency as well as present multiple case studies using real-world datasets to demonstrate the usefulness of this framework.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Finding the similarities and differences between groups of datasets is a fundamental analysis task. For high-dimensional data, dimensionality reduction (DR) methods are often used to find the characteristics of each group. However, existing DR methods provide limited capability and flexibility for such comparative analysis as each method is designed only for a narrow analysis target, such as identifying factors that most differentiate groups. This paper presents an interactive DR framework where we integrate our new DR method, called ULCA (unified linear comparative analysis), with an interactive visual interface. ULCA unifies two DR schemes, discriminant analysis and contrastive learning, to support various comparative analysis tasks. To provide flexibility for comparative analysis, we develop an optimization algorithm that enables analysts to interactively refine ULCA results. Additionally, the interactive visualization interface facilitates interpretation and refinement of the ULCA results. We evaluate ULCA and the optimization algorithm to show their efficiency as well as present multiple case studies using real-world datasets to demonstrate the usefulness of this framework.",
"title": "Interactive Dimensionality Reduction for Comparative Analysis",
"normalizedTitle": "Interactive Dimensionality Reduction for Comparative Analysis",
"fno": "09555244",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Principal Component Analysis",
"Visualization",
"Optimization",
"Task Analysis",
"Dimensionality Reduction",
"Tools",
"Libraries",
"Dimensionality Reduction",
"Discriminant Analysis",
"Contrastive Learning",
"Comparative Analysis",
"Interpretability",
"Visual Analytics"
],
"authors": [
{
"givenName": "Takanori",
"surname": "Fujiwara",
"fullName": "Takanori Fujiwara",
"affiliation": "University of California, Davis, United States",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Xinhai",
"surname": "Wei",
"fullName": "Xinhai Wei",
"affiliation": "University of Waterloo, Canada",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jian",
"surname": "Zhao",
"fullName": "Jian Zhao",
"affiliation": "University of Waterloo, Canada",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Kwan-Liu",
"surname": "Ma",
"fullName": "Kwan-Liu Ma",
"affiliation": "University of California, Davis, United States",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-01-01 00:00:00",
"pubType": "trans",
"pages": "758-768",
"year": "2022",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/icpr/2018/3788/0/08545659",
"title": "Generalized Fisher Discriminant Analysis as A Dimensionality Reduction Technique",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2018/08545659/17D45WK5Apy",
"parentPublication": {
"id": "proceedings/icpr/2018/3788/0",
"title": "2018 24th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2018/3788/0/08546198",
"title": "Maximum Gradient Dimensionality Reduction",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2018/08546198/17D45XzbnLm",
"parentPublication": {
"id": "proceedings/icpr/2018/3788/0",
"title": "2018 24th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/01/09904480",
"title": "Interactive Visual Cluster Analysis by Contrastive Dimensionality Reduction",
"doi": null,
"abstractUrl": "/journal/tg/2023/01/09904480/1H0GkV5P1qo",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/01/08805461",
"title": "Supporting Analysis of Dimensionality Reduction Results with Contrastive Learning",
"doi": null,
"abstractUrl": "/journal/tg/2020/01/08805461/1cG4ulCK5S8",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/01/08809834",
"title": "An Incremental Dimensionality Reduction Method for Visualizing Streaming Multidimensional Data",
"doi": null,
"abstractUrl": "/journal/tg/2020/01/08809834/1cHEiLzaKw8",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2019/2838/0/283800a228",
"title": "User-guided Dimensionality Reduction Ensembles",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2019/283800a228/1cMF9VUpFgA",
"parentPublication": {
"id": "proceedings/iv/2019/2838/0",
"title": "2019 23rd International Conference Information Visualisation (IV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/02/09216630",
"title": "A Visual Analytics Framework for Reviewing Multivariate Time-Series Data with Dimensionality Reduction",
"doi": null,
"abstractUrl": "/journal/tg/2021/02/09216630/1nJsMUFa6f6",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vis/2020/8014/0/801400a111",
"title": "DRUID<inf>JS</inf> — A JavaScript Library for Dimensionality Reduction",
"doi": null,
"abstractUrl": "/proceedings-article/vis/2020/801400a111/1qRNP6eEG52",
"parentPublication": {
"id": "proceedings/vis/2020/8014/0",
"title": "2020 IEEE Visualization Conference (VIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/01/09552226",
"title": "Revisiting Dimensionality Reduction Techniques for Visual Cluster Analysis: An Empirical Study",
"doi": null,
"abstractUrl": "/journal/tg/2022/01/09552226/1xicaXrIayI",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vis/2021/3335/0/333500a026",
"title": "Semantic Explanation of Interactive Dimensionality Reduction",
"doi": null,
"abstractUrl": "/proceedings-article/vis/2021/333500a026/1yXuftZECbe",
"parentPublication": {
"id": "proceedings/vis/2021/3335/0",
"title": "2021 IEEE Visualization Conference (VIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09555620",
"articleId": "1xlvYtlxuKY",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09552237",
"articleId": "1xic2ZoShgI",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1zJiHpIFACA",
"name": "ttg202201-09555244s1-supp1-3114807.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg202201-09555244s1-supp1-3114807.mp4",
"extension": "mp4",
"size": "65.3 MB",
"__typename": "WebExtraType"
},
{
"id": "1zJiH4K3yCY",
"name": "ttg202201-09555244s1-supp2-3114807.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg202201-09555244s1-supp2-3114807.pdf",
"extension": "pdf",
"size": "1.92 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNy3iFnV",
"title": "July/August",
"year": "2004",
"issueNum": "04",
"idPrefix": "tg",
"pubType": "journal",
"volume": "10",
"label": "July/August",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUxBJhFl",
"doi": "10.1109/TVCG.2004.17",
"abstract": "Abstract—We present a novel family of data-driven linear transformations, aimed at finding low-dimensional embeddings of multivariate data, in a way that optimally preserves the structure of the data. The well-studied PCA and Fisher's LDA are shown to be special members in this family of transformations, and we demonstrate how to generalize these two methods such as to enhance their performance. Furthermore, our technique is the only one, to the best of our knowledge, that reflects in the resulting embedding both the data coordinates and pairwise relationships between the data elements. Even more so, when information on the clustering (labeling) decomposition of the data is known, this information can also be integrated in the linear transformation, resulting in embeddings that clearly show the separation between the clusters, as well as their internal structure. All of this makes our technique very flexible and powerful, and lets us cope with kinds of data that other techniques fail to describe properly.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Abstract—We present a novel family of data-driven linear transformations, aimed at finding low-dimensional embeddings of multivariate data, in a way that optimally preserves the structure of the data. The well-studied PCA and Fisher's LDA are shown to be special members in this family of transformations, and we demonstrate how to generalize these two methods such as to enhance their performance. Furthermore, our technique is the only one, to the best of our knowledge, that reflects in the resulting embedding both the data coordinates and pairwise relationships between the data elements. Even more so, when information on the clustering (labeling) decomposition of the data is known, this information can also be integrated in the linear transformation, resulting in embeddings that clearly show the separation between the clusters, as well as their internal structure. All of this makes our technique very flexible and powerful, and lets us cope with kinds of data that other techniques fail to describe properly.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Abstract—We present a novel family of data-driven linear transformations, aimed at finding low-dimensional embeddings of multivariate data, in a way that optimally preserves the structure of the data. The well-studied PCA and Fisher's LDA are shown to be special members in this family of transformations, and we demonstrate how to generalize these two methods such as to enhance their performance. Furthermore, our technique is the only one, to the best of our knowledge, that reflects in the resulting embedding both the data coordinates and pairwise relationships between the data elements. Even more so, when information on the clustering (labeling) decomposition of the data is known, this information can also be integrated in the linear transformation, resulting in embeddings that clearly show the separation between the clusters, as well as their internal structure. All of this makes our technique very flexible and powerful, and lets us cope with kinds of data that other techniques fail to describe properly.",
"title": "Robust Linear Dimensionality Reduction",
"normalizedTitle": "Robust Linear Dimensionality Reduction",
"fno": "v0459",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Dimensionality Reduction",
"Visualization",
"Classification",
"Feature Extraction",
"Projection",
"Linear Transformation",
"Principal Component Analysis",
"Fishers Linear Discriminant Analysis"
],
"authors": [
{
"givenName": "Yehuda",
"surname": "Koren",
"fullName": "Yehuda Koren",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "Liran",
"surname": "Carmel",
"fullName": "Liran Carmel",
"affiliation": null,
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "04",
"pubDate": "2004-07-01 00:00:00",
"pubType": "trans",
"pages": "459-470",
"year": "2004",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/gcis/2009/3571/2/3571b509",
"title": "A New Method for Linear Dimensionality Reduction",
"doi": null,
"abstractUrl": "/proceedings-article/gcis/2009/3571b509/12OmNqFrGrQ",
"parentPublication": {
"id": "proceedings/gcis/2009/3571/2",
"title": "2009 WRI Global Congress on Intelligent Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ictai/2011/4596/0/4596a865",
"title": "Transferable Discriminative Dimensionality Reduction",
"doi": null,
"abstractUrl": "/proceedings-article/ictai/2011/4596a865/12OmNy3iFuF",
"parentPublication": {
"id": "proceedings/ictai/2011/4596/0",
"title": "2011 IEEE 23rd International Conference on Tools with Artificial Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-infovis/2003/2055/0/20550016",
"title": "Visualization of Labeled Data Using Linear Transformations",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-infovis/2003/20550016/12OmNyQGRZi",
"parentPublication": {
"id": "proceedings/ieee-infovis/2003/2055/0",
"title": "Information Visualization, IEEE Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/infvis/2003/8154/0/01249017",
"title": "Visualization of labeled data using linear transformations",
"doi": null,
"abstractUrl": "/proceedings-article/infvis/2003/01249017/12OmNyUWR03",
"parentPublication": {
"id": "proceedings/infvis/2003/8154/0",
"title": "IEEE Symposium on Information Visualization 2003",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icicta/2009/3804/4/3804e275",
"title": "SVM-Induced Dimensionality Reduction and Classification",
"doi": null,
"abstractUrl": "/proceedings-article/icicta/2009/3804e275/12OmNzG4gwg",
"parentPublication": {
"id": "proceedings/icicta/2009/3804/4",
"title": "Intelligent Computation Technology and Automation, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2011/02/ttp2011020338",
"title": "Learning Linear Discriminant Projections for Dimensionality Reduction of Image Descriptors",
"doi": null,
"abstractUrl": "/journal/tp/2011/02/ttp2011020338/13rRUwInvKC",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2008/03/ttp2008030535",
"title": "Dimensionality Reduction of Clustered Data Sets",
"doi": null,
"abstractUrl": "/journal/tp/2008/03/ttp2008030535/13rRUx0geqZ",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tk/2019/04/08369159",
"title": "A New Formulation of Linear Discriminant Analysis for Robust Dimensionality Reduction",
"doi": null,
"abstractUrl": "/journal/tk/2019/04/08369159/13rRUxASuvN",
"parentPublication": {
"id": "trans/tk",
"title": "IEEE Transactions on Knowledge & Data Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2011/09/ttp2011091776",
"title": "Maximal Linear Embedding for Dimensionality Reduction",
"doi": null,
"abstractUrl": "/journal/tp/2011/09/ttp2011091776/13rRUxBrGi5",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2000/06/i0623",
"title": "Fractional-Step Dimensionality Reduction",
"doi": null,
"abstractUrl": "/journal/tp/2000/06/i0623/13rRUxNmPET",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "v0447",
"articleId": "13rRUxOve9B",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "v0471",
"articleId": "13rRUwInuWm",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
|
{
"issue": {
"id": "1J9y2mtpt3a",
"title": "Jan.",
"year": "2023",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": "29",
"label": "Jan.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1GZooOkjYzK",
"doi": "10.1109/TVCG.2022.3209463",
"abstract": "Projection and ranking are frequently used analysis techniques in multi-attribute data exploration. Both families of techniques help analysts with tasks such as identifying similarities between observations and determining ordered subgroups, and have shown good performances in multi-attribute data exploration. However, they often exhibit problems such as distorted projection layouts, obscure semantic interpretations, and non-intuitive effects produced by selecting a subset of (weighted) attributes. Moreover, few studies have attempted to combine projection and ranking into the same exploration space to complement each other's strengths and weaknesses. For this reason, we propose <italic>RankAxis</italic>, a visual analytics system that systematically combines projection and ranking to facilitate the mutual interpretation of these two techniques and jointly support multi-attribute data exploration. A real-world case study, expert feedback, and a user study demonstrate the efficacy of <italic>RankAxis</italic>.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Projection and ranking are frequently used analysis techniques in multi-attribute data exploration. Both families of techniques help analysts with tasks such as identifying similarities between observations and determining ordered subgroups, and have shown good performances in multi-attribute data exploration. However, they often exhibit problems such as distorted projection layouts, obscure semantic interpretations, and non-intuitive effects produced by selecting a subset of (weighted) attributes. Moreover, few studies have attempted to combine projection and ranking into the same exploration space to complement each other's strengths and weaknesses. For this reason, we propose <italic>RankAxis</italic>, a visual analytics system that systematically combines projection and ranking to facilitate the mutual interpretation of these two techniques and jointly support multi-attribute data exploration. A real-world case study, expert feedback, and a user study demonstrate the efficacy of <italic>RankAxis</italic>.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Projection and ranking are frequently used analysis techniques in multi-attribute data exploration. Both families of techniques help analysts with tasks such as identifying similarities between observations and determining ordered subgroups, and have shown good performances in multi-attribute data exploration. However, they often exhibit problems such as distorted projection layouts, obscure semantic interpretations, and non-intuitive effects produced by selecting a subset of (weighted) attributes. Moreover, few studies have attempted to combine projection and ranking into the same exploration space to complement each other's strengths and weaknesses. For this reason, we propose RankAxis, a visual analytics system that systematically combines projection and ranking to facilitate the mutual interpretation of these two techniques and jointly support multi-attribute data exploration. A real-world case study, expert feedback, and a user study demonstrate the efficacy of RankAxis.",
"title": "RankAxis: Towards a Systematic Combination of Projection and Ranking in Multi-Attribute Data Exploration",
"normalizedTitle": "RankAxis: Towards a Systematic Combination of Projection and Ranking in Multi-Attribute Data Exploration",
"fno": "09903343",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Data Analysis",
"Data Visualisation",
"Multiattribute Data Exploration",
"Projection",
"Rank Axis",
"Ranking",
"Systematic Combination",
"Visual Analytics System",
"Semantics",
"Layout",
"Data Visualization",
"Space Exploration",
"Dimensionality Reduction",
"Visual Analytics",
"Task Analysis",
"Ranking",
"Projection",
"Multi Attribute Data Exploration"
],
"authors": [
{
"givenName": "Qiangqiang",
"surname": "Liu",
"fullName": "Qiangqiang Liu",
"affiliation": "ShanghaiTech, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Yukun",
"surname": "Ren",
"fullName": "Yukun Ren",
"affiliation": "Corporate Development Group, Tencent, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Zhihua",
"surname": "Zhu",
"fullName": "Zhihua Zhu",
"affiliation": "Corporate Development Group, Tencent, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Dai",
"surname": "Li",
"fullName": "Dai Li",
"affiliation": "Corporate Development Group, Tencent, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Xiaojuan",
"surname": "Ma",
"fullName": "Xiaojuan Ma",
"affiliation": "The Hong Kong University of Science and Technology, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Quan",
"surname": "Li",
"fullName": "Quan Li",
"affiliation": "School of Information Science and Technology, ShanghaiTech University, China",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2023-01-01 00:00:00",
"pubType": "trans",
"pages": "701-711",
"year": "2023",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/icpr/2010/4109/0/4109a380",
"title": "Dimensionality Reduction for Distributed Vision Systems Using Random Projection",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2010/4109a380/12OmNC8uRhA",
"parentPublication": {
"id": "proceedings/icpr/2010/4109/0",
"title": "Pattern Recognition, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2015/8391/0/8391b071",
"title": "Attribute-Graph: A Graph Based Approach to Image Ranking",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2015/8391b071/12OmNCxtyO4",
"parentPublication": {
"id": "proceedings/iccv/2015/8391/0",
"title": "2015 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdmw/2014/4274/0/4274a175",
"title": "Two-Phase Attribute Ordering for Unsupervised Ranking of Multi-attribute Objects",
"doi": null,
"abstractUrl": "/proceedings-article/icdmw/2014/4274a175/12OmNyTfg8F",
"parentPublication": {
"id": "proceedings/icdmw/2014/4274/0",
"title": "2014 IEEE International Conference on Data Mining Workshop (ICDMW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tk/2006/12/k1600",
"title": "Multi-Output Regularized Feature Projection",
"doi": null,
"abstractUrl": "/journal/tk/2006/12/k1600/13rRUILtJzN",
"parentPublication": {
"id": "trans/tk",
"title": "IEEE Transactions on Knowledge & Data Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/01/08019863",
"title": "Podium: Ranking Data Using Mixed-Initiative Visual Analytics",
"doi": null,
"abstractUrl": "/journal/tg/2018/01/08019863/13rRUwwaKtd",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2011/05/ttg2011050584",
"title": "Automated Analytical Methods to Support Visual Exploration of High-Dimensional Data",
"doi": null,
"abstractUrl": "/journal/tg/2011/05/ttg2011050584/13rRUxZ0o1v",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vis/2022/8812/0/881200a130",
"title": "Volume Puzzle: visual analysis of segmented volume data with multivariate attributes",
"doi": null,
"abstractUrl": "/proceedings-article/vis/2022/881200a130/1J6h8CqzyKc",
"parentPublication": {
"id": "proceedings/vis/2022/8812/0",
"title": "2022 IEEE Visualization and Visual Analytics (VIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vis/2022/8812/0/881200a065",
"title": "FairFuse: Interactive Visual Support for Fair Consensus Ranking",
"doi": null,
"abstractUrl": "/proceedings-article/vis/2022/881200a065/1J6haP1jUt2",
"parentPublication": {
"id": "proceedings/vis/2022/8812/0",
"title": "2022 IEEE Visualization and Visual Analytics (VIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/10068257",
"title": "Interactive Subspace Cluster Analysis Guided by Semantic Attribute Associations",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/10068257/1LtR7CeyeHe",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/02/09222353",
"title": "Implicit Multidimensional Projection of Local Subspaces",
"doi": null,
"abstractUrl": "/journal/tg/2021/02/09222353/1nTqcxPMEIE",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09906971",
"articleId": "1H5EWMQX9ZK",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09904437",
"articleId": "1H1gnemxdqE",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1J9z94b8Ta8",
"name": "ttg202301-09903343s1-supp1-3209463.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg202301-09903343s1-supp1-3209463.mp4",
"extension": "mp4",
"size": "127 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1HMOX2J2VMY",
"doi": "10.1109/TVCG.2022.3216919",
"abstract": "Exploring high-dimensional data is a common task in many scientific disciplines. To address this task, two-dimensional embeddings, such as tSNE and UMAP, are widely used. While these determine the 2D position of data items, effectively encoding the first two dimensions, suitable visual encodings can be employed to communicate higher-dimensional features. To investigate such encodings, we have evaluated two commonly used glyph types, namely flower glyphs and star glyphs. To evaluate their capabilities for communicating higher-dimensional features in two-dimensional embeddings, we ran a large set of crowd-sourced user studies using real-world data obtained from data.gov. During these studies, participants completed a broad set of relevant tasks derived from related research. This paper describes the evaluated glyph designs, details our tasks, and the quantitative study setup before discussing the results. Finally, we will present insights and provide guidance on the choice of glyph encodings when exploring high-dimensional data.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Exploring high-dimensional data is a common task in many scientific disciplines. To address this task, two-dimensional embeddings, such as tSNE and UMAP, are widely used. While these determine the 2D position of data items, effectively encoding the first two dimensions, suitable visual encodings can be employed to communicate higher-dimensional features. To investigate such encodings, we have evaluated two commonly used glyph types, namely flower glyphs and star glyphs. To evaluate their capabilities for communicating higher-dimensional features in two-dimensional embeddings, we ran a large set of crowd-sourced user studies using real-world data obtained from data.gov. During these studies, participants completed a broad set of relevant tasks derived from related research. This paper describes the evaluated glyph designs, details our tasks, and the quantitative study setup before discussing the results. Finally, we will present insights and provide guidance on the choice of glyph encodings when exploring high-dimensional data.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Exploring high-dimensional data is a common task in many scientific disciplines. To address this task, two-dimensional embeddings, such as tSNE and UMAP, are widely used. While these determine the 2D position of data items, effectively encoding the first two dimensions, suitable visual encodings can be employed to communicate higher-dimensional features. To investigate such encodings, we have evaluated two commonly used glyph types, namely flower glyphs and star glyphs. To evaluate their capabilities for communicating higher-dimensional features in two-dimensional embeddings, we ran a large set of crowd-sourced user studies using real-world data obtained from data.gov. During these studies, participants completed a broad set of relevant tasks derived from related research. This paper describes the evaluated glyph designs, details our tasks, and the quantitative study setup before discussing the results. Finally, we will present insights and provide guidance on the choice of glyph encodings when exploring high-dimensional data.",
"title": "Out of the Plane: Flower Vs. Star Glyphs to Support High-Dimensional Exploration in Two-Dimensional Embeddings",
"normalizedTitle": "Out of the Plane: Flower Vs. Star Glyphs to Support High-Dimensional Exploration in Two-Dimensional Embeddings",
"fno": "09930144",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Task Analysis",
"Stars",
"Visualization",
"Encoding",
"Data Visualization",
"Dimensionality Reduction",
"Image Color Analysis",
"Glyph Visualization",
"High Dimensional Data Visualization",
"Two Dimensional Embeddings"
],
"authors": [
{
"givenName": "Christian",
"surname": "van Onzenoodt",
"fullName": "Christian van Onzenoodt",
"affiliation": "Visual Computing Group, Ulm University, Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Pere-Pau",
"surname": "Vázquez",
"fullName": "Pere-Pau Vázquez",
"affiliation": "ViRVIG Group, UPC Barcelona, Barcelona, Barcelona",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Timo",
"surname": "Ropinski",
"fullName": "Timo Ropinski",
"affiliation": "Visual Computing Group, Ulm University, Germany",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": true,
"isOpenAccess": true,
"issueNum": "01",
"pubDate": "2022-10-01 00:00:00",
"pubType": "trans",
"pages": "1-15",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/ieee-infovis/2005/2790/0/27900019",
"title": "Multivariate Glyphs for Multi-Object Clusters",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-infovis/2005/27900019/12OmNxE2n28",
"parentPublication": {
"id": "proceedings/ieee-infovis/2005/2790/0",
"title": "Information Visualization, IEEE Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/07/07445239",
"title": "A Systematic Review of Experimental Studies on Data Glyphs",
"doi": null,
"abstractUrl": "/journal/tg/2017/07/07445239/13rRUNvgz4m",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/12/06875973",
"title": "The Influence of Contour on Similarity Perception of Star Glyphs",
"doi": null,
"abstractUrl": "/journal/tg/2014/12/06875973/13rRUwhHcQV",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2018/7202/0/720200a058",
"title": "Visualizing Multidimensional Data in Treemaps with Adaptive Glyphs",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2018/720200a058/17D45XeKgvR",
"parentPublication": {
"id": "proceedings/iv/2018/7202/0",
"title": "2018 22nd International Conference Information Visualisation (IV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2019/2838/0/283800a157",
"title": "Evaluation of Effectiveness of Glyphs to Enhance ChronoView",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2019/283800a157/1cMF9mvWMFO",
"parentPublication": {
"id": "proceedings/iv/2019/2838/0",
"title": "2019 23rd International Conference Information Visualisation (IV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/04/08967136",
"title": "Glyphboard: Visual Exploration of High-Dimensional Data Combining Glyphs with Dimensionality Reduction",
"doi": null,
"abstractUrl": "/journal/tg/2020/04/08967136/1gPjxXgWQM0",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/09/09067088",
"title": "AgentVis: Visual Analysis of Agent Behavior With Hierarchical Glyphs",
"doi": null,
"abstractUrl": "/journal/tg/2021/09/09067088/1j1lyTz50k0",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tk/2022/05/09128033",
"title": "Interpretation of Structural Preservation in Low-Dimensional Embeddings",
"doi": null,
"abstractUrl": "/journal/tk/2022/05/09128033/1l3u8JV5SP6",
"parentPublication": {
"id": "trans/tk",
"title": "IEEE Transactions on Knowledge & Data Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/01/09552929",
"title": "Attribute-based Explanation of Non-Linear Embeddings of High-Dimensional Data",
"doi": null,
"abstractUrl": "/journal/tg/2022/01/09552929/1xic3zJwVwI",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/01/09557223",
"title": "GlyphCreator: Towards Example-based Automatic Generation of Circular Glyphs",
"doi": null,
"abstractUrl": "/journal/tg/2022/01/09557223/1xlvZajdjmo",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09928218",
"articleId": "1HJuJYF342Y",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09930626",
"articleId": "1HMOYkaK9Ww",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1HOtlkinUTC",
"name": "ttg555501-09930144s1-access-3216919-mm.zip",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09930144s1-access-3216919-mm.zip",
"extension": "zip",
"size": "27.6 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
}
|
{
"issue": {
"id": "1CdACzpvTPi",
"title": "May",
"year": "2022",
"issueNum": "05",
"idPrefix": "tk",
"pubType": "journal",
"volume": "34",
"label": "May",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1l3u8JV5SP6",
"doi": "10.1109/TKDE.2020.3005878",
"abstract": "Despite being commonly used in big-data analytics; the outcome of dimensionality reduction remains a black-box to most of its users. Understanding the quality of a low-dimensional embedding is important as not only it enables trust in the transformed data, but it can also help to select the most appropriate dimensionality reduction algorithm in a given scenario. As existing research primarily focuses on the visual exploration of embeddings, there is still a need for enhancing interpretability of such algorithms. To bridge this gap, we propose two novel interactive explanation techniques for low-dimensional embeddings obtained from <italic>any</italic> dimensionality reduction algorithm. The first technique LAPS produces a local approximation of the neighborhood structure to generate interpretable explanations on the preserved locality for a single instance. The second method GAPS explains the retained global structure of a high-dimensional dataset in its embedding, by combining non-redundant local-approximations from a coarse discretization of the projection space. We demonstrate the applicability of the proposed techniques using 16 real-life tabular, text, image, and audio datasets. Our extensive experimental evaluation shows the utility of the proposed techniques in interpreting the quality of low-dimensional embeddings, as well as with selecting the most suitable dimensionality reduction algorithm for any given dataset.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Despite being commonly used in big-data analytics; the outcome of dimensionality reduction remains a black-box to most of its users. Understanding the quality of a low-dimensional embedding is important as not only it enables trust in the transformed data, but it can also help to select the most appropriate dimensionality reduction algorithm in a given scenario. As existing research primarily focuses on the visual exploration of embeddings, there is still a need for enhancing interpretability of such algorithms. To bridge this gap, we propose two novel interactive explanation techniques for low-dimensional embeddings obtained from <italic>any</italic> dimensionality reduction algorithm. The first technique LAPS produces a local approximation of the neighborhood structure to generate interpretable explanations on the preserved locality for a single instance. The second method GAPS explains the retained global structure of a high-dimensional dataset in its embedding, by combining non-redundant local-approximations from a coarse discretization of the projection space. We demonstrate the applicability of the proposed techniques using 16 real-life tabular, text, image, and audio datasets. Our extensive experimental evaluation shows the utility of the proposed techniques in interpreting the quality of low-dimensional embeddings, as well as with selecting the most suitable dimensionality reduction algorithm for any given dataset.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Despite being commonly used in big-data analytics; the outcome of dimensionality reduction remains a black-box to most of its users. Understanding the quality of a low-dimensional embedding is important as not only it enables trust in the transformed data, but it can also help to select the most appropriate dimensionality reduction algorithm in a given scenario. As existing research primarily focuses on the visual exploration of embeddings, there is still a need for enhancing interpretability of such algorithms. To bridge this gap, we propose two novel interactive explanation techniques for low-dimensional embeddings obtained from any dimensionality reduction algorithm. The first technique LAPS produces a local approximation of the neighborhood structure to generate interpretable explanations on the preserved locality for a single instance. The second method GAPS explains the retained global structure of a high-dimensional dataset in its embedding, by combining non-redundant local-approximations from a coarse discretization of the projection space. We demonstrate the applicability of the proposed techniques using 16 real-life tabular, text, image, and audio datasets. Our extensive experimental evaluation shows the utility of the proposed techniques in interpreting the quality of low-dimensional embeddings, as well as with selecting the most suitable dimensionality reduction algorithm for any given dataset.",
"title": "Interpretation of Structural Preservation in Low-Dimensional Embeddings",
"normalizedTitle": "Interpretation of Structural Preservation in Low-Dimensional Embeddings",
"fno": "09128033",
"hasPdf": true,
"idPrefix": "tk",
"keywords": [
"Big Data",
"Data Analysis",
"Data Mining",
"Data Visualisation",
"Feature Extraction",
"Graph Theory",
"Learning Artificial Intelligence",
"Low Dimensional Embedding",
"Appropriate Dimensionality Reduction Algorithm",
"High Dimensional Dataset",
"Suitable Dimensionality Reduction Algorithm",
"Dimensionality Reduction",
"Visualization",
"Approximation Algorithms",
"Optimization",
"Manifolds",
"Bridges",
"Data Visualization",
"Interactive Data Exploration And Discovery",
"Algorithms For Data And Knowledge Management",
"Data And Knowledge Visualization"
],
"authors": [
{
"givenName": "Aindrila",
"surname": "Ghosh",
"fullName": "Aindrila Ghosh",
"affiliation": "Department of Electrical and Computer Engineering, University of Alberta, Edmonton, Canada",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Mona",
"surname": "Nashaat",
"fullName": "Mona Nashaat",
"affiliation": "Department of Electrical and Computer Engineering, University of Alberta, Edmonton, Canada",
"__typename": "ArticleAuthorType"
},
{
"givenName": "James",
"surname": "Miller",
"fullName": "James Miller",
"affiliation": "Department of Electrical and Computer Engineering, University of Alberta, Edmonton, Canada",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Shaikh",
"surname": "Quader",
"fullName": "Shaikh Quader",
"affiliation": "IBM Toronto Software Lab, Toronto, Canada",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "05",
"pubDate": "2022-05-01 00:00:00",
"pubType": "trans",
"pages": "2227-2240",
"year": "2022",
"issn": "1041-4347",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/pacificvis/2022/2335/0/233500a011",
"title": "Incorporating Texture Information into Dimensionality Reduction for High-Dimensional Images",
"doi": null,
"abstractUrl": "/proceedings-article/pacificvis/2022/233500a011/1E2wiOFBEbe",
"parentPublication": {
"id": "proceedings/pacificvis/2022/2335/0",
"title": "2022 IEEE 15th Pacific Visualization Symposium (PacificVis)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09930144",
"title": "Out of the Plane: Flower Vs. Star Glyphs to Support High-Dimensional Exploration in Two-Dimensional Embeddings",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09930144/1HMOX2J2VMY",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bdcat/2022/6090/0/609000a259",
"title": "Focused Stochastic Neighbor Embedding for Better Preserving Points of Interest",
"doi": null,
"abstractUrl": "/proceedings-article/bdcat/2022/609000a259/1Lu4dcxRUNq",
"parentPublication": {
"id": "proceedings/bdcat/2022/6090/0",
"title": "2022 IEEE/ACM International Conference on Big Data Computing, Applications and Technologies (BDCAT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/08/09301222",
"title": "<italic>embComp</italic>: Visual Interactive Comparison of Vector Embeddings",
"doi": null,
"abstractUrl": "/journal/tg/2022/08/09301222/1pK0Opgn59m",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/06/09349198",
"title": "DimLift: Interactive Hierarchical Data Exploration Through Dimensional Bundling",
"doi": null,
"abstractUrl": "/journal/tg/2021/06/09349198/1qYmbJluuBi",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/big-data/2020/6251/0/09377769",
"title": "Towards Tabular Embeddings, Training the Relational Models",
"doi": null,
"abstractUrl": "/proceedings-article/big-data/2020/09377769/1s64KLHTXHi",
"parentPublication": {
"id": "proceedings/big-data/2020/6251/0",
"title": "2020 IEEE International Conference on Big Data (Big Data)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2021/8808/0/09412900",
"title": "q-SNE: Visualizing Data using q-Gaussian Distributed Stochastic Neighbor Embedding",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2021/09412900/1tmhROYroSA",
"parentPublication": {
"id": "proceedings/icpr/2021/8808/0",
"title": "2020 25th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/bd/2022/06/09428524",
"title": "EvoSets: Tracking the Sensitivity of Dimensionality Reduction Results Across Subspaces",
"doi": null,
"abstractUrl": "/journal/bd/2022/06/09428524/1twasK9vXtS",
"parentPublication": {
"id": "trans/bd",
"title": "IEEE Transactions on Big Data",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tk/2023/03/09543512",
"title": "Unsupervised Dimensionality Reduction Based on Fusing Multiple Clustering Results",
"doi": null,
"abstractUrl": "/journal/tk/2023/03/09543512/1x4UGJ56Qpy",
"parentPublication": {
"id": "trans/tk",
"title": "IEEE Transactions on Knowledge & Data Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/01/09552929",
"title": "Attribute-based Explanation of Non-Linear Embeddings of High-Dimensional Data",
"doi": null,
"abstractUrl": "/journal/tg/2022/01/09552929/1xic3zJwVwI",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09123592",
"articleId": "1kTwFMTjDKE",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09146768",
"articleId": "1lHjLgfR8OY",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1CdAGaHiuBO",
"name": "ttk202205-09128033s1-supp1-3005878.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttk202205-09128033s1-supp1-3005878.pdf",
"extension": "pdf",
"size": "1.52 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
}
|
{
"issue": {
"id": "1ECXHMu0OWc",
"title": "Aug.",
"year": "2022",
"issueNum": "08",
"idPrefix": "tg",
"pubType": "journal",
"volume": "28",
"label": "Aug.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1pK0Opgn59m",
"doi": "10.1109/TVCG.2020.3045918",
"abstract": "This article introduces <italic>embComp</italic>, a novel approach for comparing two embeddings that capture the similarity between objects, such as word and document embeddings. We survey scenarios where comparing these embedding spaces is useful. From those scenarios, we derive common tasks, introduce visual analysis methods that support these tasks, and combine them into a comprehensive system. One of <italic>embComp</italic>’s central features are overview visualizations that are based on metrics for measuring differences in the local structure around objects. Summarizing these local metrics over the embeddings provides global overviews of similarities and differences. Detail views allow comparison of the local structure around selected objects and relating this local information to the global views. Integrating and connecting all of these components, <italic>embComp</italic> supports a range of analysis workflows that help understand similarities and differences between embedding spaces. We assess our approach by applying it in several use cases, including understanding corpora differences via word vector embeddings, and understanding algorithmic differences in generating embeddings.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This article introduces <italic>embComp</italic>, a novel approach for comparing two embeddings that capture the similarity between objects, such as word and document embeddings. We survey scenarios where comparing these embedding spaces is useful. From those scenarios, we derive common tasks, introduce visual analysis methods that support these tasks, and combine them into a comprehensive system. One of <italic>embComp</italic>’s central features are overview visualizations that are based on metrics for measuring differences in the local structure around objects. Summarizing these local metrics over the embeddings provides global overviews of similarities and differences. Detail views allow comparison of the local structure around selected objects and relating this local information to the global views. Integrating and connecting all of these components, <italic>embComp</italic> supports a range of analysis workflows that help understand similarities and differences between embedding spaces. We assess our approach by applying it in several use cases, including understanding corpora differences via word vector embeddings, and understanding algorithmic differences in generating embeddings.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This article introduces embComp, a novel approach for comparing two embeddings that capture the similarity between objects, such as word and document embeddings. We survey scenarios where comparing these embedding spaces is useful. From those scenarios, we derive common tasks, introduce visual analysis methods that support these tasks, and combine them into a comprehensive system. One of embComp’s central features are overview visualizations that are based on metrics for measuring differences in the local structure around objects. Summarizing these local metrics over the embeddings provides global overviews of similarities and differences. Detail views allow comparison of the local structure around selected objects and relating this local information to the global views. Integrating and connecting all of these components, embComp supports a range of analysis workflows that help understand similarities and differences between embedding spaces. We assess our approach by applying it in several use cases, including understanding corpora differences via word vector embeddings, and understanding algorithmic differences in generating embeddings.",
"title": "<italic>embComp</italic>: Visual Interactive Comparison of Vector Embeddings",
"normalizedTitle": "embComp: Visual Interactive Comparison of Vector Embeddings",
"fno": "09301222",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Data Visualisation",
"Interactive Systems",
"Text Analysis",
"Visual Interactive Comparison",
"Embedding Spaces",
"Visual Analysis",
"Emb Comp",
"Local Structure",
"Local Metrics",
"Local Information",
"Analysis Workflows",
"Corpora Differences",
"Word Vector Embeddings",
"Measurement",
"Visualization",
"Dimensionality Reduction",
"Task Analysis",
"Two Dimensional Displays",
"Stress",
"Object Recognition",
"Visual Analytics",
"Visual Comparison",
"Machine Learning",
"Vector Embeddings"
],
"authors": [
{
"givenName": "Florian",
"surname": "Heimerl",
"fullName": "Florian Heimerl",
"affiliation": "Department of Computer Science, University of Wisconsin-Madison (UW-Madison), Madison, WI, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Christoph",
"surname": "Kralj",
"fullName": "Christoph Kralj",
"affiliation": "Faculty of Computer Sciences and Data Science, Uni Vienna Sensengasse 6, University of Vienna, Wien, Austria",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Torsten",
"surname": "Möller",
"fullName": "Torsten Möller",
"affiliation": "Faculty of Computer Sciences and Data Science, Uni Vienna Sensengasse 6, University of Vienna, Wien, Austria",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Michael",
"surname": "Gleicher",
"fullName": "Michael Gleicher",
"affiliation": "Department of Computer Science, University of Wisconsin-Madison (UW-Madison), Madison, WI, USA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "08",
"pubDate": "2022-08-01 00:00:00",
"pubType": "trans",
"pages": "2953-2969",
"year": "2022",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "trans/tg/2023/01/09903343",
"title": "RankAxis: Towards a Systematic Combination of Projection and Ranking in Multi-Attribute Data Exploration",
"doi": null,
"abstractUrl": "/journal/tg/2023/01/09903343/1GZooOkjYzK",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600j306",
"title": "VGSE: Visually-Grounded Semantic Embeddings for Zero-Shot Learning",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600j306/1H1j2mN3nuE",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2019/4803/0/480300h424",
"title": "ViCo: Word Embeddings From Visual Co-Occurrences",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2019/480300h424/1hQqhuUL5i8",
"parentPublication": {
"id": "proceedings/iccv/2019/4803/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tk/2022/05/09128033",
"title": "Interpretation of Structural Preservation in Low-Dimensional Embeddings",
"doi": null,
"abstractUrl": "/journal/tk/2022/05/09128033/1l3u8JV5SP6",
"parentPublication": {
"id": "trans/tk",
"title": "IEEE Transactions on Knowledge & Data Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/07/09264232",
"title": "VisExPreS: A Visual Interactive Toolkit for User-Driven Evaluations of Embeddings",
"doi": null,
"abstractUrl": "/journal/tg/2022/07/09264232/1oSTZS811XW",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vis4dh/2020/9153/0/915300a007",
"title": "Bio-inspired Structure Identification in Language Embeddings",
"doi": null,
"abstractUrl": "/proceedings-article/vis4dh/2020/915300a007/1pZ0Xs0EEqk",
"parentPublication": {
"id": "proceedings/vis4dh/2020/9153/0",
"title": "2020 IEEE 5th Workshop on Visualization for the Digital Humanities (VIS4DH)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/big-data/2020/6251/0/09377769",
"title": "Towards Tabular Embeddings, Training the Relational Models",
"doi": null,
"abstractUrl": "/proceedings-article/big-data/2020/09377769/1s64KLHTXHi",
"parentPublication": {
"id": "proceedings/big-data/2020/6251/0",
"title": "2020 IEEE International Conference on Big Data (Big Data)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/bd/2022/06/09428524",
"title": "EvoSets: Tracking the Sensitivity of Dimensionality Reduction Results Across Subspaces",
"doi": null,
"abstractUrl": "/journal/bd/2022/06/09428524/1twasK9vXtS",
"parentPublication": {
"id": "trans/bd",
"title": "IEEE Transactions on Big Data",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/01/09555244",
"title": "Interactive Dimensionality Reduction for Comparative Analysis",
"doi": null,
"abstractUrl": "/journal/tg/2022/01/09555244/1xjR1QZtkTS",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/03/09645173",
"title": "<italic>GUCCI</italic> - Guided Cardiac Cohort Investigation of Blood Flow Data",
"doi": null,
"abstractUrl": "/journal/tg/2023/03/09645173/1zc6CvdsNMc",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09314221",
"articleId": "1q8UaZOmTsc",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09303392",
"articleId": "1pLFQxpKDIY",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
|
{
"issue": {
"id": "1HMOit1lSk8",
"title": "Dec.",
"year": "2022",
"issueNum": "12",
"idPrefix": "tg",
"pubType": "journal",
"volume": "28",
"label": "Dec.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1ugE7gaINC8",
"doi": "10.1109/TVCG.2021.3087273",
"abstract": "Searching similarity between a pair of shapes or data is an important problem in data analysis and visualization. The problem of computing similarity measures using scalar topology has been studied extensively and proven useful in the shape and data matching. Even though multi-field or multivariate (consists of multiple scalar fields) topology reveals richer topological features, research on building tools for computing similarity measures using multi-field topology is still in its infancy. In the current article, we propose a novel similarity measure between two piecewise-linear multi-fields based on their multi-resolution Reeb spaces - a newly developed data-structure that captures the topology of a multi-field. Overall, our method consists of two steps: (i) building a multi-resolution Reeb space corresponding to each of the multi-fields and (ii) proposing a similarity measure between two multi-resolution Reeb spaces by computing a list of topologically consistent matching pairs (of nodes) and the similarity between them. We demonstrate the effectiveness of the proposed similarity measure in detecting topological features from real time-varying multi-field data in two application domains - one from computational physics and one from computational chemistry.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Searching similarity between a pair of shapes or data is an important problem in data analysis and visualization. The problem of computing similarity measures using scalar topology has been studied extensively and proven useful in the shape and data matching. Even though multi-field or multivariate (consists of multiple scalar fields) topology reveals richer topological features, research on building tools for computing similarity measures using multi-field topology is still in its infancy. In the current article, we propose a novel similarity measure between two piecewise-linear multi-fields based on their multi-resolution Reeb spaces - a newly developed data-structure that captures the topology of a multi-field. Overall, our method consists of two steps: (i) building a multi-resolution Reeb space corresponding to each of the multi-fields and (ii) proposing a similarity measure between two multi-resolution Reeb spaces by computing a list of topologically consistent matching pairs (of nodes) and the similarity between them. We demonstrate the effectiveness of the proposed similarity measure in detecting topological features from real time-varying multi-field data in two application domains - one from computational physics and one from computational chemistry.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Searching similarity between a pair of shapes or data is an important problem in data analysis and visualization. The problem of computing similarity measures using scalar topology has been studied extensively and proven useful in the shape and data matching. Even though multi-field or multivariate (consists of multiple scalar fields) topology reveals richer topological features, research on building tools for computing similarity measures using multi-field topology is still in its infancy. In the current article, we propose a novel similarity measure between two piecewise-linear multi-fields based on their multi-resolution Reeb spaces - a newly developed data-structure that captures the topology of a multi-field. Overall, our method consists of two steps: (i) building a multi-resolution Reeb space corresponding to each of the multi-fields and (ii) proposing a similarity measure between two multi-resolution Reeb spaces by computing a list of topologically consistent matching pairs (of nodes) and the similarity between them. We demonstrate the effectiveness of the proposed similarity measure in detecting topological features from real time-varying multi-field data in two application domains - one from computational physics and one from computational chemistry.",
"title": "A Topological Similarity Measure Between Multi-Resolution Reeb Spaces",
"normalizedTitle": "A Topological Similarity Measure Between Multi-Resolution Reeb Spaces",
"fno": "09448469",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Biology Computing",
"Computational Geometry",
"Computer Graphics",
"Data Analysis",
"Data Structures",
"Data Visualisation",
"Feature Extraction",
"Graph Theory",
"Image Representation",
"Solid Modelling",
"Topology",
"Data Analysis",
"Data Matching",
"Multifield Topology",
"Multiple Scalar Fields",
"Multiresolution Reeb Space",
"Newly Developed Data Structure",
"Piecewise Linear Multifields",
"Richer Topological Features",
"Scalar Topology",
"Searching Similarity",
"Time Varying Multifield Data",
"Topological Similarity Measure",
"Topologically Consistent Matching Pairs",
"Visualization",
"Topology",
"Measurement",
"Extraterrestrial Measurements",
"Current Measurement",
"Distortion Measurement",
"Particle Measurements",
"Multi Field",
"Reeb Space",
"Similarity Measure",
"Multi Resolution",
"Joint Contour Net"
],
"authors": [
{
"givenName": "Yashwanth",
"surname": "Ramamurthi",
"fullName": "Yashwanth Ramamurthi",
"affiliation": "International Institute of Information Technology (IIIT), Bangalore, Karnataka, India",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Tripti",
"surname": "Agarwal",
"fullName": "Tripti Agarwal",
"affiliation": "SCI, University of Utah, Salt Lake City, UT, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Amit",
"surname": "Chattopadhyay",
"fullName": "Amit Chattopadhyay",
"affiliation": "International Institute of Information Technology (IIIT), Bangalore, Karnataka, India",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "12",
"pubDate": "2022-12-01 00:00:00",
"pubType": "trans",
"pages": "4360-4374",
"year": "2022",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/icdmw/2017/3800/0/3800a569",
"title": "A Novel Method for Fast and Accurate Similarity Measure in Time Series Field",
"doi": null,
"abstractUrl": "/proceedings-article/icdmw/2017/3800a569/12OmNBSjJ44",
"parentPublication": {
"id": "proceedings/icdmw/2017/3800/0",
"title": "2017 IEEE International Conference on Data Mining Workshops (ICDMW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cgiv/2016/0811/0/0811a067",
"title": "An Improvable Structure for Similarity Searching in Metric Spaces: Application on Image Databases",
"doi": null,
"abstractUrl": "/proceedings-article/cgiv/2016/0811a067/12OmNBvkdla",
"parentPublication": {
"id": "proceedings/cgiv/2016/0811/0",
"title": "2016 13th International Conference on Computer Graphics, Imaging and Visualization (CGiV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cw/2002/1862/0/18620465",
"title": "Topological Morphing Using Reeb Graphs",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2002/18620465/12OmNCwCLpE",
"parentPublication": {
"id": "proceedings/cw/2002/1862/0",
"title": "First International Symposium on Cyber Worlds, 2002. Proceedings.",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icsc/2016/0662/0/0662a266",
"title": "Semantic Word Error Rate for Sentence Similarity",
"doi": null,
"abstractUrl": "/proceedings-article/icsc/2016/0662a266/12OmNqzcvCl",
"parentPublication": {
"id": "proceedings/icsc/2016/0662/0",
"title": "2016 IEEE Tenth International Conference on Semantic Computing (ICSC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icccnt/2012/9999/0/06395975",
"title": "A holistic Knowledge Management framework for Higher Education Institutions",
"doi": null,
"abstractUrl": "/proceedings-article/icccnt/2012/06395975/12OmNvDI3RU",
"parentPublication": {
"id": "proceedings/icccnt/2012/9999/0",
"title": "2012 Third International Conference on Computing, Communication and Networking Technologies (ICCCNT 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bigdataservice/2016/2251/0/2251a142",
"title": "A Hybrid Geometric Approach for Measuring Similarity Level Among Documents and Document Clustering",
"doi": null,
"abstractUrl": "/proceedings-article/bigdataservice/2016/2251a142/12OmNweBUID",
"parentPublication": {
"id": "proceedings/bigdataservice/2016/2251/0",
"title": "2016 IEEE Second International Conference on Big Data Computing Service and Applications (BigDataService)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isme/2010/7669/2/05573833",
"title": "Q-convergence Theory of Nets of Fuzzy Sets in Fuzzy Topological Spaces",
"doi": null,
"abstractUrl": "/proceedings-article/isme/2010/05573833/12OmNzahcfJ",
"parentPublication": {
"id": "proceedings/isme/2010/7669/2",
"title": "2010 International Conference of Information Science and Management Engineering. ISME 2010",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdmw/2018/9288/0/928800b054",
"title": "Multimapper: Data Density Sensitive Topological Visualization",
"doi": null,
"abstractUrl": "/proceedings-article/icdmw/2018/928800b054/18rqu7pdOPm",
"parentPublication": {
"id": "proceedings/icdmw/2018/9288/0",
"title": "2018 IEEE International Conference on Data Mining Workshops (ICDMW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tk/5555/01/09891778",
"title": "A Learned Index for Exact Similarity Search in Metric Spaces",
"doi": null,
"abstractUrl": "/journal/tk/5555/01/09891778/1GF6i7X5DpK",
"parentPublication": {
"id": "trans/tk",
"title": "IEEE Transactions on Knowledge & Data Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icde/2020/2903/0/09101566",
"title": "HowSim: A General and Effective Similarity Measure on Heterogeneous Information Networks",
"doi": null,
"abstractUrl": "/proceedings-article/icde/2020/09101566/1kaMMip5AKA",
"parentPublication": {
"id": "proceedings/icde/2020/2903/0",
"title": "2020 IEEE 36th International Conference on Data Engineering (ICDE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09447222",
"articleId": "1ua0ob5v97O",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09449973",
"articleId": "1uiiQsEsi6A",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNwGqBqg",
"title": "November/December",
"year": "2009",
"issueNum": "06",
"idPrefix": "tg",
"pubType": "journal",
"volume": "15",
"label": "November/December",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUwIF69f",
"doi": "10.1109/TVCG.2009.172",
"abstract": "The semi-transparent nature of direct volume rendered images is useful to depict layered structures in a volume. However, obtaining a semi-transparent result with the layers clearly revealed is difficult and may involve tedious adjustment on opacity and other rendering parameters. Furthermore, the visual quality of layers also depends on various perceptual factors. In this paper, we propose an auto-correction method for enhancing the perceived quality of the semi-transparent layers in direct volume rendered images. We introduce a suite of new measures based on psychological principles to evaluate the perceptual quality of transparent structures in the rendered images. By optimizing rendering parameters within an adaptive and intuitive user interaction process, the quality of the images is enhanced such that specific user requirements can be met. Experimental results on various datasets demonstrate the effectiveness and robustness of our method",
"abstracts": [
{
"abstractType": "Regular",
"content": "The semi-transparent nature of direct volume rendered images is useful to depict layered structures in a volume. However, obtaining a semi-transparent result with the layers clearly revealed is difficult and may involve tedious adjustment on opacity and other rendering parameters. Furthermore, the visual quality of layers also depends on various perceptual factors. In this paper, we propose an auto-correction method for enhancing the perceived quality of the semi-transparent layers in direct volume rendered images. We introduce a suite of new measures based on psychological principles to evaluate the perceptual quality of transparent structures in the rendered images. By optimizing rendering parameters within an adaptive and intuitive user interaction process, the quality of the images is enhanced such that specific user requirements can be met. Experimental results on various datasets demonstrate the effectiveness and robustness of our method",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The semi-transparent nature of direct volume rendered images is useful to depict layered structures in a volume. However, obtaining a semi-transparent result with the layers clearly revealed is difficult and may involve tedious adjustment on opacity and other rendering parameters. Furthermore, the visual quality of layers also depends on various perceptual factors. In this paper, we propose an auto-correction method for enhancing the perceived quality of the semi-transparent layers in direct volume rendered images. We introduce a suite of new measures based on psychological principles to evaluate the perceptual quality of transparent structures in the rendered images. By optimizing rendering parameters within an adaptive and intuitive user interaction process, the quality of the images is enhanced such that specific user requirements can be met. Experimental results on various datasets demonstrate the effectiveness and robustness of our method",
"title": "Perception-Based Transparency Optimization for Direct Volume Rendering",
"normalizedTitle": "Perception-Based Transparency Optimization for Direct Volume Rendering",
"fno": "ttg2009061283",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Direct Volume Rendering",
"Image Enhancement",
"Layer Perception"
],
"authors": [
{
"givenName": "Ming-Yuen",
"surname": "Chan",
"fullName": "Ming-Yuen Chan",
"affiliation": "Department of Computer Science and Engineering, The Hong Kong University of Science and Technology",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Yingcai",
"surname": "Wu",
"fullName": "Yingcai Wu",
"affiliation": "Department of Computer Science and Engineering, The Hong Kong University of Science and Technology",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Wai-Ho",
"surname": "Mak",
"fullName": "Wai-Ho Mak",
"affiliation": "Department of Computer Science and Engineering, The Hong Kong University of Science and Technology",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Wei",
"surname": "Chen",
"fullName": "Wei Chen",
"affiliation": "State Key Lab of CAD&CG, Zhejiang University",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Huamin",
"surname": "Qu",
"fullName": "Huamin Qu",
"affiliation": "Department of Computer Science and Engineering, The Hong Kong University of Science and Technology",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "06",
"pubDate": "2009-11-01 00:00:00",
"pubType": "trans",
"pages": "1283-1290",
"year": "2009",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/icvrv/2011/4602/0/4602a275",
"title": "Direct Volume Rendering and Clipping Technology of Radar Beams",
"doi": null,
"abstractUrl": "/proceedings-article/icvrv/2011/4602a275/12OmNApLGRp",
"parentPublication": {
"id": "proceedings/icvrv/2011/4602/0",
"title": "2011 International Conference on Virtual Reality and Visualization",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/apvis/2007/0808/0/04126230",
"title": "Particle-based volume rendering",
"doi": null,
"abstractUrl": "/proceedings-article/apvis/2007/04126230/12OmNCeaPW4",
"parentPublication": {
"id": "proceedings/apvis/2007/0808/0",
"title": "Asia-Pacific Symposium on Visualisation 2007",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-vis/1995/7187/0/71870003",
"title": "Interval Set: A Volume Rendering Technique Generalizing Isosurface Extraction",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/1995/71870003/12OmNs5rl20",
"parentPublication": {
"id": "proceedings/ieee-vis/1995/7187/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-vis/2005/2766/0/27660038",
"title": "Scale-Invariant Volume Rendering",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/2005/27660038/12OmNxb5hu0",
"parentPublication": {
"id": "proceedings/ieee-vis/2005/2766/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-vis/2000/6478/0/64780039",
"title": "Two-Level Volume Rendering-Fusing MIP and DVR",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/2000/64780039/12OmNxzMnWP",
"parentPublication": {
"id": "proceedings/ieee-vis/2000/6478/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-vis/2001/7200/0/7200kniss",
"title": "Interactive Volume Rendering Using Multi-Dimensional Transfer Functions and Direct Manipulation Widgets",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/2001/7200kniss/12OmNz61dAW",
"parentPublication": {
"id": "proceedings/ieee-vis/2001/7200/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2002/03/v0270",
"title": "Multidimensional Transfer Functions for Interactive Volume Rendering",
"doi": null,
"abstractUrl": "/journal/tg/2002/03/v0270/13rRUB7a1fG",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2004/06/v0673",
"title": "Visualization of Vector Fields Using Seed LIC and Volume Rendering",
"doi": null,
"abstractUrl": "/journal/tg/2004/06/v0673/13rRUIM2VBw",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2011/12/ttg2011122144",
"title": "An Efficient Direct Volume Rendering Approach for Dichromats",
"doi": null,
"abstractUrl": "/journal/tg/2011/12/ttg2011122144/13rRUNvgz9H",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2010/06/ttg2010061505",
"title": "Direct Interval Volume Visualization",
"doi": null,
"abstractUrl": "/journal/tg/2010/06/ttg2010061505/13rRUxcsYLN",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2009061275",
"articleId": "13rRUyY28Yp",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2009061291",
"articleId": "13rRUwwaKt1",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNvSbBJO",
"title": "March",
"year": "2013",
"issueNum": "03",
"idPrefix": "tg",
"pubType": "journal",
"volume": "19",
"label": "March",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUwInvyw",
"doi": "10.1109/TVCG.2012.144",
"abstract": "Visualizing complex volume data usually renders selected parts of the volume semitransparently to see inner structures of the volume or provide a context. This presents a challenge for volume rendering methods to produce images with unambiguous depth-ordering perception. Existing methods use visual cues such as halos and shadows to enhance depth perception. Along with other limitations, these methods introduce redundant information and require additional overhead. This paper presents a new approach to enhancing depth-ordering perception of volume rendered images without using additional visual cues. We set up an energy function based on quantitative perception models to measure the quality of the images in terms of the effectiveness of depth-ordering and transparency perception as well as the faithfulness of the information revealed. Guided by the function, we use a conjugate gradient method to iteratively and judiciously enhance the results. Our method can complement existing systems for enhancing volume rendering results. The experimental results demonstrate the usefulness and effectiveness of our approach.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Visualizing complex volume data usually renders selected parts of the volume semitransparently to see inner structures of the volume or provide a context. This presents a challenge for volume rendering methods to produce images with unambiguous depth-ordering perception. Existing methods use visual cues such as halos and shadows to enhance depth perception. Along with other limitations, these methods introduce redundant information and require additional overhead. This paper presents a new approach to enhancing depth-ordering perception of volume rendered images without using additional visual cues. We set up an energy function based on quantitative perception models to measure the quality of the images in terms of the effectiveness of depth-ordering and transparency perception as well as the faithfulness of the information revealed. Guided by the function, we use a conjugate gradient method to iteratively and judiciously enhance the results. Our method can complement existing systems for enhancing volume rendering results. The experimental results demonstrate the usefulness and effectiveness of our approach.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Visualizing complex volume data usually renders selected parts of the volume semitransparently to see inner structures of the volume or provide a context. This presents a challenge for volume rendering methods to produce images with unambiguous depth-ordering perception. Existing methods use visual cues such as halos and shadows to enhance depth perception. Along with other limitations, these methods introduce redundant information and require additional overhead. This paper presents a new approach to enhancing depth-ordering perception of volume rendered images without using additional visual cues. We set up an energy function based on quantitative perception models to measure the quality of the images in terms of the effectiveness of depth-ordering and transparency perception as well as the faithfulness of the information revealed. Guided by the function, we use a conjugate gradient method to iteratively and judiciously enhance the results. Our method can complement existing systems for enhancing volume rendering results. The experimental results demonstrate the usefulness and effectiveness of our approach.",
"title": "Perceptually-Based Depth-Ordering Enhancement for Direct Volume Rendering",
"normalizedTitle": "Perceptually-Based Depth-Ordering Enhancement for Direct Volume Rendering",
"fno": "ttg2013030446",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Rendering Computer Graphics",
"Junctions",
"Image Color Analysis",
"Transfer Functions",
"Visualization",
"Optimization",
"Solid Modeling",
"Visualization",
"Volume Rendering",
"Depth Ordering",
"Depth Perception",
"Transparency"
],
"authors": [
{
"givenName": null,
"surname": "Lin Zheng",
"fullName": "Lin Zheng",
"affiliation": "Dept. of Comput. Sci., Univ. of California, Davis, Davis, CA, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": null,
"surname": "Yingcai Wu",
"fullName": "Yingcai Wu",
"affiliation": "Microsoft Res. Asia, Beijing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": null,
"surname": "Kwan-Liu Ma",
"fullName": "Kwan-Liu Ma",
"affiliation": "Dept. of Comput. Sci., Univ. of California, Davis, Davis, CA, USA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "03",
"pubDate": "2013-03-01 00:00:00",
"pubType": "trans",
"pages": "446-459",
"year": "2013",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/iv/2015/7568/0/7568a536",
"title": "Enhancing Visual Perception and Directing Viewer's Attention in Interactive Direct Volume Rendering",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2015/7568a536/12OmNAWpymA",
"parentPublication": {
"id": "proceedings/iv/2015/7568/0",
"title": "2015 19th International Conference on Information Visualisation (iV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sibgrapi/2013/5099/0/5099a123",
"title": "Explorable Volumetric Depth Images from Raycasting",
"doi": null,
"abstractUrl": "/proceedings-article/sibgrapi/2013/5099a123/12OmNwBT1oL",
"parentPublication": {
"id": "proceedings/sibgrapi/2013/5099/0",
"title": "2013 XXVI Conference on Graphics, Patterns and Images",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pacificvis/2013/4797/0/06596131",
"title": "Evaluation of Depth of Field for depth perception in DVR",
"doi": null,
"abstractUrl": "/proceedings-article/pacificvis/2013/06596131/12OmNx76TOn",
"parentPublication": {
"id": "proceedings/pacificvis/2013/4797/0",
"title": "2013 IEEE Pacific Visualization Symposium (PacificVis)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2012/1226/0/038P1A38",
"title": "A learning-based framework for depth ordering",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2012/038P1A38/12OmNz5JBXb",
"parentPublication": {
"id": "proceedings/cvpr/2012/1226/0",
"title": "2012 IEEE Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2001/03/v0253",
"title": "Volume Illustration: Nonphotorealistic Rendering of Volume Models",
"doi": null,
"abstractUrl": "/journal/tg/2001/03/v0253/13rRUxbTMyH",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2004/06/v0695",
"title": "Image-Space Visibility Ordering for Cell Projection Volume Rendering of Unstructured Data",
"doi": null,
"abstractUrl": "/journal/tg/2004/06/v0695/13rRUyY294t",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/01/ttg2013010067",
"title": "Lighting System for Visual Perception Enhancement in Volume Rendering",
"doi": null,
"abstractUrl": "/journal/tg/2013/01/ttg2013010067/13rRUyYSWsT",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cs/2013/02/mcs2013020041",
"title": "Effectively Visualizing the Spatial Structure of Cerebral Blood Vessels",
"doi": null,
"abstractUrl": "/magazine/cs/2013/02/mcs2013020041/13rRUyeTVlS",
"parentPublication": {
"id": "mags/cs",
"title": "Computing in Science & Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/12/08226853",
"title": "Multi-Material Volume Rendering with a Physically-Based Surface Reflection Model",
"doi": null,
"abstractUrl": "/journal/tg/2018/12/08226853/14H4WMQegms",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/01/09552224",
"title": "Differentiable Direct Volume Rendering",
"doi": null,
"abstractUrl": "/journal/tg/2022/01/09552224/1xibZvRmYzm",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2013030433",
"articleId": "13rRUwbaqUO",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2013030460",
"articleId": "13rRUyv53Fq",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNAle6Qq",
"title": "July-September",
"year": "2001",
"issueNum": "03",
"idPrefix": "tg",
"pubType": "journal",
"volume": "7",
"label": "July-September",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUxbTMyH",
"doi": "10.1109/2945.942693",
"abstract": "Abstract—Accurately and automatically conveying the structure of a volume model is a problem not fully solved by existing volume rendering approaches. Physics-based volume rendering approaches create images which may match the appearance of translucent materials in nature, but may not embody important structural details. Transfer function approaches allow flexible design of the volume appearance, but generally require substantial hand tuning for each new data set in order to be effective. We introduce the volume illustration approach, combining the familiarity of a physics-based illumination model with the ability to enhance important features using nonphotorealistic rendering techniques. Since features to be enhanced are defined on the basis of local volume characteristics rather than volume sample value, the application of volume illustration techniques requires less manual tuning than the design of a good transfer function. Volume illustration provides a flexible unified framework for enhancing structural perception of volume models through the amplification of features and the addition of illumination effects.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Abstract—Accurately and automatically conveying the structure of a volume model is a problem not fully solved by existing volume rendering approaches. Physics-based volume rendering approaches create images which may match the appearance of translucent materials in nature, but may not embody important structural details. Transfer function approaches allow flexible design of the volume appearance, but generally require substantial hand tuning for each new data set in order to be effective. We introduce the volume illustration approach, combining the familiarity of a physics-based illumination model with the ability to enhance important features using nonphotorealistic rendering techniques. Since features to be enhanced are defined on the basis of local volume characteristics rather than volume sample value, the application of volume illustration techniques requires less manual tuning than the design of a good transfer function. Volume illustration provides a flexible unified framework for enhancing structural perception of volume models through the amplification of features and the addition of illumination effects.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Abstract—Accurately and automatically conveying the structure of a volume model is a problem not fully solved by existing volume rendering approaches. Physics-based volume rendering approaches create images which may match the appearance of translucent materials in nature, but may not embody important structural details. Transfer function approaches allow flexible design of the volume appearance, but generally require substantial hand tuning for each new data set in order to be effective. We introduce the volume illustration approach, combining the familiarity of a physics-based illumination model with the ability to enhance important features using nonphotorealistic rendering techniques. Since features to be enhanced are defined on the basis of local volume characteristics rather than volume sample value, the application of volume illustration techniques requires less manual tuning than the design of a good transfer function. Volume illustration provides a flexible unified framework for enhancing structural perception of volume models through the amplification of features and the addition of illumination effects.",
"title": "Volume Illustration: Nonphotorealistic Rendering of Volume Models",
"normalizedTitle": "Volume Illustration: Nonphotorealistic Rendering of Volume Models",
"fno": "v0253",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Volume Rendering",
"Nonphotorealistic Rendering",
"Illustration",
"Lighting Models",
"Shading",
"Transfer Functions",
"Visualization"
],
"authors": [
{
"givenName": "Penny",
"surname": "Rheingans",
"fullName": "Penny Rheingans",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "David",
"surname": "Ebert",
"fullName": "David Ebert",
"affiliation": null,
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "03",
"pubDate": "2001-07-01 00:00:00",
"pubType": "trans",
"pages": "253-264",
"year": "2001",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/ieee-vis/2000/6478/0/64780037",
"title": "Volume Illustration: Non-Photorealistic Rendering of Volume Models",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/2000/64780037/12OmNC0y5FO",
"parentPublication": {
"id": "proceedings/ieee-vis/2000/6478/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vv/2002/7641/0/76410131",
"title": "Shading for Fourier Volume Rendering",
"doi": null,
"abstractUrl": "/proceedings-article/vv/2002/76410131/12OmNCwlajb",
"parentPublication": {
"id": "proceedings/vv/2002/7641/0",
"title": "Volume Visualization and Graphics, IEEE Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-vis/2005/2766/0/27660038",
"title": "Scale-Invariant Volume Rendering",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/2005/27660038/12OmNxb5hu0",
"parentPublication": {
"id": "proceedings/ieee-vis/2005/2766/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-vis/2002/7498/0/7498lu",
"title": "Non-Photorealistic Volume Rendering Using Stippling Techniques",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/2002/7498lu/12OmNy9Prft",
"parentPublication": {
"id": "proceedings/ieee-vis/2002/7498/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2003/02/v0127",
"title": "Illustrative Interactive Stipple Rendering",
"doi": null,
"abstractUrl": "/journal/tg/2003/02/v0127/13rRUIIVlcA",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2003/04/mcg2003040044",
"title": "Nonphotorealistic Rendering of Medical Volume Data",
"doi": null,
"abstractUrl": "/magazine/cg/2003/04/mcg2003040044/13rRUwkfAT0",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2010/04/ttg2010040560",
"title": "Per-Pixel Opacity Modulation for Feature Enhancement in Volume Rendering",
"doi": null,
"abstractUrl": "/journal/tg/2010/04/ttg2010040560/13rRUxOdD8f",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2003/03/v0298",
"title": "Interactive Clipping Techniques for Texture-Based Volume Visualization and Volume Shading",
"doi": null,
"abstractUrl": "/journal/tg/2003/03/v0298/13rRUxjyX3L",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2010/04/ttg2010040548",
"title": "Local Ambient Occlusion in Direct Volume Rendering",
"doi": null,
"abstractUrl": "/journal/tg/2010/04/ttg2010040548/13rRUy0HYRk",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2005/03/mcg2005030031",
"title": "Illustration Motifs for Effective Medical Volume Illustration",
"doi": null,
"abstractUrl": "/magazine/cg/2005/03/mcg2005030031/13rRUyft7x2",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "v0242",
"articleId": "13rRUxC0SOO",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "v0265",
"articleId": "13rRUILLkvg",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNxvwoNX",
"title": "July/August",
"year": "2010",
"issueNum": "04",
"idPrefix": "tg",
"pubType": "journal",
"volume": "16",
"label": "July/August",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUy0HYRk",
"doi": "10.1109/TVCG.2009.45",
"abstract": "This paper presents a novel technique to efficiently compute illumination for Direct Volume Rendering using a local approximation of ambient occlusion to integrate the intensity of incident light for each voxel. An advantage with this local approach is that fully shadowed regions are avoided, a desirable feature in many applications of volume rendering such as medical visualization. Additional transfer function interactions are also presented, for instance, to highlight specific structures with luminous tissue effects and create an improved context for semitransparent tissues with a separate absorption control for the illumination settings. Multiresolution volume management and GPU-based computation are used to accelerate the calculations and support large data sets. The scheme yields interactive frame rates with an adaptive sampling approach for incrementally refined illumination under arbitrary transfer function changes. The illumination effects can give a better understanding of the shape and density of tissues and so has the potential to increase the diagnostic value of medical volume rendering. Since the proposed method is gradient-free, it is especially beneficial at the borders of clip planes, where gradients are undefined, and for noisy data sets.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper presents a novel technique to efficiently compute illumination for Direct Volume Rendering using a local approximation of ambient occlusion to integrate the intensity of incident light for each voxel. An advantage with this local approach is that fully shadowed regions are avoided, a desirable feature in many applications of volume rendering such as medical visualization. Additional transfer function interactions are also presented, for instance, to highlight specific structures with luminous tissue effects and create an improved context for semitransparent tissues with a separate absorption control for the illumination settings. Multiresolution volume management and GPU-based computation are used to accelerate the calculations and support large data sets. The scheme yields interactive frame rates with an adaptive sampling approach for incrementally refined illumination under arbitrary transfer function changes. The illumination effects can give a better understanding of the shape and density of tissues and so has the potential to increase the diagnostic value of medical volume rendering. Since the proposed method is gradient-free, it is especially beneficial at the borders of clip planes, where gradients are undefined, and for noisy data sets.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper presents a novel technique to efficiently compute illumination for Direct Volume Rendering using a local approximation of ambient occlusion to integrate the intensity of incident light for each voxel. An advantage with this local approach is that fully shadowed regions are avoided, a desirable feature in many applications of volume rendering such as medical visualization. Additional transfer function interactions are also presented, for instance, to highlight specific structures with luminous tissue effects and create an improved context for semitransparent tissues with a separate absorption control for the illumination settings. Multiresolution volume management and GPU-based computation are used to accelerate the calculations and support large data sets. The scheme yields interactive frame rates with an adaptive sampling approach for incrementally refined illumination under arbitrary transfer function changes. The illumination effects can give a better understanding of the shape and density of tissues and so has the potential to increase the diagnostic value of medical volume rendering. Since the proposed method is gradient-free, it is especially beneficial at the borders of clip planes, where gradients are undefined, and for noisy data sets.",
"title": "Local Ambient Occlusion in Direct Volume Rendering",
"normalizedTitle": "Local Ambient Occlusion in Direct Volume Rendering",
"fno": "ttg2010040548",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Local Illumination",
"Volumetric Ambient Occlusion",
"Volume Rendering",
"Medical Visualization",
"Emissive Tissues",
"Shading",
"Shadowing"
],
"authors": [
{
"givenName": "Frida",
"surname": "Hernell",
"fullName": "Frida Hernell",
"affiliation": "Linköping University, Norrköping",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Patric",
"surname": "Ljung",
"fullName": "Patric Ljung",
"affiliation": "Siemens Corporate Research, Princeton",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Anders",
"surname": "Ynnerman",
"fullName": "Anders Ynnerman",
"affiliation": "Linköping University, Norrköping",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "04",
"pubDate": "2010-07-01 00:00:00",
"pubType": "trans",
"pages": "548-559",
"year": "2010",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/ieee-vis/2000/6478/0/64780037",
"title": "Volume Illustration: Non-Photorealistic Rendering of Volume Models",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/2000/64780037/12OmNC0y5FO",
"parentPublication": {
"id": "proceedings/ieee-vis/2000/6478/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vv/2002/7641/0/76410131",
"title": "Shading for Fourier Volume Rendering",
"doi": null,
"abstractUrl": "/proceedings-article/vv/2002/76410131/12OmNCwlajb",
"parentPublication": {
"id": "proceedings/vv/2002/7641/0",
"title": "Volume Visualization and Graphics, IEEE Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-vis/2001/7200/0/7200dong",
"title": "Volume Rendering of Fine Details Within Medical Data",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/2001/7200dong/12OmNx6xHlc",
"parentPublication": {
"id": "proceedings/ieee-vis/2001/7200/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-vis/2005/2766/0/27660038",
"title": "Scale-Invariant Volume Rendering",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/2005/27660038/12OmNxb5hu0",
"parentPublication": {
"id": "proceedings/ieee-vis/2005/2766/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-vis/2005/2766/0/01532808",
"title": "Scale-invariant volume rendering",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/2005/01532808/12OmNyoAA5X",
"parentPublication": {
"id": "proceedings/ieee-vis/2005/2766/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2010/05/ttg2010050752",
"title": "Real-Time Volume-Based Ambient Occlusion",
"doi": null,
"abstractUrl": "/journal/tg/2010/05/ttg2010050752/13rRUwbs2gn",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/1990/02/mcg1990020033",
"title": "Volume Rendering",
"doi": null,
"abstractUrl": "/magazine/cg/1990/02/mcg1990020033/13rRUwcAquw",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/03/ttg2012030447",
"title": "Efficient Visibility Encoding for Dynamic Illumination in Direct Volume Rendering",
"doi": null,
"abstractUrl": "/journal/tg/2012/03/ttg2012030447/13rRUxAATgu",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2001/03/v0242",
"title": "Two-Level Volume Rendering",
"doi": null,
"abstractUrl": "/journal/tg/2001/03/v0242/13rRUxC0SOO",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2001/03/v0253",
"title": "Volume Illustration: Nonphotorealistic Rendering of Volume Models",
"doi": null,
"abstractUrl": "/journal/tg/2001/03/v0253/13rRUxbTMyH",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2010040533",
"articleId": "13rRUILtJzr",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2010040560",
"articleId": "13rRUxOdD8f",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNyGtjf5",
"title": "April",
"year": "2019",
"issueNum": "04",
"idPrefix": "tg",
"pubType": "journal",
"volume": "25",
"label": "April",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "181W9oUhqP9",
"doi": "10.1109/TVCG.2018.2816059",
"abstract": "We present a technique to synthesize and analyze volume-rendered images using generative models. We use the Generative Adversarial Network (GAN) framework to compute a model from a large collection of volume renderings, conditioned on (1) viewpoint and (2) transfer functions for opacity and color. Our approach facilitates tasks for volume analysis that are challenging to achieve using existing rendering techniques such as ray casting or texture-based methods. We show how to guide the user in transfer function editing by quantifying expected change in the output image. Additionally, the generative model transforms transfer functions into a view-invariant latent space specifically designed to synthesize volume-rendered images. We use this space directly for rendering, enabling the user to explore the space of volume-rendered images. As our model is independent of the choice of volume rendering process, we show how to analyze volume-rendered images produced by direct and global illumination lighting, for a variety of volume datasets.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We present a technique to synthesize and analyze volume-rendered images using generative models. We use the Generative Adversarial Network (GAN) framework to compute a model from a large collection of volume renderings, conditioned on (1) viewpoint and (2) transfer functions for opacity and color. Our approach facilitates tasks for volume analysis that are challenging to achieve using existing rendering techniques such as ray casting or texture-based methods. We show how to guide the user in transfer function editing by quantifying expected change in the output image. Additionally, the generative model transforms transfer functions into a view-invariant latent space specifically designed to synthesize volume-rendered images. We use this space directly for rendering, enabling the user to explore the space of volume-rendered images. As our model is independent of the choice of volume rendering process, we show how to analyze volume-rendered images produced by direct and global illumination lighting, for a variety of volume datasets.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We present a technique to synthesize and analyze volume-rendered images using generative models. We use the Generative Adversarial Network (GAN) framework to compute a model from a large collection of volume renderings, conditioned on (1) viewpoint and (2) transfer functions for opacity and color. Our approach facilitates tasks for volume analysis that are challenging to achieve using existing rendering techniques such as ray casting or texture-based methods. We show how to guide the user in transfer function editing by quantifying expected change in the output image. Additionally, the generative model transforms transfer functions into a view-invariant latent space specifically designed to synthesize volume-rendered images. We use this space directly for rendering, enabling the user to explore the space of volume-rendered images. As our model is independent of the choice of volume rendering process, we show how to analyze volume-rendered images produced by direct and global illumination lighting, for a variety of volume datasets.",
"title": "A Generative Model for Volume Rendering",
"normalizedTitle": "A Generative Model for Volume Rendering",
"fno": "08316963",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Image Colour Analysis",
"Rendering Computer Graphics",
"Transfer Functions",
"Volume Rendering Process",
"Generative Adversarial Network Framework",
"GAN Framework",
"Volume Rendered Image Analysis",
"Ray Casting",
"Texture Based Methods",
"Transfer Function",
"View Invariant Latent Space",
"Volume Rendered Image Synthesis",
"Global Illumination Lighting",
"Direct Illumination Lighting",
"Generative Model Transforms Transfer Functions",
"Rendering Computer Graphics",
"Transfer Functions",
"Solid Modeling",
"Gallium Nitride",
"Image Color Analysis",
"Computational Modeling",
"Sensitivity",
"Volume Rendering",
"Generative Models",
"Deep Learning",
"Generative Adversarial Networks"
],
"authors": [
{
"givenName": "Matthew",
"surname": "Berger",
"fullName": "Matthew Berger",
"affiliation": "Department of Computer Science, University of Arizona, Tucson, AZ",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jixian",
"surname": "Li",
"fullName": "Jixian Li",
"affiliation": "Department of Computer Science, University of Arizona, Tucson, AZ",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Joshua A.",
"surname": "Levine",
"fullName": "Joshua A. Levine",
"affiliation": "Department of Computer Science, University of Arizona, Tucson, AZ",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "04",
"pubDate": "2019-04-01 00:00:00",
"pubType": "trans",
"pages": "1636-1650",
"year": "2019",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/visual/1990/2083/0/00146362",
"title": "A procedural interface for volume rendering",
"doi": null,
"abstractUrl": "/proceedings-article/visual/1990/00146362/12OmNApLGMS",
"parentPublication": {
"id": "proceedings/visual/1990/2083/0",
"title": "1990 First IEEE Conference on Visualization",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isdea/2013/4893/0/06455193",
"title": "Volume Rendering of Ultrasonic Sequential Images Based on Resampled Data",
"doi": null,
"abstractUrl": "/proceedings-article/isdea/2013/06455193/12OmNBQkx1D",
"parentPublication": {
"id": "proceedings/isdea/2013/4893/0",
"title": "2013 Third International Conference on Intelligent System Design and Engineering Applications (ISDEA 2013)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2009/06/ttg2009061283",
"title": "Perception-Based Transparency Optimization for Direct Volume Rendering",
"doi": null,
"abstractUrl": "/journal/tg/2009/06/ttg2009061283/13rRUwIF69f",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/03/ttg2013030446",
"title": "Perceptually-Based Depth-Ordering Enhancement for Direct Volume Rendering",
"doi": null,
"abstractUrl": "/journal/tg/2013/03/ttg2013030446/13rRUwInvyw",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2006/05/v1307",
"title": "Progressive Volume Rendering of Large Unstructured Grids",
"doi": null,
"abstractUrl": "/journal/tg/2006/05/v1307/13rRUwfZC05",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2001/03/v0242",
"title": "Two-Level Volume Rendering",
"doi": null,
"abstractUrl": "/journal/tg/2001/03/v0242/13rRUxC0SOO",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2022/5670/0/567000a342",
"title": "3inGAN: Learning a 3D Generative Model from Images of a Self-similar Scene",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2022/567000a342/1KYsw4zjeUw",
"parentPublication": {
"id": "proceedings/3dv/2022/5670/0",
"title": "2022 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pacificvis/2019/9226/0/922600a282",
"title": "DNN-VolVis: Interactive Volume Visualization Supported by Deep Neural Network",
"doi": null,
"abstractUrl": "/proceedings-article/pacificvis/2019/922600a282/1cMF6YRpyH6",
"parentPublication": {
"id": "proceedings/pacificvis/2019/9226/0",
"title": "2019 IEEE Pacific Visualization Symposium (PacificVis)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/02/09222562",
"title": "Homomorphic-Encrypted Volume Rendering",
"doi": null,
"abstractUrl": "/journal/tg/2021/02/09222562/1nTqvh6tnr2",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/01/09552224",
"title": "Differentiable Direct Volume Rendering",
"doi": null,
"abstractUrl": "/journal/tg/2022/01/09552224/1xibZvRmYzm",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": null,
"next": {
"fno": "08322258",
"articleId": "17YCN5E6cAE",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
|
{
"issue": {
"id": "1qL5hsvvVkc",
"title": "Feb.",
"year": "2021",
"issueNum": "02",
"idPrefix": "tg",
"pubType": "journal",
"volume": "27",
"label": "Feb.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1nTqvh6tnr2",
"doi": "10.1109/TVCG.2020.3030436",
"abstract": "Computationally demanding tasks are typically calculated in dedicated data centers, and real-time visualizations also follow this trend. Some rendering tasks, however, require the highest level of confidentiality so that no other party, besides the owner, can read or see the sensitive data. Here we present a direct volume rendering approach that performs volume rendering directly on encrypted volume data by using the homomorphic Paillier encryption algorithm. This approach ensures that the volume data and rendered image are uninterpretable to the rendering server. Our volume rendering pipeline introduces novel approaches for encrypted-data compositing, interpolation, and opacity modulation, as well as simple transfer function design, where each of these routines maintains the highest level of privacy. We present performance and memory overhead analysis that is associated with our privacy-preserving scheme. Our approach is open and secure by design, as opposed to secure through obscurity. Owners of the data only have to keep their secure key confidential to guarantee the privacy of their volume data and the rendered images. Our work is, to our knowledge, the first privacy-preserving remote volume-rendering approach that does not require that any server involved be trustworthy; even in cases when the server is compromised, no sensitive data will be leaked to a foreign party.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Computationally demanding tasks are typically calculated in dedicated data centers, and real-time visualizations also follow this trend. Some rendering tasks, however, require the highest level of confidentiality so that no other party, besides the owner, can read or see the sensitive data. Here we present a direct volume rendering approach that performs volume rendering directly on encrypted volume data by using the homomorphic Paillier encryption algorithm. This approach ensures that the volume data and rendered image are uninterpretable to the rendering server. Our volume rendering pipeline introduces novel approaches for encrypted-data compositing, interpolation, and opacity modulation, as well as simple transfer function design, where each of these routines maintains the highest level of privacy. We present performance and memory overhead analysis that is associated with our privacy-preserving scheme. Our approach is open and secure by design, as opposed to secure through obscurity. Owners of the data only have to keep their secure key confidential to guarantee the privacy of their volume data and the rendered images. Our work is, to our knowledge, the first privacy-preserving remote volume-rendering approach that does not require that any server involved be trustworthy; even in cases when the server is compromised, no sensitive data will be leaked to a foreign party.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Computationally demanding tasks are typically calculated in dedicated data centers, and real-time visualizations also follow this trend. Some rendering tasks, however, require the highest level of confidentiality so that no other party, besides the owner, can read or see the sensitive data. Here we present a direct volume rendering approach that performs volume rendering directly on encrypted volume data by using the homomorphic Paillier encryption algorithm. This approach ensures that the volume data and rendered image are uninterpretable to the rendering server. Our volume rendering pipeline introduces novel approaches for encrypted-data compositing, interpolation, and opacity modulation, as well as simple transfer function design, where each of these routines maintains the highest level of privacy. We present performance and memory overhead analysis that is associated with our privacy-preserving scheme. Our approach is open and secure by design, as opposed to secure through obscurity. Owners of the data only have to keep their secure key confidential to guarantee the privacy of their volume data and the rendered images. Our work is, to our knowledge, the first privacy-preserving remote volume-rendering approach that does not require that any server involved be trustworthy; even in cases when the server is compromised, no sensitive data will be leaked to a foreign party.",
"title": "Homomorphic-Encrypted Volume Rendering",
"normalizedTitle": "Homomorphic-Encrypted Volume Rendering",
"fno": "09222562",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Cryptography",
"Data Privacy",
"Data Visualisation",
"Rendering Computer Graphics",
"Homomorphic Encrypted Volume Rendering",
"Realtime Visualization",
"Rendering Tasks",
"Direct Volume Rendering Approach",
"Encrypted Volume Data",
"Homomorphic Paillier Encryption Algorithm",
"Rendered Image",
"Rendering Server",
"Encrypted Data Compositing",
"Transfer Function Design",
"Memory Overhead Analysis",
"Privacy Preserving Scheme",
"Privacy Preserving Remote Volume Rendering Approach",
"Rendering Computer Graphics",
"Servers",
"Cloud Computing",
"Data Privacy",
"Encryption",
"Transfer Functions",
"Volume Rendering",
"Transfer Function",
"Homomorphic Encryption",
"Paillier"
],
"authors": [
{
"givenName": "Sebastian",
"surname": "Mazza",
"fullName": "Sebastian Mazza",
"affiliation": "TU Wien, Austria",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Daniel",
"surname": "Patel",
"fullName": "Daniel Patel",
"affiliation": "Western Norway University of Applied Sciences, Norway",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Ivan",
"surname": "Viola",
"fullName": "Ivan Viola",
"affiliation": "King Abdullah University of Science and Technology (KAUST), Saudi Arabia",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": true,
"isOpenAccess": true,
"issueNum": "02",
"pubDate": "2021-02-01 00:00:00",
"pubType": "trans",
"pages": "635-644",
"year": "2021",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/visual/1990/2083/0/00146362",
"title": "A procedural interface for volume rendering",
"doi": null,
"abstractUrl": "/proceedings-article/visual/1990/00146362/12OmNApLGMS",
"parentPublication": {
"id": "proceedings/visual/1990/2083/0",
"title": "1990 First IEEE Conference on Visualization",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cgi/2003/1946/0/19460002",
"title": "Hardware Assisted Multichannel Volume Rendering",
"doi": null,
"abstractUrl": "/proceedings-article/cgi/2003/19460002/12OmNCdk2xM",
"parentPublication": {
"id": "proceedings/cgi/2003/1946/0",
"title": "Computer Graphics International Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-vis/2005/2766/0/01532808",
"title": "Scale-invariant volume rendering",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/2005/01532808/12OmNyoAA5X",
"parentPublication": {
"id": "proceedings/ieee-vis/2005/2766/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sibgrapi/2010/8420/0/05720357",
"title": "Importance-Aware Composition for Illustrative Volume Rendering",
"doi": null,
"abstractUrl": "/proceedings-article/sibgrapi/2010/05720357/12OmNz5apMR",
"parentPublication": {
"id": "proceedings/sibgrapi/2010/8420/0",
"title": "2010 23rd SIBGRAPI Conference on Graphics, Patterns and Images",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2001/03/v0253",
"title": "Volume Illustration: Nonphotorealistic Rendering of Volume Models",
"doi": null,
"abstractUrl": "/journal/tg/2001/03/v0253/13rRUxbTMyH",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2007/02/04069241",
"title": "Topology-Controlled Volume Rendering",
"doi": null,
"abstractUrl": "/journal/tg/2007/02/04069241/13rRUytF41s",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/12/08226853",
"title": "Multi-Material Volume Rendering with a Physically-Based Surface Reflection Model",
"doi": null,
"abstractUrl": "/journal/tg/2018/12/08226853/14H4WMQegms",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/04/08316963",
"title": "A Generative Model for Volume Rendering",
"doi": null,
"abstractUrl": "/journal/tg/2019/04/08316963/181W9oUhqP9",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vis/2019/4941/0/08933631",
"title": "Hybrid Grids for Sparse Volume Rendering",
"doi": null,
"abstractUrl": "/proceedings-article/vis/2019/08933631/1fTgGByNmda",
"parentPublication": {
"id": "proceedings/vis/2019/4941/0",
"title": "2019 IEEE Visualization Conference (VIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/01/09552224",
"title": "Differentiable Direct Volume Rendering",
"doi": null,
"abstractUrl": "/journal/tg/2022/01/09552224/1xibZvRmYzm",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09222372",
"articleId": "1nTqdQ0THGw",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09224646",
"articleId": "1nWK2KRnd60",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1qLaXeAt0zK",
"name": "ttg202102-09222562s1-tvcg-3030436-mm.zip",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg202102-09222562s1-tvcg-3030436-mm.zip",
"extension": "zip",
"size": "10.2 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNAtstbb",
"title": "August",
"year": "2006",
"issueNum": "08",
"idPrefix": "tc",
"pubType": "journal",
"volume": "55",
"label": "August",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUIM2VB2",
"doi": "10.1109/TC.2006.115",
"abstract": "As the complexity of 3D scenes is on the increase, the search for an effective visibility culling method has become one of the most important issues to be addressed in the design of 3D rendering processors. In this paper, we propose a new rasterization pipeline with visibility culling; the proposed architecture performs the visibility culling at an early stage of the rasterization pipeline (especially at the traversal stage) by retrieving data in a pixel cache without any significant hardware logics such as the hierarchical z{\\hbox{-}}{\\rm buffer}. If the data to be retrieved does not exist in the pixel cache, the proposed architecture performs a prefetch operation in order to reduce the miss penalty of the pixel cache. That is, the cache miss penalty can be reduced as the transfer of a missed cache block from the frame memory into the pixel cache can be handled simultaneously with the rasterization pipeline executions. Simulation results show that the proposed architecture can achieve a performance gain of about 32 percent compared with the conventional pretexturing architecture and about 7 percent compared to the hierarchical z{\\hbox{-}}{\\rm buffer} visibility scheme.",
"abstracts": [
{
"abstractType": "Regular",
"content": "As the complexity of 3D scenes is on the increase, the search for an effective visibility culling method has become one of the most important issues to be addressed in the design of 3D rendering processors. In this paper, we propose a new rasterization pipeline with visibility culling; the proposed architecture performs the visibility culling at an early stage of the rasterization pipeline (especially at the traversal stage) by retrieving data in a pixel cache without any significant hardware logics such as the hierarchical z{\\hbox{-}}{\\rm buffer}. If the data to be retrieved does not exist in the pixel cache, the proposed architecture performs a prefetch operation in order to reduce the miss penalty of the pixel cache. That is, the cache miss penalty can be reduced as the transfer of a missed cache block from the frame memory into the pixel cache can be handled simultaneously with the rasterization pipeline executions. Simulation results show that the proposed architecture can achieve a performance gain of about 32 percent compared with the conventional pretexturing architecture and about 7 percent compared to the hierarchical z{\\hbox{-}}{\\rm buffer} visibility scheme.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "As the complexity of 3D scenes is on the increase, the search for an effective visibility culling method has become one of the most important issues to be addressed in the design of 3D rendering processors. In this paper, we propose a new rasterization pipeline with visibility culling; the proposed architecture performs the visibility culling at an early stage of the rasterization pipeline (especially at the traversal stage) by retrieving data in a pixel cache without any significant hardware logics such as the hierarchical z{\\hbox{-}}{\\rm buffer}. If the data to be retrieved does not exist in the pixel cache, the proposed architecture performs a prefetch operation in order to reduce the miss penalty of the pixel cache. That is, the cache miss penalty can be reduced as the transfer of a missed cache block from the frame memory into the pixel cache can be handled simultaneously with the rasterization pipeline executions. Simulation results show that the proposed architecture can achieve a performance gain of about 32 percent compared with the conventional pretexturing architecture and about 7 percent compared to the hierarchical z{\\hbox{-}}{\\rm buffer} visibility scheme.",
"title": "An Effective Visibility Culling Method Based on Cache Block",
"normalizedTitle": "An Effective Visibility Culling Method Based on Cache Block",
"fno": "t1024",
"hasPdf": true,
"idPrefix": "tc",
"keywords": [
"Computer Graphics",
"Cache Memories",
"Graphics Processors",
"Visible Surface Algorithms"
],
"authors": [
{
"givenName": "Moon-Hee",
"surname": "Choi",
"fullName": "Moon-Hee Choi",
"affiliation": "IEEE",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Woo-Chan",
"surname": "Park",
"fullName": "Woo-Chan Park",
"affiliation": "IEEE",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Francis",
"surname": "Neelamkavil",
"fullName": "Francis Neelamkavil",
"affiliation": "IEEE",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Tack-Don",
"surname": "Han",
"fullName": "Tack-Don Han",
"affiliation": "IEEE",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Shin-Dug",
"surname": "Kim",
"fullName": "Shin-Dug Kim",
"affiliation": "IEEE Computer Society",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "08",
"pubDate": "2006-08-01 00:00:00",
"pubType": "trans",
"pages": "1024-1032",
"year": "2006",
"issn": "0018-9340",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/ieee-vis/2004/8788/0/87880147",
"title": "Visibility Culling for Time-Varying Volume Rendering Using Temporal Occlusion Coherence",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/2004/87880147/12OmNAY79mS",
"parentPublication": {
"id": "proceedings/ieee-vis/2004/8788/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-vis/2003/2030/0/20300045",
"title": "Visibility Culling Using Plenoptic Opacity Functions for Large Volume Visualization",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/2003/20300045/12OmNBhZ4fE",
"parentPublication": {
"id": "proceedings/ieee-vis/2003/2030/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/csse/2008/3336/2/3336d058",
"title": "Efficient Occlusion Culling with Occupancy Proportion",
"doi": null,
"abstractUrl": "/proceedings-article/csse/2008/3336d058/12OmNvDZEXS",
"parentPublication": {
"id": "proceedings/csse/2008/3336/6",
"title": "Computer Science and Software Engineering, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvgip/2008/3476/0/3476a047",
"title": "Visibility Cuts: A System for Rendering Dynamic Virtual Environments",
"doi": null,
"abstractUrl": "/proceedings-article/icvgip/2008/3476a047/12OmNxGAL3M",
"parentPublication": {
"id": "proceedings/icvgip/2008/3476/0",
"title": "Computer Vision, Graphics & Image Processing, Indian Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pvg/2001/7223/0/72230067",
"title": "Parallel View-Dependent Isosurface Extraction Using Multi-Pass Occlusion Culling",
"doi": null,
"abstractUrl": "/proceedings-article/pvg/2001/72230067/12OmNynJMPK",
"parentPublication": {
"id": "proceedings/pvg/2001/7223/0",
"title": "Parallel and Large-Data Visualization and Graphics, IEEE Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2001/04/v0365",
"title": "Efficient Conservative Visibility Culling Using the Prioritized-Layered Projection Algorithm",
"doi": null,
"abstractUrl": "/journal/tg/2001/04/v0365/13rRUwgQpDd",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2006/02/v0143",
"title": "Fast and Reliable Collision Culling Using Graphics Hardware",
"doi": null,
"abstractUrl": "/journal/tg/2006/02/v0143/13rRUwgyOjb",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2010/05/ttg2010050707",
"title": "Two Fast Methods for High-Quality Line Visibility",
"doi": null,
"abstractUrl": "/journal/tg/2010/05/ttg2010050707/13rRUxC0SW4",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/01/09552900",
"title": "Probabilistic Occlusion Culling using Confidence Maps for High-Quality Rendering of Large Particle Data",
"doi": null,
"abstractUrl": "/journal/tg/2022/01/09552900/1xic6f4Sc6I",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "t1011",
"articleId": "13rRUygT7xs",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "t1033",
"articleId": "13rRUxly8WM",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNCaLEju",
"title": "Jan.",
"year": "2018",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": "24",
"label": "Jan.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUxly9dZ",
"doi": "10.1109/TVCG.2017.2743979",
"abstract": "Molecular dynamics (MD) simulations are crucial to investigating important processes in physics and thermodynamics. The simulated atoms are usually visualized as hard spheres with Phong shading, where individual particles and their local density can be perceived well in close-up views. However, for large-scale simulations with 10 million particles or more, the visualization of large fields-of-view usually suffers from strong aliasing artifacts, because the mismatch between data size and output resolution leads to severe under-sampling of the geometry. Excessive super-sampling can alleviate this problem, but is prohibitively expensive. This paper presents a novel visualization method for large-scale particle data that addresses aliasing while enabling interactive high-quality rendering. We introduce the novel concept of screen-space normal distribution functions (S-NDFs) for particle data. S-NDFs represent the distribution of surface normals that map to a given pixel in screen space, which enables high-quality re-lighting without re-rendering particles. In order to facilitate interactive zooming, we cache S-NDFs in a screen-space mipmap (S-MIP). Together, these two concepts enable interactive, scale-consistent re-lighting and shading changes, as well as zooming, without having to re-sample the particle data. We show how our method facilitates the interactive exploration of real-world large-scale MD simulation data in different scenarios.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Molecular dynamics (MD) simulations are crucial to investigating important processes in physics and thermodynamics. The simulated atoms are usually visualized as hard spheres with Phong shading, where individual particles and their local density can be perceived well in close-up views. However, for large-scale simulations with 10 million particles or more, the visualization of large fields-of-view usually suffers from strong aliasing artifacts, because the mismatch between data size and output resolution leads to severe under-sampling of the geometry. Excessive super-sampling can alleviate this problem, but is prohibitively expensive. This paper presents a novel visualization method for large-scale particle data that addresses aliasing while enabling interactive high-quality rendering. We introduce the novel concept of screen-space normal distribution functions (S-NDFs) for particle data. S-NDFs represent the distribution of surface normals that map to a given pixel in screen space, which enables high-quality re-lighting without re-rendering particles. In order to facilitate interactive zooming, we cache S-NDFs in a screen-space mipmap (S-MIP). Together, these two concepts enable interactive, scale-consistent re-lighting and shading changes, as well as zooming, without having to re-sample the particle data. We show how our method facilitates the interactive exploration of real-world large-scale MD simulation data in different scenarios.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Molecular dynamics (MD) simulations are crucial to investigating important processes in physics and thermodynamics. The simulated atoms are usually visualized as hard spheres with Phong shading, where individual particles and their local density can be perceived well in close-up views. However, for large-scale simulations with 10 million particles or more, the visualization of large fields-of-view usually suffers from strong aliasing artifacts, because the mismatch between data size and output resolution leads to severe under-sampling of the geometry. Excessive super-sampling can alleviate this problem, but is prohibitively expensive. This paper presents a novel visualization method for large-scale particle data that addresses aliasing while enabling interactive high-quality rendering. We introduce the novel concept of screen-space normal distribution functions (S-NDFs) for particle data. S-NDFs represent the distribution of surface normals that map to a given pixel in screen space, which enables high-quality re-lighting without re-rendering particles. In order to facilitate interactive zooming, we cache S-NDFs in a screen-space mipmap (S-MIP). Together, these two concepts enable interactive, scale-consistent re-lighting and shading changes, as well as zooming, without having to re-sample the particle data. We show how our method facilitates the interactive exploration of real-world large-scale MD simulation data in different scenarios.",
"title": "Screen-Space Normal Distribution Function Caching for Consistent Multi-Resolution Rendering of Large Particle Data",
"normalizedTitle": "Screen-Space Normal Distribution Function Caching for Consistent Multi-Resolution Rendering of Large Particle Data",
"fno": "08017605",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Rendering Computer Graphics",
"Data Visualization",
"Casting",
"Gaussian Distribution",
"Probability Density Function",
"Lighting",
"Force",
"Multiresolution Techniques",
"Point Based Data",
"Glyph Based Techniques",
"Scalability Issues",
"Molecular Visualization"
],
"authors": [
{
"givenName": "Mohamed",
"surname": "Ibrahim",
"fullName": "Mohamed Ibrahim",
"affiliation": "King Abdullah University of Science and Technology (KAUST), Saudi Arabia",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Patrick",
"surname": "Wickenhäuser",
"fullName": "Patrick Wickenhäuser",
"affiliation": "Visualization Research Center (VISUS), University of Stuttgart, Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Peter",
"surname": "Rautek",
"fullName": "Peter Rautek",
"affiliation": "King Abdullah University of Science and Technology (KAUST), Saudi Arabia",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Guido",
"surname": "Reina",
"fullName": "Guido Reina",
"affiliation": "Visualization Research Center (VISUS), University of Stuttgart, Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Markus",
"surname": "Hadwiger",
"fullName": "Markus Hadwiger",
"affiliation": "King Abdullah University of Science and Technology (KAUST), Saudi Arabia",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2018-01-01 00:00:00",
"pubType": "trans",
"pages": "944-953",
"year": "2018",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/scc/2012/6218/0/06496057",
"title": "Poster: Remote Visualization for Large-Scale Simulation Using Particle-Based Volume Rendering",
"doi": null,
"abstractUrl": "/proceedings-article/scc/2012/06496057/12OmNAlNiKK",
"parentPublication": {
"id": "proceedings/scc/2012/6218/0",
"title": "2012 SC Companion: High Performance Computing, Networking, Storage and Analysis (SCC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdh/2012/4899/0/4899a074",
"title": "Adaptive Particle Size Setting and Normal Calculation Methods in Fluid Rendering",
"doi": null,
"abstractUrl": "/proceedings-article/icdh/2012/4899a074/12OmNAo45RW",
"parentPublication": {
"id": "proceedings/icdh/2012/4899/0",
"title": "4th International Conference on Digital Home (ICDH)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sccompanion/2012/4956/0/4956b481",
"title": "Poster: Remote Visualization for Large-Scale Simulation Using Particle-Based Volume Rendering",
"doi": null,
"abstractUrl": "/proceedings-article/sccompanion/2012/4956b481/12OmNAoUTbm",
"parentPublication": {
"id": "proceedings/sccompanion/2012/4956/0",
"title": "2012 SC Companion: High Performance Computing, Networking Storage and Analysis",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ijcbs/2009/3739/0/3739a573",
"title": "Applying Particle Swarm Optimization to Transfer Function Specification for Direct Volume Rendering",
"doi": null,
"abstractUrl": "/proceedings-article/ijcbs/2009/3739a573/12OmNwDACyo",
"parentPublication": {
"id": "proceedings/ijcbs/2009/3739/0",
"title": "2009 International Joint Conference on Bioinformatics, Systems Biology and Intelligent Computing (IJCBS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pacificvis/2017/5738/0/08031605",
"title": "Implicit Sphere Shadow Maps",
"doi": null,
"abstractUrl": "/proceedings-article/pacificvis/2017/08031605/12OmNxcMShN",
"parentPublication": {
"id": "proceedings/pacificvis/2017/5738/0",
"title": "2017 IEEE Pacific Visualization Symposium (PacificVis)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2003/1988/0/19880396",
"title": "Rendering recognizably unique textures",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2003/19880396/12OmNy5zsmO",
"parentPublication": {
"id": "proceedings/iv/2003/1988/0",
"title": "Proceedings on Seventh International Conference on Information Visualization, 2003. IV 2003.",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvrv/2015/7673/0/7673a246",
"title": "Rendering and Modeling of Stratus Cloud Using Weather Forecast Data",
"doi": null,
"abstractUrl": "/proceedings-article/icvrv/2015/7673a246/12OmNzwpUo7",
"parentPublication": {
"id": "proceedings/icvrv/2015/7673/0",
"title": "2015 International Conference on Virtual Reality and Visualization (ICVRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/06/ttg2012060925",
"title": "A Versatile Optical Model for Hybrid Rendering of Volume Data",
"doi": null,
"abstractUrl": "/journal/tg/2012/06/ttg2012060925/13rRUwjGoFX",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090444",
"title": "Lighting Estimation via Differentiable Screen-Space Rendering",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090444/1jIxr4u5rFK",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/01/09552900",
"title": "Probabilistic Occlusion Culling using Confidence Maps for High-Quality Rendering of Large Particle Data",
"doi": null,
"abstractUrl": "/journal/tg/2022/01/09552900/1xic6f4Sc6I",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "08017601",
"articleId": "13rRUyogGAg",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08017633",
"articleId": "13rRUwgQpqN",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "17ShDTYesP5",
"name": "ttg201801-08017605s1.zip",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg201801-08017605s1.zip",
"extension": "zip",
"size": "25.3 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNzd7bmf",
"title": "March",
"year": "2011",
"issueNum": "03",
"idPrefix": "tg",
"pubType": "journal",
"volume": "17",
"label": "March",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUx0xPi5",
"doi": "10.1109/TVCG.2009.105",
"abstract": "This paper presents a study of gradient estimation methods for rendering unstructured-mesh volume data. Gradient estimation is necessary for rendering shaded isosurfaces and specular highlights, which provide important cues for shape and depth. Gradient estimation has been widely studied and deployed for regular-grid volume data to achieve local illumination effects, but has been, otherwise, for unstructured-mesh data. As a result, most of the unstructured-mesh volume visualizations made so far were unlit. In this paper, we present a comprehensive study of gradient estimation methods for unstructured meshes with respect to their cost and performance. Through a number of benchmarks, we discuss the effects of mesh quality and scalar function complexity in the accuracy of the reconstruction, and their impact in lighting-enabled volume rendering. Based on our study, we also propose two heuristic improvements to the gradient reconstruction process. The first heuristic improves the rendering quality with a hybrid algorithm that combines the results of the multiple reconstruction methods, based on the properties of a given mesh. The second heuristic improves the efficiency of its GPU implementation, by restricting the computation of the gradient on a fixed-size local neighborhood.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper presents a study of gradient estimation methods for rendering unstructured-mesh volume data. Gradient estimation is necessary for rendering shaded isosurfaces and specular highlights, which provide important cues for shape and depth. Gradient estimation has been widely studied and deployed for regular-grid volume data to achieve local illumination effects, but has been, otherwise, for unstructured-mesh data. As a result, most of the unstructured-mesh volume visualizations made so far were unlit. In this paper, we present a comprehensive study of gradient estimation methods for unstructured meshes with respect to their cost and performance. Through a number of benchmarks, we discuss the effects of mesh quality and scalar function complexity in the accuracy of the reconstruction, and their impact in lighting-enabled volume rendering. Based on our study, we also propose two heuristic improvements to the gradient reconstruction process. The first heuristic improves the rendering quality with a hybrid algorithm that combines the results of the multiple reconstruction methods, based on the properties of a given mesh. The second heuristic improves the efficiency of its GPU implementation, by restricting the computation of the gradient on a fixed-size local neighborhood.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper presents a study of gradient estimation methods for rendering unstructured-mesh volume data. Gradient estimation is necessary for rendering shaded isosurfaces and specular highlights, which provide important cues for shape and depth. Gradient estimation has been widely studied and deployed for regular-grid volume data to achieve local illumination effects, but has been, otherwise, for unstructured-mesh data. As a result, most of the unstructured-mesh volume visualizations made so far were unlit. In this paper, we present a comprehensive study of gradient estimation methods for unstructured meshes with respect to their cost and performance. Through a number of benchmarks, we discuss the effects of mesh quality and scalar function complexity in the accuracy of the reconstruction, and their impact in lighting-enabled volume rendering. Based on our study, we also propose two heuristic improvements to the gradient reconstruction process. The first heuristic improves the rendering quality with a hybrid algorithm that combines the results of the multiple reconstruction methods, based on the properties of a given mesh. The second heuristic improves the efficiency of its GPU implementation, by restricting the computation of the gradient on a fixed-size local neighborhood.",
"title": "A Comparison of Gradient Estimation Methods for Volume Rendering on Unstructured Meshes",
"normalizedTitle": "A Comparison of Gradient Estimation Methods for Volume Rendering on Unstructured Meshes",
"fno": "ttg2011030305",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Volume Rendering",
"Gradient Estimation",
"Local Illumination",
"Unstructured Meshes",
"Flow Visualization"
],
"authors": [
{
"givenName": "Carlos D.",
"surname": "Correa",
"fullName": "Carlos D. Correa",
"affiliation": "University of California, Davis, Davis",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Robert",
"surname": "Hero",
"fullName": "Robert Hero",
"affiliation": "University of California, Davis, Davis",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Kwan-Liu",
"surname": "Ma",
"fullName": "Kwan-Liu Ma",
"affiliation": "University of California, Davis, Davis",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "03",
"pubDate": "2011-03-01 00:00:00",
"pubType": "trans",
"pages": "305-319",
"year": "2011",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/ieee-vis/2003/2030/0/20300025",
"title": "Feature-Space Analysis of Unstructured Meshes",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/2003/20300025/12OmNAgY7kY",
"parentPublication": {
"id": "proceedings/ieee-vis/2003/2030/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-vis/2000/6478/0/64780016",
"title": "Hardware-Accelerated Volume and Isosurface Rendering Based on Cell-Projection",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/2000/64780016/12OmNBuL1lz",
"parentPublication": {
"id": "proceedings/ieee-vis/2000/6478/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sibgrapi/2011/4548/0/4548a093",
"title": "Accurate Volume Rendering of Unstructured Hexahedral Meshes",
"doi": null,
"abstractUrl": "/proceedings-article/sibgrapi/2011/4548a093/12OmNCcbE5T",
"parentPublication": {
"id": "proceedings/sibgrapi/2011/4548/0",
"title": "2011 24th SIBGRAPI Conference on Graphics, Patterns and Images",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vv/2004/8781/0/87810009",
"title": "A Fast High Accuracy Volume Renderer for Unstructured Data",
"doi": null,
"abstractUrl": "/proceedings-article/vv/2004/87810009/12OmNs0kyFD",
"parentPublication": {
"id": "proceedings/vv/2004/8781/0",
"title": "Volume Visualization and Graphics, IEEE Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-vis/2004/8788/0/87880433",
"title": "TetSplat Real-Time Rendering and Volume Clipping of Large Unstructured Tetrahedral Meshes",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/2004/87880433/12OmNzRHOOj",
"parentPublication": {
"id": "proceedings/ieee-vis/2004/8788/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-vis/2003/2030/0/20300044",
"title": "Hardware-Based Ray Casting for Tetrahedral Meshes",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/2003/20300044/12OmNzXnNw2",
"parentPublication": {
"id": "proceedings/ieee-vis/2003/2030/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/1996/03/v0242",
"title": "Frequency Analysis of Gradient Estimators in Volume Rendering",
"doi": null,
"abstractUrl": "/journal/tg/1996/03/v0242/13rRUwwaKsT",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/1998/01/v0037",
"title": "A High Accuracy Volume Renderer for Unstructured Data",
"doi": null,
"abstractUrl": "/journal/tg/1998/01/v0037/13rRUwwaKsU",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2004/06/v0695",
"title": "Image-Space Visibility Ordering for Cell Projection Volume Rendering of Unstructured Data",
"doi": null,
"abstractUrl": "/journal/tg/2004/06/v0695/13rRUyY294t",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2010/06/ttg2010061487",
"title": "Pre-Integrated Volume Rendering with Non-Linear Gradient Interpolation",
"doi": null,
"abstractUrl": "/journal/tg/2010/06/ttg2010061487/13rRUyuNswV",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2011030290",
"articleId": "13rRUwInvsL",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2011030320",
"articleId": "13rRUwgQpqJ",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNxEjY3I",
"title": "May/June",
"year": "2005",
"issueNum": "03",
"idPrefix": "tg",
"pubType": "journal",
"volume": "11",
"label": "May/June",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUxOdD89",
"doi": "10.1109/TVCG.2005.46",
"abstract": "Harvesting the power of modern graphics hardware to solve the complex problem of real-time rendering of large unstructured meshes is a major research goal in the volume visualization community. While, for regular grids, texture-based techniques are well-suited for current GPUs, the steps necessary for rendering unstructured meshes are not so easily mapped to current hardware. We propose a novel volume rendering technique that simplifies the CPU-based processing and shifts much of the sorting burden to the GPU, where it can be performed more efficiently. Our hardware-assisted visibility sorting algorithm is a hybrid technique that operates in both object-space and image-space. In object-space, the algorithm performs a partial sort of the 3D primitives in preparation for rasterization. The goal of the partial sort is to create a list of primitives that generate fragments in nearly sorted order. In image-space, the fragment stream is incrementally sorted using a fixed-depth sorting network. In our algorithm, the object-space work is performed by the CPU and the fragment-level sorting is done completely on the GPU. A prototype implementation of the algorithm demonstrates that the fragment-level sorting achieves rendering rates of between one and six million tetrahedral cells per second on an ATI Radeon 9800.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Harvesting the power of modern graphics hardware to solve the complex problem of real-time rendering of large unstructured meshes is a major research goal in the volume visualization community. While, for regular grids, texture-based techniques are well-suited for current GPUs, the steps necessary for rendering unstructured meshes are not so easily mapped to current hardware. We propose a novel volume rendering technique that simplifies the CPU-based processing and shifts much of the sorting burden to the GPU, where it can be performed more efficiently. Our hardware-assisted visibility sorting algorithm is a hybrid technique that operates in both object-space and image-space. In object-space, the algorithm performs a partial sort of the 3D primitives in preparation for rasterization. The goal of the partial sort is to create a list of primitives that generate fragments in nearly sorted order. In image-space, the fragment stream is incrementally sorted using a fixed-depth sorting network. In our algorithm, the object-space work is performed by the CPU and the fragment-level sorting is done completely on the GPU. A prototype implementation of the algorithm demonstrates that the fragment-level sorting achieves rendering rates of between one and six million tetrahedral cells per second on an ATI Radeon 9800.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Harvesting the power of modern graphics hardware to solve the complex problem of real-time rendering of large unstructured meshes is a major research goal in the volume visualization community. While, for regular grids, texture-based techniques are well-suited for current GPUs, the steps necessary for rendering unstructured meshes are not so easily mapped to current hardware. We propose a novel volume rendering technique that simplifies the CPU-based processing and shifts much of the sorting burden to the GPU, where it can be performed more efficiently. Our hardware-assisted visibility sorting algorithm is a hybrid technique that operates in both object-space and image-space. In object-space, the algorithm performs a partial sort of the 3D primitives in preparation for rasterization. The goal of the partial sort is to create a list of primitives that generate fragments in nearly sorted order. In image-space, the fragment stream is incrementally sorted using a fixed-depth sorting network. In our algorithm, the object-space work is performed by the CPU and the fragment-level sorting is done completely on the GPU. A prototype implementation of the algorithm demonstrates that the fragment-level sorting achieves rendering rates of between one and six million tetrahedral cells per second on an ATI Radeon 9800.",
"title": "Hardware-Assisted Visibility Sorting for Unstructured Volume Rendering",
"normalizedTitle": "Hardware-Assisted Visibility Sorting for Unstructured Volume Rendering",
"fno": "v0285",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Volume Visualization",
"Graphics Processors",
"Visibility Sorting"
],
"authors": [
{
"givenName": "Steven P.",
"surname": "Callahan",
"fullName": "Steven P. Callahan",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "Milan",
"surname": "Ikits",
"fullName": "Milan Ikits",
"affiliation": "IEEE",
"__typename": "ArticleAuthorType"
},
{
"givenName": "João L.D.",
"surname": "Comba",
"fullName": "João L.D. Comba",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "Cláudio T.",
"surname": "Silva",
"fullName": "Cláudio T. Silva",
"affiliation": "IEEE",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "03",
"pubDate": "2005-05-01 00:00:00",
"pubType": "trans",
"pages": "285-295",
"year": "2005",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/ieee-vis/2001/7200/0/7200kraus",
"title": "Cell-Projection of Cyclic Meshes",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/2001/7200kraus/12OmNA2cYzt",
"parentPublication": {
"id": "proceedings/ieee-vis/2001/7200/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-vis/2003/2030/0/20300038",
"title": "Acceleration Techniques for GPU-based Volume Rendering",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/2003/20300038/12OmNC2xhD8",
"parentPublication": {
"id": "proceedings/ieee-vis/2003/2030/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-vis/1995/7187/0/71870003",
"title": "Interval Set: A Volume Rendering Technique Generalizing Isosurface Extraction",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/1995/71870003/12OmNs5rl20",
"parentPublication": {
"id": "proceedings/ieee-vis/1995/7187/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-vis/1997/8262/0/82620035",
"title": "The VSBUFFER: visibility ordering of unstructured volume primitives by polygon drawing",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/1997/82620035/12OmNvTTcc7",
"parentPublication": {
"id": "proceedings/ieee-vis/1997/8262/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2004/01/v0058",
"title": "Binary-Space-Partitioned Images for Resolving Image-Based Visibility",
"doi": null,
"abstractUrl": "/journal/tg/2004/01/v0058/13rRUwdrdSp",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2005/05/v0584",
"title": "Interactive Transparency Rendering for Large CAD Models",
"doi": null,
"abstractUrl": "/journal/tg/2005/05/v0584/13rRUwhpBO0",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2011/02/ttg2011020192",
"title": "Visibility Histograms and Visibility-Driven Transfer Functions",
"doi": null,
"abstractUrl": "/journal/tg/2011/02/ttg2011020192/13rRUwwaKt3",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2004/06/v0695",
"title": "Image-Space Visibility Ordering for Cell Projection Volume Rendering of Unstructured Data",
"doi": null,
"abstractUrl": "/journal/tg/2004/06/v0695/13rRUyY294t",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/02/ttg2014020238",
"title": "Memory-Hazard-Aware K-Buffer Algorithm for Order-Independent Transparency Rendering",
"doi": null,
"abstractUrl": "/journal/tg/2014/02/ttg2014020238/13rRUypp57F",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/08/09409710",
"title": "Interactive Focus+Context Rendering for Hexahedral Mesh Inspection",
"doi": null,
"abstractUrl": "/journal/tg/2021/08/09409710/1sXjFab9xYc",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "v0273",
"articleId": "13rRUxYrbM4",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "v0296",
"articleId": "13rRUyYSWsG",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
|
{
"issue": {
"id": "1J9y2mtpt3a",
"title": "Jan.",
"year": "2023",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": "29",
"label": "Jan.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1H1gpFOnUeQ",
"doi": "10.1109/TVCG.2022.3209418",
"abstract": "We propose a simple yet effective method for clustering finite elements to improve preprocessing times and rendering performance of unstructured volumetric grids without requiring auxiliary connectivity data. Rather than building bounding volume hierarchies (BVHs) over individual elements, we sort elements along with a Hilbert curve and aggregate neighboring elements together, improving BVH memory consumption by over an order of magnitude. Then to further reduce memory consumption, we cluster the mesh on the fly into sub-meshes with smaller indices using a series of efficient parallel mesh re-indexing operations. These clusters are then passed to a highly optimized ray tracing API for point containment queries and ray-cluster intersection testing. Each cluster is assigned a maximum extinction value for adaptive sampling, which we rasterize into non-overlapping view-aligned bins allocated along the ray. These maximum extinction bins are then used to guide the placement of samples along the ray during visualization, reducing the number of samples required by multiple orders of magnitude (depending on the dataset), thereby improving overall visualization interactivity. Using our approach, we improve rendering performance over a competitive baseline on the NASA Mars Lander dataset from 6× (1 <italic>frame per second</italic> (fps) and <italic>1.0 M rays per second</italic> (rps) up to now <italic>6 fps</italic> and <italic>12.4 M rps</italic>, now including volumetric shadows) while simultaneously reducing memory consumption by 3×<italic>(33 GB</italic> down to <italic>11 GB)</italic> and avoiding any offline preprocessing steps, enabling high-quality interactive visualization on consumer graphics cards. Then by utilizing the full 48 GB of an RTX 8000, we improve the performance of Lander by 17 × (1 <italic>fps</italic> up to <italic>17 fps, 1.0 M rps</italic> up to <italic>35.6 M rps)</italic>.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We propose a simple yet effective method for clustering finite elements to improve preprocessing times and rendering performance of unstructured volumetric grids without requiring auxiliary connectivity data. Rather than building bounding volume hierarchies (BVHs) over individual elements, we sort elements along with a Hilbert curve and aggregate neighboring elements together, improving BVH memory consumption by over an order of magnitude. Then to further reduce memory consumption, we cluster the mesh on the fly into sub-meshes with smaller indices using a series of efficient parallel mesh re-indexing operations. These clusters are then passed to a highly optimized ray tracing API for point containment queries and ray-cluster intersection testing. Each cluster is assigned a maximum extinction value for adaptive sampling, which we rasterize into non-overlapping view-aligned bins allocated along the ray. These maximum extinction bins are then used to guide the placement of samples along the ray during visualization, reducing the number of samples required by multiple orders of magnitude (depending on the dataset), thereby improving overall visualization interactivity. Using our approach, we improve rendering performance over a competitive baseline on the NASA Mars Lander dataset from 6× (1 <italic>frame per second</italic> (fps) and <italic>1.0 M rays per second</italic> (rps) up to now <italic>6 fps</italic> and <italic>12.4 M rps</italic>, now including volumetric shadows) while simultaneously reducing memory consumption by 3×<italic>(33 GB</italic> down to <italic>11 GB)</italic> and avoiding any offline preprocessing steps, enabling high-quality interactive visualization on consumer graphics cards. Then by utilizing the full 48 GB of an RTX 8000, we improve the performance of Lander by 17 × (1 <italic>fps</italic> up to <italic>17 fps, 1.0 M rps</italic> up to <italic>35.6 M rps)</italic>.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We propose a simple yet effective method for clustering finite elements to improve preprocessing times and rendering performance of unstructured volumetric grids without requiring auxiliary connectivity data. Rather than building bounding volume hierarchies (BVHs) over individual elements, we sort elements along with a Hilbert curve and aggregate neighboring elements together, improving BVH memory consumption by over an order of magnitude. Then to further reduce memory consumption, we cluster the mesh on the fly into sub-meshes with smaller indices using a series of efficient parallel mesh re-indexing operations. These clusters are then passed to a highly optimized ray tracing API for point containment queries and ray-cluster intersection testing. Each cluster is assigned a maximum extinction value for adaptive sampling, which we rasterize into non-overlapping view-aligned bins allocated along the ray. These maximum extinction bins are then used to guide the placement of samples along the ray during visualization, reducing the number of samples required by multiple orders of magnitude (depending on the dataset), thereby improving overall visualization interactivity. Using our approach, we improve rendering performance over a competitive baseline on the NASA Mars Lander dataset from 6× (1 frame per second (fps) and 1.0 M rays per second (rps) up to now 6 fps and 12.4 M rps, now including volumetric shadows) while simultaneously reducing memory consumption by 3×(33 GB down to 11 GB) and avoiding any offline preprocessing steps, enabling high-quality interactive visualization on consumer graphics cards. Then by utilizing the full 48 GB of an RTX 8000, we improve the performance of Lander by 17 × (1 fps up to 17 fps, 1.0 M rps up to 35.6 M rps).",
"title": "Quick Clusters: A GPU-Parallel Partitioning for Efficient Path Tracing of Unstructured Volumetric Grids",
"normalizedTitle": "Quick Clusters: A GPU-Parallel Partitioning for Efficient Path Tracing of Unstructured Volumetric Grids",
"fno": "09904457",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Application Program Interfaces",
"Data Visualisation",
"Graphics Processing Units",
"Mesh Generation",
"Pattern Clustering",
"Ray Tracing",
"Rendering Computer Graphics",
"Adaptive Sampling",
"Bounding Volume Hierarchies",
"BVH Memory Consumption",
"Consumer Graphics Cards",
"Extinction Bins",
"Extinction Value",
"Finite Element Clustering",
"GPU Parallel Partitioning",
"High Quality Interactive Visualization",
"Hilbert Curve",
"Memory Size 48.0 G Byte",
"NASA Mars Lander Dataset",
"Nonoverlapping View Aligned Bins",
"Parallel Mesh Re Indexing",
"Path Tracing",
"Point Containment Queries",
"Ray Tracing API",
"Ray Cluster Intersection Testing",
"Rendering Performance",
"RTX 8000",
"Sub Meshes",
"Unstructured Volumetric Grids",
"Visualization Interactivity",
"Volumetric Shadows",
"Rendering Computer Graphics",
"Ray Tracing",
"Memory Management",
"Graphics Processing Units",
"Data Visualization",
"NASA",
"Monte Carlo Methods",
"Ray Tracing",
"Path Tracing",
"Volume Rendering",
"Scientific Visualization",
"Delta Tracking"
],
"authors": [
{
"givenName": "Nate",
"surname": "Morrical",
"fullName": "Nate Morrical",
"affiliation": "SCI Institute, University of Utah, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Alper",
"surname": "Sahistan",
"fullName": "Alper Sahistan",
"affiliation": "Bilkent University, Turkey",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Uğur",
"surname": "Güdükbay",
"fullName": "Uğur Güdükbay",
"affiliation": "Bilkent University, Turkey",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Ingo",
"surname": "Wald",
"fullName": "Ingo Wald",
"affiliation": "NVIDIA, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Valerio",
"surname": "Pascucci",
"fullName": "Valerio Pascucci",
"affiliation": "SCI Institute, University of Utah, USA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2023-01-01 00:00:00",
"pubType": "trans",
"pages": "537-547",
"year": "2023",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/pvg/2003/2091/0/20910012",
"title": "Distributed Interactive Ray Tracing for Large Volume Visualization",
"doi": null,
"abstractUrl": "/proceedings-article/pvg/2003/20910012/12OmNBsue7j",
"parentPublication": {
"id": "proceedings/pvg/2003/2091/0",
"title": "Parallel and Large-Data Visualization and Graphics, IEEE Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/rt/2007/1629/0/04342598",
"title": "Realtime Ray Tracing on GPU with BVH-based Packet Traversal",
"doi": null,
"abstractUrl": "/proceedings-article/rt/2007/04342598/12OmNqJZgLN",
"parentPublication": {
"id": "proceedings/rt/2007/1629/0",
"title": "IEEE/ EG Symposium on Interactive Ray Tracing 2007",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sbgames/2011/4648/0/4648a011",
"title": "GPU-Based Data Structure for a Parallel Ray Tracing Illumination Algorithm",
"doi": null,
"abstractUrl": "/proceedings-article/sbgames/2011/4648a011/12OmNvwC5ve",
"parentPublication": {
"id": "proceedings/sbgames/2011/4648/0",
"title": "2011 Brazilian Symposium on Games and Digital Entertainment",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/rt/2008/2741/0/04634619",
"title": "Large ray packets for real-time Whitted ray tracing",
"doi": null,
"abstractUrl": "/proceedings-article/rt/2008/04634619/12OmNwcCITG",
"parentPublication": {
"id": "proceedings/rt/2008/2741/0",
"title": "Symposium on Interactive Ray Tracing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-vis/1995/7187/0/71870027",
"title": "A Hardware Acceleration Method for Volumetric Ray Tracing",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/1995/71870027/12OmNxHJ9p1",
"parentPublication": {
"id": "proceedings/ieee-vis/1995/7187/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pvgs/2003/8122/0/01249046",
"title": "Distributed interactive ray tracing for large volume visualization",
"doi": null,
"abstractUrl": "/proceedings-article/pvgs/2003/01249046/12OmNz2TCDv",
"parentPublication": {
"id": "proceedings/pvgs/2003/8122/0",
"title": "IEEE Symposium on Parallel and Large-Data Visualization and Graphics 2003",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/1997/01/mcg1997010042",
"title": "Faster Ray Tracing Using Adaptive Grids",
"doi": null,
"abstractUrl": "/magazine/cg/1997/01/mcg1997010042/13rRUEgs2vy",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/micro/2022/6272/0/627200a263",
"title": "Vulkan-Sim: A GPU Architecture Simulator for Ray Tracing",
"doi": null,
"abstractUrl": "/proceedings-article/micro/2022/627200a263/1HMSwhI3lO8",
"parentPublication": {
"id": "proceedings/micro/2022/6272/0",
"title": "2022 55th IEEE/ACM International Symposium on Microarchitecture (MICRO)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09940545",
"title": "Temporal Coherence-Based Distributed Ray Tracing of Massive Scenes",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09940545/1I6O5QqMxQ4",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vis/2019/4941/0/08933539",
"title": "Efficient Space Skipping and Adaptive Sampling of Unstructured Volumes Using Hardware Accelerated Ray Tracing",
"doi": null,
"abstractUrl": "/proceedings-article/vis/2019/08933539/1fTgIhNytI4",
"parentPublication": {
"id": "proceedings/vis/2019/4941/0",
"title": "2019 IEEE Visualization Conference (VIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09903677",
"articleId": "1GZoj8mhSPS",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09904439",
"articleId": "1H0GgY7IaCk",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1J9yYWoGljy",
"name": "ttg202301-09904457s1-tvcg-3209418-mm.zip",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg202301-09904457s1-tvcg-3209418-mm.zip",
"extension": "zip",
"size": "142 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
}
|
{
"issue": {
"id": "1ECXHMu0OWc",
"title": "Aug.",
"year": "2022",
"issueNum": "08",
"idPrefix": "tg",
"pubType": "journal",
"volume": "28",
"label": "Aug.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1porhlu0eEo",
"doi": "10.1109/TVCG.2020.3042930",
"abstract": "We present a technique that leverages ray tracing hardware available in recent Nvidia RTX GPUs to solve a problem other than classical ray tracing. Specifically, we demonstrate how to use these units to accelerate the point location of general unstructured elements consisting of both planar and bilinear faces. This unstructured mesh point location problem has previously been challenging to accelerate on GPU architectures; yet, the performance of these queries is crucial to many unstructured volume rendering and compute applications. Starting with a CUDA reference method, we describe and evaluate three approaches that reformulate these point queries to incrementally map algorithmic complexity to these new hardware ray tracing units. Each variant replaces the simpler problem of point queries with a more complex one of ray queries. Initial variants exploit ray tracing cores for accelerated BVH traversal, and subsequent variants use ray-triangle intersections and per-face metadata to detect point-in-element intersections. Although these later variants are more algorithmically complex, they are significantly faster than the reference method thanks to hardware acceleration. Using our approach, we improve the performance of an unstructured volume renderer by up to <inline-formula><tex-math notation=\"LaTeX\">Z_$4\\times$_Z</tex-math></inline-formula> for tetrahedral meshes and up to <inline-formula><tex-math notation=\"LaTeX\">Z_$15\\times$_Z</tex-math></inline-formula> for general bilinear element meshes, matching, or out-performing state-of-the-art solutions while simultaneously improving on robustness and ease-of-implementation.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We present a technique that leverages ray tracing hardware available in recent Nvidia RTX GPUs to solve a problem other than classical ray tracing. Specifically, we demonstrate how to use these units to accelerate the point location of general unstructured elements consisting of both planar and bilinear faces. This unstructured mesh point location problem has previously been challenging to accelerate on GPU architectures; yet, the performance of these queries is crucial to many unstructured volume rendering and compute applications. Starting with a CUDA reference method, we describe and evaluate three approaches that reformulate these point queries to incrementally map algorithmic complexity to these new hardware ray tracing units. Each variant replaces the simpler problem of point queries with a more complex one of ray queries. Initial variants exploit ray tracing cores for accelerated BVH traversal, and subsequent variants use ray-triangle intersections and per-face metadata to detect point-in-element intersections. Although these later variants are more algorithmically complex, they are significantly faster than the reference method thanks to hardware acceleration. Using our approach, we improve the performance of an unstructured volume renderer by up to <inline-formula><tex-math notation=\"LaTeX\">$4\\times$</tex-math><alternatives><mml:math><mml:mrow><mml:mn>4</mml:mn><mml:mo>×</mml:mo></mml:mrow></mml:math><inline-graphic xlink:href=\"morrical-ieq1-3042930.gif\"/></alternatives></inline-formula> for tetrahedral meshes and up to <inline-formula><tex-math notation=\"LaTeX\">$15\\times$</tex-math><alternatives><mml:math><mml:mrow><mml:mn>15</mml:mn><mml:mo>×</mml:mo></mml:mrow></mml:math><inline-graphic xlink:href=\"morrical-ieq2-3042930.gif\"/></alternatives></inline-formula> for general bilinear element meshes, matching, or out-performing state-of-the-art solutions while simultaneously improving on robustness and ease-of-implementation.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We present a technique that leverages ray tracing hardware available in recent Nvidia RTX GPUs to solve a problem other than classical ray tracing. Specifically, we demonstrate how to use these units to accelerate the point location of general unstructured elements consisting of both planar and bilinear faces. This unstructured mesh point location problem has previously been challenging to accelerate on GPU architectures; yet, the performance of these queries is crucial to many unstructured volume rendering and compute applications. Starting with a CUDA reference method, we describe and evaluate three approaches that reformulate these point queries to incrementally map algorithmic complexity to these new hardware ray tracing units. Each variant replaces the simpler problem of point queries with a more complex one of ray queries. Initial variants exploit ray tracing cores for accelerated BVH traversal, and subsequent variants use ray-triangle intersections and per-face metadata to detect point-in-element intersections. Although these later variants are more algorithmically complex, they are significantly faster than the reference method thanks to hardware acceleration. Using our approach, we improve the performance of an unstructured volume renderer by up to - for tetrahedral meshes and up to - for general bilinear element meshes, matching, or out-performing state-of-the-art solutions while simultaneously improving on robustness and ease-of-implementation.",
"title": "Accelerating Unstructured Mesh Point Location With RT Cores",
"normalizedTitle": "Accelerating Unstructured Mesh Point Location With RT Cores",
"fno": "09286513",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Computational Complexity",
"Graphics Processing Units",
"Hardware Accelerators",
"Mesh Generation",
"Meta Data",
"Parallel Architectures",
"Ray Tracing",
"Rendering Computer Graphics",
"GPU Architectures",
"Unstructured Volume Rendering",
"CUDA Reference",
"Point Queries",
"Algorithmic Complexity",
"Hardware Ray Tracing Units",
"Ray Queries",
"Ray Triangle Intersections",
"Per Face Metadata",
"Point In Element Intersections",
"Hardware Acceleration",
"Tetrahedral Meshes",
"General Bilinear Element Meshes",
"Nvidia RTX GPUs",
"Unstructured Mesh Point Location",
"RT Cores",
"Ray Tracing Hardware",
"BVH Traversal",
"Ray Tracing",
"Acceleration",
"Hardware",
"Graphics Processing Units",
"Rendering Computer Graphics",
"Computer Architecture",
"Tensors",
"Scientific Ray Tracing",
"Unstructured Scalar Data",
"GPGPU",
"Simulation",
"Volume Rendering"
],
"authors": [
{
"givenName": "Nate",
"surname": "Morrical",
"fullName": "Nate Morrical",
"affiliation": "SCI Institute, University of Utah, Salt Lake City, UT, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Ingo",
"surname": "Wald",
"fullName": "Ingo Wald",
"affiliation": "NVIDIA, Santa Clara, CA, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Will",
"surname": "Usher",
"fullName": "Will Usher",
"affiliation": "SCI Institute, University of Utah, Salt Lake City, UT, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Valerio",
"surname": "Pascucci",
"fullName": "Valerio Pascucci",
"affiliation": "SCI Institute, University of Utah, Salt Lake City, UT, USA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "08",
"pubDate": "2022-08-01 00:00:00",
"pubType": "trans",
"pages": "2852-2866",
"year": "2022",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "trans/tc/2023/02/09772247",
"title": "High Performance Hierarchical Tucker Tensor Learning Using GPU Tensor Cores",
"doi": null,
"abstractUrl": "/journal/tc/2023/02/09772247/1DgjEPL5dss",
"parentPublication": {
"id": "trans/tc",
"title": "IEEE Transactions on Computers",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2023/05/09925111",
"title": "Analytical Tensor Voting in ND Space and its Properties",
"doi": null,
"abstractUrl": "/journal/tp/2023/05/09925111/1HBHWeQCASc",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tc/2023/06/09924585",
"title": "Tensor Based Multivariate Polynomial Modulo Multiplier for Cryptographic Applications",
"doi": null,
"abstractUrl": "/journal/tc/2023/06/09924585/1HBI0BSZqjC",
"parentPublication": {
"id": "trans/tc",
"title": "IEEE Transactions on Computers",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tc/2022/12/09913653",
"title": "Algorithm and Hardware Co-Design of Energy-Efficient LSTM Networks for Video Recognition With Hierarchical Tucker Tensor Decomposition",
"doi": null,
"abstractUrl": "/journal/tc/2022/12/09913653/1HmgfOUW9vG",
"parentPublication": {
"id": "trans/tc",
"title": "IEEE Transactions on Computers",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tc/2023/06/09987675",
"title": "High-Performance Tensor Learning Primitives Using GPU Tensor Cores",
"doi": null,
"abstractUrl": "/journal/tc/2023/06/09987675/1J7RPYvN6YU",
"parentPublication": {
"id": "trans/tc",
"title": "IEEE Transactions on Computers",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2022/08/09369083",
"title": "Robust Low-Tubal-Rank Tensor Recovery From Binary Measurements",
"doi": null,
"abstractUrl": "/journal/tp/2022/08/09369083/1rFvS23KDAI",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tm/2022/11/09372779",
"title": "Radio Frequency Fingerprinting on the Edge",
"doi": null,
"abstractUrl": "/journal/tm/2022/11/09372779/1rNOBlMpZWE",
"parentPublication": {
"id": "trans/tm",
"title": "IEEE Transactions on Mobile Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tc/2022/03/09354558",
"title": "An Efficient CRT-Based Bit-Parallel Multiplier for Special Pentanomials",
"doi": null,
"abstractUrl": "/journal/tc/2022/03/09354558/1reXzOLMhX2",
"parentPublication": {
"id": "trans/tc",
"title": "IEEE Transactions on Computers",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/sc/2022/03/09361107",
"title": "A Generic Deep Learning Based Cough Analysis System From Clinically Validated Samples for Point-of-Need Covid-19 Test and Severity Levels",
"doi": null,
"abstractUrl": "/journal/sc/2022/03/09361107/1rsepNK8KLC",
"parentPublication": {
"id": "trans/sc",
"title": "IEEE Transactions on Services Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/td/2022/11/09650723",
"title": "<inline-formula><tex-math notation=\"LaTeX\">Z_$TC-Stream$_Z</tex-math></inline-formula>: Large-Scale Graph Triangle Counting on a Single Machine Using GPUs",
"doi": null,
"abstractUrl": "/journal/td/2022/11/09650723/1zkp1OCIUHS",
"parentPublication": {
"id": "trans/td",
"title": "IEEE Transactions on Parallel & Distributed Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09293392",
"articleId": "1pyomiXbJQs",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09318559",
"articleId": "1qdT8m4QLIY",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1ECXPcJJra0",
"name": "ttg202208-09286513s1-tvcg-3042930-mm.zip",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg202208-09286513s1-tvcg-3042930-mm.zip",
"extension": "zip",
"size": "43.3 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNz5apxc",
"title": "July",
"year": "2017",
"issueNum": "07",
"idPrefix": "tg",
"pubType": "journal",
"volume": "23",
"label": "July",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUxly8T1",
"doi": "10.1109/TVCG.2016.2570755",
"abstract": "Progressive Visual Analytics aims at improving the interactivity in existing analytics techniques by means of visualization as well as interaction with intermediate results. One key method for data analysis is dimensionality reduction, for example, to produce 2D embeddings that can be visualized and analyzed efficiently. t-Distributed Stochastic Neighbor Embedding (tSNE) is a well-suited technique for the visualization of high-dimensional data. tSNE can create meaningful intermediate results but suffers from a slow initialization that constrains its application in Progressive Visual Analytics. We introduce a controllable tSNE approximation (A-tSNE), which trades off speed and accuracy, to enable interactive data exploration. We offer real-time visualization techniques, including a density-based solution and a Magic Lens to inspect the degree of approximation. With this feedback, the user can decide on local refinements and steer the approximation level during the analysis. We demonstrate our technique with several datasets, in a real-world research scenario and for the real-time analysis of high-dimensional streams to illustrate its effectiveness for interactive data analysis.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Progressive Visual Analytics aims at improving the interactivity in existing analytics techniques by means of visualization as well as interaction with intermediate results. One key method for data analysis is dimensionality reduction, for example, to produce 2D embeddings that can be visualized and analyzed efficiently. t-Distributed Stochastic Neighbor Embedding (tSNE) is a well-suited technique for the visualization of high-dimensional data. tSNE can create meaningful intermediate results but suffers from a slow initialization that constrains its application in Progressive Visual Analytics. We introduce a controllable tSNE approximation (A-tSNE), which trades off speed and accuracy, to enable interactive data exploration. We offer real-time visualization techniques, including a density-based solution and a Magic Lens to inspect the degree of approximation. With this feedback, the user can decide on local refinements and steer the approximation level during the analysis. We demonstrate our technique with several datasets, in a real-world research scenario and for the real-time analysis of high-dimensional streams to illustrate its effectiveness for interactive data analysis.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Progressive Visual Analytics aims at improving the interactivity in existing analytics techniques by means of visualization as well as interaction with intermediate results. One key method for data analysis is dimensionality reduction, for example, to produce 2D embeddings that can be visualized and analyzed efficiently. t-Distributed Stochastic Neighbor Embedding (tSNE) is a well-suited technique for the visualization of high-dimensional data. tSNE can create meaningful intermediate results but suffers from a slow initialization that constrains its application in Progressive Visual Analytics. We introduce a controllable tSNE approximation (A-tSNE), which trades off speed and accuracy, to enable interactive data exploration. We offer real-time visualization techniques, including a density-based solution and a Magic Lens to inspect the degree of approximation. With this feedback, the user can decide on local refinements and steer the approximation level during the analysis. We demonstrate our technique with several datasets, in a real-world research scenario and for the real-time analysis of high-dimensional streams to illustrate its effectiveness for interactive data analysis.",
"title": "Approximated and User Steerable tSNE for Progressive Visual Analytics",
"normalizedTitle": "Approximated and User Steerable tSNE for Progressive Visual Analytics",
"fno": "07473883",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Data Visualization",
"Visual Analytics",
"Algorithm Design And Analysis",
"Approximation Algorithms",
"Real Time Systems",
"Computational Complexity",
"High Dimensional Data",
"Dimensionality Reduction",
"Progressive Visual Analytics",
"Approximate Computation"
],
"authors": [
{
"givenName": "Nicola",
"surname": "Pezzotti",
"fullName": "Nicola Pezzotti",
"affiliation": "Computer Graphics and Visualization group, Delft University of Technology, Delft, The Netherlands",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Boudewijn P. F.",
"surname": "Lelieveldt",
"fullName": "Boudewijn P. F. Lelieveldt",
"affiliation": "Division of Image Processing, Department of Radiology, Leiden University Medical Center, Leiden, The Netherlands",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Laurens van der",
"surname": "Maaten",
"fullName": "Laurens van der Maaten",
"affiliation": "Pattern Recognition and Bioinformatics group, Delft University of Technology, Delft, The Netherlands",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Thomas",
"surname": "Höllt",
"fullName": "Thomas Höllt",
"affiliation": "Computer Graphics and Visualization group, Delft University of Technology, Delft, The Netherlands",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Elmar",
"surname": "Eisemann",
"fullName": "Elmar Eisemann",
"affiliation": "Computer Graphics and Visualization group, Delft University of Technology, Delft, The Netherlands",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Anna",
"surname": "Vilanova",
"fullName": "Anna Vilanova",
"affiliation": "Computer Graphics and Visualization group, Delft University of Technology, Delft, The Netherlands",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "07",
"pubDate": "2017-07-01 00:00:00",
"pubType": "trans",
"pages": "1739-1752",
"year": "2017",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/iv/2017/0831/0/0831a422",
"title": "Visual Analytics for Electronic Intelligence: Challenges and Opportunities",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2017/0831a422/12OmNB7LvBm",
"parentPublication": {
"id": "proceedings/iv/2017/0831/0",
"title": "2017 21st International Conference Information Visualisation (IV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hicss/2013/4892/0/4892b495",
"title": "A Role for Reasoning in Visual Analytics",
"doi": null,
"abstractUrl": "/proceedings-article/hicss/2013/4892b495/12OmNqJ8tq4",
"parentPublication": {
"id": "proceedings/hicss/2013/4892/0",
"title": "2013 46th Hawaii International Conference on System Sciences",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2008/01/mcg2008010018",
"title": "An Information-Theoretic View of Visual Analytics",
"doi": null,
"abstractUrl": "/magazine/cg/2008/01/mcg2008010018/13rRUB6SpRW",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/co/2013/08/mco2013080090",
"title": "Bixplorer: Visual Analytics with Biclusters",
"doi": null,
"abstractUrl": "/magazine/co/2013/08/mco2013080090/13rRUwcAqvs",
"parentPublication": {
"id": "mags/co",
"title": "Computer",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/01/08019872",
"title": "DeepEyes: Progressive Visual Analytics for Designing Deep Neural Networks",
"doi": null,
"abstractUrl": "/journal/tg/2018/01/08019872/13rRUxlgxTs",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/12/06876049",
"title": "Progressive Visual Analytics: User-Driven Visual Exploration of In-Progress Analytics",
"doi": null,
"abstractUrl": "/journal/tg/2014/12/06876049/13rRUyogGAd",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/02/08462793",
"title": "PANENE: A Progressive Algorithm for Indexing and Querying Approximate <italic>k</italic>-Nearest Neighbors",
"doi": null,
"abstractUrl": "/journal/tg/2020/02/08462793/13w3lozFWqB",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/01/08467535",
"title": "Visual Analytics for Topic Model Optimization based on User-Steerable Speculative Execution",
"doi": null,
"abstractUrl": "/journal/tg/2019/01/08467535/17D45XeKgtW",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/07/08943144",
"title": "ProReveal: Progressive Visual Analytics With Safeguards",
"doi": null,
"abstractUrl": "/journal/tg/2021/07/08943144/1g3bi26D34k",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vizsec/2019/3876/0/09161633",
"title": "NetCapVis: Web-based Progressive Visual Analytics for Network Packet Captures",
"doi": null,
"abstractUrl": "/proceedings-article/vizsec/2019/09161633/1m6hHX7VF7y",
"parentPublication": {
"id": "proceedings/vizsec/2019/3876/0",
"title": "2019 IEEE Symposium on Visualization for Cyber Security (VizSec)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "07448467",
"articleId": "13rRUygBw7e",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07452672",
"articleId": "13rRUx0xPZB",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "17ShDTXWRX4",
"name": "ttg201707-07473883s1.zip",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg201707-07473883s1.zip",
"extension": "zip",
"size": "46.7 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNAtstbv",
"title": "Feb.",
"year": "2020",
"issueNum": "02",
"idPrefix": "tg",
"pubType": "journal",
"volume": "26",
"label": "Feb.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13w3lozFWqB",
"doi": "10.1109/TVCG.2018.2869149",
"abstract": "We present PANENE, a progressive algorithm for approximate nearest neighbor indexing and querying. Although the use of k-nearest neighbor (KNN) libraries is common in many data analysis methods, most KNN algorithms can only be queried when the whole dataset has been indexed, i.e., they are not online. Even the few online implementations are not progressive in the sense that the time to index incoming data is not bounded and cannot satisfy the latency requirements of progressive systems. This long latency has significantly limited the use of many machine learning methods, such as t-SNE, in interactive visual analytics. PANENE is a novel algorithm for Progressive Approximate k-NEarest NEighbors, enabling fast KNN queries while continuously indexing new batches of data. Following the progressive computation paradigm, PANENE operations can be bounded in time, allowing analysts to access running results within an interactive latency. PANENE can also incrementally build and maintain a cache data structure, a KNN lookup table, to enable constant-time lookups for KNN queries. Finally, we present three progressive applications of PANENE, such as regression, density estimation, and responsive t-SNE, opening up new opportunities to use complex algorithms in interactive systems.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We present PANENE, a progressive algorithm for approximate nearest neighbor indexing and querying. Although the use of k-nearest neighbor (KNN) libraries is common in many data analysis methods, most KNN algorithms can only be queried when the whole dataset has been indexed, i.e., they are not online. Even the few online implementations are not progressive in the sense that the time to index incoming data is not bounded and cannot satisfy the latency requirements of progressive systems. This long latency has significantly limited the use of many machine learning methods, such as t-SNE, in interactive visual analytics. PANENE is a novel algorithm for Progressive Approximate k-NEarest NEighbors, enabling fast KNN queries while continuously indexing new batches of data. Following the progressive computation paradigm, PANENE operations can be bounded in time, allowing analysts to access running results within an interactive latency. PANENE can also incrementally build and maintain a cache data structure, a KNN lookup table, to enable constant-time lookups for KNN queries. Finally, we present three progressive applications of PANENE, such as regression, density estimation, and responsive t-SNE, opening up new opportunities to use complex algorithms in interactive systems.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We present PANENE, a progressive algorithm for approximate nearest neighbor indexing and querying. Although the use of k-nearest neighbor (KNN) libraries is common in many data analysis methods, most KNN algorithms can only be queried when the whole dataset has been indexed, i.e., they are not online. Even the few online implementations are not progressive in the sense that the time to index incoming data is not bounded and cannot satisfy the latency requirements of progressive systems. This long latency has significantly limited the use of many machine learning methods, such as t-SNE, in interactive visual analytics. PANENE is a novel algorithm for Progressive Approximate k-NEarest NEighbors, enabling fast KNN queries while continuously indexing new batches of data. Following the progressive computation paradigm, PANENE operations can be bounded in time, allowing analysts to access running results within an interactive latency. PANENE can also incrementally build and maintain a cache data structure, a KNN lookup table, to enable constant-time lookups for KNN queries. Finally, we present three progressive applications of PANENE, such as regression, density estimation, and responsive t-SNE, opening up new opportunities to use complex algorithms in interactive systems.",
"title": "PANENE: A Progressive Algorithm for Indexing and Querying Approximate <italic>k</italic>-Nearest Neighbors",
"normalizedTitle": "PANENE: A Progressive Algorithm for Indexing and Querying Approximate k-Nearest Neighbors",
"fno": "08462793",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Cache Storage",
"Data Structures",
"Data Visualisation",
"Interactive Systems",
"Learning Artificial Intelligence",
"Query Processing",
"Table Lookup",
"Index Incoming Data",
"Latency Requirements",
"Progressive Systems",
"Machine Learning Methods",
"Interactive Visual Analytics",
"KNN Queries",
"Progressive Computation Paradigm",
"PANENE Operations",
"Interactive Latency",
"Cache Data Structure",
"KNN Lookup Table",
"Constant Time Lookups",
"Complex Algorithms",
"Progressive Algorithm",
"Approximate Nearest Neighbor Indexing",
"Neighbor Libraries",
"Data Analysis Methods",
"Approximate K Nearest Neighbor Querying",
"Approximation Algorithms",
"Visual Analytics",
"Indexing",
"Vegetation",
"Data Structures",
"Data Analysis",
"Approximate Italic Xmlns Ali Http Www Niso Org Schemas Ali 1 0 Xmlns Mml Http Www W 3 Org 1998 Math Math ML Xmlns Xlink Http Www W 3 Org 1999 Xlink Xmlns Xsi Http Www W 3 Org 2001 XML Schema Instance K Italic Nearest Neighbors",
"Progressive Data Analysis",
"Algorithm",
"Real Time"
],
"authors": [
{
"givenName": "Jaemin",
"surname": "Jo",
"fullName": "Jaemin Jo",
"affiliation": "Seoul National University, Seoul, Korea",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jinwook",
"surname": "Seo",
"fullName": "Jinwook Seo",
"affiliation": "Seoul National University, Seoul, Korea",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jean-Daniel",
"surname": "Fekete",
"fullName": "Jean-Daniel Fekete",
"affiliation": "Inria, Le Chesnay, France",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": true,
"isOpenAccess": true,
"issueNum": "02",
"pubDate": "2020-02-01 00:00:00",
"pubType": "trans",
"pages": "1347-1360",
"year": "2020",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/gcc/2010/4313/0/4313a052",
"title": "A New Classification Algorithm Using Mutual Nearest Neighbors",
"doi": null,
"abstractUrl": "/proceedings-article/gcc/2010/4313a052/12OmNAS9zzh",
"parentPublication": {
"id": "proceedings/gcc/2010/4313/0",
"title": "Grid and Cloud Computing, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dsia/2017/2198/0/08339084",
"title": "A progressive k-d tree for approximate k-nearest neighbors",
"doi": null,
"abstractUrl": "/proceedings-article/dsia/2017/08339084/12OmNAXPymK",
"parentPublication": {
"id": "proceedings/dsia/2017/2198/0",
"title": "2017 IEEE Workshop on Data Systems for Interactive Analysis (DSIA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dsaa/2016/5206/0/07796901",
"title": "Efficient Identification of Tanimoto Nearest Neighbors",
"doi": null,
"abstractUrl": "/proceedings-article/dsaa/2016/07796901/12OmNAY79bI",
"parentPublication": {
"id": "proceedings/dsaa/2016/5206/0",
"title": "2016 IEEE 3rd International Conference on Data Science and Advanced Analytics (DSAA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2010/4109/0/4109a666",
"title": "Weighting of the k-Nearest-Neighbors",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2010/4109a666/12OmNBQ2VVf",
"parentPublication": {
"id": "proceedings/icpr/2010/4109/0",
"title": "Pattern Recognition, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/micai/2015/0322/0/07429426",
"title": "Diminishing Prototype Size for k-Nearest Neighbors Classification",
"doi": null,
"abstractUrl": "/proceedings-article/micai/2015/07429426/12OmNx6PiCN",
"parentPublication": {
"id": "proceedings/micai/2015/0322/0",
"title": "2015 Fourteenth Mexican International Conference on Artificial Intelligence (MICAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/big-data/2018/5035/0/08621894",
"title": "Deep Similarity-Enhanced K Nearest Neighbors",
"doi": null,
"abstractUrl": "/proceedings-article/big-data/2018/08621894/17D45WGGoMJ",
"parentPublication": {
"id": "proceedings/big-data/2018/5035/0",
"title": "2018 IEEE International Conference on Big Data (Big Data)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/bd/2021/01/08676336",
"title": "K-Nearest Neighbor Search by Random Projection Forests",
"doi": null,
"abstractUrl": "/journal/bd/2021/01/08676336/18Nk65SQcWA",
"parentPublication": {
"id": "trans/bd",
"title": "IEEE Transactions on Big Data",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/spw/2019/3508/0/350800a001",
"title": "On the Robustness of Deep K-Nearest Neighbors",
"doi": null,
"abstractUrl": "/proceedings-article/spw/2019/350800a001/1dx8yAChWCc",
"parentPublication": {
"id": "proceedings/spw/2019/3508/0",
"title": "2019 IEEE Security and Privacy Workshops (SPW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2019/3293/0/329300c834",
"title": "K-Nearest Neighbors Hashing",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2019/329300c834/1gyrLaDZnO0",
"parentPublication": {
"id": "proceedings/cvpr/2019/3293/0",
"title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/big-data/2020/6251/0/09378396",
"title": "AutoML for Stream k-Nearest Neighbors Classification",
"doi": null,
"abstractUrl": "/proceedings-article/big-data/2020/09378396/1s64Z6D2zLO",
"parentPublication": {
"id": "proceedings/big-data/2020/6251/0",
"title": "2020 IEEE International Conference on Big Data (Big Data)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "08457312",
"articleId": "13Jkr98ynrb",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08449116",
"articleId": "13rRUyeCkaq",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1i57mIMGDlu",
"name": "ttg202002-08462793s1.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg202002-08462793s1.pdf",
"extension": "pdf",
"size": "5.22 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNvsDHDY",
"title": "Jan.",
"year": "2020",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": "26",
"label": "Jan.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1cG6natfOKY",
"doi": "10.1109/TVCG.2019.2934799",
"abstract": "We present a method for data sampling in scatterplots by jointly optimizing point selection for different views or classes. Our method uses space-filling curves (Z-order curves) that partition a point set into subsets that, when covered each by one sample, provide a sampling or coreset with good approximation guarantees in relation to the original point set. For scatterplot matrices with multiple views, different views provide different space-filling curves, leading to different partitions of the given point set. For multi-class scatterplots, the focus on either per-class distribution or global distribution provides two different partitions of the given point set that need to be considered in the selection of the coreset. For both cases, we convert the coreset selection problem into an Exact Cover Problem (ECP), and demonstrate with quantitative and qualitative evaluations that an approximate solution that solves the ECP efficiently is able to provide high-quality samplings.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We present a method for data sampling in scatterplots by jointly optimizing point selection for different views or classes. Our method uses space-filling curves (Z-order curves) that partition a point set into subsets that, when covered each by one sample, provide a sampling or coreset with good approximation guarantees in relation to the original point set. For scatterplot matrices with multiple views, different views provide different space-filling curves, leading to different partitions of the given point set. For multi-class scatterplots, the focus on either per-class distribution or global distribution provides two different partitions of the given point set that need to be considered in the selection of the coreset. For both cases, we convert the coreset selection problem into an Exact Cover Problem (ECP), and demonstrate with quantitative and qualitative evaluations that an approximate solution that solves the ECP efficiently is able to provide high-quality samplings.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We present a method for data sampling in scatterplots by jointly optimizing point selection for different views or classes. Our method uses space-filling curves (Z-order curves) that partition a point set into subsets that, when covered each by one sample, provide a sampling or coreset with good approximation guarantees in relation to the original point set. For scatterplot matrices with multiple views, different views provide different space-filling curves, leading to different partitions of the given point set. For multi-class scatterplots, the focus on either per-class distribution or global distribution provides two different partitions of the given point set that need to be considered in the selection of the coreset. For both cases, we convert the coreset selection problem into an Exact Cover Problem (ECP), and demonstrate with quantitative and qualitative evaluations that an approximate solution that solves the ECP efficiently is able to provide high-quality samplings.",
"title": "Data Sampling in Multi-view and Multi-class Scatterplots via Set Cover Optimization",
"normalizedTitle": "Data Sampling in Multi-view and Multi-class Scatterplots via Set Cover Optimization",
"fno": "08807244",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Data Analysis",
"Data Visualisation",
"Feature Selection",
"Matrix Algebra",
"Optimisation",
"Pattern Classification",
"Sampling Methods",
"Set Theory",
"Coreset Selection Problem",
"Exact Cover Problem",
"Data Sampling",
"Multiclass Scatterplots",
"Point Selection",
"Z Order Curves",
"Scatterplot Matrices",
"Space Filling Curves",
"Multiview Scatterplots",
"Set Cover Optimization",
"Data Visualization",
"Sampling Methods",
"Visualization",
"Optimization",
"Kernel",
"Two Dimensional Displays",
"Image Color Analysis",
"Measurement",
"Sampling",
"Scatterplot",
"SPLOM",
"Exact Cover Problem"
],
"authors": [
{
"givenName": "Ruizhen",
"surname": "Hu",
"fullName": "Ruizhen Hu",
"affiliation": "Shenzhen University, Visual Computing Research Center, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Tingkai",
"surname": "Sha",
"fullName": "Tingkai Sha",
"affiliation": "Shenzhen University, Visual Computing Research Center, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Oliver",
"surname": "Van Kaick",
"fullName": "Oliver Van Kaick",
"affiliation": "Carleton University, School of Computer Science, Canada",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Oliver",
"surname": "Deussen",
"fullName": "Oliver Deussen",
"affiliation": "Konstanz University, Germany and Shenzhen VisuCA Key Lab, SIAT, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Hui",
"surname": "Huang",
"fullName": "Hui Huang",
"affiliation": "Shenzhen University, Visual Computing Research Center, China",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2020-01-01 00:00:00",
"pubType": "trans",
"pages": "739-748",
"year": "2020",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/sibgrapi/2014/4258/0/4258a080",
"title": "A Nested Hierarchy of Localized Scatterplots",
"doi": null,
"abstractUrl": "/proceedings-article/sibgrapi/2014/4258a080/12OmNy7h3e0",
"parentPublication": {
"id": "proceedings/sibgrapi/2014/4258/0",
"title": "2014 27th SIBGRAPI Conference on Graphics, Patterns and Images (SIBGRAPI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08447558",
"title": "Immersive Visualization of Abstract Information: An Evaluation on Dimensionally-Reduced Data Scatterplots",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08447558/13bd1tMztYK",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/12/06875982",
"title": "Visual Abstraction and Exploration of Multi-class Scatterplots",
"doi": null,
"abstractUrl": "/journal/tg/2014/12/06875982/13rRUygT7ye",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/01/08440853",
"title": "Optimizing Color Assignment for Perception of Class Separability in Multiclass Scatterplots",
"doi": null,
"abstractUrl": "/journal/tg/2019/01/08440853/17D45VTRoxJ",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/01/08809844",
"title": "A Recursive Subdivision Technique for Sampling Multi-class Scatterplots",
"doi": null,
"abstractUrl": "/journal/tg/2020/01/08809844/1cHEfHRrSOQ",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pacificvis/2019/9226/0/922600a082",
"title": "Scatterplot Summarization by Constructing Fast and Robust Principal Graphs from Skeletons",
"doi": null,
"abstractUrl": "/proceedings-article/pacificvis/2019/922600a082/1cMF8150We4",
"parentPublication": {
"id": "proceedings/pacificvis/2019/9226/0",
"title": "2019 IEEE Pacific Visualization Symposium (PacificVis)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2019/2838/0/283800a062",
"title": "A Technique for Selection and Drawing of Scatterplots for Multi-Dimensional Data Visualization",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2019/283800a062/1cMF8TTAeAw",
"parentPublication": {
"id": "proceedings/iv/2019/2838/0",
"title": "2019 23rd International Conference Information Visualisation (IV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/01/08848845",
"title": "Winglets: Visualizing Association with Uncertainty in Multi-class Scatterplots",
"doi": null,
"abstractUrl": "/journal/tg/2020/01/08848845/1dC4K1H2UBG",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/02/09222253",
"title": "Uncertainty in Continuous Scatterplots, Continuous Parallel Coordinates, and Fibers",
"doi": null,
"abstractUrl": "/journal/tg/2021/02/09222253/1nTrrxWmyqs",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/02/09226404",
"title": "Evaluation of Sampling Methods for Scatterplots",
"doi": null,
"abstractUrl": "/journal/tg/2021/02/09226404/1nYqk0TjyeY",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "08809844",
"articleId": "1cHEfHRrSOQ",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08809850",
"articleId": "1cHEkrFpU76",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
|
{
"issue": {
"id": "1qL5hsvvVkc",
"title": "Feb.",
"year": "2021",
"issueNum": "02",
"idPrefix": "tg",
"pubType": "journal",
"volume": "27",
"label": "Feb.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1nYqk0TjyeY",
"doi": "10.1109/TVCG.2020.3030432",
"abstract": "Given a scatterplot with tens of thousands of points or even more, a natural question is which sampling method should be used to create a small but “good” scatterplot for a better abstraction. We present the results of a user study that investigates the influence of different sampling strategies on multi-class scatterplots. The main goal of this study is to understand the capability of sampling methods in preserving the density, outliers, and overall shape of a scatterplot. To this end, we comprehensively review the literature and select seven typical sampling strategies as well as eight representative datasets. We then design four experiments to understand the performance of different strategies in maintaining: 1) region density; 2) class density; 3) outliers; and 4) overall shape in the sampling results. The results show that: 1) random sampling is preferred for preserving region density; 2) blue noise sampling and random sampling have comparable performance with the three multi-class sampling strategies in preserving class density; 3) outlier biased density based sampling, recursive subdivision based sampling, and blue noise sampling perform the best in keeping outliers; and 4) blue noise sampling outperforms the others in maintaining the overall shape of a scatterplot.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Given a scatterplot with tens of thousands of points or even more, a natural question is which sampling method should be used to create a small but “good” scatterplot for a better abstraction. We present the results of a user study that investigates the influence of different sampling strategies on multi-class scatterplots. The main goal of this study is to understand the capability of sampling methods in preserving the density, outliers, and overall shape of a scatterplot. To this end, we comprehensively review the literature and select seven typical sampling strategies as well as eight representative datasets. We then design four experiments to understand the performance of different strategies in maintaining: 1) region density; 2) class density; 3) outliers; and 4) overall shape in the sampling results. The results show that: 1) random sampling is preferred for preserving region density; 2) blue noise sampling and random sampling have comparable performance with the three multi-class sampling strategies in preserving class density; 3) outlier biased density based sampling, recursive subdivision based sampling, and blue noise sampling perform the best in keeping outliers; and 4) blue noise sampling outperforms the others in maintaining the overall shape of a scatterplot.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Given a scatterplot with tens of thousands of points or even more, a natural question is which sampling method should be used to create a small but “good” scatterplot for a better abstraction. We present the results of a user study that investigates the influence of different sampling strategies on multi-class scatterplots. The main goal of this study is to understand the capability of sampling methods in preserving the density, outliers, and overall shape of a scatterplot. To this end, we comprehensively review the literature and select seven typical sampling strategies as well as eight representative datasets. We then design four experiments to understand the performance of different strategies in maintaining: 1) region density; 2) class density; 3) outliers; and 4) overall shape in the sampling results. The results show that: 1) random sampling is preferred for preserving region density; 2) blue noise sampling and random sampling have comparable performance with the three multi-class sampling strategies in preserving class density; 3) outlier biased density based sampling, recursive subdivision based sampling, and blue noise sampling perform the best in keeping outliers; and 4) blue noise sampling outperforms the others in maintaining the overall shape of a scatterplot.",
"title": "Evaluation of Sampling Methods for Scatterplots",
"normalizedTitle": "Evaluation of Sampling Methods for Scatterplots",
"fno": "09226404",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Data Analysis",
"Data Visualisation",
"Sampling Methods",
"Outlier Biased Density Based Sampling",
"Scatterplot",
"Recursive Subdivision Based Sampling",
"Class Density",
"Multiclass Sampling Strategies",
"Random Sampling",
"Sampling Results",
"Multiclass Scatterplots",
"Sampling Method",
"Blue Noise Sampling",
"Sampling Methods",
"Task Analysis",
"Shape",
"Data Visualization",
"Visualization",
"Bibliographies",
"Scalability",
"Scatterplot",
"Data Sampling",
"Empirical Evaluation"
],
"authors": [
{
"givenName": "Jun",
"surname": "Yuan",
"fullName": "Jun Yuan",
"affiliation": "BNRist, Tsinghua University",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Shouxing",
"surname": "Xiang",
"fullName": "Shouxing Xiang",
"affiliation": "BNRist, Tsinghua University",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jiazhi",
"surname": "Xia",
"fullName": "Jiazhi Xia",
"affiliation": "Central South University",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Lingyun",
"surname": "Yu",
"fullName": "Lingyun Yu",
"affiliation": "Xi'an Jiaotong-Liverpool University",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Shixia",
"surname": "Liu",
"fullName": "Shixia Liu",
"affiliation": "BNRist, Tsinghua University",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "02",
"pubDate": "2021-02-01 00:00:00",
"pubType": "trans",
"pages": "1720-1730",
"year": "2021",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "trans/tg/2013/07/ttg2013071143",
"title": "Generalized Anisotropic Stratified Surface Sampling",
"doi": null,
"abstractUrl": "/journal/tg/2013/07/ttg2013071143/13rRUILc8fc",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/10/ttg2012101784",
"title": "Variational Blue Noise Sampling",
"doi": null,
"abstractUrl": "/journal/tg/2012/10/ttg2012101784/13rRUILtJqQ",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/01/07539318",
"title": "Evaluation of Graph Sampling: A Visualization Perspective",
"doi": null,
"abstractUrl": "/journal/tg/2017/01/07539318/13rRUxZzAhI",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/12/06875982",
"title": "Visual Abstraction and Exploration of Multi-class Scatterplots",
"doi": null,
"abstractUrl": "/journal/tg/2014/12/06875982/13rRUygT7ye",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cse-euc/2017/3220/2/08005978",
"title": "A Blue Noise Pattern Sampling Methods to Prevent Aliasing",
"doi": null,
"abstractUrl": "/proceedings-article/cse-euc/2017/08005978/17D45XDIXUy",
"parentPublication": {
"id": "proceedings/cse-euc/2017/3220/2",
"title": "2017 IEEE International Conference on Computational Science and Engineering (CSE) and IEEE International Conference on Embedded and Ubiquitous Computing (EUC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/01/08807244",
"title": "Data Sampling in Multi-view and Multi-class Scatterplots via Set Cover Optimization",
"doi": null,
"abstractUrl": "/journal/tg/2020/01/08807244/1cG6natfOKY",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/01/08809844",
"title": "A Recursive Subdivision Technique for Sampling Multi-class Scatterplots",
"doi": null,
"abstractUrl": "/journal/tg/2020/01/08809844/1cHEfHRrSOQ",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2019/2838/0/283800a062",
"title": "A Technique for Selection and Drawing of Scatterplots for Multi-Dimensional Data Visualization",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2019/283800a062/1cMF8TTAeAw",
"parentPublication": {
"id": "proceedings/iv/2019/2838/0",
"title": "2019 23rd International Conference Information Visualisation (IV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2021/05/09495208",
"title": "Visual Clustering Factors in Scatterplots",
"doi": null,
"abstractUrl": "/magazine/cg/2021/05/09495208/1vyjCkbMBvW",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/01/09552916",
"title": "Pyramid-based Scatterplots Sampling for Progressive and Streaming Data Visualization",
"doi": null,
"abstractUrl": "/journal/tg/2022/01/09552916/1xic8Hlfu4o",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09224191",
"articleId": "1nV59fPyCPe",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09222249",
"articleId": "1nTroT3Yn72",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1qLefbkiM92",
"name": "ttg202102-09226404s1-tvcg-3030432-mm.zip",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg202102-09226404s1-tvcg-3030432-mm.zip",
"extension": "zip",
"size": "3.3 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNwFid7w",
"title": "Jan.",
"year": "2019",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": "25",
"label": "Jan.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "17D45XH89nI",
"doi": "10.1109/TVCG.2018.2864491",
"abstract": "Labeling is intrinsically important for exploring and understanding complex environments and models in a variety of domains. We present a method for interactive labeling of crowded 3D scenes containing very many instances of objects spanning multiple scales in size. In contrast to previous labeling methods, we target cases where many instances of dozens of types are present and where the hierarchical structure of the objects in the scene presents an opportunity to choose the most suitable level for each placed label. Our solution builds on and goes beyond labeling techniques in medical 3D visualization, cartography, and biological illustrations from books and prints. In contrast to these techniques, the main characteristics of our new technique are: 1) a novel way of labeling objects as part of a bigger structure when appropriate, 2) visual clutter reduction by labeling only representative instances for each type of an object, and a strategy of selecting those. The appropriate level of label is chosen by analyzing the scene's depth buffer and the scene objects' hierarchy tree. We address the topic of communicating the parent-children relationship between labels by employing visual hierarchy concepts adapted from graphic design. Selecting representative instances considers several criteria tailored to the character of the data and is combined with a greedy optimization approach. We demonstrate the usage of our method with models from mesoscale biology where these two characteristics-multi-scale and multi-instance-are abundant, along with the fact that these scenes are extraordinarily dense.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Labeling is intrinsically important for exploring and understanding complex environments and models in a variety of domains. We present a method for interactive labeling of crowded 3D scenes containing very many instances of objects spanning multiple scales in size. In contrast to previous labeling methods, we target cases where many instances of dozens of types are present and where the hierarchical structure of the objects in the scene presents an opportunity to choose the most suitable level for each placed label. Our solution builds on and goes beyond labeling techniques in medical 3D visualization, cartography, and biological illustrations from books and prints. In contrast to these techniques, the main characteristics of our new technique are: 1) a novel way of labeling objects as part of a bigger structure when appropriate, 2) visual clutter reduction by labeling only representative instances for each type of an object, and a strategy of selecting those. The appropriate level of label is chosen by analyzing the scene's depth buffer and the scene objects' hierarchy tree. We address the topic of communicating the parent-children relationship between labels by employing visual hierarchy concepts adapted from graphic design. Selecting representative instances considers several criteria tailored to the character of the data and is combined with a greedy optimization approach. We demonstrate the usage of our method with models from mesoscale biology where these two characteristics-multi-scale and multi-instance-are abundant, along with the fact that these scenes are extraordinarily dense.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Labeling is intrinsically important for exploring and understanding complex environments and models in a variety of domains. We present a method for interactive labeling of crowded 3D scenes containing very many instances of objects spanning multiple scales in size. In contrast to previous labeling methods, we target cases where many instances of dozens of types are present and where the hierarchical structure of the objects in the scene presents an opportunity to choose the most suitable level for each placed label. Our solution builds on and goes beyond labeling techniques in medical 3D visualization, cartography, and biological illustrations from books and prints. In contrast to these techniques, the main characteristics of our new technique are: 1) a novel way of labeling objects as part of a bigger structure when appropriate, 2) visual clutter reduction by labeling only representative instances for each type of an object, and a strategy of selecting those. The appropriate level of label is chosen by analyzing the scene's depth buffer and the scene objects' hierarchy tree. We address the topic of communicating the parent-children relationship between labels by employing visual hierarchy concepts adapted from graphic design. Selecting representative instances considers several criteria tailored to the character of the data and is combined with a greedy optimization approach. We demonstrate the usage of our method with models from mesoscale biology where these two characteristics-multi-scale and multi-instance-are abundant, along with the fact that these scenes are extraordinarily dense.",
"title": "Labels on Levels: Labeling of Multi-Scale Multi-Instance and Crowded 3D Biological Environments",
"normalizedTitle": "Labels on Levels: Labeling of Multi-Scale Multi-Instance and Crowded 3D Biological Environments",
"fno": "08440077",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Biology Computing",
"Data Visualisation",
"Greedy Algorithms",
"Interactive Systems",
"Optimisation",
"3 D Biological Environments",
"Interactive Labeling",
"Placed Label",
"Labeling Techniques",
"Medical 3 D Visualization",
"Representative Instances",
"Object Labeling",
"Multiscale Multiinstance Labeling",
"Crowded 3 D Biological Environments",
"Greedy Optimization Approach",
"Mesoscale Biology",
"Scene Object Hierarchy Tree",
"Scene Depth Buffer",
"Visual Clutter Reduction",
"Crowded 3 D Scenes",
"Three Dimensional Displays",
"Labeling",
"Proteins",
"Visualization",
"Blood",
"Biological System Modeling",
"Labeling",
"Multi Scale Data",
"Multi Instance Data"
],
"authors": [
{
"givenName": "David",
"surname": "Kouřil",
"fullName": "David Kouřil",
"affiliation": "TU Wien",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Ladislav",
"surname": "Čmolík",
"fullName": "Ladislav Čmolík",
"affiliation": "Faculty of Electrical Engineering, Czech Technical University, Prague",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Barbora",
"surname": "Kozlíková",
"fullName": "Barbora Kozlíková",
"affiliation": "Masaryk University",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Hslanc-Yun",
"surname": "Wu",
"fullName": "Hslanc-Yun Wu",
"affiliation": "TU Wien",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Graham",
"surname": "Johnson",
"fullName": "Graham Johnson",
"affiliation": "Allen Institute for Cell Science",
"__typename": "ArticleAuthorType"
},
{
"givenName": "David S.",
"surname": "Goodsell",
"fullName": "David S. Goodsell",
"affiliation": "The Scripps Research Institute",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Arthur",
"surname": "Olson",
"fullName": "Arthur Olson",
"affiliation": "The Scripps Research Institute",
"__typename": "ArticleAuthorType"
},
{
"givenName": "M. Eduard",
"surname": "Gröller",
"fullName": "M. Eduard Gröller",
"affiliation": "TU Wien",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Ivan",
"surname": "Viola",
"fullName": "Ivan Viola",
"affiliation": "TU Wien",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": true,
"isOpenAccess": true,
"issueNum": "01",
"pubDate": "2019-01-01 00:00:00",
"pubType": "trans",
"pages": "977-986",
"year": "2019",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/wacv/2015/6683/0/6683b068",
"title": "Semantic Instance Labeling Leveraging Hierarchical Segmentation",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2015/6683b068/12OmNBTawwj",
"parentPublication": {
"id": "proceedings/wacv/2015/6683/0",
"title": "2015 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2018/4886/0/488601b577",
"title": "Instance-Aware Detailed Action Labeling in Videos",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2018/488601b577/12OmNyXMQc6",
"parentPublication": {
"id": "proceedings/wacv/2018/4886/0",
"title": "2018 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/01/08019851",
"title": "Comparing Visual-Interactive Labeling with Active Learning: An Experimental Study",
"doi": null,
"abstractUrl": "/journal/tg/2018/01/08019851/13rRUxBrGh7",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797804",
"title": "Semantic Labeling and Object Registration for Augmented Reality Language Learning",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797804/1cJ0NXcPJGo",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/snpd/2019/1651/0/08935672",
"title": "Multi-label Classification Method Based on the Labeling-Importance Degree",
"doi": null,
"abstractUrl": "/proceedings-article/snpd/2019/08935672/1fThfLHMG7m",
"parentPublication": {
"id": "proceedings/snpd/2019/1651/0",
"title": "2019 20th IEEE/ACIS International Conference on Software Engineering, Artificial Intelligence, Networking and Parallel/Distributed Computing (SNPD)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800l1599",
"title": "KeyPose: Multi-View 3D Labeling and Keypoint Estimation for Transparent Objects",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800l1599/1m3nOCwrA8o",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/04/09207965",
"title": "Mixed Labeling: Integrating Internal and External Labels",
"doi": null,
"abstractUrl": "/journal/tg/2022/04/09207965/1nuwBNaxzjy",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aivr/2020/7463/0/746300a099",
"title": "Shooting Labels: 3D Semantic Labeling by Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/aivr/2020/746300a099/1qpzznqhVHW",
"parentPublication": {
"id": "proceedings/aivr/2020/7463/0",
"title": "2020 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2021/04/09437800",
"title": "Collaborative VR-Based 3D Labeling of Live-Captured Scenes by Remote Users",
"doi": null,
"abstractUrl": "/magazine/cg/2021/04/09437800/1tL6FQbaHG8",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/01/09552249",
"title": "Rapid Labels: Point-Feature Labeling on GPU",
"doi": null,
"abstractUrl": "/journal/tg/2022/01/09552249/1xicaqbwmA0",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "08494830",
"articleId": "17D45XeKgwi",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08456856",
"articleId": "17D45Xbl4Qi",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1i4QDFLlOlW",
"name": "ttg201901-08440077s1.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg201901-08440077s1.mp4",
"extension": "mp4",
"size": "134 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
}
|
{
"issue": {
"id": "1BhzoX5mYSY",
"title": "April",
"year": "2022",
"issueNum": "04",
"idPrefix": "tg",
"pubType": "journal",
"volume": "28",
"label": "April",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1nuwBNaxzjy",
"doi": "10.1109/TVCG.2020.3027368",
"abstract": "In this article, we present an algorithm capable of mixed labeling of 2D and 3D objects. In mixed labeling, the given objects are labeled with both internal labels placed (at least partially) over the objects and external labels placed in the space around the objects and connected with the labeled objects with straight-line leaders. The proposed algorithm determines the position and type of each label based on the user-specified ambiguity threshold and eliminates overlaps between the labels, as well as between the internal labels and the straight-line leaders of external labels. The algorithm is a screen-space technique; it operates in an image where the 2D objects or projected 3D objects are encoded. In other words, we can use the algorithm whenever we can render the objects to an image, which makes the algorithm fit for use in many domains. The algorithm operates in real-time, giving the results immediately. Finally, we present results from an expert evaluation, in which a professional illustrator has evaluated the label layouts produced with the proposed algorithm.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this article, we present an algorithm capable of mixed labeling of 2D and 3D objects. In mixed labeling, the given objects are labeled with both internal labels placed (at least partially) over the objects and external labels placed in the space around the objects and connected with the labeled objects with straight-line leaders. The proposed algorithm determines the position and type of each label based on the user-specified ambiguity threshold and eliminates overlaps between the labels, as well as between the internal labels and the straight-line leaders of external labels. The algorithm is a screen-space technique; it operates in an image where the 2D objects or projected 3D objects are encoded. In other words, we can use the algorithm whenever we can render the objects to an image, which makes the algorithm fit for use in many domains. The algorithm operates in real-time, giving the results immediately. Finally, we present results from an expert evaluation, in which a professional illustrator has evaluated the label layouts produced with the proposed algorithm.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this article, we present an algorithm capable of mixed labeling of 2D and 3D objects. In mixed labeling, the given objects are labeled with both internal labels placed (at least partially) over the objects and external labels placed in the space around the objects and connected with the labeled objects with straight-line leaders. The proposed algorithm determines the position and type of each label based on the user-specified ambiguity threshold and eliminates overlaps between the labels, as well as between the internal labels and the straight-line leaders of external labels. The algorithm is a screen-space technique; it operates in an image where the 2D objects or projected 3D objects are encoded. In other words, we can use the algorithm whenever we can render the objects to an image, which makes the algorithm fit for use in many domains. The algorithm operates in real-time, giving the results immediately. Finally, we present results from an expert evaluation, in which a professional illustrator has evaluated the label layouts produced with the proposed algorithm.",
"title": "Mixed Labeling: Integrating Internal and External Labels",
"normalizedTitle": "Mixed Labeling: Integrating Internal and External Labels",
"fno": "09207965",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Computational Geometry",
"Data Visualisation",
"Image Segmentation",
"Rendering Computer Graphics",
"Solid Modelling",
"Mixed Labeling",
"External Labels",
"Algorithm Capable",
"Given Objects",
"Internal Labels",
"Labeled Objects",
"Straight Line Leaders",
"Label Layouts",
"Labeling",
"Layout",
"Three Dimensional Displays",
"Shape",
"Solid Modeling",
"Two Dimensional Displays",
"Labeling",
"Mixed Labeling",
"Internal Labeling",
"External Labeling",
"Expert Evaluation"
],
"authors": [
{
"givenName": "Ladislav",
"surname": "Čmolík",
"fullName": "Ladislav Čmolík",
"affiliation": "Faculty of Electrical Engineering, Czech Technical University in Prague, Prague, Czechia",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Václav",
"surname": "Pavlovec",
"fullName": "Václav Pavlovec",
"affiliation": "Faculty of Electrical Engineering, Czech Technical University in Prague, Prague, Czechia",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Hsiang-Yun",
"surname": "Wu",
"fullName": "Hsiang-Yun Wu",
"affiliation": "Institute of Visual Computing and Human-Centered Technology, TU Wien, Vienna, Austria",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Martin",
"surname": "Nöllenburg",
"fullName": "Martin Nöllenburg",
"affiliation": "Institute of Logic and Computation, TU Wien, Vienna, Austria",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "04",
"pubDate": "2022-04-01 00:00:00",
"pubType": "trans",
"pages": "1848-1861",
"year": "2022",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/apvis/2007/0808/0/04126220",
"title": "Many-to-one boundary labeling",
"doi": null,
"abstractUrl": "/proceedings-article/apvis/2007/04126220/12OmNBlFR2L",
"parentPublication": {
"id": "proceedings/apvis/2007/0808/0",
"title": "Asia-Pacific Symposium on Visualisation 2007",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2014/2871/0/06802046",
"title": "Hedgehog labeling: View management techniques for external labels in 3D space",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2014/06802046/12OmNzTppDb",
"parentPublication": {
"id": "proceedings/vr/2014/2871/0",
"title": "2014 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2017/6647/0/07892321",
"title": "Evaluation of labelling layout methods in augmented reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892321/12OmNzZWbHq",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/07/08355684",
"title": "Real-Time External Labeling of Ghosted Views",
"doi": null,
"abstractUrl": "/journal/tg/2019/07/08355684/13rRUygT7sM",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2018/8425/0/842500a523",
"title": "Fine-Level Semantic Labeling of Large-Scale 3D Model by Active Learning",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2018/842500a523/17D45WHONjT",
"parentPublication": {
"id": "proceedings/3dv/2018/8425/0",
"title": "2018 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/01/08440077",
"title": "Labels on Levels: Labeling of Multi-Scale Multi-Instance and Crowded 3D Biological Environments",
"doi": null,
"abstractUrl": "/journal/tg/2019/01/08440077/17D45XH89nI",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2019/2838/0/283800a019",
"title": "3D Visualization of Network Including Nodes with Labels",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2019/283800a019/1cMFcawCItW",
"parentPublication": {
"id": "proceedings/iv/2019/2838/0",
"title": "2019 23rd International Conference Information Visualisation (IV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aivr/2020/7463/0/746300a099",
"title": "Shooting Labels: 3D Semantic Labeling by Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/aivr/2020/746300a099/1qpzznqhVHW",
"parentPublication": {
"id": "proceedings/aivr/2020/7463/0",
"title": "2020 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/01/09552249",
"title": "Rapid Labels: Point-Feature Labeling on GPU",
"doi": null,
"abstractUrl": "/journal/tg/2022/01/09552249/1xicaqbwmA0",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/03/09645242",
"title": "Labeling Out-of-View Objects in Immersive Analytics to Support Situated Visual Searching",
"doi": null,
"abstractUrl": "/journal/tg/2023/03/09645242/1zc6DjegSGY",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09210753",
"articleId": "1nzvbOKX280",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09209171",
"articleId": "1nwbhfo8G52",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1BhzuveRNAI",
"name": "ttg202204-09207965s1-tvcg-3027368-mm.zip",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg202204-09207965s1-tvcg-3027368-mm.zip",
"extension": "zip",
"size": "27.4 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
}
|
{
"issue": {
"id": "1vg3jumzhF6",
"title": "July-Aug.",
"year": "2021",
"issueNum": "04",
"idPrefix": "cg",
"pubType": "magazine",
"volume": "41",
"label": "July-Aug.",
"downloadables": {
"hasCover": true,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1tL6FQbaHG8",
"doi": "10.1109/MCG.2021.3082267",
"abstract": "Previous work on interactive 3D labeling focused on improving user experience based on virtual/augmented reality and, thereby, speeding-up the labeling of scenes. In this article, we present a novel interactive, collaborative VR-based 3D labeling system for live-captured scenes by multiple remotely connected users based on sparse multi-user input with automatic label propagation and completion. Hence, our system is particularly beneficial in the case of multiple users that are able to label different scene parts from the respectively adequate views in parallel. Our proposed system relies on 1) the RGB-D capture of an environment by a user, 2) a reconstruction client that integrates this stream into a 3D model, 3) a server that gets scene updates and manages the global 3D scene model as well as client requests and the integration/propagation of labels, 4) labeling clients that allow an independent VR-based scene exploration and labeling for each user, and 5) remotely connected users that provide a sparse 3D labeling used to control the label propagation over objects and the label prediction to other scene parts. Our evaluation demonstrates the intuitive collaborative 3D labeling experience as well as its capability to meet the efficiency constraints regarding reconstruction speed, data streaming, visualization, and labeling.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Previous work on interactive 3D labeling focused on improving user experience based on virtual/augmented reality and, thereby, speeding-up the labeling of scenes. In this article, we present a novel interactive, collaborative VR-based 3D labeling system for live-captured scenes by multiple remotely connected users based on sparse multi-user input with automatic label propagation and completion. Hence, our system is particularly beneficial in the case of multiple users that are able to label different scene parts from the respectively adequate views in parallel. Our proposed system relies on 1) the RGB-D capture of an environment by a user, 2) a reconstruction client that integrates this stream into a 3D model, 3) a server that gets scene updates and manages the global 3D scene model as well as client requests and the integration/propagation of labels, 4) labeling clients that allow an independent VR-based scene exploration and labeling for each user, and 5) remotely connected users that provide a sparse 3D labeling used to control the label propagation over objects and the label prediction to other scene parts. Our evaluation demonstrates the intuitive collaborative 3D labeling experience as well as its capability to meet the efficiency constraints regarding reconstruction speed, data streaming, visualization, and labeling.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Previous work on interactive 3D labeling focused on improving user experience based on virtual/augmented reality and, thereby, speeding-up the labeling of scenes. In this article, we present a novel interactive, collaborative VR-based 3D labeling system for live-captured scenes by multiple remotely connected users based on sparse multi-user input with automatic label propagation and completion. Hence, our system is particularly beneficial in the case of multiple users that are able to label different scene parts from the respectively adequate views in parallel. Our proposed system relies on 1) the RGB-D capture of an environment by a user, 2) a reconstruction client that integrates this stream into a 3D model, 3) a server that gets scene updates and manages the global 3D scene model as well as client requests and the integration/propagation of labels, 4) labeling clients that allow an independent VR-based scene exploration and labeling for each user, and 5) remotely connected users that provide a sparse 3D labeling used to control the label propagation over objects and the label prediction to other scene parts. Our evaluation demonstrates the intuitive collaborative 3D labeling experience as well as its capability to meet the efficiency constraints regarding reconstruction speed, data streaming, visualization, and labeling.",
"title": "Collaborative VR-Based 3D Labeling of Live-Captured Scenes by Remote Users",
"normalizedTitle": "Collaborative VR-Based 3D Labeling of Live-Captured Scenes by Remote Users",
"fno": "09437800",
"hasPdf": true,
"idPrefix": "cg",
"keywords": [
"Augmented Reality",
"Client Server Systems",
"Image Reconstruction",
"Virtual Reality",
"User Experience",
"Collaborative VR Based 3 D",
"Live Captured Scenes",
"Multiple Remotely Connected Users",
"Sparse Multiuser Input",
"Automatic Label Propagation",
"Multiple Users",
"Different Scene Parts",
"Global 3 D Scene Model",
"Independent VR Based Scene Exploration",
"Label Prediction",
"Intuitive Collaborative 3 D",
"Labeling",
"Three Dimensional Displays",
"Semantics",
"Solid Modeling",
"Image Reconstruction",
"Annotations",
"Collaboration"
],
"authors": [
{
"givenName": "Domenic",
"surname": "Zingsheim",
"fullName": "Domenic Zingsheim",
"affiliation": "Institute of Computer Science II, University of Bonn, Bonn, Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Patrick",
"surname": "Stotko",
"fullName": "Patrick Stotko",
"affiliation": "Institute of Computer Science II, University of Bonn, Bonn, Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Stefan",
"surname": "Krumpen",
"fullName": "Stefan Krumpen",
"affiliation": "Institute of Computer Science II, University of Bonn, Bonn, Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Michael",
"surname": "Weinmann",
"fullName": "Michael Weinmann",
"affiliation": "Institute of Computer Science II, University of Bonn, Bonn, Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Reinhard",
"surname": "Klein",
"fullName": "Reinhard Klein",
"affiliation": "Institute of Computer Science II, University of Bonn, Bonn, Germany",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "04",
"pubDate": "2021-07-01 00:00:00",
"pubType": "mags",
"pages": "90-98",
"year": "2021",
"issn": "0272-1716",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/fskd/2009/3735/5/3735e292",
"title": "A Complete Label Set for 3D-sketch Labeling",
"doi": null,
"abstractUrl": "/proceedings-article/fskd/2009/3735e292/12OmNzlUKBC",
"parentPublication": {
"id": "proceedings/fskd/2009/3735/5",
"title": "Fuzzy Systems and Knowledge Discovery, Fourth International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2014/4985/0/06836031",
"title": "Object co-labeling in multiple images",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2014/06836031/12OmNzwHvch",
"parentPublication": {
"id": "proceedings/wacv/2014/4985/0",
"title": "2014 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2018/3788/0/08545378",
"title": "3D Geometry-Aware Semantic Labeling of Outdoor Street Scenes",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2018/08545378/17D45VtKiwd",
"parentPublication": {
"id": "proceedings/icpr/2018/3788/0",
"title": "2018 24th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2018/8425/0/842500a523",
"title": "Fine-Level Semantic Labeling of Large-Scale 3D Model by Active Learning",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2018/842500a523/17D45WHONjT",
"parentPublication": {
"id": "proceedings/3dv/2018/8425/0",
"title": "2018 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2018/8425/0/842500a662",
"title": "A Data-Driven Prior on Facet Orientation for Semantic Mesh Labeling",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2018/842500a662/17D45WgziON",
"parentPublication": {
"id": "proceedings/3dv/2018/8425/0",
"title": "2018 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdm/2018/9159/0/08594936",
"title": "Estimating Latent Relative Labeling Importances for Multi-label Learning",
"doi": null,
"abstractUrl": "/proceedings-article/icdm/2018/08594936/17D45WrVgdl",
"parentPublication": {
"id": "proceedings/icdm/2018/9159/0",
"title": "2018 IEEE International Conference on Data Mining (ICDM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2019/9552/0/955200b588",
"title": "Active Semantic Labeling of Street View Point Clouds",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2019/955200b588/1cdOTDqbIha",
"parentPublication": {
"id": "proceedings/icme/2019/9552/0",
"title": "2019 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/04/09207965",
"title": "Mixed Labeling: Integrating Internal and External Labels",
"doi": null,
"abstractUrl": "/journal/tg/2022/04/09207965/1nuwBNaxzjy",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aivr/2020/7463/0/746300a099",
"title": "Shooting Labels: 3D Semantic Labeling by Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/aivr/2020/746300a099/1qpzznqhVHW",
"parentPublication": {
"id": "proceedings/aivr/2020/7463/0",
"title": "2020 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/01/09552249",
"title": "Rapid Labels: Point-Feature Labeling on GPU",
"doi": null,
"abstractUrl": "/journal/tg/2022/01/09552249/1xicaqbwmA0",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09384236",
"articleId": "1scDA5NYISI",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09487520",
"articleId": "1vg3jOq7WvK",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
|
{
"issue": {
"id": "1y2FkV9ZFKM",
"title": "Nov.",
"year": "2021",
"issueNum": "11",
"idPrefix": "tg",
"pubType": "journal",
"volume": "27",
"label": "Nov.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1wpqs1dtKes",
"doi": "10.1109/TVCG.2021.3106492",
"abstract": "A common approach for Augmented Reality labeling is to display the label text on a flag planted into the real world element at a 3D anchor point. When there are more than just a few labels, the efficiency of the interface decreases as the user has to search for a given label sequentially. The search can be accelerated by sorting the labels alphabetically, but sorting all labels results in long and intersecting leader lines from the anchor points to the labels. This paper proposes a partially-sorted concentric label layout that leverages the search efficiency of sorting while avoiding the label display problems of long or intersecting leader lines. The labels are partitioned into a small number of sorted sequences displayed on circles of increasing radii. Since the labels on a circle are sorted, the user can quickly search each circle. A tight upper bound derived from circular permutation theory limits the number of circles and thereby the complexity of the label layout. For example, 12 labels require at most three circles. When the application allows it, the labels are presorted to further reduce the number of circles in the layout. The layout was tested in a user study where it significantly reduced the label searching time compared to a conventional single-circle layout.",
"abstracts": [
{
"abstractType": "Regular",
"content": "A common approach for Augmented Reality labeling is to display the label text on a flag planted into the real world element at a 3D anchor point. When there are more than just a few labels, the efficiency of the interface decreases as the user has to search for a given label sequentially. The search can be accelerated by sorting the labels alphabetically, but sorting all labels results in long and intersecting leader lines from the anchor points to the labels. This paper proposes a partially-sorted concentric label layout that leverages the search efficiency of sorting while avoiding the label display problems of long or intersecting leader lines. The labels are partitioned into a small number of sorted sequences displayed on circles of increasing radii. Since the labels on a circle are sorted, the user can quickly search each circle. A tight upper bound derived from circular permutation theory limits the number of circles and thereby the complexity of the label layout. For example, 12 labels require at most three circles. When the application allows it, the labels are presorted to further reduce the number of circles in the layout. The layout was tested in a user study where it significantly reduced the label searching time compared to a conventional single-circle layout.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "A common approach for Augmented Reality labeling is to display the label text on a flag planted into the real world element at a 3D anchor point. When there are more than just a few labels, the efficiency of the interface decreases as the user has to search for a given label sequentially. The search can be accelerated by sorting the labels alphabetically, but sorting all labels results in long and intersecting leader lines from the anchor points to the labels. This paper proposes a partially-sorted concentric label layout that leverages the search efficiency of sorting while avoiding the label display problems of long or intersecting leader lines. The labels are partitioned into a small number of sorted sequences displayed on circles of increasing radii. Since the labels on a circle are sorted, the user can quickly search each circle. A tight upper bound derived from circular permutation theory limits the number of circles and thereby the complexity of the label layout. For example, 12 labels require at most three circles. When the application allows it, the labels are presorted to further reduce the number of circles in the layout. The layout was tested in a user study where it significantly reduced the label searching time compared to a conventional single-circle layout.",
"title": "A Partially-Sorted Concentric Layout for Efficient Label Localization in Augmented Reality",
"normalizedTitle": "A Partially-Sorted Concentric Layout for Efficient Label Localization in Augmented Reality",
"fno": "09523830",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Augmented Reality",
"Computational Complexity",
"Data Visualisation",
"Sorting",
"Text Analysis",
"Efficient Label Localization",
"Augmented Reality Labeling",
"Label Text",
"Given Label",
"Labels Results",
"Partially Sorted Concentric Label Layout",
"Label Display Problems",
"Circle",
"Partially Sorted Concentric Layout",
"Layout",
"Sorting",
"Annotations",
"Search Problems",
"Labeling",
"Gaze Tracking",
"Augmented Reality",
"Label Layout",
"Fast Label Finding"
],
"authors": [
{
"givenName": "Zijing",
"surname": "Zhou",
"fullName": "Zijing Zhou",
"affiliation": "State Key Laboratory of Virtual Reality Technology and Systems, School of Computer Science and Engineering, Beihang University, Beijing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Lili",
"surname": "Wang",
"fullName": "Lili Wang",
"affiliation": "State Key Laboratory of Virtual Reality Technology and Systems, Beihang University, Beijing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Voicu",
"surname": "Popescu",
"fullName": "Voicu Popescu",
"affiliation": "Purdue University, U.S.",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "11",
"pubDate": "2021-11-01 00:00:00",
"pubType": "trans",
"pages": "4087-4096",
"year": "2021",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/pacificvis/2015/6879/0/07156379",
"title": "Clutter-aware label layout",
"doi": null,
"abstractUrl": "/proceedings-article/pacificvis/2015/07156379/12OmNyY4rqE",
"parentPublication": {
"id": "proceedings/pacificvis/2015/6879/0",
"title": "2015 IEEE Pacific Visualization Symposium (PacificVis)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600o4136",
"title": "Large Loss Matters in Weakly Supervised Multi-Label Classification",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600o4136/1H1lPFhwti0",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tk/5555/01/09999508",
"title": "Multi-View Partial Multi-Label Learning via Graph-Fusion-Based Label Enhancement",
"doi": null,
"abstractUrl": "/journal/tk/5555/01/09999508/1JrMyxvllsY",
"parentPublication": {
"id": "trans/tk",
"title": "IEEE Transactions on Knowledge & Data Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2022/5325/0/532500a441",
"title": "Label Guidance based Object Locating in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2022/532500a441/1JrRbIVIzPG",
"parentPublication": {
"id": "proceedings/ismar/2022/5325/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cbd/2022/0971/0/097100a116",
"title": "Label Enhancement with Sample Correlation via Sparse Representation",
"doi": null,
"abstractUrl": "/proceedings-article/cbd/2022/097100a116/1KdZhjYjh0Q",
"parentPublication": {
"id": "proceedings/cbd/2022/0971/0",
"title": "2022 Tenth International Conference on Advanced Cloud and Big Data (CBD)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hpcc-dss-smartcity-dependsys/2022/1993/0/199300b713",
"title": "An Effective Approach for Multi-label Classification with Missing Labels",
"doi": null,
"abstractUrl": "/proceedings-article/hpcc-dss-smartcity-dependsys/2022/199300b713/1LSPGZyQHio",
"parentPublication": {
"id": "proceedings/hpcc-dss-smartcity-dependsys/2022/1993/0",
"title": "2022 IEEE 24th Int Conf on High Performance Computing & Communications; 8th Int Conf on Data Science & Systems; 20th Int Conf on Smart City; 8th Int Conf on Dependability in Sensor, Cloud & Big Data Systems & Application (HPCC/DSS/SmartCity/DependSys)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2022/03/09171331",
"title": "Incomplete Label Multiple Instance Multiple Label Learning",
"doi": null,
"abstractUrl": "/journal/tp/2022/03/09171331/1mq8fINsglW",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2022/07/09354590",
"title": "Partial Multi-Label Learning With Noisy Label Identification",
"doi": null,
"abstractUrl": "/journal/tp/2022/07/09354590/1reXib2cwWk",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tk/2023/02/09465741",
"title": "Fast Label Enhancement for Label Distribution Learning",
"doi": null,
"abstractUrl": "/journal/tk/2023/02/09465741/1uIR9VwB7Xy",
"parentPublication": {
"id": "trans/tk",
"title": "IEEE Transactions on Knowledge & Data Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900a933",
"title": "Multi-Label Learning from Single Positive Labels",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900a933/1yeIVTYm12g",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09591492",
"articleId": "1y2FvGMxBuM",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09523888",
"articleId": "1wpqxgia3Vm",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1y2Frg4eSBy",
"name": "ttg202111-09523830s1-supp1-3106492.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg202111-09523830s1-supp1-3106492.mp4",
"extension": "mp4",
"size": "76 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNvsDHDY",
"title": "Jan.",
"year": "2020",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": "26",
"label": "Jan.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1cJj4SRFHeE",
"doi": "10.1109/TVCG.2019.2934307",
"abstract": "In recent years the t-distributed Stochastic Neighbor Embedding (t-SNE) algorithm has become one of the most used and insightful techniques for exploratory data analysis of high-dimensional data. It reveals clusters of high-dimensional data points at different scales while only requiring minimal tuning of its parameters. However, the computational complexity of the algorithm limits its application to relatively small datasets. To address this problem, several evolutions of t-SNE have been developed in recent years, mainly focusing on the scalability of the similarity computations between data points. However, these contributions are insufficient to achieve interactive rates when visualizing the evolution of the t-SNE embedding for large datasets. In this work, we present a novel approach to the minimization of the t-SNE objective function that heavily relies on graphics hardware and has linear computational complexity. Our technique decreases the computational cost of running t-SNE on datasets by orders of magnitude and retains or improves on the accuracy of past approximated techniques. We propose to approximate the repulsive forces between data points by splatting kernel textures for each data point. This approximation allows us to reformulate the t-SNE minimization problem as a series of tensor operations that can be efficiently executed on the graphics card. An efficient implementation of our technique is integrated and available for use in the widely used Google TensorFlow.js, and an open-source C++ library.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In recent years the t-distributed Stochastic Neighbor Embedding (t-SNE) algorithm has become one of the most used and insightful techniques for exploratory data analysis of high-dimensional data. It reveals clusters of high-dimensional data points at different scales while only requiring minimal tuning of its parameters. However, the computational complexity of the algorithm limits its application to relatively small datasets. To address this problem, several evolutions of t-SNE have been developed in recent years, mainly focusing on the scalability of the similarity computations between data points. However, these contributions are insufficient to achieve interactive rates when visualizing the evolution of the t-SNE embedding for large datasets. In this work, we present a novel approach to the minimization of the t-SNE objective function that heavily relies on graphics hardware and has linear computational complexity. Our technique decreases the computational cost of running t-SNE on datasets by orders of magnitude and retains or improves on the accuracy of past approximated techniques. We propose to approximate the repulsive forces between data points by splatting kernel textures for each data point. This approximation allows us to reformulate the t-SNE minimization problem as a series of tensor operations that can be efficiently executed on the graphics card. An efficient implementation of our technique is integrated and available for use in the widely used Google TensorFlow.js, and an open-source C++ library.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In recent years the t-distributed Stochastic Neighbor Embedding (t-SNE) algorithm has become one of the most used and insightful techniques for exploratory data analysis of high-dimensional data. It reveals clusters of high-dimensional data points at different scales while only requiring minimal tuning of its parameters. However, the computational complexity of the algorithm limits its application to relatively small datasets. To address this problem, several evolutions of t-SNE have been developed in recent years, mainly focusing on the scalability of the similarity computations between data points. However, these contributions are insufficient to achieve interactive rates when visualizing the evolution of the t-SNE embedding for large datasets. In this work, we present a novel approach to the minimization of the t-SNE objective function that heavily relies on graphics hardware and has linear computational complexity. Our technique decreases the computational cost of running t-SNE on datasets by orders of magnitude and retains or improves on the accuracy of past approximated techniques. We propose to approximate the repulsive forces between data points by splatting kernel textures for each data point. This approximation allows us to reformulate the t-SNE minimization problem as a series of tensor operations that can be efficiently executed on the graphics card. An efficient implementation of our technique is integrated and available for use in the widely used Google TensorFlow.js, and an open-source C++ library.",
"title": "GPGPU Linear Complexity t-SNE Optimization",
"normalizedTitle": "GPGPU Linear Complexity t-SNE Optimization",
"fno": "08811606",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Approximation Theory",
"Computational Complexity",
"Data Analysis",
"Data Visualisation",
"Graphics Processing Units",
"Mathematics Computing",
"Stochastic Processes",
"T SNE Minimization Problem",
"Approximated Techniques",
"Computational Cost",
"Linear Computational Complexity",
"T SNE Objective Function",
"Data Point",
"Similarity Computations",
"Minimal Tuning",
"High Dimensional Data Points",
"Exploratory Data Analysis",
"Insightful Techniques",
"Used Techniques",
"Stochastic Neighbor Embedding Algorithm",
"GPGPU Linear Complexity T SNE",
"Minimization",
"Linear Programming",
"Computational Modeling",
"Approximation Algorithms",
"Complexity Theory",
"Optimization",
"Data Visualization",
"High Dimensional Data",
"Dimensionality Reduction",
"Progressive Visual Analytics",
"Approximate Computation",
"GPGPU"
],
"authors": [
{
"givenName": "Nicola",
"surname": "Pezzotti",
"fullName": "Nicola Pezzotti",
"affiliation": "Google AI, Zürich, Switzerland",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Julian",
"surname": "Thijssen",
"fullName": "Julian Thijssen",
"affiliation": "Delft University of Technology, Delft, The Netherlands",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Alexander",
"surname": "Mordvintsev",
"fullName": "Alexander Mordvintsev",
"affiliation": "Google AI, Zürich, Switzerland",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Thomas",
"surname": "Höllt",
"fullName": "Thomas Höllt",
"affiliation": "Delft University of Technology, Delft, The Netherlands",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Baldur",
"surname": "Van Lew",
"fullName": "Baldur Van Lew",
"affiliation": "Leiden University Medical Center, Leiden, The Netherlands",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Boudewijn P.F.",
"surname": "Lelieveldt",
"fullName": "Boudewijn P.F. Lelieveldt",
"affiliation": "Delft University of Technology, Delft, The Netherlands",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Elmar",
"surname": "Eisemann",
"fullName": "Elmar Eisemann",
"affiliation": "Delft University of Technology, Delft, The Netherlands",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Anna",
"surname": "Vilanova",
"fullName": "Anna Vilanova",
"affiliation": "Delft University of Technology, Delft, The Netherlands",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2020-01-01 00:00:00",
"pubType": "trans",
"pages": "1172-1181",
"year": "2020",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/icdar/2017/3586/1/3586a487",
"title": "Nonlinear Manifold Embedding on Keyword Spotting Using t-SNE",
"doi": null,
"abstractUrl": "/proceedings-article/icdar/2017/3586a487/12OmNzlUKGo",
"parentPublication": {
"id": "proceedings/icdar/2017/3586/1",
"title": "2017 14th IAPR International Conference on Document Analysis and Recognition (ICDAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/greencom-ithingscpscom/2013/5046/0/06682333",
"title": "Facial Expression Recognition Based on t-SNE and AdaboostM2",
"doi": null,
"abstractUrl": "/proceedings-article/greencom-ithingscpscom/2013/06682333/12OmNzmclTi",
"parentPublication": {
"id": "proceedings/greencom-ithingscpscom/2013/5046/0",
"title": "2013 IEEE International Conference on Green Computing and Communications (GreenCom) and IEEE Internet of Things(iThings) and IEEE Cyber, Physical and Social Computing(CPSCom)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sbac-pad/2018/7769/0/08645912",
"title": "T-SNE-CUDA: GPU-Accelerated T-SNE and its Applications to Modern Data",
"doi": null,
"abstractUrl": "/proceedings-article/sbac-pad/2018/08645912/17QjJeZi1UO",
"parentPublication": {
"id": "proceedings/sbac-pad/2018/7769/0",
"title": "2018 30th International Symposium on Computer Architecture and High Performance Computing (SBAC-PAD)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600a011",
"title": "CO-SNE: Dimensionality Reduction and Visualization for Hyperbolic Data",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600a011/1H0NVwymKFG",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/big-data/2022/8045/0/10020217",
"title": "Informative Initialization and Kernel Selection Improves t-SNE for Biological Sequences",
"doi": null,
"abstractUrl": "/proceedings-article/big-data/2022/10020217/1KfT8iuxRS0",
"parentPublication": {
"id": "proceedings/big-data/2022/8045/0",
"title": "2022 IEEE International Conference on Big Data (Big Data)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/08/09064929",
"title": "t-viSNE: Interactive Assessment and Interpretation of t-SNE Projections",
"doi": null,
"abstractUrl": "/journal/tg/2020/08/09064929/1iZGzFjpwPu",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icde/2021/9184/0/918400b691",
"title": "Fast Similarity Computation for t-SNE",
"doi": null,
"abstractUrl": "/proceedings-article/icde/2021/918400b691/1uGXpqMZXag",
"parentPublication": {
"id": "proceedings/icde/2021/9184/0",
"title": "2021 IEEE 37th International Conference on Data Engineering (ICDE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icisce/2020/6406/0/640600c274",
"title": "Wind Turbine Rolling Bearing Fault Diagnosis Using t-SNE and GWO-SVM",
"doi": null,
"abstractUrl": "/proceedings-article/icisce/2020/640600c274/1x3kI5MLOCs",
"parentPublication": {
"id": "proceedings/icisce/2020/6406/0",
"title": "2020 7th International Conference on Information Science and Control Engineering (ICISCE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/01/09552856",
"title": "An Efficient Dual-Hierarchy t-SNE Minimization",
"doi": null,
"abstractUrl": "/journal/tg/2022/01/09552856/1xic3JUxdG8",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/01/09552433",
"title": "Joint <italic>t</italic>-SNE for Comparable Projections of Multiple High-Dimensional Datasets",
"doi": null,
"abstractUrl": "/journal/tg/2022/01/09552433/1xic8QCTZRe",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "08807303",
"articleId": "1cG6axGog24",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08807243",
"articleId": "1cG6lJTyreM",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNBqMDkL",
"title": "Aug.",
"year": "2020",
"issueNum": "08",
"idPrefix": "tg",
"pubType": "journal",
"volume": "26",
"label": "Aug.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1iZGzFjpwPu",
"doi": "10.1109/TVCG.2020.2986996",
"abstract": "t-Distributed Stochastic Neighbor Embedding (t-SNE) for the visualization of multidimensional data has proven to be a popular approach, with successful applications in a wide range of domains. Despite their usefulness, t-SNE projections can be hard to interpret or even misleading, which hurts the trustworthiness of the results. Understanding the details of t-SNE itself and the reasons behind specific patterns in its output may be a daunting task, especially for non-experts in dimensionality reduction. In this article, we present t-viSNE, an interactive tool for the visual exploration of t-SNE projections that enables analysts to inspect different aspects of their accuracy and meaning, such as the effects of hyper-parameters, distance and neighborhood preservation, densities and costs of specific neighborhoods, and the correlations between dimensions and visual patterns. We propose a coherent, accessible, and well-integrated collection of different views for the visualization of t-SNE projections. The applicability and usability of t-viSNE are demonstrated through hypothetical usage scenarios with real data sets. Finally, we present the results of a user study where the tool's effectiveness was evaluated. By bringing to light information that would normally be lost after running t-SNE, we hope to support analysts in using t-SNE and making its results better understandable.",
"abstracts": [
{
"abstractType": "Regular",
"content": "t-Distributed Stochastic Neighbor Embedding (t-SNE) for the visualization of multidimensional data has proven to be a popular approach, with successful applications in a wide range of domains. Despite their usefulness, t-SNE projections can be hard to interpret or even misleading, which hurts the trustworthiness of the results. Understanding the details of t-SNE itself and the reasons behind specific patterns in its output may be a daunting task, especially for non-experts in dimensionality reduction. In this article, we present t-viSNE, an interactive tool for the visual exploration of t-SNE projections that enables analysts to inspect different aspects of their accuracy and meaning, such as the effects of hyper-parameters, distance and neighborhood preservation, densities and costs of specific neighborhoods, and the correlations between dimensions and visual patterns. We propose a coherent, accessible, and well-integrated collection of different views for the visualization of t-SNE projections. The applicability and usability of t-viSNE are demonstrated through hypothetical usage scenarios with real data sets. Finally, we present the results of a user study where the tool's effectiveness was evaluated. By bringing to light information that would normally be lost after running t-SNE, we hope to support analysts in using t-SNE and making its results better understandable.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "t-Distributed Stochastic Neighbor Embedding (t-SNE) for the visualization of multidimensional data has proven to be a popular approach, with successful applications in a wide range of domains. Despite their usefulness, t-SNE projections can be hard to interpret or even misleading, which hurts the trustworthiness of the results. Understanding the details of t-SNE itself and the reasons behind specific patterns in its output may be a daunting task, especially for non-experts in dimensionality reduction. In this article, we present t-viSNE, an interactive tool for the visual exploration of t-SNE projections that enables analysts to inspect different aspects of their accuracy and meaning, such as the effects of hyper-parameters, distance and neighborhood preservation, densities and costs of specific neighborhoods, and the correlations between dimensions and visual patterns. We propose a coherent, accessible, and well-integrated collection of different views for the visualization of t-SNE projections. The applicability and usability of t-viSNE are demonstrated through hypothetical usage scenarios with real data sets. Finally, we present the results of a user study where the tool's effectiveness was evaluated. By bringing to light information that would normally be lost after running t-SNE, we hope to support analysts in using t-SNE and making its results better understandable.",
"title": "t-viSNE: Interactive Assessment and Interpretation of t-SNE Projections",
"normalizedTitle": "t-viSNE: Interactive Assessment and Interpretation of t-SNE Projections",
"fno": "09064929",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Data Analysis",
"Data Visualisation",
"Graphical User Interfaces",
"Interactive Systems",
"Stochastic Processes",
"T Vi SNE",
"Visual Patterns",
"Stochastic Neighbor Embedding",
"Interactive Assessment",
"Multidimensional Data Visualization",
"Dimensionality Reduction",
"Interactive Tool",
"Visual Exploration",
"Hyper Parameter Effects",
"Neighborhood Preservation",
"T SNE Projection Visualization",
"Tools",
"Visualization",
"Data Visualization",
"Task Analysis",
"Correlation",
"Principal Component Analysis",
"Dimensionality Reduction",
"Interpretable T SNE",
"Dimensionality Reduction",
"High Dimensional Data",
"Explainable Machine Learning",
"Visualization"
],
"authors": [
{
"givenName": "Angelos",
"surname": "Chatzimparmpas",
"fullName": "Angelos Chatzimparmpas",
"affiliation": "Department of Computer Science and Media Technology, Linnaeus University, Växjö, Sweden",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Rafael M.",
"surname": "Martins",
"fullName": "Rafael M. Martins",
"affiliation": "Department of Computer Science and Media Technology, Linnaeus University, Växjö, Sweden",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Andreas",
"surname": "Kerren",
"fullName": "Andreas Kerren",
"affiliation": "Department of Computer Science and Media Technology, Linnaeus University, Växjö, Sweden",
"__typename": "ArticleAuthorType"
}
],
"replicability": {
"isEnabled": true,
"codeDownloadUrl": "https://github.com/angeloschatzimparmpas/t-viSNE.git",
"codeRepositoryUrl": "https://github.com/angeloschatzimparmpas/t-viSNE",
"__typename": "ArticleReplicabilityType"
},
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "08",
"pubDate": "2020-08-01 00:00:00",
"pubType": "trans",
"pages": "2696-2714",
"year": "2020",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/greencom-ithingscpscom/2013/5046/0/06682333",
"title": "Facial Expression Recognition Based on t-SNE and AdaboostM2",
"doi": null,
"abstractUrl": "/proceedings-article/greencom-ithingscpscom/2013/06682333/12OmNzmclTi",
"parentPublication": {
"id": "proceedings/greencom-ithingscpscom/2013/5046/0",
"title": "2013 IEEE International Conference on Green Computing and Communications (GreenCom) and IEEE Internet of Things(iThings) and IEEE Cyber, Physical and Social Computing(CPSCom)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/01/08440820",
"title": "DimReader: Axis lines that explain non-linear projections",
"doi": null,
"abstractUrl": "/journal/tg/2019/01/08440820/17D45Xtvp8C",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sbac-pad/2018/7769/0/08645912",
"title": "T-SNE-CUDA: GPU-Accelerated T-SNE and its Applications to Modern Data",
"doi": null,
"abstractUrl": "/proceedings-article/sbac-pad/2018/08645912/17QjJeZi1UO",
"parentPublication": {
"id": "proceedings/sbac-pad/2018/7769/0",
"title": "2018 30th International Symposium on Computer Architecture and High Performance Computing (SBAC-PAD)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600a011",
"title": "CO-SNE: Dimensionality Reduction and Visualization for Hyperbolic Data",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600a011/1H0NVwymKFG",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/01/08811606",
"title": "GPGPU Linear Complexity t-SNE Optimization",
"doi": null,
"abstractUrl": "/journal/tg/2020/01/08811606/1cJj4SRFHeE",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ssiai/2020/5745/0/09094599",
"title": "Visualization and Detection of Changes in Brain States Using t-SNE",
"doi": null,
"abstractUrl": "/proceedings-article/ssiai/2020/09094599/1jVQEe9O2vS",
"parentPublication": {
"id": "proceedings/ssiai/2020/5745/0",
"title": "2020 IEEE Southwest Symposium on Image Analysis and Interpretation (SSIAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2021/8808/0/09412900",
"title": "q-SNE: Visualizing Data using q-Gaussian Distributed Stochastic Neighbor Embedding",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2021/09412900/1tmhROYroSA",
"parentPublication": {
"id": "proceedings/icpr/2021/8808/0",
"title": "2020 25th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icde/2021/9184/0/918400b691",
"title": "Fast Similarity Computation for t-SNE",
"doi": null,
"abstractUrl": "/proceedings-article/icde/2021/918400b691/1uGXpqMZXag",
"parentPublication": {
"id": "proceedings/icde/2021/9184/0",
"title": "2021 IEEE 37th International Conference on Data Engineering (ICDE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/01/09552856",
"title": "An Efficient Dual-Hierarchy t-SNE Minimization",
"doi": null,
"abstractUrl": "/journal/tg/2022/01/09552856/1xic3JUxdG8",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/01/09552433",
"title": "Joint <italic>t</italic>-SNE for Comparable Projections of Multiple High-Dimensional Datasets",
"doi": null,
"abstractUrl": "/journal/tg/2022/01/09552433/1xic8QCTZRe",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09078845",
"articleId": "1joA7lrylZC",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08625536",
"articleId": "17D45Wt3Exx",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1l5i6UjMsdW",
"name": "ttg202008-09064929s1-tvcg-2986996-mm.zip",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg202008-09064929s1-tvcg-2986996-mm.zip",
"extension": "zip",
"size": "163 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
}
|
{
"issue": {
"id": "1zBamVZHyne",
"title": "Jan.",
"year": "2022",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": "28",
"label": "Jan.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1xic8QCTZRe",
"doi": "10.1109/TVCG.2021.3114765",
"abstract": "We present Joint t-Stochastic Neighbor Embedding (Joint t-SNE), a technique to generate comparable projections of multiple high-dimensional datasets. Although t-SNE has been widely employed to visualize high-dimensional datasets from various domains, it is limited to projecting a single dataset. When a series of high-dimensional datasets, such as datasets changing over time, is projected independently using t-SNE, misaligned layouts are obtained. Even items with identical features across datasets are projected to different locations, making the technique unsuitable for comparison tasks. To tackle this problem, we introduce edge similarity, which captures the similarities between two adjacent time frames based on the Graphlet Frequency Distribution (GFD). We then integrate a novel loss term into the t-SNE loss function, which we call vector constraints, to preserve the vectors between projected points across the projections, allowing these points to serve as visual landmarks for direct comparisons between projections. Using synthetic datasets whose ground-truth structures are known, we show that Joint t-SNE outperforms existing techniques, including Dynamic t-SNE, in terms of local coherence error, Kullback-Leibler divergence, and neighborhood preservation. We also showcase a real-world use case to visualize and compare the activation of different layers of a neural network.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We present Joint t-Stochastic Neighbor Embedding (Joint t-SNE), a technique to generate comparable projections of multiple high-dimensional datasets. Although t-SNE has been widely employed to visualize high-dimensional datasets from various domains, it is limited to projecting a single dataset. When a series of high-dimensional datasets, such as datasets changing over time, is projected independently using t-SNE, misaligned layouts are obtained. Even items with identical features across datasets are projected to different locations, making the technique unsuitable for comparison tasks. To tackle this problem, we introduce edge similarity, which captures the similarities between two adjacent time frames based on the Graphlet Frequency Distribution (GFD). We then integrate a novel loss term into the t-SNE loss function, which we call vector constraints, to preserve the vectors between projected points across the projections, allowing these points to serve as visual landmarks for direct comparisons between projections. Using synthetic datasets whose ground-truth structures are known, we show that Joint t-SNE outperforms existing techniques, including Dynamic t-SNE, in terms of local coherence error, Kullback-Leibler divergence, and neighborhood preservation. We also showcase a real-world use case to visualize and compare the activation of different layers of a neural network.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We present Joint t-Stochastic Neighbor Embedding (Joint t-SNE), a technique to generate comparable projections of multiple high-dimensional datasets. Although t-SNE has been widely employed to visualize high-dimensional datasets from various domains, it is limited to projecting a single dataset. When a series of high-dimensional datasets, such as datasets changing over time, is projected independently using t-SNE, misaligned layouts are obtained. Even items with identical features across datasets are projected to different locations, making the technique unsuitable for comparison tasks. To tackle this problem, we introduce edge similarity, which captures the similarities between two adjacent time frames based on the Graphlet Frequency Distribution (GFD). We then integrate a novel loss term into the t-SNE loss function, which we call vector constraints, to preserve the vectors between projected points across the projections, allowing these points to serve as visual landmarks for direct comparisons between projections. Using synthetic datasets whose ground-truth structures are known, we show that Joint t-SNE outperforms existing techniques, including Dynamic t-SNE, in terms of local coherence error, Kullback-Leibler divergence, and neighborhood preservation. We also showcase a real-world use case to visualize and compare the activation of different layers of a neural network.",
"title": "Joint <italic>t</italic>-SNE for Comparable Projections of Multiple High-Dimensional Datasets",
"normalizedTitle": "Joint t-SNE for Comparable Projections of Multiple High-Dimensional Datasets",
"fno": "09552433",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Visualization",
"Task Analysis",
"Optimization",
"Time Measurement",
"Position Measurement",
"Gain Measurement",
"Distortion",
"High Dimensional Data",
"Projection",
"Embedding",
"T Stochastic Neighbor Embedding"
],
"authors": [
{
"givenName": "Yinqiao",
"surname": "Wang",
"fullName": "Yinqiao Wang",
"affiliation": "Shandong University, CN, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Lu",
"surname": "Chen",
"fullName": "Lu Chen",
"affiliation": "Shandong University, CN, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jaemin",
"surname": "Jo",
"fullName": "Jaemin Jo",
"affiliation": "Sungkyunkwan University, KR, South Korea",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Yunhai",
"surname": "Wang",
"fullName": "Yunhai Wang",
"affiliation": "Shandong University, CN, China",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-01-01 00:00:00",
"pubType": "trans",
"pages": "623-632",
"year": "2022",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/icdar/2017/3586/1/3586a487",
"title": "Nonlinear Manifold Embedding on Keyword Spotting Using t-SNE",
"doi": null,
"abstractUrl": "/proceedings-article/icdar/2017/3586a487/12OmNzlUKGo",
"parentPublication": {
"id": "proceedings/icdar/2017/3586/1",
"title": "2017 14th IAPR International Conference on Document Analysis and Recognition (ICDAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/greencom-ithingscpscom/2013/5046/0/06682333",
"title": "Facial Expression Recognition Based on t-SNE and AdaboostM2",
"doi": null,
"abstractUrl": "/proceedings-article/greencom-ithingscpscom/2013/06682333/12OmNzmclTi",
"parentPublication": {
"id": "proceedings/greencom-ithingscpscom/2013/5046/0",
"title": "2013 IEEE International Conference on Green Computing and Communications (GreenCom) and IEEE Internet of Things(iThings) and IEEE Cyber, Physical and Social Computing(CPSCom)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sbac-pad/2018/7769/0/08645912",
"title": "T-SNE-CUDA: GPU-Accelerated T-SNE and its Applications to Modern Data",
"doi": null,
"abstractUrl": "/proceedings-article/sbac-pad/2018/08645912/17QjJeZi1UO",
"parentPublication": {
"id": "proceedings/sbac-pad/2018/7769/0",
"title": "2018 30th International Symposium on Computer Architecture and High Performance Computing (SBAC-PAD)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/big-data/2022/8045/0/10020217",
"title": "Informative Initialization and Kernel Selection Improves t-SNE for Biological Sequences",
"doi": null,
"abstractUrl": "/proceedings-article/big-data/2022/10020217/1KfT8iuxRS0",
"parentPublication": {
"id": "proceedings/big-data/2022/8045/0",
"title": "2022 IEEE International Conference on Big Data (Big Data)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/01/08811606",
"title": "GPGPU Linear Complexity t-SNE Optimization",
"doi": null,
"abstractUrl": "/journal/tg/2020/01/08811606/1cJj4SRFHeE",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/08/09064929",
"title": "t-viSNE: Interactive Assessment and Interpretation of t-SNE Projections",
"doi": null,
"abstractUrl": "/journal/tg/2020/08/09064929/1iZGzFjpwPu",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ssiai/2020/5745/0/09094599",
"title": "Visualization and Detection of Changes in Brain States Using t-SNE",
"doi": null,
"abstractUrl": "/proceedings-article/ssiai/2020/09094599/1jVQEe9O2vS",
"parentPublication": {
"id": "proceedings/ssiai/2020/5745/0",
"title": "2020 IEEE Southwest Symposium on Image Analysis and Interpretation (SSIAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2021/8808/0/09412900",
"title": "q-SNE: Visualizing Data using q-Gaussian Distributed Stochastic Neighbor Embedding",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2021/09412900/1tmhROYroSA",
"parentPublication": {
"id": "proceedings/icpr/2021/8808/0",
"title": "2020 25th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icisce/2020/6406/0/640600c274",
"title": "Wind Turbine Rolling Bearing Fault Diagnosis Using t-SNE and GWO-SVM",
"doi": null,
"abstractUrl": "/proceedings-article/icisce/2020/640600c274/1x3kI5MLOCs",
"parentPublication": {
"id": "proceedings/icisce/2020/6406/0",
"title": "2020 7th International Conference on Information Science and Control Engineering (ICISCE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/01/09552856",
"title": "An Efficient Dual-Hierarchy t-SNE Minimization",
"doi": null,
"abstractUrl": "/journal/tg/2022/01/09552856/1xic3JUxdG8",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09552856",
"articleId": "1xic3JUxdG8",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09552233",
"articleId": "1xic56YNRyU",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1zBb23Bkg1O",
"name": "ttg202201-09552433s1-supp1-3114765.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg202201-09552433s1-supp1-3114765.pdf",
"extension": "pdf",
"size": "4.97 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
}
|
{
"issue": {
"id": "1zBamVZHyne",
"title": "Jan.",
"year": "2022",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": "28",
"label": "Jan.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1xic3JUxdG8",
"doi": "10.1109/TVCG.2021.3114817",
"abstract": "t-distributed Stochastic Neighbour Embedding (t-SNE) has become a standard for exploratory data analysis, as it is capable of revealing clusters even in complex data while requiring minimal user input. While its run-time complexity limited it to small datasets in the past, recent efforts improved upon the expensive similarity computations and the previously quadratic minimization. Nevertheless, t-SNE still has high runtime and memory costs when operating on millions of points. We present a novel method for executing the t-SNE minimization. While our method overall retains a linear runtime complexity, we obtain a significant performance increase in the most expensive part of the minimization. We achieve a significant improvement without a noticeable decrease in accuracy even when targeting a 3D embedding. Our method constructs a pair of spatial hierarchies over the embedding, which are simultaneously traversed to approximate many N-body interactions at once. We demonstrate an efficient GPGPU implementation and evaluate its performance against state-of-the-art methods on a variety of datasets.",
"abstracts": [
{
"abstractType": "Regular",
"content": "t-distributed Stochastic Neighbour Embedding (t-SNE) has become a standard for exploratory data analysis, as it is capable of revealing clusters even in complex data while requiring minimal user input. While its run-time complexity limited it to small datasets in the past, recent efforts improved upon the expensive similarity computations and the previously quadratic minimization. Nevertheless, t-SNE still has high runtime and memory costs when operating on millions of points. We present a novel method for executing the t-SNE minimization. While our method overall retains a linear runtime complexity, we obtain a significant performance increase in the most expensive part of the minimization. We achieve a significant improvement without a noticeable decrease in accuracy even when targeting a 3D embedding. Our method constructs a pair of spatial hierarchies over the embedding, which are simultaneously traversed to approximate many N-body interactions at once. We demonstrate an efficient GPGPU implementation and evaluate its performance against state-of-the-art methods on a variety of datasets.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "t-distributed Stochastic Neighbour Embedding (t-SNE) has become a standard for exploratory data analysis, as it is capable of revealing clusters even in complex data while requiring minimal user input. While its run-time complexity limited it to small datasets in the past, recent efforts improved upon the expensive similarity computations and the previously quadratic minimization. Nevertheless, t-SNE still has high runtime and memory costs when operating on millions of points. We present a novel method for executing the t-SNE minimization. While our method overall retains a linear runtime complexity, we obtain a significant performance increase in the most expensive part of the minimization. We achieve a significant improvement without a noticeable decrease in accuracy even when targeting a 3D embedding. Our method constructs a pair of spatial hierarchies over the embedding, which are simultaneously traversed to approximate many N-body interactions at once. We demonstrate an efficient GPGPU implementation and evaluate its performance against state-of-the-art methods on a variety of datasets.",
"title": "An Efficient Dual-Hierarchy t-SNE Minimization",
"normalizedTitle": "An Efficient Dual-Hierarchy t-SNE Minimization",
"fno": "09552856",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Computational Complexity",
"Data Analysis",
"Sampling Methods",
"Stochastic Processes",
"Efficient Dual Hierarchy T SNE Minimization",
"Stochastic Neighbour Embedding",
"Exploratory Data Analysis",
"Complex Data",
"Minimal User Input",
"Run Time Complexity",
"Expensive Similarity Computations",
"Quadratic Minimization",
"High Runtime",
"Memory Costs",
"Linear Runtime Complexity",
"Significant Performance Increase",
"Expensive Part",
"Method Constructs",
"Spatial Hierarchies",
"Efficient GPGPU Implementation",
"Minimization",
"Runtime",
"Complexity Theory",
"Kernel",
"Three Dimensional Displays",
"Graphics Processing Units",
"Force",
"High Dimensional Data",
"Dimensionality Reduction",
"Parallel Data Structures",
"Dual Hierarchy",
"GPGPU"
],
"authors": [
{
"givenName": "Mark",
"surname": "van de Ruit",
"fullName": "Mark van de Ruit",
"affiliation": "Delft University of Technology, Netherlands",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Markus",
"surname": "Billeter",
"fullName": "Markus Billeter",
"affiliation": "University of Leeds, England",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Elmar",
"surname": "Eisemann",
"fullName": "Elmar Eisemann",
"affiliation": "Delft University of Technology, Netherlands",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-01-01 00:00:00",
"pubType": "trans",
"pages": "614-622",
"year": "2022",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/icdar/2017/3586/1/3586a487",
"title": "Nonlinear Manifold Embedding on Keyword Spotting Using t-SNE",
"doi": null,
"abstractUrl": "/proceedings-article/icdar/2017/3586a487/12OmNzlUKGo",
"parentPublication": {
"id": "proceedings/icdar/2017/3586/1",
"title": "2017 14th IAPR International Conference on Document Analysis and Recognition (ICDAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sbac-pad/2018/7769/0/08645912",
"title": "T-SNE-CUDA: GPU-Accelerated T-SNE and its Applications to Modern Data",
"doi": null,
"abstractUrl": "/proceedings-article/sbac-pad/2018/08645912/17QjJeZi1UO",
"parentPublication": {
"id": "proceedings/sbac-pad/2018/7769/0",
"title": "2018 30th International Symposium on Computer Architecture and High Performance Computing (SBAC-PAD)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iiai-aai/2018/7447/0/744701a390",
"title": "A New Way of Visualizing Curricula Using Competencies: Cosine Similarity and t-SNE",
"doi": null,
"abstractUrl": "/proceedings-article/iiai-aai/2018/744701a390/19m3BVKUOLm",
"parentPublication": {
"id": "proceedings/iiai-aai/2018/7447/0",
"title": "2018 7th International Congress on Advanced Applied Informatics (IIAI-AAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/big-data/2022/8045/0/10020217",
"title": "Informative Initialization and Kernel Selection Improves t-SNE for Biological Sequences",
"doi": null,
"abstractUrl": "/proceedings-article/big-data/2022/10020217/1KfT8iuxRS0",
"parentPublication": {
"id": "proceedings/big-data/2022/8045/0",
"title": "2022 IEEE International Conference on Big Data (Big Data)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/01/08811606",
"title": "GPGPU Linear Complexity t-SNE Optimization",
"doi": null,
"abstractUrl": "/journal/tg/2020/01/08811606/1cJj4SRFHeE",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/08/09064929",
"title": "t-viSNE: Interactive Assessment and Interpretation of t-SNE Projections",
"doi": null,
"abstractUrl": "/journal/tg/2020/08/09064929/1iZGzFjpwPu",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2021/8808/0/09412900",
"title": "q-SNE: Visualizing Data using q-Gaussian Distributed Stochastic Neighbor Embedding",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2021/09412900/1tmhROYroSA",
"parentPublication": {
"id": "proceedings/icpr/2021/8808/0",
"title": "2020 25th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icde/2021/9184/0/918400b691",
"title": "Fast Similarity Computation for t-SNE",
"doi": null,
"abstractUrl": "/proceedings-article/icde/2021/918400b691/1uGXpqMZXag",
"parentPublication": {
"id": "proceedings/icde/2021/9184/0",
"title": "2021 IEEE 37th International Conference on Data Engineering (ICDE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icisce/2020/6406/0/640600c274",
"title": "Wind Turbine Rolling Bearing Fault Diagnosis Using t-SNE and GWO-SVM",
"doi": null,
"abstractUrl": "/proceedings-article/icisce/2020/640600c274/1x3kI5MLOCs",
"parentPublication": {
"id": "proceedings/icisce/2020/6406/0",
"title": "2020 7th International Conference on Information Science and Control Engineering (ICISCE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/01/09552433",
"title": "Joint <italic>t</italic>-SNE for Comparable Projections of Multiple High-Dimensional Datasets",
"doi": null,
"abstractUrl": "/journal/tg/2022/01/09552433/1xic8QCTZRe",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09552249",
"articleId": "1xicaqbwmA0",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09552433",
"articleId": "1xic8QCTZRe",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1zBaM4x9cTS",
"name": "ttg202201-09552856s1-supp2-3114817.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg202201-09552856s1-supp2-3114817.pdf",
"extension": "pdf",
"size": "44.1 MB",
"__typename": "WebExtraType"
},
{
"id": "1zBaMrymkUg",
"name": "ttg202201-09552856s1-supp1-3114817.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg202201-09552856s1-supp1-3114817.mp4",
"extension": "mp4",
"size": "97.4 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNvGPE8n",
"title": "Jan.",
"year": "2016",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": "22",
"label": "Jan.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUy0qnLI",
"doi": "10.1109/TVCG.2015.2467451",
"abstract": "Over the last 50 years a wide variety of automatic network layout algorithms have been developed. Some are fast heuristic techniques suitable for networks with hundreds of thousands of nodes while others are multi-stage frameworks for higher-quality layout of smaller networks. However, despite decades of research currently no algorithm produces layout of comparable quality to that of a human. We give a new “human-centred” methodology for automatic network layout algorithm design that is intended to overcome this deficiency. User studies are first used to identify the aesthetic criteria algorithms should encode, then an algorithm is developed that is informed by these criteria and finally, a follow-up study evaluates the algorithm output. We have used this new methodology to develop an automatic orthogonal network layout method, HOLA, that achieves measurably better (by user study) layout than the best available orthogonal layout algorithm and which produces layouts of comparable quality to those produced by hand.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Over the last 50 years a wide variety of automatic network layout algorithms have been developed. Some are fast heuristic techniques suitable for networks with hundreds of thousands of nodes while others are multi-stage frameworks for higher-quality layout of smaller networks. However, despite decades of research currently no algorithm produces layout of comparable quality to that of a human. We give a new “human-centred” methodology for automatic network layout algorithm design that is intended to overcome this deficiency. User studies are first used to identify the aesthetic criteria algorithms should encode, then an algorithm is developed that is informed by these criteria and finally, a follow-up study evaluates the algorithm output. We have used this new methodology to develop an automatic orthogonal network layout method, HOLA, that achieves measurably better (by user study) layout than the best available orthogonal layout algorithm and which produces layouts of comparable quality to those produced by hand.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Over the last 50 years a wide variety of automatic network layout algorithms have been developed. Some are fast heuristic techniques suitable for networks with hundreds of thousands of nodes while others are multi-stage frameworks for higher-quality layout of smaller networks. However, despite decades of research currently no algorithm produces layout of comparable quality to that of a human. We give a new “human-centred” methodology for automatic network layout algorithm design that is intended to overcome this deficiency. User studies are first used to identify the aesthetic criteria algorithms should encode, then an algorithm is developed that is informed by these criteria and finally, a follow-up study evaluates the algorithm output. We have used this new methodology to develop an automatic orthogonal network layout method, HOLA, that achieves measurably better (by user study) layout than the best available orthogonal layout algorithm and which produces layouts of comparable quality to those produced by hand.",
"title": "HOLA: Human-like Orthogonal Network Layout",
"normalizedTitle": "HOLA: Human-like Orthogonal Network Layout",
"fno": "07192690",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Layout",
"Algorithm Design And Analysis",
"Manuals",
"Standards",
"Software Algorithms",
"Visualization",
"Software",
"Graph Drawing Aesthetics",
"Graph Layout",
"Orthogonal Layout",
"Automatic Layout Algorithms",
"User Generated Layout",
"Graph Drawing Aesthetics",
"Graph Layout",
"Orthogonal Layout",
"Automatic Layout Algorithms",
"User Generated Layout"
],
"authors": [
{
"givenName": "Steve",
"surname": "Kieffer",
"fullName": "Steve Kieffer",
"affiliation": ", Monash University and NICTA Victoria",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Tim",
"surname": "Dwyer",
"fullName": "Tim Dwyer",
"affiliation": ", Monash University",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Kim",
"surname": "Marriott",
"fullName": "Kim Marriott",
"affiliation": ", Monash University and NICTA Victoria",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Michael",
"surname": "Wybrow",
"fullName": "Michael Wybrow",
"affiliation": ", Monash University",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2016-01-01 00:00:00",
"pubType": "trans",
"pages": "349-358",
"year": "2016",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/iciii/2010/4279/1/4279a380",
"title": "Research on Multi-row Layout Based on Genetic Algorithm",
"doi": null,
"abstractUrl": "/proceedings-article/iciii/2010/4279a380/12OmNAle6Bb",
"parentPublication": {
"id": "proceedings/iciii/2010/4279/1",
"title": "International Conference on Information Management, Innovation Management and Industrial Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cad-graphics/2015/8020/0/07450427",
"title": "Facade Layout Symmetrization",
"doi": null,
"abstractUrl": "/proceedings-article/cad-graphics/2015/07450427/12OmNviHKiH",
"parentPublication": {
"id": "proceedings/cad-graphics/2015/8020/0",
"title": "2015 14th International Conference on Computer-Aided Design and Computer Graphics (CAD/Graphics)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dsc/2017/1600/0/1600a339",
"title": "Interactive Network Clustering Layout Method Based on Implicit Connection",
"doi": null,
"abstractUrl": "/proceedings-article/dsc/2017/1600a339/12OmNwp74tK",
"parentPublication": {
"id": "proceedings/dsc/2017/1600/0",
"title": "2017 IEEE Second International Conference on Data Science in Cyberspace (DSC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vl/1998/8712/0/87120056",
"title": "Competitive Learning of Network Diagram Layout",
"doi": null,
"abstractUrl": "/proceedings-article/vl/1998/87120056/12OmNyRPgyj",
"parentPublication": {
"id": "proceedings/vl/1998/8712/0",
"title": "Visual Languages, IEEE Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/seke/1992/2830/0/00227922",
"title": "Automatic layout of diagrams for software specification",
"doi": null,
"abstractUrl": "/proceedings-article/seke/1992/00227922/12OmNz2TCuu",
"parentPublication": {
"id": "proceedings/seke/1992/2830/0",
"title": "Proceedings Fourth International Conference on Software Engineering and Knowledge Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/01/07192733",
"title": "High-Quality Ultra-Compact Grid Layout of Grouped Networks",
"doi": null,
"abstractUrl": "/journal/tg/2016/01/07192733/13rRUILc8fd",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ts/1986/04/06312901",
"title": "A layout algorithm for data flow diagrams",
"doi": null,
"abstractUrl": "/journal/ts/1986/04/06312901/13rRUwhpBPd",
"parentPublication": {
"id": "trans/ts",
"title": "IEEE Transactions on Software Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/si/2022/09/09798856",
"title": "A General and Automatic Cell Layout Generation Framework With Implicit Learning on Design Rules",
"doi": null,
"abstractUrl": "/journal/si/2022/09/09798856/1Eho9m24qS4",
"parentPublication": {
"id": "trans/si",
"title": "IEEE Transactions on Very Large Scale Integration (VLSI) Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cw/2019/2297/0/229700a109",
"title": "Automatic Furniture Layout Based on Functional Area Division",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2019/229700a109/1fHklquet0s",
"parentPublication": {
"id": "proceedings/cw/2019/2297/0",
"title": "2019 International Conference on Cyberworlds (CW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/01/09552233",
"title": "Automatic Polygon Layout for Primal-Dual Visualization of Hypergraphs",
"doi": null,
"abstractUrl": "/journal/tg/2022/01/09552233/1xic56YNRyU",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "07192733",
"articleId": "13rRUILc8fd",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07192724",
"articleId": "13rRUyuegpa",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "17ShDTXFgNH",
"name": "ttg201601-07192690s1.zip",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg201601-07192690s1.zip",
"extension": "zip",
"size": "8.09 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
}
|
{
"issue": {
"id": "1zBamVZHyne",
"title": "Jan.",
"year": "2022",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": "28",
"label": "Jan.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1xlw0LJ4OTm",
"doi": "10.1109/TVCG.2021.3114756",
"abstract": "Node-link visualizations are a familiar and powerful tool for displaying the relationships in a network. The readability of these visualizations highly depends on the spatial layout used for the nodes. In this paper, we focus on computing <italic>layered</italic> layouts, in which nodes are aligned on a set of parallel axes to better expose hierarchical or sequential relationships. Heuristic-based layouts are widely used as they scale well to larger networks and usually create readable, albeit sub-optimal, visualizations. We instead use a <italic>layout optimization model</italic> that prioritizes <italic>optimality</italic> - as compared to <italic>scalability</italic> - because an optimal solution not only represents the best attainable result, but can also serve as a baseline to evaluate the effectiveness of layout heuristics. We take an important step towards powerful and flexible network visualization by proposing S<sc>tratisfimal</sc> L<sc>ayout</sc>, a <italic>modular integer-linear-programming formulation</italic> that can consider several important readability criteria <italic>simultaneously</italic> — crossing reduction, edge bendiness, and nested and multi-layer groups. The layout can be adapted to diverse use cases through its modularity. Individual features can be enabled and customized depending on the application. We provide open-source and documented implementations of the layout, both for web-based and desktop visualizations. As a proof-of-concept, we apply it to the problem of visualizing complicated SQL queries, which have features that we believe cannot be addressed by existing layout optimization models. We also include a benchmark network generator and the results of an empirical evaluation to assess the performance trade-offs of our design choices. A full version of this paper with all appendices, data, and source code is available at osf.io/qdyt9 with live examples at <uri>https://visdunneright.github.io/stratisfimal/</uri>.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Node-link visualizations are a familiar and powerful tool for displaying the relationships in a network. The readability of these visualizations highly depends on the spatial layout used for the nodes. In this paper, we focus on computing <italic>layered</italic> layouts, in which nodes are aligned on a set of parallel axes to better expose hierarchical or sequential relationships. Heuristic-based layouts are widely used as they scale well to larger networks and usually create readable, albeit sub-optimal, visualizations. We instead use a <italic>layout optimization model</italic> that prioritizes <italic>optimality</italic> - as compared to <italic>scalability</italic> - because an optimal solution not only represents the best attainable result, but can also serve as a baseline to evaluate the effectiveness of layout heuristics. We take an important step towards powerful and flexible network visualization by proposing S<sc>tratisfimal</sc> L<sc>ayout</sc>, a <italic>modular integer-linear-programming formulation</italic> that can consider several important readability criteria <italic>simultaneously</italic> — crossing reduction, edge bendiness, and nested and multi-layer groups. The layout can be adapted to diverse use cases through its modularity. Individual features can be enabled and customized depending on the application. We provide open-source and documented implementations of the layout, both for web-based and desktop visualizations. As a proof-of-concept, we apply it to the problem of visualizing complicated SQL queries, which have features that we believe cannot be addressed by existing layout optimization models. We also include a benchmark network generator and the results of an empirical evaluation to assess the performance trade-offs of our design choices. A full version of this paper with all appendices, data, and source code is available at osf.io/qdyt9 with live examples at <uri>https://visdunneright.github.io/stratisfimal/</uri>.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Node-link visualizations are a familiar and powerful tool for displaying the relationships in a network. The readability of these visualizations highly depends on the spatial layout used for the nodes. In this paper, we focus on computing layered layouts, in which nodes are aligned on a set of parallel axes to better expose hierarchical or sequential relationships. Heuristic-based layouts are widely used as they scale well to larger networks and usually create readable, albeit sub-optimal, visualizations. We instead use a layout optimization model that prioritizes optimality - as compared to scalability - because an optimal solution not only represents the best attainable result, but can also serve as a baseline to evaluate the effectiveness of layout heuristics. We take an important step towards powerful and flexible network visualization by proposing Stratisfimal Layout, a modular integer-linear-programming formulation that can consider several important readability criteria simultaneously — crossing reduction, edge bendiness, and nested and multi-layer groups. The layout can be adapted to diverse use cases through its modularity. Individual features can be enabled and customized depending on the application. We provide open-source and documented implementations of the layout, both for web-based and desktop visualizations. As a proof-of-concept, we apply it to the problem of visualizing complicated SQL queries, which have features that we believe cannot be addressed by existing layout optimization models. We also include a benchmark network generator and the results of an empirical evaluation to assess the performance trade-offs of our design choices. A full version of this paper with all appendices, data, and source code is available at osf.io/qdyt9 with live examples at https://visdunneright.github.io/stratisfimal/.",
"title": "STRATISFIMAL LAYOUT: A modular optimization model for laying out layered node-link network visualizations",
"normalizedTitle": "STRATISFIMAL LAYOUT: A modular optimization model for laying out layered node-link network visualizations",
"fno": "09556579",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Layout",
"Structured Query Language",
"Visualization",
"Optimization",
"Scalability",
"Integer Linear Programming",
"Computational Modeling",
"Layered Node Link Visualization",
"Integer Linear Programming",
"Crossing Reduction",
"Bendiness Reduction",
"Nested Groups"
],
"authors": [
{
"givenName": "Sara",
"surname": "di Bartolomeo",
"fullName": "Sara di Bartolomeo",
"affiliation": "Northeastern University, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Mirek",
"surname": "Riedewald",
"fullName": "Mirek Riedewald",
"affiliation": "Northeastern University, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Wolfgang",
"surname": "Gatterbauer",
"fullName": "Wolfgang Gatterbauer",
"affiliation": "Northeastern University, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Cody",
"surname": "Dunne",
"fullName": "Cody Dunne",
"affiliation": "Northeastern University, USA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-01-01 00:00:00",
"pubType": "trans",
"pages": "324-334",
"year": "2022",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/iv/2011/0868/0/06004064",
"title": "Listening to Managers: A Study about Visualizations in Corporate Presentations",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2011/06004064/12OmNqBbHF8",
"parentPublication": {
"id": "proceedings/iv/2011/0868/0",
"title": "2011 15th International Conference on Information Visualisation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pacificvis/2013/4797/0/06596147",
"title": "On the faithfulness of graph visualizations",
"doi": null,
"abstractUrl": "/proceedings-article/pacificvis/2013/06596147/12OmNwCJON7",
"parentPublication": {
"id": "proceedings/pacificvis/2013/4797/0",
"title": "2013 IEEE Pacific Visualization Symposium (PacificVis)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-infovis/1997/8189/0/81890002",
"title": "H3: laying out large directed graphs in 3D hyperbolic space",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-infovis/1997/81890002/12OmNwEJ10t",
"parentPublication": {
"id": "proceedings/ieee-infovis/1997/8189/0",
"title": "Information Visualization, IEEE Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2017/0831/0/0831a288",
"title": "Sketch-Based Interactions for Untangling of Force-Directed Graphs",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2017/0831a288/12OmNyO8tVY",
"parentPublication": {
"id": "proceedings/iv/2017/0831/0",
"title": "2017 21st International Conference Information Visualisation (IV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/12/08233127",
"title": "Atom: A Grammar for Unit Visualizations",
"doi": null,
"abstractUrl": "/journal/tg/2018/12/08233127/14H4WLzSYsE",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/01/09904490",
"title": "A Scanner Deeply: Predicting Gaze Heatmaps on Visualizations Using Crowdsourced Eye Movement Data",
"doi": null,
"abstractUrl": "/journal/tg/2023/01/09904490/1H1gj9xTTG0",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09916137",
"title": "Revisiting the Design Patterns of Composite Visualizations",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09916137/1HojAjSAGNq",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/01/09917516",
"title": "Geo-Storylines: Integrating Maps into Storyline Visualizations",
"doi": null,
"abstractUrl": "/journal/tg/2023/01/09917516/1HrexIf2zZe",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09984953",
"title": "VISAtlas: An Image-based Exploration and Query System for Large Visualization Collections via Neural Image Embedding",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09984953/1J6d2SwfUT6",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/02/09165928",
"title": "Hybrid Graph Visualizations With ChordLink: Algorithms, Experiments, and Applications",
"doi": null,
"abstractUrl": "/journal/tg/2022/02/09165928/1mevWoz3hM4",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09552919",
"articleId": "1xibXgJW32U",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09552902",
"articleId": "1xic4qsF8zK",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1zBaFXEllsI",
"name": "ttg202201-09556579s1-tvcg-3114756-mm.zip",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg202201-09556579s1-tvcg-3114756-mm.zip",
"extension": "zip",
"size": "55.4 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNvsDHDY",
"title": "Jan.",
"year": "2020",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": "26",
"label": "Jan.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1cG6uY7sFEs",
"doi": "10.1109/TVCG.2019.2934797",
"abstract": "Think-aloud protocols are widely used by user experience (UX) practitioners in usability testing to uncover issues in user interface design. It is often arduous to analyze large amounts of recorded think-aloud sessions and few UX practitioners have an opportunity to get a second perspective during their analysis due to time and resource constraints. Inspired by the recent research that shows subtle verbalization and speech patterns tend to occur when users encounter usability problems, we take the first step to design and evaluate an intelligent visual analytics tool that leverages such patterns to identify usability problem encounters and present them to UX practitioners to assist their analysis. We first conducted and recorded think-aloud sessions, and then extracted textual and acoustic features from the recordings and trained machine learning (ML) models to detect problem encounters. Next, we iteratively designed and developed a visual analytics tool, VisTA, which enables dynamic investigation of think-aloud sessions with a timeline visualization of ML predictions and input features. We conducted a between-subjects laboratory study to compare three conditions, i.e., VisTA, VisTASimple (no visualization of the ML's input features), and Baseline (no ML information at all), with 30 UX professionals. The findings show that UX professionals identified more problem encounters when using VisTA than Baseline by leveraging the problem visualization as an overview, anticipations, and anchors as well as the feature visualization as a means to understand what ML considers and omits. Our findings also provide insights into how they treated ML, dealt with (dis)agreement with ML, and reviewed the videos (i.e., play, pause, and rewind).",
"abstracts": [
{
"abstractType": "Regular",
"content": "Think-aloud protocols are widely used by user experience (UX) practitioners in usability testing to uncover issues in user interface design. It is often arduous to analyze large amounts of recorded think-aloud sessions and few UX practitioners have an opportunity to get a second perspective during their analysis due to time and resource constraints. Inspired by the recent research that shows subtle verbalization and speech patterns tend to occur when users encounter usability problems, we take the first step to design and evaluate an intelligent visual analytics tool that leverages such patterns to identify usability problem encounters and present them to UX practitioners to assist their analysis. We first conducted and recorded think-aloud sessions, and then extracted textual and acoustic features from the recordings and trained machine learning (ML) models to detect problem encounters. Next, we iteratively designed and developed a visual analytics tool, VisTA, which enables dynamic investigation of think-aloud sessions with a timeline visualization of ML predictions and input features. We conducted a between-subjects laboratory study to compare three conditions, i.e., VisTA, VisTASimple (no visualization of the ML's input features), and Baseline (no ML information at all), with 30 UX professionals. The findings show that UX professionals identified more problem encounters when using VisTA than Baseline by leveraging the problem visualization as an overview, anticipations, and anchors as well as the feature visualization as a means to understand what ML considers and omits. Our findings also provide insights into how they treated ML, dealt with (dis)agreement with ML, and reviewed the videos (i.e., play, pause, and rewind).",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Think-aloud protocols are widely used by user experience (UX) practitioners in usability testing to uncover issues in user interface design. It is often arduous to analyze large amounts of recorded think-aloud sessions and few UX practitioners have an opportunity to get a second perspective during their analysis due to time and resource constraints. Inspired by the recent research that shows subtle verbalization and speech patterns tend to occur when users encounter usability problems, we take the first step to design and evaluate an intelligent visual analytics tool that leverages such patterns to identify usability problem encounters and present them to UX practitioners to assist their analysis. We first conducted and recorded think-aloud sessions, and then extracted textual and acoustic features from the recordings and trained machine learning (ML) models to detect problem encounters. Next, we iteratively designed and developed a visual analytics tool, VisTA, which enables dynamic investigation of think-aloud sessions with a timeline visualization of ML predictions and input features. We conducted a between-subjects laboratory study to compare three conditions, i.e., VisTA, VisTASimple (no visualization of the ML's input features), and Baseline (no ML information at all), with 30 UX professionals. The findings show that UX professionals identified more problem encounters when using VisTA than Baseline by leveraging the problem visualization as an overview, anticipations, and anchors as well as the feature visualization as a means to understand what ML considers and omits. Our findings also provide insights into how they treated ML, dealt with (dis)agreement with ML, and reviewed the videos (i.e., play, pause, and rewind).",
"title": "<bold>VisTA</bold>: Integrating Machine Intelligence with <bold>Vis</bold>ualization to Support the Investigation of <bold>T</bold>hink-<bold>A</bold>loud Sessions",
"normalizedTitle": "VisTA: Integrating Machine Intelligence with Visualization to Support the Investigation of Think-Aloud Sessions",
"fno": "08807301",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Data Analysis",
"Data Visualisation",
"Ergonomics",
"Human Computer Interaction",
"Learning Artificial Intelligence",
"Protocols",
"Task Analysis",
"User Interfaces",
"Machine Intelligence",
"Think Aloud Protocols",
"User Experience Practitioners",
"Usability Testing",
"User Interface Design",
"Think Aloud Sessions",
"UX Practitioners",
"Speech Patterns",
"Usability Problem",
"Intelligent Visual Analytics Tool",
"Vis TA",
"Timeline Visualization",
"Feature Visualization",
"Verbalization Patterns",
"Usability",
"Visual Analytics",
"Tools",
"Machine Intelligence",
"Feature Extraction",
"Machine Learning",
"Think Aloud",
"Visual Analytics",
"Machine Intelligence",
"User Study",
"Usability Problems",
"Session Review Behavior",
"UX Practices"
],
"authors": [
{
"givenName": "Mingming",
"surname": "Fan",
"fullName": "Mingming Fan",
"affiliation": "University of Toronto",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Ke",
"surname": "Wu",
"fullName": "Ke Wu",
"affiliation": "University of Toronto",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jian",
"surname": "Zhao",
"fullName": "Jian Zhao",
"affiliation": "University of Waterloo",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Yue",
"surname": "Li",
"fullName": "Yue Li",
"affiliation": "University of Toronto",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Winter",
"surname": "Wei",
"fullName": "Winter Wei",
"affiliation": "University of Toronto",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Khai N.",
"surname": "Truong",
"fullName": "Khai N. Truong",
"affiliation": "University of Toronto",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2020-01-01 00:00:00",
"pubType": "trans",
"pages": "343-352",
"year": "2020",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "trans/tg/2016/01/07194834",
"title": "SensePath: Understanding the Sensemaking Process Through Analytic Provenance",
"doi": null,
"abstractUrl": "/journal/tg/2016/01/07194834/13rRUEgarnM",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/12/06876022",
"title": "The Effects of Interactive Latency on Exploratory Visual Analysis",
"doi": null,
"abstractUrl": "/journal/tg/2014/12/06876022/13rRUxYINfd",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2022/01/09693359",
"title": "Visual Analytics for Human-Centered Machine Learning",
"doi": null,
"abstractUrl": "/magazine/cg/2022/01/09693359/1As7zEHCGn6",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vis/2022/8812/0/881200a045",
"title": "Visual Auditor: Interactive Visualization for Detection and Summarization of Model Biases",
"doi": null,
"abstractUrl": "/proceedings-article/vis/2022/881200a045/1J6h6YYG9sA",
"parentPublication": {
"id": "proceedings/vis/2022/8812/0",
"title": "2022 IEEE Visualization and Visual Analytics (VIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vis/2022/8812/0/881200a060",
"title": "TimberTrek: Exploring and Curating Sparse Decision Trees with Interactive Visualization",
"doi": null,
"abstractUrl": "/proceedings-article/vis/2022/881200a060/1J6halInwS4",
"parentPublication": {
"id": "proceedings/vis/2022/8812/0",
"title": "2022 IEEE Visualization and Visual Analytics (VIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/big-data/2022/8045/0/10020585",
"title": "Visual Analytics System of Comprehensive Data Quality Improvement for Machine Learning using Data- and Process-driven Strategies",
"doi": null,
"abstractUrl": "/proceedings-article/big-data/2022/10020585/1KfTcrlfNdu",
"parentPublication": {
"id": "proceedings/big-data/2022/8045/0",
"title": "2022 IEEE International Conference on Big Data (Big Data)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/10034833",
"title": "uxSense: Supporting User Experience Analysis with Visualization and Computer Vision",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/10034833/1KpxdALb4By",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/02/09222086",
"title": "<italic>PipelineProfiler:</italic> A Visual Analytics Tool for the Exploration of AutoML Pipelines",
"doi": null,
"abstractUrl": "/journal/tg/2021/02/09222086/1nTrpup4LZe",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ispass/2021/8643/0/864300a073",
"title": "ViStA: Video Streaming and Analytics Benchmark",
"doi": null,
"abstractUrl": "/proceedings-article/ispass/2021/864300a073/1taFjsp7HKo",
"parentPublication": {
"id": "proceedings/ispass/2021/8643/0",
"title": "2021 IEEE International Symposium on Performance Analysis of Systems and Software (ISPASS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/01/09552211",
"title": "CoUX: Collaborative Visual Analysis of Think-Aloud Usability Test Videos for Digital Interfaces",
"doi": null,
"abstractUrl": "/journal/tg/2022/01/09552211/1xic1bREyqY",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "08805430",
"articleId": "1cG4Ahb0KnC",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08805439",
"articleId": "1cG4DVd6FcQ",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
|
{
"issue": {
"id": "1qL5hsvvVkc",
"title": "Feb.",
"year": "2021",
"issueNum": "02",
"idPrefix": "tg",
"pubType": "journal",
"volume": "27",
"label": "Feb.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1nTrQ1hHyyA",
"doi": "10.1109/TVCG.2020.3030427",
"abstract": "Abstract data has no natural scale and so interactive data visualizations must provide techniques to allow the user to choose their viewpoint and scale. Such techniques are well established in desktop visualization tools. The two most common techniques are zoom+pan and overview+detail. However, how best to enable the analyst to navigate and view abstract data at different levels of scale in immersive environments has not previously been studied. We report the findings of the first systematic study of immersive navigation techniques for 3D scatterplots. We tested four conditions that represent our best attempt to adapt standard 2D navigation techniques to data visualization in an immersive environment while still providing standard immersive navigation techniques through physical movement and teleportation. We compared room-sized visualization versus a zooming interface, each with and without an overview. We find significant differences in participants' response times and accuracy for a number of standard visual analysis tasks. Both zoom and overview provide benefits over standard locomotion support alone (i.e., physical movement and pointer teleportation). However, which variation is superior, depends on the task. We obtain a more nuanced understanding of the results by analyzing them in terms of a time-cost model for the different components of navigation: way-finding, travel, number of travel steps, and context switching.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Abstract data has no natural scale and so interactive data visualizations must provide techniques to allow the user to choose their viewpoint and scale. Such techniques are well established in desktop visualization tools. The two most common techniques are zoom+pan and overview+detail. However, how best to enable the analyst to navigate and view abstract data at different levels of scale in immersive environments has not previously been studied. We report the findings of the first systematic study of immersive navigation techniques for 3D scatterplots. We tested four conditions that represent our best attempt to adapt standard 2D navigation techniques to data visualization in an immersive environment while still providing standard immersive navigation techniques through physical movement and teleportation. We compared room-sized visualization versus a zooming interface, each with and without an overview. We find significant differences in participants' response times and accuracy for a number of standard visual analysis tasks. Both zoom and overview provide benefits over standard locomotion support alone (i.e., physical movement and pointer teleportation). However, which variation is superior, depends on the task. We obtain a more nuanced understanding of the results by analyzing them in terms of a time-cost model for the different components of navigation: way-finding, travel, number of travel steps, and context switching.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Abstract data has no natural scale and so interactive data visualizations must provide techniques to allow the user to choose their viewpoint and scale. Such techniques are well established in desktop visualization tools. The two most common techniques are zoom+pan and overview+detail. However, how best to enable the analyst to navigate and view abstract data at different levels of scale in immersive environments has not previously been studied. We report the findings of the first systematic study of immersive navigation techniques for 3D scatterplots. We tested four conditions that represent our best attempt to adapt standard 2D navigation techniques to data visualization in an immersive environment while still providing standard immersive navigation techniques through physical movement and teleportation. We compared room-sized visualization versus a zooming interface, each with and without an overview. We find significant differences in participants' response times and accuracy for a number of standard visual analysis tasks. Both zoom and overview provide benefits over standard locomotion support alone (i.e., physical movement and pointer teleportation). However, which variation is superior, depends on the task. We obtain a more nuanced understanding of the results by analyzing them in terms of a time-cost model for the different components of navigation: way-finding, travel, number of travel steps, and context switching.",
"title": "Embodied Navigation in Immersive Abstract Data Visualization: Is Overview+Detail or Zooming Better for 3D Scatterplots?",
"normalizedTitle": "Embodied Navigation in Immersive Abstract Data Visualization: Is Overview+Detail or Zooming Better for 3D Scatterplots?",
"fno": "09222098",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Data Visualisation",
"Interactive Systems",
"Navigation",
"User Interfaces",
"Overview Detail Technique",
"Zoom Pan Technique",
"Standard Immersive Navigation Techniques",
"Standard 2 D Navigation Techniques",
"Immersive Environment",
"Desktop Visualization Tools",
"Interactive Data Visualizations",
"Natural Scale",
"Immersive Abstract Data Visualization",
"Embodied Navigation",
"Pointer Teleportation",
"Physical Movement",
"Standard Locomotion Support",
"Standard Visual Analysis Tasks",
"Zooming Interface",
"Room Sized Visualization",
"Navigation",
"Data Visualization",
"Three Dimensional Displays",
"Legged Locomotion",
"Task Analysis",
"Standards",
"Two Dimensional Displays",
"Immersive Analytics",
"Information Visualization",
"Virtual Reality",
"Navigation",
"Overview Detail",
"Zooming",
"Scatterplot"
],
"authors": [
{
"givenName": "Yalong",
"surname": "Yang",
"fullName": "Yalong Yang",
"affiliation": "Harvard University, School of Engineering and Applied Sciences, Cambridge, MA, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Maxime",
"surname": "Cordeil",
"fullName": "Maxime Cordeil",
"affiliation": "Department of Human-Centred Computing, Faculty of Information Technology, Monash University, Melbourne, Australia",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Johanna",
"surname": "Beyer",
"fullName": "Johanna Beyer",
"affiliation": "Harvard University, School of Engineering and Applied Sciences, Cambridge, MA, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Tim",
"surname": "Dwyer",
"fullName": "Tim Dwyer",
"affiliation": "Department of Human-Centred Computing, Faculty of Information Technology, Monash University, Melbourne, Australia",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Kim",
"surname": "Marriott",
"fullName": "Kim Marriott",
"affiliation": "Department of Human-Centred Computing, Faculty of Information Technology, Monash University, Melbourne, Australia",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Hanspeter",
"surname": "Pfister",
"fullName": "Hanspeter Pfister",
"affiliation": "Harvard University, School of Engineering and Applied Sciences, Cambridge, MA, USA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "02",
"pubDate": "2021-02-01 00:00:00",
"pubType": "trans",
"pages": "1214-1224",
"year": "2021",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2013/4795/0/06549349",
"title": "Visual exploration of the infinite canvas",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2013/06549349/12OmNzn38XE",
"parentPublication": {
"id": "proceedings/vr/2013/4795/0",
"title": "2013 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446130",
"title": "Rapid, Continuous Movement Between Nodes as an Accessible Virtual Reality Locomotion Technique",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446130/13bd1f3HvEx",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446144",
"title": "Redirected Scene Rotation for Immersive Movie Experiences",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446144/13bd1fHrlRD",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08447558",
"title": "Immersive Visualization of Abstract Information: An Evaluation on Dimensionally-Reduced Data Scatterplots",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08447558/13bd1tMztYK",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bdva/2018/9194/0/08533895",
"title": "Evaluating Navigation Techniques for 3D Graph Visualizations in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/bdva/2018/08533895/17D45VN31ge",
"parentPublication": {
"id": "proceedings/bdva/2018/9194/0",
"title": "2018 International Symposium on Big Data Visual and Immersive Analytics (BDVA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/10113740",
"title": "SceneFusion: Room-Scale Environmental Fusion for Efficient Traveling Between Separate Virtual Environments",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/10113740/1MNbKAm1pUQ",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797777",
"title": "Exploration of Large Omnidirectional Images in Immersive Environments",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797777/1cJ0JISlXDG",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/09/09359481",
"title": "The Effect of Exploration Mode and Frame of Reference in Immersive Analytics",
"doi": null,
"abstractUrl": "/journal/tg/2022/09/09359481/1rlAR6130Dm",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/05/09382870",
"title": "Group Navigation for Guided Tours in Distributed Virtual Environments",
"doi": null,
"abstractUrl": "/journal/tg/2021/05/09382870/1saZCxsOG9q",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a480",
"title": "Analysis of Positional Tracking Space Usage when using Teleportation",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a480/1tnXfrT4ere",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09225008",
"articleId": "1nWJFACcD4I",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09222263",
"articleId": "1nTqyTSNcCA",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1qLhLQUYsG4",
"name": "ttg202102-09222098s1-tvcg-3030427-mm.zip",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg202102-09222098s1-tvcg-3030427-mm.zip",
"extension": "zip",
"size": "80.6 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
}
|
{
"issue": {
"id": "1qL5hsvvVkc",
"title": "Feb.",
"year": "2021",
"issueNum": "02",
"idPrefix": "tg",
"pubType": "journal",
"volume": "27",
"label": "Feb.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1nV6cy8Xk5i",
"doi": "10.1109/TVCG.2020.3030460",
"abstract": "In this work we propose the combination of large interactive displays with personal head-mounted Augmented Reality (AR) for information visualization to facilitate data exploration and analysis. Even though large displays provide more display space, they are challenging with regard to perception, effective multi-user support, and managing data density and complexity. To address these issues and illustrate our proposed setup, we contribute an extensive design space comprising first, the spatial alignment of display, visualizations, and objects in AR space. Next, we discuss which parts of a visualization can be augmented. Finally, we analyze how AR can be used to display personal views in order to show additional information and to minimize the mutual disturbance of data analysts. Based on this conceptual foundation, we present a number of exemplary techniques for extending visualizations with AR and discuss their relation to our design space. We further describe how these techniques address typical visualization problems that we have identified during our literature research. To examine our concepts, we introduce a generic AR visualization framework as well as a prototype implementing several example techniques. In order to demonstrate their potential, we further present a use case walkthrough in which we analyze a movie data set. From these experiences, we conclude that the contributed techniques can be useful in exploring and understanding multivariate data. We are convinced that the extension of large displays with AR for information visualization has a great potential for data analysis and sense-making.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this work we propose the combination of large interactive displays with personal head-mounted Augmented Reality (AR) for information visualization to facilitate data exploration and analysis. Even though large displays provide more display space, they are challenging with regard to perception, effective multi-user support, and managing data density and complexity. To address these issues and illustrate our proposed setup, we contribute an extensive design space comprising first, the spatial alignment of display, visualizations, and objects in AR space. Next, we discuss which parts of a visualization can be augmented. Finally, we analyze how AR can be used to display personal views in order to show additional information and to minimize the mutual disturbance of data analysts. Based on this conceptual foundation, we present a number of exemplary techniques for extending visualizations with AR and discuss their relation to our design space. We further describe how these techniques address typical visualization problems that we have identified during our literature research. To examine our concepts, we introduce a generic AR visualization framework as well as a prototype implementing several example techniques. In order to demonstrate their potential, we further present a use case walkthrough in which we analyze a movie data set. From these experiences, we conclude that the contributed techniques can be useful in exploring and understanding multivariate data. We are convinced that the extension of large displays with AR for information visualization has a great potential for data analysis and sense-making.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this work we propose the combination of large interactive displays with personal head-mounted Augmented Reality (AR) for information visualization to facilitate data exploration and analysis. Even though large displays provide more display space, they are challenging with regard to perception, effective multi-user support, and managing data density and complexity. To address these issues and illustrate our proposed setup, we contribute an extensive design space comprising first, the spatial alignment of display, visualizations, and objects in AR space. Next, we discuss which parts of a visualization can be augmented. Finally, we analyze how AR can be used to display personal views in order to show additional information and to minimize the mutual disturbance of data analysts. Based on this conceptual foundation, we present a number of exemplary techniques for extending visualizations with AR and discuss their relation to our design space. We further describe how these techniques address typical visualization problems that we have identified during our literature research. To examine our concepts, we introduce a generic AR visualization framework as well as a prototype implementing several example techniques. In order to demonstrate their potential, we further present a use case walkthrough in which we analyze a movie data set. From these experiences, we conclude that the contributed techniques can be useful in exploring and understanding multivariate data. We are convinced that the extension of large displays with AR for information visualization has a great potential for data analysis and sense-making.",
"title": "Personal Augmented Reality for Information Visualization on Large Interactive Displays",
"normalizedTitle": "Personal Augmented Reality for Information Visualization on Large Interactive Displays",
"fno": "09223669",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Augmented Reality",
"Computer Displays",
"Data Analysis",
"Data Visualisation",
"Information Visualization",
"Interactive Displays",
"Personal Head Mounted Augmented Reality",
"Data Exploration",
"Display Space",
"Effective Multiuser Support",
"Data Density",
"Extensive Design Space",
"AR Space",
"Personal Views",
"Data Analysts",
"Movie Data Set",
"Contributed Techniques",
"Multivariate Data",
"Data Analysis",
"Exemplary Techniques",
"Generic AR Visualization Framework",
"Data Visualization",
"Data Analysis",
"Augmented Reality",
"Three Dimensional Displays",
"Navigation",
"Visualization",
"Augmented Reality",
"Information Visualization",
"Info Vis",
"Large Displays",
"Immersive Analytics",
"Physical Navigation",
"Multiple Coordinated Views"
],
"authors": [
{
"givenName": "Patrick",
"surname": "Reipschlager",
"fullName": "Patrick Reipschlager",
"affiliation": "Interactive Media Lab, Technische Universitat Dresden, Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Tamara",
"surname": "Flemisch",
"fullName": "Tamara Flemisch",
"affiliation": "Interactive Media Lab, Technische Universitat Dresden, Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Raimund",
"surname": "Dachselt",
"fullName": "Raimund Dachselt",
"affiliation": "Interactive Media Lab, Technische Universitat Dresden, Germany",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": true,
"isOpenAccess": true,
"issueNum": "02",
"pubDate": "2021-02-01 00:00:00",
"pubType": "trans",
"pages": "1182-1192",
"year": "2021",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/ismar/2015/7660/0/7660a049",
"title": "The Ventriloquist Effect in Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2015/7660a049/12OmNvAiSE1",
"parentPublication": {
"id": "proceedings/ismar/2015/7660/0",
"title": "2015 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2014/6184/0/06948443",
"title": "[Poster] Towards user perspective augmented reality for public displays",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2014/06948443/12OmNviHK8p",
"parentPublication": {
"id": "proceedings/ismar/2014/6184/0",
"title": "2014 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2013/4795/0/06549390",
"title": "Multi-view augmented reality for underground exploration",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2013/06549390/12OmNx8wThm",
"parentPublication": {
"id": "proceedings/vr/2013/4795/0",
"title": "2013 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2022/9617/0/961700a703",
"title": "Exploring Pseudo-Weight in Augmented Reality Extended Displays",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2022/961700a703/1CJbGtoliuY",
"parentPublication": {
"id": "proceedings/vr/2022/9617/0",
"title": "2022 IEEE on Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a686",
"title": "Exploring Augmented Reality Notification Placement while Communicating with Virtual Avatar",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a686/1J7WgWfFoOs",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090631",
"title": "Framing the Scene: An Examination of Augmented Reality Head Worn Displays in Construction Assembly Tasks",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090631/1jIxyGx0KXK",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/csci/2019/5584/0/558400b269",
"title": "Augmented Reality for Big Data Visualization: A Review",
"doi": null,
"abstractUrl": "/proceedings-article/csci/2019/558400b269/1jdDXnKC1Vu",
"parentPublication": {
"id": "proceedings/csci/2019/5584/0",
"title": "2019 International Conference on Computational Science and Computational Intelligence (CSCI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/12/09211732",
"title": "Comparison of Augmented Reality Display Techniques to Support Medical Needle Insertion",
"doi": null,
"abstractUrl": "/journal/tg/2020/12/09211732/1nB9X7YX7eU",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2020/8508/0/850800a080",
"title": "Can Retinal Projection Displays Improve Spatial Perception in Augmented Reality?",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2020/850800a080/1pysvYTZF6w",
"parentPublication": {
"id": "proceedings/ismar/2020/8508/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2021/1298/0/129800a367",
"title": "A Classification of Augmented Reality Approaches for Spatial Data Visualization",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2021/129800a367/1yeQHlFhK0w",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2021/1298/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09222346",
"articleId": "1nTqW9mGTrG",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09229116",
"articleId": "1o3nxDL2TGU",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1qLdVrcjxwQ",
"name": "ttg202102-09223669s1-supp1-3030460.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg202102-09223669s1-supp1-3030460.mp4",
"extension": "mp4",
"size": "280 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
}
|
{
"issue": {
"id": "1zBamVZHyne",
"title": "Jan.",
"year": "2022",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": "28",
"label": "Jan.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1xic0yNxnws",
"doi": "10.1109/TVCG.2021.3114844",
"abstract": "We explore how the lens of fictional superpowers can help characterize how visualizations empower people and provide inspiration for new visualization systems. Researchers and practitioners often tout visualizations' ability to “make the invisible visible” and to “enhance cognitive abilities.” Meanwhile superhero comics and other modern fiction often depict characters with similarly fantastic abilities that allow them to see and interpret the world in ways that transcend traditional human perception. We investigate the intersection of these domains, and show how the language of superpowers can be used to characterize existing visualization systems and suggest opportunities for new and empowering ones. We introduce two frameworks: The first characterizes seven underlying mechanisms that form the basis for a variety of visual superpowers portrayed in fiction. The second identifies seven ways in which visualization tools and interfaces can instill a sense of empowerment in the people who use them. Building on these observations, we illustrate a diverse set of “visualization superpowers” and highlight opportunities for the visualization community to create new systems and interactions that empower new experiences with data Material and illustrations are available under CC-BY 4.0 at osf.io/8yhfz.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We explore how the lens of fictional superpowers can help characterize how visualizations empower people and provide inspiration for new visualization systems. Researchers and practitioners often tout visualizations' ability to “make the invisible visible” and to “enhance cognitive abilities.” Meanwhile superhero comics and other modern fiction often depict characters with similarly fantastic abilities that allow them to see and interpret the world in ways that transcend traditional human perception. We investigate the intersection of these domains, and show how the language of superpowers can be used to characterize existing visualization systems and suggest opportunities for new and empowering ones. We introduce two frameworks: The first characterizes seven underlying mechanisms that form the basis for a variety of visual superpowers portrayed in fiction. The second identifies seven ways in which visualization tools and interfaces can instill a sense of empowerment in the people who use them. Building on these observations, we illustrate a diverse set of “visualization superpowers” and highlight opportunities for the visualization community to create new systems and interactions that empower new experiences with data Material and illustrations are available under CC-BY 4.0 at osf.io/8yhfz.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We explore how the lens of fictional superpowers can help characterize how visualizations empower people and provide inspiration for new visualization systems. Researchers and practitioners often tout visualizations' ability to “make the invisible visible” and to “enhance cognitive abilities.” Meanwhile superhero comics and other modern fiction often depict characters with similarly fantastic abilities that allow them to see and interpret the world in ways that transcend traditional human perception. We investigate the intersection of these domains, and show how the language of superpowers can be used to characterize existing visualization systems and suggest opportunities for new and empowering ones. We introduce two frameworks: The first characterizes seven underlying mechanisms that form the basis for a variety of visual superpowers portrayed in fiction. The second identifies seven ways in which visualization tools and interfaces can instill a sense of empowerment in the people who use them. Building on these observations, we illustrate a diverse set of “visualization superpowers” and highlight opportunities for the visualization community to create new systems and interactions that empower new experiences with data Material and illustrations are available under CC-BY 4.0 at osf.io/8yhfz.",
"title": "Perception! Immersion! Empowerment! Superpowers as Inspiration for Visualization",
"normalizedTitle": "Perception! Immersion! Empowerment! Superpowers as Inspiration for Visualization",
"fno": "09552195",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Data Visualization",
"Visualization",
"Cognition",
"Interactive Systems",
"Tools",
"Pragmatics",
"Pattern Recognition",
"Visualization",
"Superpowers",
"Empowerment",
"Vision",
"Perception",
"Cognition",
"Fiction",
"Situated Visualization"
],
"authors": [
{
"givenName": "Wesley",
"surname": "Willett",
"fullName": "Wesley Willett",
"affiliation": "University of Calgary, United States",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Bon Adriel",
"surname": "Aseniero",
"fullName": "Bon Adriel Aseniero",
"affiliation": "Autodesk, United States",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Sheelagh",
"surname": "Carpendale",
"fullName": "Sheelagh Carpendale",
"affiliation": "Simon Fraser Univ., Canada",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Pierre",
"surname": "Dragicevic",
"fullName": "Pierre Dragicevic",
"affiliation": "Universite Paris-Saclay, CNRS. Inria. LISN, France",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Yvonne",
"surname": "Jansen",
"fullName": "Yvonne Jansen",
"affiliation": "Sorbonne Universite, CNRS, ISIR, France",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Lora",
"surname": "Oehlberg",
"fullName": "Lora Oehlberg",
"affiliation": "University of Calgary, United States",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Petra",
"surname": "Isenberg",
"fullName": "Petra Isenberg",
"affiliation": "Universite Paris-Saclay, CNRS. Inria. LISN, France",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-01-01 00:00:00",
"pubType": "trans",
"pages": "22-32",
"year": "2022",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/icds/2010/3953/0/3953a285",
"title": "Visualizing Plot in 3D",
"doi": null,
"abstractUrl": "/proceedings-article/icds/2010/3953a285/12OmNqG0SL8",
"parentPublication": {
"id": "proceedings/icds/2010/3953/0",
"title": "International Conference on the Digital Society",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2009/3733/0/3733a003",
"title": "Visual Perception of Parallel Coordinate Visualizations",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2009/3733a003/12OmNwGZNMM",
"parentPublication": {
"id": "proceedings/iv/2009/3733/0",
"title": "2009 13th International Conference Information Visualisation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/lab-rs/2008/3272/0/3272a077",
"title": "Using Cognitive Semantics to Integrate Perception and Motion in a Behavior-Based Robot",
"doi": null,
"abstractUrl": "/proceedings-article/lab-rs/2008/3272a077/12OmNx3q6WW",
"parentPublication": {
"id": "proceedings/lab-rs/2008/3272/0",
"title": "Learning and Adaptive Behaviors for Robotic Systems, ECSIS Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2004/2177/0/21770519",
"title": "Learning from Architects: The Difference between Knowledge Visualization and Information Visualization",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2004/21770519/12OmNx9nGLN",
"parentPublication": {
"id": "proceedings/iv/2004/2177/0",
"title": "Proceedings. Eighth International Conference on Information Visualisation, 2004. IV 2004.",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ozchi/1998/9206/0/92060200",
"title": "Integrating Metric Visualization into a Commercial User-Interface Builder",
"doi": null,
"abstractUrl": "/proceedings-article/ozchi/1998/92060200/12OmNyuPLbI",
"parentPublication": {
"id": "proceedings/ozchi/1998/9206/0",
"title": "Computer-Human Interaction, Australasian Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/01/09904456",
"title": "Measuring Effects of Spatial Visualization and Domain on Visualization Task Performance: A Comparative Study",
"doi": null,
"abstractUrl": "/journal/tg/2023/01/09904456/1H1gmktPnLa",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/01/08836087",
"title": "The Impact of Immersion on Cluster Identification Tasks",
"doi": null,
"abstractUrl": "/journal/tg/2020/01/08836087/1dia1nodZeM",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2020/5608/0/09089446",
"title": "Graphical Perception for Immersive Analytics",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2020/09089446/1jIxfA3tlUk",
"parentPublication": {
"id": "proceedings/vr/2020/5608/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/12/09492011",
"title": "A Survey of Perception-Based Visualization Studies by Task",
"doi": null,
"abstractUrl": "/journal/tg/2022/12/09492011/1volPuHGMdW",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/01/09572234",
"title": "Professional Differences: A Comparative Study of Visualization Task Performance and Spatial Ability Across Disciplines",
"doi": null,
"abstractUrl": "/journal/tg/2022/01/09572234/1xH5FXdMnoA",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09552903",
"articleId": "1xibWJujrkk",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09552927",
"articleId": "1xic6oeRxnO",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNrJRP28",
"title": "Nov.-Dec.",
"year": "2012",
"issueNum": "06",
"idPrefix": "cg",
"pubType": "magazine",
"volume": "32",
"label": "Nov.-Dec.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUNvya3t",
"doi": "10.1109/MCG.2012.120",
"abstract": "Visualization is often seen as a tool to support complex thinking. Although different people can have very different ways of approaching the kind of complex task that visualizations support, as researchers and designers we still rarely consider individual differences in creating and evaluating visualizations. This article reviews recent research on individual differences in visualization and human-computer interaction, showing that both cognitive abilities and personality profiles might significantly affect performance with these tools. The study of individual differences has led to the conclusion that advances in this important area in visualization will require more focused research. Specifically, we must isolate the cognitive factors that are relevant to visualization and the design factors that make one visualization more suited to a user than another. In doing so, we could increase our understanding of the visualization user and reshape how we approach design and evaluation.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Visualization is often seen as a tool to support complex thinking. Although different people can have very different ways of approaching the kind of complex task that visualizations support, as researchers and designers we still rarely consider individual differences in creating and evaluating visualizations. This article reviews recent research on individual differences in visualization and human-computer interaction, showing that both cognitive abilities and personality profiles might significantly affect performance with these tools. The study of individual differences has led to the conclusion that advances in this important area in visualization will require more focused research. Specifically, we must isolate the cognitive factors that are relevant to visualization and the design factors that make one visualization more suited to a user than another. In doing so, we could increase our understanding of the visualization user and reshape how we approach design and evaluation.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Visualization is often seen as a tool to support complex thinking. Although different people can have very different ways of approaching the kind of complex task that visualizations support, as researchers and designers we still rarely consider individual differences in creating and evaluating visualizations. This article reviews recent research on individual differences in visualization and human-computer interaction, showing that both cognitive abilities and personality profiles might significantly affect performance with these tools. The study of individual differences has led to the conclusion that advances in this important area in visualization will require more focused research. Specifically, we must isolate the cognitive factors that are relevant to visualization and the design factors that make one visualization more suited to a user than another. In doing so, we could increase our understanding of the visualization user and reshape how we approach design and evaluation.",
"title": "Understanding Visualization by Understanding Individual Users",
"normalizedTitle": "Understanding Visualization by Understanding Individual Users",
"fno": "mcg2012060088",
"hasPdf": true,
"idPrefix": "cg",
"keywords": [
"Cognition",
"Data Visualisation",
"Human Computer Interaction",
"Human Factors",
"Individual User Understanding",
"Visualisation Evaluation",
"Visualisation Creation",
"Individual Differences",
"Human Computer Interaction",
"Cognitive Abilities",
"Personality Profiles",
"Cognitive Factors",
"Design Factors",
"Visualization User",
"Visualization",
"Human Computer Interaction",
"Task Analysis",
"Complexity Theory",
"Information Visualization",
"Visual Analytics",
"Individual Differences",
"Personality",
"Cognitive Science",
"Computer Graphics"
],
"authors": [
{
"givenName": "Caroline",
"surname": "Ziemkiewicz",
"fullName": "Caroline Ziemkiewicz",
"affiliation": "Brown University",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Alvitta",
"surname": "Ottley",
"fullName": "Alvitta Ottley",
"affiliation": "Tufts University",
"__typename": "ArticleAuthorType"
},
{
"givenName": "R. Jordan",
"surname": "Crouser",
"fullName": "R. Jordan Crouser",
"affiliation": "Tufts University",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Krysta",
"surname": "Chauncey",
"fullName": "Krysta Chauncey",
"affiliation": "Tufts University",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Sara L.",
"surname": "Su",
"fullName": "Sara L. Su",
"affiliation": "Google",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Remco",
"surname": "Chang",
"fullName": "Remco Chang",
"affiliation": "Tufts University",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": true,
"isOpenAccess": true,
"issueNum": "06",
"pubDate": "2012-11-01 00:00:00",
"pubType": "mags",
"pages": "88-94",
"year": "2012",
"issn": "0272-1716",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/iv/2015/7568/0/7568a140",
"title": "Towards the Understanding of Interaction in Information Visualization",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2015/7568a140/12OmNBQ2VVu",
"parentPublication": {
"id": "proceedings/iv/2015/7568/0",
"title": "2015 19th International Conference on Information Visualisation (iV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2002/1781/0/17810215",
"title": "Augmented-Reality Visualizations Guided by Cognition:Perceptual Heuristics for Combining Visible and Obscured Information",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2002/17810215/12OmNrMZpxI",
"parentPublication": {
"id": "proceedings/ismar/2002/1781/0",
"title": "Proceedings. International Symposium on Mixed and Augmented Reality",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-vis/2005/2766/0/27660065",
"title": "Understanding Visualization through Spatial Ability Differences",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/2005/27660065/12OmNxwWoKu",
"parentPublication": {
"id": "proceedings/ieee-vis/2005/2766/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-vis/2005/2766/0/01532836",
"title": "Understanding visualization through spatial ability differences",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/2005/01532836/12OmNzl3X0g",
"parentPublication": {
"id": "proceedings/ieee-vis/2005/2766/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/beliv/2018/6884/0/08634150",
"title": "Requirements for Reproducibility of Research in Situational and Spatio-Temporal Visualization : Position Paper",
"doi": null,
"abstractUrl": "/proceedings-article/beliv/2018/08634150/17D45W9KVHt",
"parentPublication": {
"id": "proceedings/beliv/2018/6884/0",
"title": "2018 IEEE Evaluation and Beyond - Methodological Approaches for Visualization (BELIV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/beliv/2018/6884/0/08634261",
"title": "Towards Designing Unbiased Replication Studies in Information Visualization",
"doi": null,
"abstractUrl": "/proceedings-article/beliv/2018/08634261/17D45WWzW4f",
"parentPublication": {
"id": "proceedings/beliv/2018/6884/0",
"title": "2018 IEEE Evaluation and Beyond - Methodological Approaches for Visualization (BELIV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/beliv/2018/6884/0/08634267",
"title": "A Case for Cognitive Models in Visualization Research : Position paper",
"doi": null,
"abstractUrl": "/proceedings-article/beliv/2018/08634267/17D45XDIXRH",
"parentPublication": {
"id": "proceedings/beliv/2018/6884/0",
"title": "2018 IEEE Evaluation and Beyond - Methodological Approaches for Visualization (BELIV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/01/09904456",
"title": "Measuring Effects of Spatial Visualization and Domain on Visualization Task Performance: A Comparative Study",
"doi": null,
"abstractUrl": "/journal/tg/2023/01/09904456/1H1gmktPnLa",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/01/09572234",
"title": "Professional Differences: A Comparative Study of Visualization Task Performance and Spatial Ability Across Disciplines",
"doi": null,
"abstractUrl": "/journal/tg/2022/01/09572234/1xH5FXdMnoA",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/01/09555646",
"title": "Understanding Data Visualization Design Practice",
"doi": null,
"abstractUrl": "/journal/tg/2022/01/09555646/1xlw1u3Uiw8",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "mcg2012060082",
"articleId": "13rRUy0ZzV0",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "mcg2012060095",
"articleId": "13rRUxYrbOK",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNyPQ4Dx",
"title": "Dec.",
"year": "2012",
"issueNum": "12",
"idPrefix": "tg",
"pubType": "journal",
"volume": "18",
"label": "Dec.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUwjGoG1",
"doi": "10.1109/TVCG.2012.263",
"abstract": "Sports analysts live in a world of dynamic games flattened into tables of numbers, divorced from the rinks, pitches, and courts where they were generated. Currently, these professional analysts use R, Stata, SAS, and other statistical software packages for uncovering insights from game data. Quantitative sports consultants seek a competitive advantage both for their clients and for themselves as analytics becomes increasingly valued by teams, clubs, and squads. In order for the information visualization community to support the members of this blossoming industry, it must recognize where and how visualization can enhance the existing analytical workflow. In this paper, we identify three primary stages of today’s sports analyst’s routine where visualization can be beneficially integrated: 1) exploring a dataspace; 2) sharing hypotheses with internal colleagues; and 3) communicating findings to stakeholders.Working closely with professional ice hockey analysts, we designed and built SnapShot, a system to integrate visualization into the hockey intelligence gathering process. SnapShot employs a variety of information visualization techniques to display shot data, yet given the importance of a specific hockey statistic, shot length, we introduce a technique, the radial heat map. Through a user study, we received encouraging feedback from several professional analysts, both independent consultants and professional team personnel.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Sports analysts live in a world of dynamic games flattened into tables of numbers, divorced from the rinks, pitches, and courts where they were generated. Currently, these professional analysts use R, Stata, SAS, and other statistical software packages for uncovering insights from game data. Quantitative sports consultants seek a competitive advantage both for their clients and for themselves as analytics becomes increasingly valued by teams, clubs, and squads. In order for the information visualization community to support the members of this blossoming industry, it must recognize where and how visualization can enhance the existing analytical workflow. In this paper, we identify three primary stages of today’s sports analyst’s routine where visualization can be beneficially integrated: 1) exploring a dataspace; 2) sharing hypotheses with internal colleagues; and 3) communicating findings to stakeholders.Working closely with professional ice hockey analysts, we designed and built SnapShot, a system to integrate visualization into the hockey intelligence gathering process. SnapShot employs a variety of information visualization techniques to display shot data, yet given the importance of a specific hockey statistic, shot length, we introduce a technique, the radial heat map. Through a user study, we received encouraging feedback from several professional analysts, both independent consultants and professional team personnel.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Sports analysts live in a world of dynamic games flattened into tables of numbers, divorced from the rinks, pitches, and courts where they were generated. Currently, these professional analysts use R, Stata, SAS, and other statistical software packages for uncovering insights from game data. Quantitative sports consultants seek a competitive advantage both for their clients and for themselves as analytics becomes increasingly valued by teams, clubs, and squads. In order for the information visualization community to support the members of this blossoming industry, it must recognize where and how visualization can enhance the existing analytical workflow. In this paper, we identify three primary stages of today’s sports analyst’s routine where visualization can be beneficially integrated: 1) exploring a dataspace; 2) sharing hypotheses with internal colleagues; and 3) communicating findings to stakeholders.Working closely with professional ice hockey analysts, we designed and built SnapShot, a system to integrate visualization into the hockey intelligence gathering process. SnapShot employs a variety of information visualization techniques to display shot data, yet given the importance of a specific hockey statistic, shot length, we introduce a technique, the radial heat map. Through a user study, we received encouraging feedback from several professional analysts, both independent consultants and professional team personnel.",
"title": "SnapShot: Visualization to Propel Ice Hockey Analytics",
"normalizedTitle": "SnapShot: Visualization to Propel Ice Hockey Analytics",
"fno": "ttg2012122819",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Data Visualization",
"Games",
"Sports Equipment",
"Knowledge Discovery",
"Human Computer Interaction",
"Visual Knowledge Discovery",
"Visual Knowledge Representation",
"Hypothesis Testing",
"Visual Evidence"
],
"authors": [
{
"givenName": "Hannah",
"surname": "Pileggi",
"fullName": "Hannah Pileggi",
"affiliation": "Georgia Institute of Technology",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Charles D.",
"surname": "Stolper",
"fullName": "Charles D. Stolper",
"affiliation": "Georgia Institute of Technology",
"__typename": "ArticleAuthorType"
},
{
"givenName": "J. Michael",
"surname": "Boyle",
"fullName": "J. Michael Boyle",
"affiliation": "Sports Analytics Institute, Inc.",
"__typename": "ArticleAuthorType"
},
{
"givenName": "John T.",
"surname": "Stasko",
"fullName": "John T. Stasko",
"affiliation": "Georgia Institute of Technology",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "12",
"pubDate": "2012-12-01 00:00:00",
"pubType": "trans",
"pages": "2819-2828",
"year": "2012",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/cvprw/2017/0733/0/0733a147",
"title": "Classification of Puck Possession Events in Ice Hockey",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2017/0733a147/12OmNC943E6",
"parentPublication": {
"id": "proceedings/cvprw/2017/0733/0",
"title": "2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vast/2006/0591/0/04035757",
"title": "Toward a Multi-Analyst, Collaborative Framework for Visual Analytics",
"doi": null,
"abstractUrl": "/proceedings-article/vast/2006/04035757/12OmNqAU6pq",
"parentPublication": {
"id": "proceedings/vast/2006/0591/0",
"title": "2006 IEEE Symposium On Visual Analytics Science And Technology",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vast/2012/4752/0/06400507",
"title": "Agile visual analytics for banking cyber “big data”",
"doi": null,
"abstractUrl": "/proceedings-article/vast/2012/06400507/12OmNyPQ4EL",
"parentPublication": {
"id": "proceedings/vast/2012/4752/0",
"title": "2012 IEEE Conference on Visual Analytics Science and Technology (VAST 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/12/ttg2012122908",
"title": "The User Puzzle—Explaining the Interaction with Visual Analytics Systems",
"doi": null,
"abstractUrl": "/journal/tg/2012/12/ttg2012122908/13rRUIIVlcH",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/co/2013/07/mco2013070047",
"title": "Real-Time Visual Analytics for Text Streams",
"doi": null,
"abstractUrl": "/magazine/co/2013/07/mco2013070047/13rRUxAStVJ",
"parentPublication": {
"id": "mags/co",
"title": "Computer",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2015/03/06908006",
"title": "Personal Visualization and Personal Visual Analytics",
"doi": null,
"abstractUrl": "/journal/tg/2015/03/06908006/13rRUyYBlgA",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2019/1975/0/197500b636",
"title": "Keep Your Eye on the Puck: Automatic Hockey Videography",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2019/197500b636/18j8MxlOXTy",
"parentPublication": {
"id": "proceedings/wacv/2019/1975/0",
"title": "2019 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2022/8739/0/873900d450",
"title": "Ice hockey player identification via transformers and weakly supervised learning",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2022/873900d450/1G560PDMJMs",
"parentPublication": {
"id": "proceedings/cvprw/2022/8739/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/01/09555646",
"title": "Understanding Data Visualization Design Practice",
"doi": null,
"abstractUrl": "/journal/tg/2022/01/09555646/1xlw1u3Uiw8",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2021/3827/0/382700a054",
"title": "VisuaLeague: Visual Analytics of Multiple Games",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2021/382700a054/1y4oI1vKfmg",
"parentPublication": {
"id": "proceedings/iv/2021/3827/0",
"title": "2021 25th International Conference Information Visualisation (IV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2012122809",
"articleId": "13rRUwI5TXx",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2012122829",
"articleId": "13rRUypp57E",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNAle6QI",
"title": "Nov.-Dec.",
"year": "2015",
"issueNum": "06",
"idPrefix": "ic",
"pubType": "magazine",
"volume": "19",
"label": "Nov.-Dec.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUxCRFSG",
"doi": "10.1109/MIC.2015.129",
"abstract": "As new input devices and interaction techniques emerge daily, how might they improve interactive visualization?",
"abstracts": [
{
"abstractType": "Regular",
"content": "As new input devices and interaction techniques emerge daily, how might they improve interactive visualization?",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "As new input devices and interaction techniques emerge daily, how might they improve interactive visualization?",
"title": "Natural Interaction with Visualization Systems",
"normalizedTitle": "Natural Interaction with Visualization Systems",
"fno": "mic2015060060",
"hasPdf": true,
"idPrefix": "ic",
"keywords": [
"Data Visualisation",
"Human Computer Interaction",
"Interactive Systems",
"Natural Interaction",
"Visualization Systems",
"Interactive Visualization",
"Data Visualization",
"Three Dimensional Displays",
"Visualization",
"Context Modeling",
"Human Computer Interaction",
"Internet Web Technologies",
"Natural Web Interfaces",
"Natural User Interface",
"NUI",
"Human Computer Interaction",
"HCI",
"Interactive Visualization"
],
"authors": [
{
"givenName": "Robert",
"surname": "St. Amant",
"fullName": "Robert St. Amant",
"affiliation": "North Carolina State University, Raleigh, NC, USA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "06",
"pubDate": "2015-11-01 00:00:00",
"pubType": "mags",
"pages": "60-64",
"year": "2015",
"issn": "1089-7801",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/isda/2008/3382/1/3382a368",
"title": "Intelligent Focus+Context Volume Visualization",
"doi": null,
"abstractUrl": "/proceedings-article/isda/2008/3382a368/12OmNy5R3sk",
"parentPublication": {
"id": "proceedings/isda/2008/3382/1",
"title": "Intelligent Systems Design and Applications, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2012/06/mcg2012060088",
"title": "Understanding Visualization by Understanding Individual Users",
"doi": null,
"abstractUrl": "/magazine/cg/2012/06/mcg2012060088/13rRUNvya3t",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2010/02/mcg2010020008",
"title": "Integrating Visualization and Interaction Research to Improve Scientific Workflows",
"doi": null,
"abstractUrl": "/magazine/cg/2010/02/mcg2010020008/13rRUxBrGjo",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/12/06876050",
"title": "MovExp: A Versatile Visualization Tool for Human-Computer Interaction Studies with 3D Performance and Biomechanical Data",
"doi": null,
"abstractUrl": "/journal/tg/2014/12/06876050/13rRUxDqS8j",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/12/ttg2012122719",
"title": "Interaction Support for Visual Comparison Inspired by Natural Behavior",
"doi": null,
"abstractUrl": "/journal/tg/2012/12/ttg2012122719/13rRUxZRbo0",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2015/06/07018997",
"title": "Query2Question: Translating Visualization Interaction into Natural Language",
"doi": null,
"abstractUrl": "/journal/tg/2015/06/07018997/13rRUy0HYRr",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2015/03/06908006",
"title": "Personal Visualization and Personal Visual Analytics",
"doi": null,
"abstractUrl": "/journal/tg/2015/03/06908006/13rRUyYBlgA",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mlui/2019/4064/0/10075560",
"title": "Machine Learning from User Interaction for Visualization and Analytics: A Workshop-Generated Research Agenda",
"doi": null,
"abstractUrl": "/proceedings-article/mlui/2019/10075560/1LIRveJELtK",
"parentPublication": {
"id": "proceedings/mlui/2019/4064/0",
"title": "2019 IEEE Workshop on Machine Learning from User Interaction for Visualization and Analytics (MLUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/01/08805424",
"title": "What is Interaction for Data Visualization?",
"doi": null,
"abstractUrl": "/journal/tg/2020/01/08805424/1cG4MsovTO0",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090501",
"title": "Exploring a Mixed Reality Framework for the Internet-of-Things: Toward Visualization and Interaction with Hybrid Objects and Avatars",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090501/1jIxiuC3FII",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "mic2015060054",
"articleId": "13rRUx0xPB5",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "mic2015060066",
"articleId": "13rRUILLkI5",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNAPBbfM",
"title": "Oct.",
"year": "2019",
"issueNum": "10",
"idPrefix": "tg",
"pubType": "journal",
"volume": "25",
"label": "Oct.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUwjoNx8",
"doi": "10.1109/TVCG.2018.2865940",
"abstract": "Visualizing network data is applicable in domains such as biology, engineering, and social sciences. We report the results of a study comparing the effectiveness of the two primary techniques for showing network data: node-link diagrams and adjacency matrices. Specifically, an evaluation with a large number of online participants revealed statistically significant differences between the two visualizations. Our work adds to existing research in several ways. First, we explore a broad spectrum of network tasks, many of which had not been previously evaluated. Second, our study uses two large datasets, typical of many real-life networks not explored by previous studies. Third, we leverage crowdsourcing to evaluate many tasks with many participants. This paper is an expanded journal version of a Graph Drawing (GD'17) conference paper. We evaluated a second dataset, added a qualitative feedback section, and expanded the procedure, results, discussion, and limitations sections.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Visualizing network data is applicable in domains such as biology, engineering, and social sciences. We report the results of a study comparing the effectiveness of the two primary techniques for showing network data: node-link diagrams and adjacency matrices. Specifically, an evaluation with a large number of online participants revealed statistically significant differences between the two visualizations. Our work adds to existing research in several ways. First, we explore a broad spectrum of network tasks, many of which had not been previously evaluated. Second, our study uses two large datasets, typical of many real-life networks not explored by previous studies. Third, we leverage crowdsourcing to evaluate many tasks with many participants. This paper is an expanded journal version of a Graph Drawing (GD'17) conference paper. We evaluated a second dataset, added a qualitative feedback section, and expanded the procedure, results, discussion, and limitations sections.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Visualizing network data is applicable in domains such as biology, engineering, and social sciences. We report the results of a study comparing the effectiveness of the two primary techniques for showing network data: node-link diagrams and adjacency matrices. Specifically, an evaluation with a large number of online participants revealed statistically significant differences between the two visualizations. Our work adds to existing research in several ways. First, we explore a broad spectrum of network tasks, many of which had not been previously evaluated. Second, our study uses two large datasets, typical of many real-life networks not explored by previous studies. Third, we leverage crowdsourcing to evaluate many tasks with many participants. This paper is an expanded journal version of a Graph Drawing (GD'17) conference paper. We evaluated a second dataset, added a qualitative feedback section, and expanded the procedure, results, discussion, and limitations sections.",
"title": "Node-Link or Adjacency Matrices: Old Question, New Insights",
"normalizedTitle": "Node-Link or Adjacency Matrices: Old Question, New Insights",
"fno": "08438968",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Data Visualisation",
"Graph Theory",
"Matrix Algebra",
"Network Theory Graphs",
"Statistical Analysis",
"Node Link Diagrams",
"Adjacency Matrices",
"Network Data Visualization",
"Crowdsourcing",
"Qualitative Feedback Section",
"Graph Drawing",
"Task Analysis",
"Data Visualization",
"Visualization",
"Taxonomy",
"Encoding",
"Layout",
"Airports",
"Evaluation",
"User Study",
"Graphs",
"Networks",
"Node Link",
"Adjacency Matrices"
],
"authors": [
{
"givenName": "Mershack",
"surname": "Okoe",
"fullName": "Mershack Okoe",
"affiliation": "Florida International University, Miami, FL, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Radu",
"surname": "Jianu",
"fullName": "Radu Jianu",
"affiliation": "City, University of London, London, United Kingdom",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Stephen",
"surname": "Kobourov",
"fullName": "Stephen Kobourov",
"affiliation": "University of Arizona, Tucson, AZ, USA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "10",
"pubDate": "2019-10-01 00:00:00",
"pubType": "trans",
"pages": "2940-2952",
"year": "2019",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/iv/2016/8942/0/8942a159",
"title": "A Low Level Evaluation of Head-Tracker and Speech Commands Interactions in Information Visualization Tasks",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2016/8942a159/12OmNscxj3B",
"parentPublication": {
"id": "proceedings/iv/2016/8942/0",
"title": "2016 20th International Conference Information Visualisation (IV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/12/ttg2012122457",
"title": "Compressed Adjacency Matrices: Untangling Gene Regulatory Networks",
"doi": null,
"abstractUrl": "/journal/tg/2012/12/ttg2012122457/13rRUNvyakM",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2015/04/mcg2015040028",
"title": "Characterizing Visualization Insights from Quantified Selfers' Personal Data Presentations",
"doi": null,
"abstractUrl": "/magazine/cg/2015/04/mcg2015040028/13rRUxCRFQl",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/11/06812198",
"title": "Visual Adjacency Lists for Dynamic Graphs",
"doi": null,
"abstractUrl": "/journal/tg/2014/11/06812198/13rRUxcbnCs",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/12/ttg2013122476",
"title": "Evaluation of Filesystem Provenance Visualization Tools",
"doi": null,
"abstractUrl": "/journal/tg/2013/12/ttg2013122476/13rRUytWF9j",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09930144",
"title": "Out of the Plane: Flower Vs. Star Glyphs to Support High-Dimensional Exploration in Two-Dimensional Embeddings",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09930144/1HMOX2J2VMY",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a258",
"title": "Layouts of 3D Data Visualizations Small Multiples around Users in Immersive Environments",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a258/1J7WxzHZHry",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2020/06/09201309",
"title": "Data Badges: Making an Academic Profile Through a DIY Wearable Physicalization",
"doi": null,
"abstractUrl": "/magazine/cg/2020/06/09201309/1niUCwuef7y",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/12/09465643",
"title": "Designing With Pictographs: Envision Topics Without Sacrificing Understanding",
"doi": null,
"abstractUrl": "/journal/tg/2022/12/09465643/1uIReQZxty8",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/01/09552876",
"title": "Learning Objectives, Insights, and Assessments: How Specification Formats Impact Design",
"doi": null,
"abstractUrl": "/journal/tg/2022/01/09552876/1xic1wsZtLi",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "08421591",
"articleId": "13rRUEgs2Mb",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08437183",
"articleId": "13rRUIM2VBP",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1d3mHLPJVio",
"name": "ttg201910-08438968s1.zip",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg201910-08438968s1.zip",
"extension": "zip",
"size": "458 kB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNBp52xz",
"title": "Dec.",
"year": "2016",
"issueNum": "12",
"idPrefix": "tg",
"pubType": "journal",
"volume": "22",
"label": "Dec.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUxBa5s2",
"doi": "10.1109/TVCG.2015.2511718",
"abstract": "Information visualization has become a popular tool to facilitate sense-making, discovery and communication in a large range of professional and casual contexts. However, evaluating visualizations is still a challenge. In particular, we lack techniques to help understand how visualizations are experienced by people. In this paper we discuss the potential of the Elicitation Interview technique to be applied in the context of visualization. The Elicitation Interview is a method for gathering detailed and precise accounts of human experience. We argue that it can be applied to help understand how people experience and interpret visualizations as part of exploration and data analysis processes. We describe the key characteristics of this interview technique and present a study we conducted to exemplify how it can be applied to evaluate data representations. Our study illustrates the types of insights this technique can bring to the fore, for example, evidence for deep interpretation of visual representations and the formation of interpretations and stories beyond the represented data. We discuss general visualization evaluation scenarios where the Elicitation Interview technique may be beneficial and specify what needs to be considered when applying this technique in a visualization context specifically.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Information visualization has become a popular tool to facilitate sense-making, discovery and communication in a large range of professional and casual contexts. However, evaluating visualizations is still a challenge. In particular, we lack techniques to help understand how visualizations are experienced by people. In this paper we discuss the potential of the Elicitation Interview technique to be applied in the context of visualization. The Elicitation Interview is a method for gathering detailed and precise accounts of human experience. We argue that it can be applied to help understand how people experience and interpret visualizations as part of exploration and data analysis processes. We describe the key characteristics of this interview technique and present a study we conducted to exemplify how it can be applied to evaluate data representations. Our study illustrates the types of insights this technique can bring to the fore, for example, evidence for deep interpretation of visual representations and the formation of interpretations and stories beyond the represented data. We discuss general visualization evaluation scenarios where the Elicitation Interview technique may be beneficial and specify what needs to be considered when applying this technique in a visualization context specifically.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Information visualization has become a popular tool to facilitate sense-making, discovery and communication in a large range of professional and casual contexts. However, evaluating visualizations is still a challenge. In particular, we lack techniques to help understand how visualizations are experienced by people. In this paper we discuss the potential of the Elicitation Interview technique to be applied in the context of visualization. The Elicitation Interview is a method for gathering detailed and precise accounts of human experience. We argue that it can be applied to help understand how people experience and interpret visualizations as part of exploration and data analysis processes. We describe the key characteristics of this interview technique and present a study we conducted to exemplify how it can be applied to evaluate data representations. Our study illustrates the types of insights this technique can bring to the fore, for example, evidence for deep interpretation of visual representations and the formation of interpretations and stories beyond the represented data. We discuss general visualization evaluation scenarios where the Elicitation Interview technique may be beneficial and specify what needs to be considered when applying this technique in a visualization context specifically.",
"title": "The Elicitation Interview Technique: Capturing People's Experiences of Data Representations",
"normalizedTitle": "The Elicitation Interview Technique: Capturing People's Experiences of Data Representations",
"fno": "07369991",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Interviews",
"Data Visualization",
"Context",
"Usability",
"Human Computer Interaction",
"Data Analysis",
"Visualization",
"Thematic Analysis",
"Qualitative Evaluation",
"Psychophenomenology",
"Phenomenology",
"Elicitation Interview Technique"
],
"authors": [
{
"givenName": "Trevor",
"surname": "Hogan",
"fullName": "Trevor Hogan",
"affiliation": "Crawford College of Art and Design, Cork Institute of Technology, Cork, Ireland",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Uta",
"surname": "Hinrichs",
"fullName": "Uta Hinrichs",
"affiliation": "SACHI Group, School of Computer Science, University of St Andrews, Fife, United Kingdom",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Eva",
"surname": "Hornecker",
"fullName": "Eva Hornecker",
"affiliation": "Faculty of Media, Bauhaus-Universität Weimar, Weimar, Germany",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "12",
"pubDate": "2016-12-01 00:00:00",
"pubType": "trans",
"pages": "2579-2593",
"year": "2016",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/re/2017/3191/0/3191a400",
"title": "Interview Review: Detecting Latent Ambiguities to Improve the Requirements Elicitation Process",
"doi": null,
"abstractUrl": "/proceedings-article/re/2017/3191a400/12OmNAsBFH0",
"parentPublication": {
"id": "proceedings/re/2017/3191/0",
"title": "2017 IEEE 25th International Requirements Engineering Conference (RE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icsea/2010/4144/0/4144a325",
"title": "Understanding the Use of Elicitation Approaches for Effective Requirements Gathering",
"doi": null,
"abstractUrl": "/proceedings-article/icsea/2010/4144a325/12OmNwGZNU5",
"parentPublication": {
"id": "proceedings/icsea/2010/4144/0",
"title": "Software Engineering Advances, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2011/0868/0/06004069",
"title": "Knowledge Visualization in Qualitative Methods -- Or How Can I See What I Say?",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2011/06004069/12OmNxA3YVI",
"parentPublication": {
"id": "proceedings/iv/2011/0868/0",
"title": "2011 15th International Conference on Information Visualisation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/apsec/2001/1408/0/14080141",
"title": "A Model for Navigating Interview Processes in Requirements Elicitation",
"doi": null,
"abstractUrl": "/proceedings-article/apsec/2001/14080141/12OmNxuo0jS",
"parentPublication": {
"id": "proceedings/apsec/2001/1408/0",
"title": "Proceedings Eighth Asia-Pacific Software Engineering Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ts/2016/05/07307191",
"title": "Effect of Domain Knowledge on Elicitation Effectiveness: An Internally Replicated Controlled Experiment",
"doi": null,
"abstractUrl": "/journal/ts/2016/05/07307191/13rRUxNmPFK",
"parentPublication": {
"id": "trans/ts",
"title": "IEEE Transactions on Software Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/re/2018/7418/0/741800a194",
"title": "Efficiency and Effectiveness of Requirements Elicitation Techniques for Children",
"doi": null,
"abstractUrl": "/proceedings-article/re/2018/741800a194/17D45WaTkd6",
"parentPublication": {
"id": "proceedings/re/2018/7418/0",
"title": "2018 IEEE 26th International Requirements Engineering Conference (RE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fie/2020/8961/0/09274267",
"title": "Revealing teaching conceptions and methods through document elicitation of course syllabi and statements of teaching philosophy",
"doi": null,
"abstractUrl": "/proceedings-article/fie/2020/09274267/1phRDwdkQ36",
"parentPublication": {
"id": "proceedings/fie/2020/8961/0",
"title": "2020 IEEE Frontiers in Education Conference (FIE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/beliv/2020/9642/0/964200a065",
"title": "Understanding User Experience of COVID-19 Maps through Remote Elicitation Interviews",
"doi": null,
"abstractUrl": "/proceedings-article/beliv/2020/964200a065/1q0FOJPYeFG",
"parentPublication": {
"id": "proceedings/beliv/2020/9642/0",
"title": "2020 IEEE Workshop on Evaluation and Beyond - Methodological Approaches to Visualization (BELIV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/01/09552876",
"title": "Learning Objectives, Insights, and Assessments: How Specification Formats Impact Design",
"doi": null,
"abstractUrl": "/journal/tg/2022/01/09552876/1xic1wsZtLi",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/01/09552177",
"title": "Towards Understanding Sensory Substitution for Accessible Visualization: An Interview Study",
"doi": null,
"abstractUrl": "/journal/tg/2022/01/09552177/1xic9a4I0pi",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "07374748",
"articleId": "13rRUxly9dX",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07369992",
"articleId": "13rRUygT7yg",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
|
{
"issue": {
"id": "1J9y2mtpt3a",
"title": "Jan.",
"year": "2023",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": "29",
"label": "Jan.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1H3ZV2tCxTa",
"doi": "10.1109/TVCG.2022.3209500",
"abstract": "When designing communicative visualizations, we often focus on goals that seek to convey patterns, relations, or comparisons (cognitive learning objectives). We pay less attention to affective intents–those that seek to influence or leverage the audience's opinions, attitudes, or values in some way. Affective objectives may range in outcomes from making the viewer care about the subject, strengthening a stance on an opinion, or leading them to take further action. Because such goals are often considered a violation of perceived ‘neutrality’ or are ‘political,’ designers may resist or be unable to describe these intents, let alone formalize them as learning objectives. While there are notable exceptions–such as advocacy visualizations or persuasive cartography–we find that visualization designers rarely acknowledge or formalize affective objectives. Through interviews with visualization designers, we expand on prior work on using learning objectives as a framework for describing and assessing communicative intent. Specifically, we extend and revise the framework to include a set of affective learning objectives. This structured taxonomy can help designers identify and declare their goals and compare and assess designs in a more principled way. Additionally, the taxonomy can enable external critique and analysis of visualizations. We illustrate the use of the taxonomy with a critical analysis of an affective visualization.",
"abstracts": [
{
"abstractType": "Regular",
"content": "When designing communicative visualizations, we often focus on goals that seek to convey patterns, relations, or comparisons (cognitive learning objectives). We pay less attention to affective intents–those that seek to influence or leverage the audience's opinions, attitudes, or values in some way. Affective objectives may range in outcomes from making the viewer care about the subject, strengthening a stance on an opinion, or leading them to take further action. Because such goals are often considered a violation of perceived ‘neutrality’ or are ‘political,’ designers may resist or be unable to describe these intents, let alone formalize them as learning objectives. While there are notable exceptions–such as advocacy visualizations or persuasive cartography–we find that visualization designers rarely acknowledge or formalize affective objectives. Through interviews with visualization designers, we expand on prior work on using learning objectives as a framework for describing and assessing communicative intent. Specifically, we extend and revise the framework to include a set of affective learning objectives. This structured taxonomy can help designers identify and declare their goals and compare and assess designs in a more principled way. Additionally, the taxonomy can enable external critique and analysis of visualizations. We illustrate the use of the taxonomy with a critical analysis of an affective visualization.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "When designing communicative visualizations, we often focus on goals that seek to convey patterns, relations, or comparisons (cognitive learning objectives). We pay less attention to affective intents–those that seek to influence or leverage the audience's opinions, attitudes, or values in some way. Affective objectives may range in outcomes from making the viewer care about the subject, strengthening a stance on an opinion, or leading them to take further action. Because such goals are often considered a violation of perceived ‘neutrality’ or are ‘political,’ designers may resist or be unable to describe these intents, let alone formalize them as learning objectives. While there are notable exceptions–such as advocacy visualizations or persuasive cartography–we find that visualization designers rarely acknowledge or formalize affective objectives. Through interviews with visualization designers, we expand on prior work on using learning objectives as a framework for describing and assessing communicative intent. Specifically, we extend and revise the framework to include a set of affective learning objectives. This structured taxonomy can help designers identify and declare their goals and compare and assess designs in a more principled way. Additionally, the taxonomy can enable external critique and analysis of visualizations. We illustrate the use of the taxonomy with a critical analysis of an affective visualization.",
"title": "Affective Learning Objectives for Communicative Visualizations",
"normalizedTitle": "Affective Learning Objectives for Communicative Visualizations",
"fno": "09905872",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Cartography",
"Cognition",
"Computer Aided Instruction",
"Design Engineering",
"Advocacy Visualizations",
"Affective Intents Those",
"Affective Learning Objectives",
"Affective Objectives",
"Affective Visualization",
"Audience",
"Cognitive Learning Objectives",
"Communicative Intent",
"Comparisons",
"Designing Communicative Visualizations",
"External Critique",
"Visualization Designers",
"Data Visualization",
"Taxonomy",
"Appraisal",
"Interviews",
"Visualization",
"Journalism",
"Image Color Analysis",
"Affective Visualization",
"Communicative Visualization",
"Learning Objectives"
],
"authors": [
{
"givenName": "Elsie",
"surname": "Lee-Robbins",
"fullName": "Elsie Lee-Robbins",
"affiliation": "University of Michigan, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Eytan",
"surname": "Adar",
"fullName": "Eytan Adar",
"affiliation": "University of Michigan, USA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2023-01-01 00:00:00",
"pubType": "trans",
"pages": "1-11",
"year": "2023",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/fie/2009/4715/0/05350496",
"title": "Discrete Mathematics assessment using learning objectives based on Bloom's taxonomy",
"doi": null,
"abstractUrl": "/proceedings-article/fie/2009/05350496/12OmNBd9T4k",
"parentPublication": {
"id": "proceedings/fie/2009/4715/0",
"title": "2009 39th IEEE Frontiers in Education Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/acii/2013/5048/0/5048a025",
"title": "A Preliminary Investigation of the Effect of Social Media on Affective Trust in Customer-Supplier Relationships",
"doi": null,
"abstractUrl": "/proceedings-article/acii/2013/5048a025/12OmNrMZpn1",
"parentPublication": {
"id": "proceedings/acii/2013/5048/0",
"title": "2013 Humaine Association Conference on Affective Computing and Intelligent Interaction (ACII)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hicss/2011/9618/0/05718577",
"title": "Towards a Taxonomy of Requirements for Hybrid Products",
"doi": null,
"abstractUrl": "/proceedings-article/hicss/2011/05718577/12OmNy7h39O",
"parentPublication": {
"id": "proceedings/hicss/2011/9618/0",
"title": "2011 44th Hawaii International Conference on System Sciences",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv-vis/2008/3271/0/3271a018",
"title": "Mixing Emotions, How Physical Discomfort Influences the Affective Appraisal of Virtual Places",
"doi": null,
"abstractUrl": "/proceedings-article/iv-vis/2008/3271a018/12OmNzwpU2I",
"parentPublication": {
"id": "proceedings/iv-vis/2008/3271/0",
"title": "Visualisation, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2018/6100/0/610000b671",
"title": "Human Perceptions of Sensitive Content in Photos",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2018/610000b671/17D45WgziQE",
"parentPublication": {
"id": "proceedings/cvprw/2018/6100/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/02/09222102",
"title": "Communicative Visualizations as a Learning Problem",
"doi": null,
"abstractUrl": "/journal/tg/2021/02/09222102/1nTr1JohElO",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/beliv/2020/9642/0/964200a019",
"title": "How to evaluate data visualizations across different levels of understanding",
"doi": null,
"abstractUrl": "/proceedings-article/beliv/2020/964200a019/1q0FOQPpIic",
"parentPublication": {
"id": "proceedings/beliv/2020/9642/0",
"title": "2020 IEEE Workshop on Evaluation and Beyond - Methodological Approaches to Visualization (BELIV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/06/09409643",
"title": "Smile or Scowl? Looking at Infographic Design Through the Affective Lens",
"doi": null,
"abstractUrl": "/journal/tg/2021/06/09409643/1sXjFUw1Jra",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/01/09552240",
"title": "Kineticharts: Augmenting Affective Expressiveness of Charts in Data Stories with Animation Design",
"doi": null,
"abstractUrl": "/journal/tg/2022/01/09552240/1xic12y0QJG",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/01/09552876",
"title": "Learning Objectives, Insights, and Assessments: How Specification Formats Impact Design",
"doi": null,
"abstractUrl": "/journal/tg/2022/01/09552876/1xic1wsZtLi",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09991008",
"articleId": "1J9yEmQszII",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09904455",
"articleId": "1H1gjlaBqVO",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1J9yvdIchWM",
"name": "ttg202301-09905872s1-supp1-3209500.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg202301-09905872s1-supp1-3209500.pdf",
"extension": "pdf",
"size": "4.51 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
}
|
{
"issue": {
"id": "1qL5hsvvVkc",
"title": "Feb.",
"year": "2021",
"issueNum": "02",
"idPrefix": "tg",
"pubType": "journal",
"volume": "27",
"label": "Feb.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1nTr1JohElO",
"doi": "10.1109/TVCG.2020.3030375",
"abstract": "Significant research has provided robust task and evaluation languages for the analysis of exploratory visualizations. Unfortunately, these taxonomies fail when applied to communicative visualizations. Instead, designers often resort to evaluating communicative visualizations from the cognitive efficiency perspective: “can the recipient accurately decode my message/insight?” However, designers are unlikely to be satisfied if the message went `in one ear and out the other.' The consequence of this inconsistency is that it is difficult to design or select between competing options in a principled way. The problem we address is the fundamental mismatch between how designers want to describe their intent, and the language they have. We argue that visualization designers can address this limitation through a learning lens: that the recipient is a student and the designer a teacher. By using learning objectives, designers can better define, assess, and compare communicative visualizations. We illustrate how the learning-based approach provides a framework for understanding a wide array of communicative goals. To understand how the framework can be applied (and its limitations), we surveyed and interviewed members of the Data Visualization Society using their own visualizations as a probe. Through this study we identified the broad range of objectives in communicative visualizations and the prevalence of certain objective types.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Significant research has provided robust task and evaluation languages for the analysis of exploratory visualizations. Unfortunately, these taxonomies fail when applied to communicative visualizations. Instead, designers often resort to evaluating communicative visualizations from the cognitive efficiency perspective: “can the recipient accurately decode my message/insight?” However, designers are unlikely to be satisfied if the message went `in one ear and out the other.' The consequence of this inconsistency is that it is difficult to design or select between competing options in a principled way. The problem we address is the fundamental mismatch between how designers want to describe their intent, and the language they have. We argue that visualization designers can address this limitation through a learning lens: that the recipient is a student and the designer a teacher. By using learning objectives, designers can better define, assess, and compare communicative visualizations. We illustrate how the learning-based approach provides a framework for understanding a wide array of communicative goals. To understand how the framework can be applied (and its limitations), we surveyed and interviewed members of the Data Visualization Society using their own visualizations as a probe. Through this study we identified the broad range of objectives in communicative visualizations and the prevalence of certain objective types.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Significant research has provided robust task and evaluation languages for the analysis of exploratory visualizations. Unfortunately, these taxonomies fail when applied to communicative visualizations. Instead, designers often resort to evaluating communicative visualizations from the cognitive efficiency perspective: “can the recipient accurately decode my message/insight?” However, designers are unlikely to be satisfied if the message went `in one ear and out the other.' The consequence of this inconsistency is that it is difficult to design or select between competing options in a principled way. The problem we address is the fundamental mismatch between how designers want to describe their intent, and the language they have. We argue that visualization designers can address this limitation through a learning lens: that the recipient is a student and the designer a teacher. By using learning objectives, designers can better define, assess, and compare communicative visualizations. We illustrate how the learning-based approach provides a framework for understanding a wide array of communicative goals. To understand how the framework can be applied (and its limitations), we surveyed and interviewed members of the Data Visualization Society using their own visualizations as a probe. Through this study we identified the broad range of objectives in communicative visualizations and the prevalence of certain objective types.",
"title": "Communicative Visualizations as a Learning Problem",
"normalizedTitle": "Communicative Visualizations as a Learning Problem",
"fno": "09222102",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Data Visualisation",
"Learning Artificial Intelligence",
"Learning Based Approach",
"Learning Objectives",
"Data Visualization Society",
"Exploratory Visualizations",
"Communicative Visualizations",
"Data Visualization",
"Taxonomy",
"Task Analysis",
"Visualization",
"Elbow",
"Tools",
"Correlation",
"Learning Objectives",
"Communicative Visualization",
"Visualization Design"
],
"authors": [
{
"givenName": "Eytan",
"surname": "Adar",
"fullName": "Eytan Adar",
"affiliation": "University of Michigan",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Elsie",
"surname": "Lee",
"fullName": "Elsie Lee",
"affiliation": "University of Michigan",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "02",
"pubDate": "2021-02-01 00:00:00",
"pubType": "trans",
"pages": "946-956",
"year": "2021",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/cvpr/2014/5118/0/5118a216",
"title": "Visual Persuasion: Inferring Communicative Intents of Images",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2014/5118a216/12OmNAle6nQ",
"parentPublication": {
"id": "proceedings/cvpr/2014/5118/0",
"title": "2014 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/criwg/2000/0828/0/08280154",
"title": "Interaction in Networked Virtual Environments as Communicative Action: Social Theory and Multi-Player Games",
"doi": null,
"abstractUrl": "/proceedings-article/criwg/2000/08280154/12OmNC2xhFT",
"parentPublication": {
"id": "proceedings/criwg/2000/0828/0",
"title": "Groupware, International Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/12/06888482",
"title": "Activity Sculptures: Exploring the Impact of Physical Visualizations on Running Activity",
"doi": null,
"abstractUrl": "/journal/tg/2014/12/06888482/13rRUxAASTd",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/01/08017602",
"title": "Scatterplots: Tasks, Data, and Designs",
"doi": null,
"abstractUrl": "/journal/tg/2018/01/08017602/13rRUy3gn7C",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/01/08440818",
"title": "Looks Good To Me: Visualizations As Sanity Checks",
"doi": null,
"abstractUrl": "/journal/tg/2019/01/08440818/17D45W2WyxG",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/01/09905872",
"title": "Affective Learning Objectives for Communicative Visualizations",
"doi": null,
"abstractUrl": "/journal/tg/2023/01/09905872/1H3ZV2tCxTa",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09916137",
"title": "Revisiting the Design Patterns of Composite Visualizations",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09916137/1HojAjSAGNq",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/02/09222335",
"title": "PlotThread: Creating Expressive Storyline Visualizations using Reinforcement Learning",
"doi": null,
"abstractUrl": "/journal/tg/2021/02/09222335/1nTqzC75UfS",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/beliv/2020/9642/0/964200a029",
"title": "Using Close Reading as a Method for Evaluating Visualizations",
"doi": null,
"abstractUrl": "/proceedings-article/beliv/2020/964200a029/1q0FO3J5Ogg",
"parentPublication": {
"id": "proceedings/beliv/2020/9642/0",
"title": "2020 IEEE Workshop on Evaluation and Beyond - Methodological Approaches to Visualization (BELIV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/01/09552876",
"title": "Learning Objectives, Insights, and Assessments: How Specification Formats Impact Design",
"doi": null,
"abstractUrl": "/journal/tg/2022/01/09552876/1xic1wsZtLi",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09241413",
"articleId": "1ogG3C3nsNW",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09229515",
"articleId": "1o3nxS8lm7u",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1qLfeUovYDm",
"name": "ttg202102-09222102s1-supp1-3030375.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg202102-09222102s1-supp1-3030375.pdf",
"extension": "pdf",
"size": "73.7 kB",
"__typename": "WebExtraType"
},
{
"id": "1qLfePvmnIs",
"name": "ttg202102-09222102s1-supp2-3030375.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg202102-09222102s1-supp2-3030375.pdf",
"extension": "pdf",
"size": "37.3 kB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
}
|
{
"issue": {
"id": "1HMOit1lSk8",
"title": "Dec.",
"year": "2022",
"issueNum": "12",
"idPrefix": "tg",
"pubType": "journal",
"volume": "28",
"label": "Dec.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1taANyFFcmQ",
"doi": "10.1109/TVCG.2021.3076222",
"abstract": "In multiple coordinated views (MCVs), visualizations across views update their content in response to users’ interactions in other views. Interactive systems provide direct manipulation to create coordination between views, but are restricted to limited types of predefined templates. By contrast, textual specification languages enable flexible coordination but expose technical burden. To bridge the gap, we contribute Nebula, a grammar based on natural language for coordinating visualizations in MCVs. The grammar design is informed by a novel framework based on a systematic review of 176 coordinations from existing theories and applications, which describes coordination by demonstration, i.e., how coordination is performed by users. With the framework, Nebula specification formalizes coordination as a composition of user- and coordination-triggered interactions in origin and destination views, respectively, along with potential data transformation between the interactions. We evaluate Nebula by demonstrating its expressiveness with a gallery of diverse examples and analyzing its usability on cognitive dimensions.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In multiple coordinated views (MCVs), visualizations across views update their content in response to users’ interactions in other views. Interactive systems provide direct manipulation to create coordination between views, but are restricted to limited types of predefined templates. By contrast, textual specification languages enable flexible coordination but expose technical burden. To bridge the gap, we contribute Nebula, a grammar based on natural language for coordinating visualizations in MCVs. The grammar design is informed by a novel framework based on a systematic review of 176 coordinations from existing theories and applications, which describes coordination by demonstration, i.e., how coordination is performed by users. With the framework, Nebula specification formalizes coordination as a composition of user- and coordination-triggered interactions in origin and destination views, respectively, along with potential data transformation between the interactions. We evaluate Nebula by demonstrating its expressiveness with a gallery of diverse examples and analyzing its usability on cognitive dimensions.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In multiple coordinated views (MCVs), visualizations across views update their content in response to users’ interactions in other views. Interactive systems provide direct manipulation to create coordination between views, but are restricted to limited types of predefined templates. By contrast, textual specification languages enable flexible coordination but expose technical burden. To bridge the gap, we contribute Nebula, a grammar based on natural language for coordinating visualizations in MCVs. The grammar design is informed by a novel framework based on a systematic review of 176 coordinations from existing theories and applications, which describes coordination by demonstration, i.e., how coordination is performed by users. With the framework, Nebula specification formalizes coordination as a composition of user- and coordination-triggered interactions in origin and destination views, respectively, along with potential data transformation between the interactions. We evaluate Nebula by demonstrating its expressiveness with a gallery of diverse examples and analyzing its usability on cognitive dimensions.",
"title": "Nebula: A Coordinating Grammar of Graphics",
"normalizedTitle": "Nebula: A Coordinating Grammar of Graphics",
"fno": "09417674",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Data Visualisation",
"Formal Specification",
"Grammars",
"Interactive Systems",
"Specification Languages",
"Coordinating Grammar",
"Coordination Triggered Interactions",
"Destination Views",
"Direct Manipulation",
"Flexible Coordination",
"Grammar Design",
"Interactive Systems",
"MC Vs",
"Multiple Coordinated Views",
"Natural Language",
"Nebula Specification Formalizes Coordination",
"Predefined Templates",
"Technical Burden",
"Textual Specification Languages",
"Users",
"Data Visualization",
"Grammar",
"Visualization",
"Usability",
"Data Models",
"Natural Languages",
"Coordination",
"Multiple Coordinated Views",
"Interactive Visualization",
"Grammar Of Graphics"
],
"authors": [
{
"givenName": "Ran",
"surname": "Chen",
"fullName": "Ran Chen",
"affiliation": "State Key Lab of CAD&CG, Zhejiang University, Hangzhou, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Xinhuan",
"surname": "Shu",
"fullName": "Xinhuan Shu",
"affiliation": "Hong Kong University of Science and Technology, Hong Kong",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jiahui",
"surname": "Chen",
"fullName": "Jiahui Chen",
"affiliation": "State Key Lab of CAD&CG, Zhejiang University, Hangzhou, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Di",
"surname": "Weng",
"fullName": "Di Weng",
"affiliation": "State Key Lab of CAD&CG, Zhejiang University, Hangzhou, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Junxiu",
"surname": "Tang",
"fullName": "Junxiu Tang",
"affiliation": "State Key Lab of CAD&CG, Zhejiang University, Hangzhou, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Siwei",
"surname": "Fu",
"fullName": "Siwei Fu",
"affiliation": "Zhejiang Lab, Hangzhou, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Yingcai",
"surname": "Wu",
"fullName": "Yingcai Wu",
"affiliation": "State Key Lab of CAD&CG, Zhejiang University, Hangzhou, China",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "12",
"pubDate": "2022-12-01 00:00:00",
"pubType": "trans",
"pages": "4127-4140",
"year": "2022",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/iciev/2016/1269/0/07760001",
"title": "Bangla grammar pattern recognition using shift reduce parser",
"doi": null,
"abstractUrl": "/proceedings-article/iciev/2016/07760001/12OmNAlNiKx",
"parentPublication": {
"id": "proceedings/iciev/2016/1269/0",
"title": "2016 International Conference on Informatics, Electronics and Vision (ICIEV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icccnt/2014/2696/0/06963105",
"title": "Transformation between BPMN and BPEL based on graph grammar",
"doi": null,
"abstractUrl": "/proceedings-article/icccnt/2014/06963105/12OmNy314fJ",
"parentPublication": {
"id": "proceedings/icccnt/2014/2696/0",
"title": "2014 5th International Conference on Computing, Communication and Networking Technologies (ICCCNT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ialp/2011/4554/0/4554a015",
"title": "Natural Language Grammar Induction of Indonesian Language Corpora Using Genetic Algorithm",
"doi": null,
"abstractUrl": "/proceedings-article/ialp/2011/4554a015/12OmNyprnt4",
"parentPublication": {
"id": "proceedings/ialp/2011/4554/0",
"title": "Asian Language Processing, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/01/07539624",
"title": "Vega-Lite: A Grammar of Interactive Graphics",
"doi": null,
"abstractUrl": "/journal/tg/2017/01/07539624/13rRUIJuxvn",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2009/06/ttg2009061065",
"title": "Lark: Coordinating Co-located Collaboration with Information Visualization",
"doi": null,
"abstractUrl": "/journal/tg/2009/06/ttg2009061065/13rRUynHuj4",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/12/08233127",
"title": "Atom: A Grammar for Unit Visualizations",
"doi": null,
"abstractUrl": "/journal/tg/2018/12/08233127/14H4WLzSYsE",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/01/08440063",
"title": "A Declarative Grammar of Flexible Volume Visualization Pipelines",
"doi": null,
"abstractUrl": "/journal/tg/2019/01/08440063/17D45XacGi1",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pacificvis/2019/9226/0/922600a001",
"title": "CorFish: Coordinating Emphasis Across Multiple Views Using Spatial Distortion",
"doi": null,
"abstractUrl": "/proceedings-article/pacificvis/2019/922600a001/1cMF6xDCFhu",
"parentPublication": {
"id": "proceedings/pacificvis/2019/9226/0",
"title": "2019 IEEE Pacific Visualization Symposium (PacificVis)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/02/09234027",
"title": "Gemini: A Grammar and Recommender System for Animated Transitions in Statistical Graphics",
"doi": null,
"abstractUrl": "/journal/tg/2021/02/09234027/1o531wbxsSk",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vis/2020/8014/0/801400a131",
"title": "Encodable: Configurable Grammar for Visualization Components",
"doi": null,
"abstractUrl": "/proceedings-article/vis/2020/801400a131/1qRNXTuFymI",
"parentPublication": {
"id": "proceedings/vis/2020/8014/0",
"title": "2020 IEEE Visualization Conference (VIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09416829",
"articleId": "1t8VUXSYL2E",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09420254",
"articleId": "1tdUMGe1DAk",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1HMOuE9Kfpm",
"name": "ttg202212-09417674s1-supp1-3076222.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg202212-09417674s1-supp1-3076222.mp4",
"extension": "mp4",
"size": "10.1 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNvGPE8n",
"title": "Jan.",
"year": "2016",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": "22",
"label": "Jan.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUEgarnM",
"doi": "10.1109/TVCG.2015.2467611",
"abstract": "Sensemaking is described as the process of comprehension, finding meaning and gaining insight from information, producing new knowledge and informing further action. Understanding the sensemaking process allows building effective visual analytics tools to make sense of large and complex datasets. Currently, it is often a manual and time-consuming undertaking to comprehend this: researchers collect observation data, transcribe screen capture videos and think-aloud recordings, identify recurring patterns, and eventually abstract the sensemaking process into a general model. In this paper, we propose a general approach to facilitate such a qualitative analysis process, and introduce a prototype, SensePath, to demonstrate the application of this approach with a focus on browser-based online sensemaking. The approach is based on a study of a number of qualitative research sessions including observations of users performing sensemaking tasks and post hoc analyses to uncover their sensemaking processes. Based on the study results and a follow-up participatory design session with HCI researchers, we decided to focus on the transcription and coding stages of thematic analysis. SensePath automatically captures user's sensemaking actions, i.e., analytic provenance, and provides multi-linked views to support their further analysis. A number of other requirements elicited from the design session are also implemented in SensePath, such as easy integration with existing qualitative analysis workflow and non-intrusive for participants. The tool was used by an experienced HCI researcher to analyze two sensemaking sessions. The researcher found the tool intuitive and considerably reduced analysis time, allowing better understanding of the sensemaking process.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Sensemaking is described as the process of comprehension, finding meaning and gaining insight from information, producing new knowledge and informing further action. Understanding the sensemaking process allows building effective visual analytics tools to make sense of large and complex datasets. Currently, it is often a manual and time-consuming undertaking to comprehend this: researchers collect observation data, transcribe screen capture videos and think-aloud recordings, identify recurring patterns, and eventually abstract the sensemaking process into a general model. In this paper, we propose a general approach to facilitate such a qualitative analysis process, and introduce a prototype, SensePath, to demonstrate the application of this approach with a focus on browser-based online sensemaking. The approach is based on a study of a number of qualitative research sessions including observations of users performing sensemaking tasks and post hoc analyses to uncover their sensemaking processes. Based on the study results and a follow-up participatory design session with HCI researchers, we decided to focus on the transcription and coding stages of thematic analysis. SensePath automatically captures user's sensemaking actions, i.e., analytic provenance, and provides multi-linked views to support their further analysis. A number of other requirements elicited from the design session are also implemented in SensePath, such as easy integration with existing qualitative analysis workflow and non-intrusive for participants. The tool was used by an experienced HCI researcher to analyze two sensemaking sessions. The researcher found the tool intuitive and considerably reduced analysis time, allowing better understanding of the sensemaking process.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Sensemaking is described as the process of comprehension, finding meaning and gaining insight from information, producing new knowledge and informing further action. Understanding the sensemaking process allows building effective visual analytics tools to make sense of large and complex datasets. Currently, it is often a manual and time-consuming undertaking to comprehend this: researchers collect observation data, transcribe screen capture videos and think-aloud recordings, identify recurring patterns, and eventually abstract the sensemaking process into a general model. In this paper, we propose a general approach to facilitate such a qualitative analysis process, and introduce a prototype, SensePath, to demonstrate the application of this approach with a focus on browser-based online sensemaking. The approach is based on a study of a number of qualitative research sessions including observations of users performing sensemaking tasks and post hoc analyses to uncover their sensemaking processes. Based on the study results and a follow-up participatory design session with HCI researchers, we decided to focus on the transcription and coding stages of thematic analysis. SensePath automatically captures user's sensemaking actions, i.e., analytic provenance, and provides multi-linked views to support their further analysis. A number of other requirements elicited from the design session are also implemented in SensePath, such as easy integration with existing qualitative analysis workflow and non-intrusive for participants. The tool was used by an experienced HCI researcher to analyze two sensemaking sessions. The researcher found the tool intuitive and considerably reduced analysis time, allowing better understanding of the sensemaking process.",
"title": "SensePath: Understanding the Sensemaking Process Through Analytic Provenance",
"normalizedTitle": "SensePath: Understanding the Sensemaking Process Through Analytic Provenance",
"fno": "07194834",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Human Computer Interaction",
"Visual Analytics",
"Context",
"Encoding",
"Manuals",
"Web Pages",
"Timeline Visualization",
"Sensemaking",
"Analytic Provenance",
"Transcription",
"Coding",
"Qualitative Research",
"Timeline Visualization",
"Sensemaking",
"Analytic Provenance",
"Transcription",
"Coding",
"Qualitative Research"
],
"authors": [
{
"givenName": "Phong H.",
"surname": "Nguyen",
"fullName": "Phong H. Nguyen",
"affiliation": ", Middlesex University",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Kai",
"surname": "Xu",
"fullName": "Kai Xu",
"affiliation": ", Middlesex University",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Ashley",
"surname": "Wheat",
"fullName": "Ashley Wheat",
"affiliation": ", Middlesex University",
"__typename": "ArticleAuthorType"
},
{
"givenName": "B.L. William",
"surname": "Wong",
"fullName": "B.L. William Wong",
"affiliation": ", Middlesex University",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Simon",
"surname": "Attfield",
"fullName": "Simon Attfield",
"affiliation": ", Middlesex University",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Bob",
"surname": "Fields",
"fullName": "Bob Fields",
"affiliation": ", Middlesex University",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2016-01-01 00:00:00",
"pubType": "trans",
"pages": "41-50",
"year": "2016",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/vast/2012/4752/0/06400558",
"title": "SocialNetSense: Supporting sensemaking of social and structural features in networks with interactive visualization",
"doi": null,
"abstractUrl": "/proceedings-article/vast/2012/06400558/12OmNxdm4ya",
"parentPublication": {
"id": "proceedings/vast/2012/4752/0",
"title": "2012 IEEE Conference on Visual Analytics Science and Technology (VAST 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vast/2016/5661/0/07883515",
"title": "SenseMap: Supporting browser-based online sensemaking through analytic provenance",
"doi": null,
"abstractUrl": "/proceedings-article/vast/2016/07883515/12OmNz2kqj9",
"parentPublication": {
"id": "proceedings/vast/2016/5661/0",
"title": "2016 IEEE Conference on Visual Analytics Science and Technology (VAST)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/eisic/2015/8657/0/8657a177",
"title": "Guidelines for Sensemaking in Intelligence Analysis",
"doi": null,
"abstractUrl": "/proceedings-article/eisic/2015/8657a177/12OmNzTYBR1",
"parentPublication": {
"id": "proceedings/eisic/2015/8657/0",
"title": "2015 European Intelligence and Security Informatics Conference (EISIC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2015/03/mcg2015030056",
"title": "Analytic Provenance for Sensemaking: A Research Agenda",
"doi": null,
"abstractUrl": "/magazine/cg/2015/03/mcg2015030056/13rRUB7a13F",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/12/ttg2013122207",
"title": "The Impact of Physical Navigation on Spatial Organization for Sensemaking",
"doi": null,
"abstractUrl": "/journal/tg/2013/12/ttg2013122207/13rRUwI5TQZ",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/12/ttg2012122879",
"title": "Semantic Interaction for Sensemaking: Inferring Analytical Reasoning for Model Steering",
"doi": null,
"abstractUrl": "/journal/tg/2012/12/ttg2012122879/13rRUwdIOUL",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vast/2017/3163/0/08585484",
"title": "CRICTO: Supporting Sensemaking through Crowdsourced Information Schematization",
"doi": null,
"abstractUrl": "/proceedings-article/vast/2017/08585484/17D45Wc1ILV",
"parentPublication": {
"id": "proceedings/vast/2017/3163/0",
"title": "2017 IEEE Conference on Visual Analytics Science and Technology (VAST)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09894094",
"title": "Exploring the Evolution of Sensemaking Strategies in Immersive Space to Think",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09894094/1GIqpC6j7na",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2019/06/08889811",
"title": "Provenance Analysis for Sensemaking",
"doi": null,
"abstractUrl": "/magazine/cg/2019/06/08889811/1eBul1FAEIE",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vis/2021/3335/0/333500a181",
"title": "Narrative Sensemaking: Strategies for Narrative Maps Construction",
"doi": null,
"abstractUrl": "/proceedings-article/vis/2021/333500a181/1yXuj3PJXRm",
"parentPublication": {
"id": "proceedings/vis/2021/3335/0",
"title": "2021 IEEE Visualization Conference (VIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "07192714",
"articleId": "13rRUxOdD2F",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07192662",
"articleId": "13rRUyuegha",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "17ShDTXFgGM",
"name": "ttg201601-07194834s1.zip",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg201601-07194834s1.zip",
"extension": "zip",
"size": "31 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNyPQ4Dx",
"title": "Dec.",
"year": "2012",
"issueNum": "12",
"idPrefix": "tg",
"pubType": "journal",
"volume": "18",
"label": "Dec.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUIIVlcH",
"doi": "10.1109/TVCG.2012.273",
"abstract": "Visual analytics emphasizes the interplay between visualization, analytical procedures performed by computers and human perceptual and cognitive activities. Human reasoning is an important element in this context. There are several theories in psychology and HCI explaining open-ended and exploratory reasoning. Five of these theories (sensemaking theories, gestalt theories, distributed cognition, graph comprehension theories and skill-rule-knowledge models) are described in this paper. We discuss their relevance for visual analytics. In order to do this more systematically, we developed a schema of categories relevant for visual analytics research and evaluation. All these theories have strengths but also weaknesses in explaining interaction with visual analytics systems. A possibility to overcome the weaknesses would be to combine two or more of these theories.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Visual analytics emphasizes the interplay between visualization, analytical procedures performed by computers and human perceptual and cognitive activities. Human reasoning is an important element in this context. There are several theories in psychology and HCI explaining open-ended and exploratory reasoning. Five of these theories (sensemaking theories, gestalt theories, distributed cognition, graph comprehension theories and skill-rule-knowledge models) are described in this paper. We discuss their relevance for visual analytics. In order to do this more systematically, we developed a schema of categories relevant for visual analytics research and evaluation. All these theories have strengths but also weaknesses in explaining interaction with visual analytics systems. A possibility to overcome the weaknesses would be to combine two or more of these theories.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Visual analytics emphasizes the interplay between visualization, analytical procedures performed by computers and human perceptual and cognitive activities. Human reasoning is an important element in this context. There are several theories in psychology and HCI explaining open-ended and exploratory reasoning. Five of these theories (sensemaking theories, gestalt theories, distributed cognition, graph comprehension theories and skill-rule-knowledge models) are described in this paper. We discuss their relevance for visual analytics. In order to do this more systematically, we developed a schema of categories relevant for visual analytics research and evaluation. All these theories have strengths but also weaknesses in explaining interaction with visual analytics systems. A possibility to overcome the weaknesses would be to combine two or more of these theories.",
"title": "The User Puzzle—Explaining the Interaction with Visual Analytics Systems",
"normalizedTitle": "The User Puzzle—Explaining the Interaction with Visual Analytics Systems",
"fno": "ttg2012122908",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Cognition",
"Human Factors",
"Visual Analytics",
"Psychology",
"Problem Solving",
"Cognitive Theory",
"Visual Knowledge Discovery",
"Interaction Design",
"Reasoning"
],
"authors": [
{
"givenName": "Margit",
"surname": "Pohl",
"fullName": "Margit Pohl",
"affiliation": "Vienna University of Technology",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Michael",
"surname": "Smuc",
"fullName": "Michael Smuc",
"affiliation": "Danube University Krems",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Eva",
"surname": "Mayr",
"fullName": "Eva Mayr",
"affiliation": "Danube University Krems",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "12",
"pubDate": "2012-12-01 00:00:00",
"pubType": "trans",
"pages": "2908-2916",
"year": "2012",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/hicss/2013/4892/0/4892b495",
"title": "A Role for Reasoning in Visual Analytics",
"doi": null,
"abstractUrl": "/proceedings-article/hicss/2013/4892b495/12OmNqJ8tq4",
"parentPublication": {
"id": "proceedings/hicss/2013/4892/0",
"title": "2013 46th Hawaii International Conference on System Sciences",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hicss/2011/9618/0/05718615",
"title": "Expanding the Scope: Interaction Design Perspectives for Visual Analytics",
"doi": null,
"abstractUrl": "/proceedings-article/hicss/2011/05718615/12OmNxGSm6x",
"parentPublication": {
"id": "proceedings/hicss/2011/9618/0",
"title": "2011 44th Hawaii International Conference on System Sciences",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2009/03/mcg2009030052",
"title": "Recovering Reasoning Processes from User Interactions",
"doi": null,
"abstractUrl": "/magazine/cg/2009/03/mcg2009030052/13rRUNvyans",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/05/ttg2012050689",
"title": "Co-Located Collaborative Visual Analytics around a Tabletop Display",
"doi": null,
"abstractUrl": "/journal/tg/2012/05/ttg2012050689/13rRUwkxc5p",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2015/04/mcg2015040094",
"title": "Semantic Interaction: Coupling Cognition and Computation through Usable Interactive Analytics",
"doi": null,
"abstractUrl": "/magazine/cg/2015/04/mcg2015040094/13rRUxOveck",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/co/2013/07/mco2013070020",
"title": "Visual Analytics: Seeking the Unknown",
"doi": null,
"abstractUrl": "/magazine/co/2013/07/mco2013070020/13rRUy0HYNj",
"parentPublication": {
"id": "mags/co",
"title": "Computer",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/12/06876049",
"title": "Progressive Visual Analytics: User-Driven Visual Exploration of In-Progress Analytics",
"doi": null,
"abstractUrl": "/journal/tg/2014/12/06876049/13rRUyogGAd",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/01/08812988",
"title": "Explaining Vulnerabilities to Adversarial Machine Learning through Visual Analytics",
"doi": null,
"abstractUrl": "/journal/tg/2020/01/08812988/1cOhCfAgaZO",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/02/09219240",
"title": "A Visual Analytics Framework for Explaining and Diagnosing Transfer Learning Processes",
"doi": null,
"abstractUrl": "/journal/tg/2021/02/09219240/1nMMmribStW",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cs/2021/03/09459443",
"title": "Visual Analytics Review: An Early and Continuing Success of Convergent Research With Impact",
"doi": null,
"abstractUrl": "/magazine/cs/2021/03/09459443/1uvzWEEbavK",
"parentPublication": {
"id": "mags/cs",
"title": "Computing in Science & Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2012122899",
"articleId": "13rRUxDqS8g",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2012122917",
"articleId": "13rRUxASuAv",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNwFid7k",
"title": "May",
"year": "2011",
"issueNum": "05",
"idPrefix": "tg",
"pubType": "journal",
"volume": "17",
"label": "May",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUILLkvl",
"doi": "10.1109/TVCG.2010.84",
"abstract": "Despite the growing number of systems providing visual analytic support for investigative analysis, few empirical studies of the potential benefits of such systems have been conducted, particularly controlled, comparative evaluations. Determining how such systems foster insight and sensemaking is important for their continued growth and study, however. Furthermore, studies that identify how people use such systems and why they benefit (or not) can help inform the design of new systems in this area. We conducted an evaluation of the visual analytics system Jigsaw employed in a small investigative sensemaking exercise, and compared its use to three other more traditional methods of analysis. Sixteen participants performed a simulated intelligence analysis task under one of the four conditions. Experimental results suggest that Jigsaw assisted participants to analyze the data and identify an embedded threat. We describe different analysis strategies used by study participants and how computational support (or the lack thereof) influenced the strategies. We then illustrate several characteristics of the sensemaking process identified in the study and provide design implications for investigative analysis tools based thereon. We conclude with recommendations on metrics and techniques for evaluating visual analytics systems for investigative analysis.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Despite the growing number of systems providing visual analytic support for investigative analysis, few empirical studies of the potential benefits of such systems have been conducted, particularly controlled, comparative evaluations. Determining how such systems foster insight and sensemaking is important for their continued growth and study, however. Furthermore, studies that identify how people use such systems and why they benefit (or not) can help inform the design of new systems in this area. We conducted an evaluation of the visual analytics system Jigsaw employed in a small investigative sensemaking exercise, and compared its use to three other more traditional methods of analysis. Sixteen participants performed a simulated intelligence analysis task under one of the four conditions. Experimental results suggest that Jigsaw assisted participants to analyze the data and identify an embedded threat. We describe different analysis strategies used by study participants and how computational support (or the lack thereof) influenced the strategies. We then illustrate several characteristics of the sensemaking process identified in the study and provide design implications for investigative analysis tools based thereon. We conclude with recommendations on metrics and techniques for evaluating visual analytics systems for investigative analysis.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Despite the growing number of systems providing visual analytic support for investigative analysis, few empirical studies of the potential benefits of such systems have been conducted, particularly controlled, comparative evaluations. Determining how such systems foster insight and sensemaking is important for their continued growth and study, however. Furthermore, studies that identify how people use such systems and why they benefit (or not) can help inform the design of new systems in this area. We conducted an evaluation of the visual analytics system Jigsaw employed in a small investigative sensemaking exercise, and compared its use to three other more traditional methods of analysis. Sixteen participants performed a simulated intelligence analysis task under one of the four conditions. Experimental results suggest that Jigsaw assisted participants to analyze the data and identify an embedded threat. We describe different analysis strategies used by study participants and how computational support (or the lack thereof) influenced the strategies. We then illustrate several characteristics of the sensemaking process identified in the study and provide design implications for investigative analysis tools based thereon. We conclude with recommendations on metrics and techniques for evaluating visual analytics systems for investigative analysis.",
"title": "How Can Visual Analytics Assist Investigative Analysis? Design Implications from an Evaluation",
"normalizedTitle": "How Can Visual Analytics Assist Investigative Analysis? Design Implications from an Evaluation",
"fno": "ttg2011050570",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Visual Analytics",
"Information Visualization",
"Evaluation",
"Investigative Analysis",
"User Study"
],
"authors": [
{
"givenName": "Youn-ah",
"surname": "Kang",
"fullName": "Youn-ah Kang",
"affiliation": "Georgia Institute of Technology, Atlanta",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Carsten",
"surname": "Görg",
"fullName": "Carsten Görg",
"affiliation": "Georgia Institute of Technology, Atlanta",
"__typename": "ArticleAuthorType"
},
{
"givenName": "John",
"surname": "Stasko",
"fullName": "John Stasko",
"affiliation": "Georgia Institute of Technology, Atlanta",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "05",
"pubDate": "2011-05-01 00:00:00",
"pubType": "trans",
"pages": "570-583",
"year": "2011",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "trans/tg/2016/01/07194834",
"title": "SensePath: Understanding the Sensemaking Process Through Analytic Provenance",
"doi": null,
"abstractUrl": "/journal/tg/2016/01/07194834/13rRUEgarnM",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/11/ttg2012111992",
"title": "Evaluating the Role of Time in Investigative Analysis of Document Collections",
"doi": null,
"abstractUrl": "/journal/tg/2012/11/ttg2012111992/13rRUwI5TQW",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/co/2013/07/mco2013070030",
"title": "Visual Analytics Support for Intelligence Analysis",
"doi": null,
"abstractUrl": "/magazine/co/2013/07/mco2013070030/13rRUxD9h0P",
"parentPublication": {
"id": "mags/co",
"title": "Computer",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/12/ttg2012122869",
"title": "Examining the Use of a Visual Analytics System for Sensemaking Tasks: Case Studies with Domain Experts",
"doi": null,
"abstractUrl": "/journal/tg/2012/12/ttg2012122869/13rRUxNmPDT",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2015/02/mcg2015020016",
"title": "Preparing Undergraduates for Visual Analytics",
"doi": null,
"abstractUrl": "/magazine/cg/2015/02/mcg2015020016/13rRUxjQyjN",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/01/08017596",
"title": "Supporting Handoff in Asynchronous Collaborative Sensemaking Using Knowledge-Transfer Graphs",
"doi": null,
"abstractUrl": "/journal/tg/2018/01/08017596/13rRUytWF9s",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/12/ttg2013122032",
"title": "Visual Analytics for Multimodal Social Network Analysis: A Design Study with Social Scientists",
"doi": null,
"abstractUrl": "/journal/tg/2013/12/ttg2013122032/13rRUyuegp5",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09894094",
"title": "Exploring the Evolution of Sensemaking Strategies in Immersive Space to Think",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09894094/1GIqpC6j7na",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vds/2022/5721/0/572100a006",
"title": "Communication Analysis through Visual Analytics: Current Practices, Challenges, and New Frontiers",
"doi": null,
"abstractUrl": "/proceedings-article/vds/2022/572100a006/1JezMbpIoX6",
"parentPublication": {
"id": "proceedings/vds/2022/5721/0",
"title": "2022 IEEE Visualization in Data Science (VDS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vis/2020/8014/0/801400a081",
"title": "Designing for Ambiguity: Visual Analytics in Avalanche Forecasting",
"doi": null,
"abstractUrl": "/proceedings-article/vis/2020/801400a081/1qROrjfqgSI",
"parentPublication": {
"id": "proceedings/vis/2020/8014/0",
"title": "2020 IEEE Visualization Conference (VIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2011050557",
"articleId": "13rRUEgarBo",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2011050584",
"articleId": "13rRUxZ0o1v",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNyPQ4Dx",
"title": "Dec.",
"year": "2012",
"issueNum": "12",
"idPrefix": "tg",
"pubType": "journal",
"volume": "18",
"label": "Dec.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUxNmPDT",
"doi": "10.1109/TVCG.2012.224",
"abstract": "While the formal evaluation of systems in visual analytics is still relatively uncommon, particularly rare are case studies of prolonged system use by domain analysts working with their own data. Conducting case studies can be challenging, but it can be a particularly effective way to examine whether visual analytics systems are truly helping expert users to accomplish their goals. We studied the use of a visual analytics system for sensemaking tasks on documents by six analysts from a variety of domains. We describe their application of the system along with the benefits, issues, and problems that we uncovered. Findings from the studies identify features that visual analytics systems should emphasize as well as missing capabilities that should be addressed. These findings inform design implications for future systems.",
"abstracts": [
{
"abstractType": "Regular",
"content": "While the formal evaluation of systems in visual analytics is still relatively uncommon, particularly rare are case studies of prolonged system use by domain analysts working with their own data. Conducting case studies can be challenging, but it can be a particularly effective way to examine whether visual analytics systems are truly helping expert users to accomplish their goals. We studied the use of a visual analytics system for sensemaking tasks on documents by six analysts from a variety of domains. We describe their application of the system along with the benefits, issues, and problems that we uncovered. Findings from the studies identify features that visual analytics systems should emphasize as well as missing capabilities that should be addressed. These findings inform design implications for future systems.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "While the formal evaluation of systems in visual analytics is still relatively uncommon, particularly rare are case studies of prolonged system use by domain analysts working with their own data. Conducting case studies can be challenging, but it can be a particularly effective way to examine whether visual analytics systems are truly helping expert users to accomplish their goals. We studied the use of a visual analytics system for sensemaking tasks on documents by six analysts from a variety of domains. We describe their application of the system along with the benefits, issues, and problems that we uncovered. Findings from the studies identify features that visual analytics systems should emphasize as well as missing capabilities that should be addressed. These findings inform design implications for future systems.",
"title": "Examining the Use of a Visual Analytics System for Sensemaking Tasks: Case Studies with Domain Experts",
"normalizedTitle": "Examining the Use of a Visual Analytics System for Sensemaking Tasks: Case Studies with Domain Experts",
"fno": "ttg2012122869",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Visual Analytics",
"Qualitative Analysis",
"Electronic Mail",
"Data Visualization",
"Market Research",
"Qualitative Evaluation",
"Visual Analytics",
"Case Study"
],
"authors": [
{
"givenName": "Youn-ah",
"surname": "Kang",
"fullName": "Youn-ah Kang",
"affiliation": "Google Inc.",
"__typename": "ArticleAuthorType"
},
{
"givenName": "John",
"surname": "Stasko",
"fullName": "John Stasko",
"affiliation": "Georgia Institute of Technology",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "12",
"pubDate": "2012-12-01 00:00:00",
"pubType": "trans",
"pages": "2869-2878",
"year": "2012",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/dsc/2017/1600/0/1600a368",
"title": "Tasks for Visual Analytics in Multilayer Networks",
"doi": null,
"abstractUrl": "/proceedings-article/dsc/2017/1600a368/12OmNz61dsf",
"parentPublication": {
"id": "proceedings/dsc/2017/1600/0",
"title": "2017 IEEE Second International Conference on Data Science in Cyberspace (DSC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/eisic/2015/8657/0/8657a177",
"title": "Guidelines for Sensemaking in Intelligence Analysis",
"doi": null,
"abstractUrl": "/proceedings-article/eisic/2015/8657a177/12OmNzTYBR1",
"parentPublication": {
"id": "proceedings/eisic/2015/8657/0",
"title": "2015 European Intelligence and Security Informatics Conference (EISIC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2009/03/mcg2009030039",
"title": "Integrating Statistics and Visualization for Exploratory Power: From Long-Term Case Studies to Design Guidelines",
"doi": null,
"abstractUrl": "/magazine/cg/2009/03/mcg2009030039/13rRUB7a13y",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2011/05/ttg2011050570",
"title": "How Can Visual Analytics Assist Investigative Analysis? Design Implications from an Evaluation",
"doi": null,
"abstractUrl": "/journal/tg/2011/05/ttg2011050570/13rRUILLkvl",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2009/03/mcg2009030052",
"title": "Recovering Reasoning Processes from User Interactions",
"doi": null,
"abstractUrl": "/magazine/cg/2009/03/mcg2009030052/13rRUNvyans",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2011/04/ttg2011040440",
"title": "Forecasting Hotspots—A Predictive Analytics Approach",
"doi": null,
"abstractUrl": "/journal/tg/2011/04/ttg2011040440/13rRUwdrdSv",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vast/2017/3163/0/08585484",
"title": "CRICTO: Supporting Sensemaking through Crowdsourced Information Schematization",
"doi": null,
"abstractUrl": "/proceedings-article/vast/2017/08585484/17D45Wc1ILV",
"parentPublication": {
"id": "proceedings/vast/2017/3163/0",
"title": "2017 IEEE Conference on Visual Analytics Science and Technology (VAST)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2020/03/09082281",
"title": "TimeSets: Temporal Sensemaking in Intelligence Analysis",
"doi": null,
"abstractUrl": "/magazine/cg/2020/03/09082281/1jqfbRdclvq",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/trex/2021/1817/0/181700a040",
"title": "Beyond Visual Analytics: Human-Machine Teaming for AI-Driven Data Sensemaking",
"doi": null,
"abstractUrl": "/proceedings-article/trex/2021/181700a040/1yQB6SxBJ0A",
"parentPublication": {
"id": "proceedings/trex/2021/1817/0",
"title": "2021 IEEE Workshop on TRust and EXpertise in Visual Analytics (TREX)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vis/2021/3335/0/333500a181",
"title": "Narrative Sensemaking: Strategies for Narrative Maps Construction",
"doi": null,
"abstractUrl": "/proceedings-article/vis/2021/333500a181/1yXuj3PJXRm",
"parentPublication": {
"id": "proceedings/vis/2021/3335/0",
"title": "2021 IEEE Visualization Conference (VIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2012122859",
"articleId": "13rRUx0Pqpw",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2012122879",
"articleId": "13rRUwdIOUL",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1GIqpC6j7na",
"doi": "10.1109/TVCG.2022.3207357",
"abstract": "Existing research on immersive analytics to support the sensemaking process focuses on single-session sensemaking tasks. However, in the wild, sensemaking can take days or months to complete. In order to understand the full benefits of immersive analytic systems, we need to understand how immersive analytic systems provide flexibility for the dynamic nature of the sensemaking process. In our work, we build upon an existing immersive analytic system – Immersive Space to Think, to evaluate how immersive analytic systems can support sensemaking tasks over time. We conducted a user study with eight participants with three separate analysis sessions each. We found significant differences between analysis strategies between sessions one, two, and three, which suggest that immersive space to think can benefit analysts during multiple stages in the sensemaking process.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Existing research on immersive analytics to support the sensemaking process focuses on single-session sensemaking tasks. However, in the wild, sensemaking can take days or months to complete. In order to understand the full benefits of immersive analytic systems, we need to understand how immersive analytic systems provide flexibility for the dynamic nature of the sensemaking process. In our work, we build upon an existing immersive analytic system – Immersive Space to Think, to evaluate how immersive analytic systems can support sensemaking tasks over time. We conducted a user study with eight participants with three separate analysis sessions each. We found significant differences between analysis strategies between sessions one, two, and three, which suggest that immersive space to think can benefit analysts during multiple stages in the sensemaking process.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Existing research on immersive analytics to support the sensemaking process focuses on single-session sensemaking tasks. However, in the wild, sensemaking can take days or months to complete. In order to understand the full benefits of immersive analytic systems, we need to understand how immersive analytic systems provide flexibility for the dynamic nature of the sensemaking process. In our work, we build upon an existing immersive analytic system – Immersive Space to Think, to evaluate how immersive analytic systems can support sensemaking tasks over time. We conducted a user study with eight participants with three separate analysis sessions each. We found significant differences between analysis strategies between sessions one, two, and three, which suggest that immersive space to think can benefit analysts during multiple stages in the sensemaking process.",
"title": "Exploring the Evolution of Sensemaking Strategies in Immersive Space to Think",
"normalizedTitle": "Exploring the Evolution of Sensemaking Strategies in Immersive Space to Think",
"fno": "09894094",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Task Analysis",
"Data Visualization",
"Cognition",
"Prototypes",
"Visual Analytics",
"Three Dimensional Displays",
"Keyboards",
"Human Computer Interaction",
"Immersive Analytics",
"Virtual Reality",
"Information Visualization",
"Sensemaking"
],
"authors": [
{
"givenName": "Kylie",
"surname": "Davidson",
"fullName": "Kylie Davidson",
"affiliation": "Department of Computer Science, Virginia Polytechnic Institute and State University, Blacksburg, VA, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Lee",
"surname": "Lisle",
"fullName": "Lee Lisle",
"affiliation": "Department of Computer Science, Virginia Polytechnic Institute and State University, Blacksburg, VA, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Kirsten",
"surname": "Whitley",
"fullName": "Kirsten Whitley",
"affiliation": "US Department of Defense, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Doug A.",
"surname": "Bowman",
"fullName": "Doug A. Bowman",
"affiliation": "Department of Computer Science, Virginia Polytechnic Institute and State University, Blacksburg, VA, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Chris",
"surname": "North",
"fullName": "Chris North",
"affiliation": "Department of Computer Science, Virginia Polytechnic Institute and State University, Blacksburg, VA, USA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-09-01 00:00:00",
"pubType": "trans",
"pages": "1-15",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/vast/2012/4752/0/06400559",
"title": "Analyst's Workspace: An embodied sensemaking environment for large, high-resolution displays",
"doi": null,
"abstractUrl": "/proceedings-article/vast/2012/06400559/12OmNwF0BJt",
"parentPublication": {
"id": "proceedings/vast/2012/4752/0",
"title": "2012 IEEE Conference on Visual Analytics Science and Technology (VAST 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/eisic/2016/2857/0/07870192",
"title": "How Analysts Think: Think-steps as a Tool for Structuring Sensemaking in Criminal Intelligence Analysis",
"doi": null,
"abstractUrl": "/proceedings-article/eisic/2016/07870192/12OmNxWLTzQ",
"parentPublication": {
"id": "proceedings/eisic/2016/2857/0",
"title": "2016 European Intelligence and Security Informatics Conference (EISIC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/eisic/2015/8657/0/8657a177",
"title": "Guidelines for Sensemaking in Intelligence Analysis",
"doi": null,
"abstractUrl": "/proceedings-article/eisic/2015/8657a177/12OmNzTYBR1",
"parentPublication": {
"id": "proceedings/eisic/2015/8657/0",
"title": "2015 European Intelligence and Security Informatics Conference (EISIC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/01/07194834",
"title": "SensePath: Understanding the Sensemaking Process Through Analytic Provenance",
"doi": null,
"abstractUrl": "/journal/tg/2016/01/07194834/13rRUEgarnM",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/12/ttg2012122879",
"title": "Semantic Interaction for Sensemaking: Inferring Analytical Reasoning for Model Steering",
"doi": null,
"abstractUrl": "/journal/tg/2012/12/ttg2012122879/13rRUwdIOUL",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090546",
"title": "[DC] The Immersive Space to Think: Immersive Analytics for Multimedia Data",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090546/1jIxrquhCNO",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090620",
"title": "Evaluating the Benefits of the Immersive Space to Think",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090620/1jIxs5S1PwY",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/05/09382916",
"title": "Exploring the SenseMaking Process through Interactions and fNIRS in Immersive Visualization",
"doi": null,
"abstractUrl": "/journal/tg/2021/05/09382916/1saZna718yY",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2021/1838/0/255600a529",
"title": "Sensemaking Strategies with Immersive Space to Think",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2021/255600a529/1tuAMAuN6kU",
"parentPublication": {
"id": "proceedings/vr/2021/1838/0",
"title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vis/2021/3335/0/333500a181",
"title": "Narrative Sensemaking: Strategies for Narrative Maps Construction",
"doi": null,
"abstractUrl": "/proceedings-article/vis/2021/333500a181/1yXuj3PJXRm",
"parentPublication": {
"id": "proceedings/vis/2021/3335/0",
"title": "2021 IEEE Visualization Conference (VIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09893374",
"articleId": "1GGLIh8KmSA",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09894103",
"articleId": "1GIqpPbyH7y",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1GNprfMS0uc",
"name": "ttg555501-09894094s1-supp1-3207357.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09894094s1-supp1-3207357.pdf",
"extension": "pdf",
"size": "33.4 kB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNxvO04Q",
"title": "Jan.",
"year": "2017",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": "23",
"label": "Jan.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUxlgy3M",
"doi": "10.1109/TVCG.2016.2598918",
"abstract": "We present an evaluation of Colorgorical, a web-based tool for creating discriminable and aesthetically preferable categorical color palettes. Colorgorical uses iterative semi-random sampling to pick colors from CIELAB space based on user-defined discriminability and preference importances. Colors are selected by assigning each a weighted sum score that applies the user-defined importances to Perceptual Distance, Name Difference, Name Uniqueness, and Pair Preference scoring functions, which compare a potential sample to already-picked palette colors. After, a color is added to the palette by randomly sampling from the highest scoring palettes. Users can also specify hue ranges or build off their own starting palettes. This procedure differs from previous approaches that do not allow customization (e.g., pre-made ColorBrewer palettes) or do not consider visualization design constraints (e.g., Adobe Color and ACE). In a Palette Score Evaluation, we verified that each scoring function measured different color information. Experiment 1 demonstrated that slider manipulation generates palettes that are consistent with the expected balance of discriminability and aesthetic preference for 3-, 5-, and 8-color palettes, and also shows that the number of colors may change the effectiveness of pair-based discriminability and preference scores. For instance, if the Pair Preference slider were upweighted, users would judge the palettes as more preferable on average. Experiment 2 compared Colorgorical palettes to benchmark palettes (ColorBrewer, Microsoft, Tableau, Random). Colorgorical palettes are as discriminable and are at least as preferable or more preferable than the alternative palette sets. In sum, Colorgorical allows users to make customized color palettes that are, on average, as effective as current industry standards by balancing the importance of discriminability and aesthetic preference.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We present an evaluation of Colorgorical, a web-based tool for creating discriminable and aesthetically preferable categorical color palettes. Colorgorical uses iterative semi-random sampling to pick colors from CIELAB space based on user-defined discriminability and preference importances. Colors are selected by assigning each a weighted sum score that applies the user-defined importances to Perceptual Distance, Name Difference, Name Uniqueness, and Pair Preference scoring functions, which compare a potential sample to already-picked palette colors. After, a color is added to the palette by randomly sampling from the highest scoring palettes. Users can also specify hue ranges or build off their own starting palettes. This procedure differs from previous approaches that do not allow customization (e.g., pre-made ColorBrewer palettes) or do not consider visualization design constraints (e.g., Adobe Color and ACE). In a Palette Score Evaluation, we verified that each scoring function measured different color information. Experiment 1 demonstrated that slider manipulation generates palettes that are consistent with the expected balance of discriminability and aesthetic preference for 3-, 5-, and 8-color palettes, and also shows that the number of colors may change the effectiveness of pair-based discriminability and preference scores. For instance, if the Pair Preference slider were upweighted, users would judge the palettes as more preferable on average. Experiment 2 compared Colorgorical palettes to benchmark palettes (ColorBrewer, Microsoft, Tableau, Random). Colorgorical palettes are as discriminable and are at least as preferable or more preferable than the alternative palette sets. In sum, Colorgorical allows users to make customized color palettes that are, on average, as effective as current industry standards by balancing the importance of discriminability and aesthetic preference.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We present an evaluation of Colorgorical, a web-based tool for creating discriminable and aesthetically preferable categorical color palettes. Colorgorical uses iterative semi-random sampling to pick colors from CIELAB space based on user-defined discriminability and preference importances. Colors are selected by assigning each a weighted sum score that applies the user-defined importances to Perceptual Distance, Name Difference, Name Uniqueness, and Pair Preference scoring functions, which compare a potential sample to already-picked palette colors. After, a color is added to the palette by randomly sampling from the highest scoring palettes. Users can also specify hue ranges or build off their own starting palettes. This procedure differs from previous approaches that do not allow customization (e.g., pre-made ColorBrewer palettes) or do not consider visualization design constraints (e.g., Adobe Color and ACE). In a Palette Score Evaluation, we verified that each scoring function measured different color information. Experiment 1 demonstrated that slider manipulation generates palettes that are consistent with the expected balance of discriminability and aesthetic preference for 3-, 5-, and 8-color palettes, and also shows that the number of colors may change the effectiveness of pair-based discriminability and preference scores. For instance, if the Pair Preference slider were upweighted, users would judge the palettes as more preferable on average. Experiment 2 compared Colorgorical palettes to benchmark palettes (ColorBrewer, Microsoft, Tableau, Random). Colorgorical palettes are as discriminable and are at least as preferable or more preferable than the alternative palette sets. In sum, Colorgorical allows users to make customized color palettes that are, on average, as effective as current industry standards by balancing the importance of discriminability and aesthetic preference.",
"title": "Colorgorical: Creating discriminable and preferable color palettes for information visualization",
"normalizedTitle": "Colorgorical: Creating discriminable and preferable color palettes for information visualization",
"fno": "07539386",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Color",
"Image Color Analysis",
"Visualization",
"Harmonic Analysis",
"Benchmark Testing",
"Industries",
"Standards",
"Visualization",
"Aesthetics In Visualization",
"Color Perception",
"Metrics Benchmarks",
"Visual Design"
],
"authors": [
{
"givenName": "Connor C.",
"surname": "Gramazio",
"fullName": "Connor C. Gramazio",
"affiliation": "Dept. of Computer Science at Brown University",
"__typename": "ArticleAuthorType"
},
{
"givenName": "David H.",
"surname": "Laidlaw",
"fullName": "David H. Laidlaw",
"affiliation": "Dept. of Computer Science at Brown University",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Karen B.",
"surname": "Schloss",
"fullName": "Karen B. Schloss",
"affiliation": "Dept. of Cognitive, Linguistic, and Psychological Sciences at Brown University",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2017-01-01 00:00:00",
"pubType": "trans",
"pages": "521-530",
"year": "2017",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/icip/1997/8183/1/81831830",
"title": "Adaptive palette determination for color images based on Kohonen networks",
"doi": null,
"abstractUrl": "/proceedings-article/icip/1997/81831830/12OmNAnMuHl",
"parentPublication": {
"id": "proceedings/icip/1997/8183/1",
"title": "Image Processing, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2015/7082/0/07177443",
"title": "Creative design of color palettes for product packaging",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2015/07177443/12OmNqH9hqW",
"parentPublication": {
"id": "proceedings/icme/2015/7082/0",
"title": "2015 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2015/8391/0/8391b618",
"title": "Robust Image Segmentation Using Contour-Guided Color Palettes",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2015/8391b618/12OmNz5s0RE",
"parentPublication": {
"id": "proceedings/iccv/2015/8391/0",
"title": "2015 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/10/ttg2013101746",
"title": "Perceptually Driven Visibility Optimization for Categorical Data Visualization",
"doi": null,
"abstractUrl": "/journal/tg/2013/10/ttg2013101746/13rRUwI5Ug7",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/06/07911336",
"title": "Color Orchestra: Ordering Color Palettes for Interpolation and Prediction",
"doi": null,
"abstractUrl": "/journal/tg/2018/06/07911336/13rRUxASu0R",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/01/07192709",
"title": "A Linguistic Approach to Categorical Color Assignment for Data Visualization",
"doi": null,
"abstractUrl": "/journal/tg/2016/01/07192709/13rRUyYjKaj",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09969167",
"title": "Image-Driven Harmonious Color Palette Generation for Diverse Information Visualization",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09969167/1IMicNIXex2",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2022/9007/0/900700a040",
"title": "Affective Color Palette Recommendations with Non-negative Tensor Factorization",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2022/900700a040/1KaH7ehIrGU",
"parentPublication": {
"id": "proceedings/iv/2022/9007/0",
"title": "2022 26th International Conference Information Visualisation (IV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/02/09222351",
"title": "Palettailor: Discriminable Colorization for Categorical Data",
"doi": null,
"abstractUrl": "/journal/tg/2021/02/09222351/1nTq353vBNS",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/12/09444798",
"title": "InfoColorizer: Interactive Recommendation of Color Palettes for Infographics",
"doi": null,
"abstractUrl": "/journal/tg/2022/12/09444798/1u51zekYoA8",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "07539294",
"articleId": "13rRUNvgyWs",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07539319",
"articleId": "13rRUwh80Hi",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
|
{
"issue": {
"id": "1J9y2mtpt3a",
"title": "Jan.",
"year": "2023",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": "29",
"label": "Jan.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1H1ggMqzJUQ",
"doi": "10.1109/TVCG.2022.3209481",
      "abstract": "The interpretation of colors in visualizations is facilitated when the assignments between colors and concepts in the visualizations match human's expectations, implying that the colors can be interpreted in a semantic manner. However, manually creating a dataset of suitable associations between colors and concepts for use in visualizations is costly, as such associations would have to be collected from humans for a large variety of concepts. To address the challenge of collecting this data, we introduce a method to extract color-concept associations automatically from a set of concept images. While the state-of-the-art method extracts associations from data with supervised learning, we developed a self-supervised method based on colorization that does not require the preparation of ground truth color-concept associations. Our key insight is that a set of images of a concept should be sufficient for learning color-concept associations, since humans also learn to associate colors to concepts mainly from past visual input. Thus, we propose to use an automatic colorization method to extract statistical models of the color-concept associations that appear in concept images. Specifically, we take a colorization model pre-trained on ImageNet and fine-tune it on the set of images associated with a given concept, to predict pixel-wise probability distributions in Lab color space for the images. Then, we convert the predicted probability distributions into color ratings for a given color library and aggregate them for all the images of a concept to obtain the final color-concept associations. We evaluate our method using four different evaluation metrics and via a user study. Experiments show that, although the state-of-the-art method based on supervised learning with user-provided ratings is more effective at capturing relative associations, our self-supervised method obtains overall better results according to metrics like Earth Mover's Distance (EMD) and Entropy Difference (ED), which are closer to human perception of color distributions.",
"abstracts": [
{
"abstractType": "Regular",
          "content": "The interpretation of colors in visualizations is facilitated when the assignments between colors and concepts in the visualizations match human's expectations, implying that the colors can be interpreted in a semantic manner. However, manually creating a dataset of suitable associations between colors and concepts for use in visualizations is costly, as such associations would have to be collected from humans for a large variety of concepts. To address the challenge of collecting this data, we introduce a method to extract color-concept associations automatically from a set of concept images. While the state-of-the-art method extracts associations from data with supervised learning, we developed a self-supervised method based on colorization that does not require the preparation of ground truth color-concept associations. Our key insight is that a set of images of a concept should be sufficient for learning color-concept associations, since humans also learn to associate colors to concepts mainly from past visual input. Thus, we propose to use an automatic colorization method to extract statistical models of the color-concept associations that appear in concept images. Specifically, we take a colorization model pre-trained on ImageNet and fine-tune it on the set of images associated with a given concept, to predict pixel-wise probability distributions in Lab color space for the images. Then, we convert the predicted probability distributions into color ratings for a given color library and aggregate them for all the images of a concept to obtain the final color-concept associations. We evaluate our method using four different evaluation metrics and via a user study. Experiments show that, although the state-of-the-art method based on supervised learning with user-provided ratings is more effective at capturing relative associations, our self-supervised method obtains overall better results according to metrics like Earth Mover's Distance (EMD) and Entropy Difference (ED), which are closer to human perception of color distributions.",
"__typename": "ArticleAbstractType"
}
],
      "normalizedAbstract": "The interpretation of colors in visualizations is facilitated when the assignments between colors and concepts in the visualizations match human's expectations, implying that the colors can be interpreted in a semantic manner. However, manually creating a dataset of suitable associations between colors and concepts for use in visualizations is costly, as such associations would have to be collected from humans for a large variety of concepts. To address the challenge of collecting this data, we introduce a method to extract color-concept associations automatically from a set of concept images. While the state-of-the-art method extracts associations from data with supervised learning, we developed a self-supervised method based on colorization that does not require the preparation of ground truth color-concept associations. Our key insight is that a set of images of a concept should be sufficient for learning color-concept associations, since humans also learn to associate colors to concepts mainly from past visual input. Thus, we propose to use an automatic colorization method to extract statistical models of the color-concept associations that appear in concept images. Specifically, we take a colorization model pre-trained on ImageNet and fine-tune it on the set of images associated with a given concept, to predict pixel-wise probability distributions in Lab color space for the images. Then, we convert the predicted probability distributions into color ratings for a given color library and aggregate them for all the images of a concept to obtain the final color-concept associations. We evaluate our method using four different evaluation metrics and via a user study. Experiments show that, although the state-of-the-art method based on supervised learning with user-provided ratings is more effective at capturing relative associations, our self-supervised method obtains overall better results according to metrics like Earth Mover's Distance (EMD) and Entropy Difference (ED), which are closer to human perception of color distributions.",
"title": "Self-Supervised Color-Concept Association via Image Colorization",
"normalizedTitle": "Self-Supervised Color-Concept Association via Image Colorization",
"fno": "09904484",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Data Visualisation",
"Entropy",
"Image Colour Analysis",
"Statistical Analysis",
"Statistical Distributions",
"Supervised Learning",
"Automatic Colorization Method",
"Color Distributions",
"Color Library",
"Color Ratings",
"Concept Images",
"Ground Truth Color Concept Associations",
"Image Colorization",
"Image Net",
"Lab Color Space",
"Pixel Wise Probability Distribution",
"Self Supervised Color Concept Association",
"Statistical Model",
"Supervised Learning",
"Visualization",
"Image Color Analysis",
"Feature Extraction",
"Probability Distribution",
"Data Mining",
"Semantics",
"Libraries",
"Aggregates",
"Color Concept Association",
"Colorization",
"EMD"
],
"authors": [
{
"givenName": "Ruizhen",
"surname": "Hu",
"fullName": "Ruizhen Hu",
"affiliation": "Shenzhen University, Visual Computing Research Center, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Ziqi",
"surname": "Ye",
"fullName": "Ziqi Ye",
"affiliation": "Shenzhen University, Visual Computing Research Center, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Bin",
"surname": "Chen",
"fullName": "Bin Chen",
"affiliation": "Shenzhen University, Visual Computing Research Center, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Oliver",
"surname": "van Kaick",
"fullName": "Oliver van Kaick",
"affiliation": "Carleton University, School of Computer Science, Canada",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Hui",
"surname": "Huang",
"fullName": "Hui Huang",
"affiliation": "Shenzhen University, Visual Computing Research Center, China",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2023-01-01 00:00:00",
"pubType": "trans",
"pages": "247-256",
"year": "2023",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/ispan/2009/3908/0/3908a492",
"title": "A Flexible and Effective Colorization System",
"doi": null,
"abstractUrl": "/proceedings-article/ispan/2009/3908a492/12OmNA0MZ6t",
"parentPublication": {
"id": "proceedings/ispan/2009/3908/0",
"title": "Parallel Architectures, Algorithms, and Networks, International Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icig/2013/5050/0/5050a089",
"title": "An Interactive Framework for Video Colorization",
"doi": null,
"abstractUrl": "/proceedings-article/icig/2013/5050a089/12OmNBLdKIJ",
"parentPublication": {
"id": "proceedings/icig/2013/5050/0",
"title": "2013 Seventh International Conference on Image and Graphics (ICIG)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2012/2216/0/06460810",
"title": "Patch-based image colorization",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2012/06460810/12OmNBigFy1",
"parentPublication": {
"id": "proceedings/icpr/2012/2216/0",
"title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sibgrapi/2012/4829/0/4829a032",
"title": "Colorization by Multidimensional Projection",
"doi": null,
"abstractUrl": "/proceedings-article/sibgrapi/2012/4829a032/12OmNBsLPdX",
"parentPublication": {
"id": "proceedings/sibgrapi/2012/4829/0",
"title": "2012 25th SIBGRAPI Conference on Graphics, Patterns and Images",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2017/0457/0/0457c877",
"title": "Learning Diverse Image Colorization",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2017/0457c877/12OmNvmG7Wg",
"parentPublication": {
"id": "proceedings/cvpr/2017/0457/0",
"title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/snpd/2017/5504/0/08022768",
"title": "Automatic manga colorization with color style by generative adversarial nets",
"doi": null,
"abstractUrl": "/proceedings-article/snpd/2017/08022768/12OmNwkR5tS",
"parentPublication": {
"id": "proceedings/snpd/2017/5504/0",
"title": "2017 18th IEEE/ACIS International Conference on Software Engineering, Artificial Intelligence, Networking and Parallel/Distributed Computing (SNPD)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200o4357",
"title": "Towards Vivid and Diverse Image Colorization with Generative Color Prior",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200o4357/1BmHExXxcEo",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/01/08809846",
"title": "Estimating Color-Concept Associations from Image Statistics",
"doi": null,
"abstractUrl": "/journal/tg/2020/01/08809846/1cHEoEeTId2",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/03/09188002",
"title": "A Colorization Framework for Monochrome-Color Dual-Lens Systems Using a Deep Convolutional Network",
"doi": null,
"abstractUrl": "/journal/tg/2022/03/09188002/1mXkglIVFVS",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mipr/2021/1865/0/186500a382",
"title": "Culture-inspired Multi-modal Color Palette Generation and Colorization: A Chinese Youth Subculture Case",
"doi": null,
"abstractUrl": "/proceedings-article/mipr/2021/186500a382/1xPsjoppgdi",
"parentPublication": {
"id": "proceedings/mipr/2021/1865/0",
"title": "2021 IEEE 4th International Conference on Multimedia Information Processing and Retrieval (MIPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09903511",
"articleId": "1GZokWw73mo",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09903547",
"articleId": "1GZookEFGzC",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1J9yE3mnM88",
"name": "ttg202301-09904484s1-supp1-3209481.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg202301-09904484s1-supp1-3209481.pdf",
"extension": "pdf",
"size": "12.9 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
}
|
{
"issue": {
"id": "1J9y2mtpt3a",
"title": "Jan.",
"year": "2023",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": "29",
"label": "Jan.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1H3ZWHY73by",
"doi": "10.1109/TVCG.2022.3209443",
"abstract": "People have expectations about how colors map to concepts in visualizations, and they are better at interpreting visualizations that match their expectations. Traditionally, studies on these expectations (<italic>inferred mappings</italic>) distinguished distinct factors relevant for visualizations of categorical vs. continuous information. Studies on categorical information focused on direct associations (e.g., mangos are associated with yellows) whereas studies on continuous information focused on relational associations (e.g., darker colors map to larger quantities; dark-is-more bias). We unite these two areas within a single framework of assignment inference. Assignment inference is the process by which people infer mappings between perceptual features and concepts represented in encoding systems. Observers infer globally optimal assignments by maximizing the “merit,” or “goodness,” of each possible assignment. Previous work on assignment inference focused on visualizations of categorical information. We extend this approach to visualizations of continuous data by (a) broadening the notion of merit to include relational associations and (b) developing a method for combining multiple (sometimes conflicting) sources of merit to predict people's inferred mappings. We developed and tested our model on data from experiments in which participants interpreted colormap data visualizations, representing fictitious data about environmental concepts (sunshine, shade, wild fire, ocean water, glacial ice). We found both direct and relational associations contribute independently to inferred mappings. These results can be used to optimize visualization design to facilitate visual communication.",
"abstracts": [
{
"abstractType": "Regular",
"content": "People have expectations about how colors map to concepts in visualizations, and they are better at interpreting visualizations that match their expectations. Traditionally, studies on these expectations (<italic>inferred mappings</italic>) distinguished distinct factors relevant for visualizations of categorical vs. continuous information. Studies on categorical information focused on direct associations (e.g., mangos are associated with yellows) whereas studies on continuous information focused on relational associations (e.g., darker colors map to larger quantities; dark-is-more bias). We unite these two areas within a single framework of assignment inference. Assignment inference is the process by which people infer mappings between perceptual features and concepts represented in encoding systems. Observers infer globally optimal assignments by maximizing the “merit,” or “goodness,” of each possible assignment. Previous work on assignment inference focused on visualizations of categorical information. We extend this approach to visualizations of continuous data by (a) broadening the notion of merit to include relational associations and (b) developing a method for combining multiple (sometimes conflicting) sources of merit to predict people's inferred mappings. We developed and tested our model on data from experiments in which participants interpreted colormap data visualizations, representing fictitious data about environmental concepts (sunshine, shade, wild fire, ocean water, glacial ice). We found both direct and relational associations contribute independently to inferred mappings. These results can be used to optimize visualization design to facilitate visual communication.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "People have expectations about how colors map to concepts in visualizations, and they are better at interpreting visualizations that match their expectations. Traditionally, studies on these expectations (inferred mappings) distinguished distinct factors relevant for visualizations of categorical vs. continuous information. Studies on categorical information focused on direct associations (e.g., mangos are associated with yellows) whereas studies on continuous information focused on relational associations (e.g., darker colors map to larger quantities; dark-is-more bias). We unite these two areas within a single framework of assignment inference. Assignment inference is the process by which people infer mappings between perceptual features and concepts represented in encoding systems. Observers infer globally optimal assignments by maximizing the “merit,” or “goodness,” of each possible assignment. Previous work on assignment inference focused on visualizations of categorical information. We extend this approach to visualizations of continuous data by (a) broadening the notion of merit to include relational associations and (b) developing a method for combining multiple (sometimes conflicting) sources of merit to predict people's inferred mappings. We developed and tested our model on data from experiments in which participants interpreted colormap data visualizations, representing fictitious data about environmental concepts (sunshine, shade, wild fire, ocean water, glacial ice). We found both direct and relational associations contribute independently to inferred mappings. These results can be used to optimize visualization design to facilitate visual communication.",
"title": "Unifying Effects of Direct and Relational Associations for Visual Communication",
"normalizedTitle": "Unifying Effects of Direct and Relational Associations for Visual Communication",
"fno": "09905997",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Data Visualisation",
"Inference Mechanisms",
"Assignment Inference",
"Categorical Information",
"Colormap Data Visualizations",
"Direct Associations",
"Relational Associations",
"Visual Communication",
"Visualization Design",
"Image Color Analysis",
"Data Visualization",
"Semantics",
"Encoding",
"Visualization",
"Color",
"Observers",
"Visual Reasoning",
"Information Visualization",
"Colormap Data Visualizations",
"Visual Encoding",
"Color Cognition"
],
"authors": [
{
"givenName": "Melissa A.",
"surname": "Schoenlein",
"fullName": "Melissa A. Schoenlein",
"affiliation": "Psychology and Wisconsin Institute for Discovery, University of Wisconsin-Madison, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Johnny",
"surname": "Campos",
"fullName": "Johnny Campos",
"affiliation": "Cognitive Science, University of California, Merced, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Kevin J.",
"surname": "Lande",
"fullName": "Kevin J. Lande",
"affiliation": "Philosophy, Centre for Vision Research, York University, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Laurent",
"surname": "Lessard",
"fullName": "Laurent Lessard",
"affiliation": "Mechanical and Industrial Engineering, Northeastern University, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Karen B.",
"surname": "Schloss",
"fullName": "Karen B. Schloss",
"affiliation": "Psychology, Wisconsin Institute for Discovery, University of Wisconsin-Madison, USA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2023-01-01 00:00:00",
"pubType": "trans",
"pages": "385-395",
"year": "2023",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/cvpr/2016/8851/0/8851f975",
"title": "Recovering the Missing Link: Predicting Class-Attribute Associations for Unsupervised Zero-Shot Learning",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2016/8851f975/12OmNzC5T4l",
"parentPublication": {
"id": "proceedings/cvpr/2016/8851/0",
"title": "2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/10/ttg2013101746",
"title": "Perceptually Driven Visibility Optimization for Categorical Data Visualization",
"doi": null,
"abstractUrl": "/journal/tg/2013/10/ttg2013101746/13rRUwI5Ug7",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/12/06875946",
"title": "Constructing Visual Representations: Investigating the Use of Tangible Tokens",
"doi": null,
"abstractUrl": "/journal/tg/2014/12/06875946/13rRUwgQpDv",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/01/08017646",
"title": "Extracting and Retargeting Color Mappings from Bitmap Images of Visualizations",
"doi": null,
"abstractUrl": "/journal/tg/2018/01/08017646/13rRUxYIN4e",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/12/ttg2013122297",
"title": "Common Angle Plots as Perception-True Visualizations of Categorical Associations",
"doi": null,
"abstractUrl": "/journal/tg/2013/12/ttg2013122297/13rRUyYjKaf",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/01/07192709",
"title": "A Linguistic Approach to Categorical Color Assignment for Data Visualization",
"doi": null,
"abstractUrl": "/journal/tg/2016/01/07192709/13rRUyYjKaj",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/01/08809846",
"title": "Estimating Color-Concept Associations from Image Statistics",
"doi": null,
"abstractUrl": "/journal/tg/2020/01/08809846/1cHEoEeTId2",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/acii/2019/3888/0/08925465",
"title": "Understanding Chromaesthesia by Strengthening Auditory -Visual-Emotional Associations",
"doi": null,
"abstractUrl": "/proceedings-article/acii/2019/08925465/1fHGGt6qWje",
"parentPublication": {
"id": "proceedings/acii/2019/3888/0",
"title": "2019 8th International Conference on Affective Computing and Intelligent Interaction (ACII)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/02/09239918",
"title": "Semantic Discriminability for Visual Communication",
"doi": null,
"abstractUrl": "/journal/tg/2021/02/09239918/1oeZWSkMqre",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/01/09552216",
"title": "Context Matters: A Theory of Semantic Discriminability for Perceptual Encoding Systems",
"doi": null,
"abstractUrl": "/journal/tg/2022/01/09552216/1xic1HOWGli",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09904428",
"articleId": "1H1goP5OTrW",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09904490",
"articleId": "1H1gj9xTTG0",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1J9yBRPLXIk",
"name": "ttg202301-09905997s1-supp1-3209443.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg202301-09905997s1-supp1-3209443.pdf",
"extension": "pdf",
"size": "1.78 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1IMicNIXex2",
"doi": "10.1109/TVCG.2022.3226218",
"abstract": "Color has been widely used to encode data in all types of visualizations. Effective color palettes contain discriminable and harmonious colors, which allow information from visualizations to be accurately and aesthetically conveyed. However, predefined color palettes not only lack the flexibility of custom color palette generation but also ignore the context in which the visualizations are used. Designing an effective color palette is a time-consuming and challenging process for users, even experts. In this work, we propose the generation of an image-based visualization color palette to exploit the human perception of visually appealing images while considering visualization cognition. By analyzing color palette constraints, including harmony, discrimination, and context, we propose an image-driven color generation method. We design a color clustering method in the saliency-hue plane based on visual importance detection and then select the palette based on the visualization color constraints. In addition, we design two color optimization and assignment strategies for visualizations of different data types. Evaluations through numeric indicators and user experiments demonstrate that the palettes predicted by our method are visually related to the original images and are aesthetically pleasing, supporting diverse visualization contexts and data types in practical applications.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Color has been widely used to encode data in all types of visualizations. Effective color palettes contain discriminable and harmonious colors, which allow information from visualizations to be accurately and aesthetically conveyed. However, predefined color palettes not only lack the flexibility of custom color palette generation but also ignore the context in which the visualizations are used. Designing an effective color palette is a time-consuming and challenging process for users, even experts. In this work, we propose the generation of an image-based visualization color palette to exploit the human perception of visually appealing images while considering visualization cognition. By analyzing color palette constraints, including harmony, discrimination, and context, we propose an image-driven color generation method. We design a color clustering method in the saliency-hue plane based on visual importance detection and then select the palette based on the visualization color constraints. In addition, we design two color optimization and assignment strategies for visualizations of different data types. Evaluations through numeric indicators and user experiments demonstrate that the palettes predicted by our method are visually related to the original images and are aesthetically pleasing, supporting diverse visualization contexts and data types in practical applications.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Color has been widely used to encode data in all types of visualizations. Effective color palettes contain discriminable and harmonious colors, which allow information from visualizations to be accurately and aesthetically conveyed. However, predefined color palettes not only lack the flexibility of custom color palette generation but also ignore the context in which the visualizations are used. Designing an effective color palette is a time-consuming and challenging process for users, even experts. In this work, we propose the generation of an image-based visualization color palette to exploit the human perception of visually appealing images while considering visualization cognition. By analyzing color palette constraints, including harmony, discrimination, and context, we propose an image-driven color generation method. We design a color clustering method in the saliency-hue plane based on visual importance detection and then select the palette based on the visualization color constraints. In addition, we design two color optimization and assignment strategies for visualizations of different data types. Evaluations through numeric indicators and user experiments demonstrate that the palettes predicted by our method are visually related to the original images and are aesthetically pleasing, supporting diverse visualization contexts and data types in practical applications.",
"title": "Image-Driven Harmonious Color Palette Generation for Diverse Information Visualization",
"normalizedTitle": "Image-Driven Harmonious Color Palette Generation for Diverse Information Visualization",
"fno": "09969167",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Image Color Analysis",
"Data Visualization",
"Visualization",
"Task Analysis",
"Encoding",
"Visual Perception",
"Media",
"Color Assignment",
"Color Palette",
"Information Visualization",
"Visual Perception",
"Visualization Design"
],
"authors": [
{
"givenName": "Shuqi",
"surname": "Liu",
"fullName": "Shuqi Liu",
"affiliation": "School of Computer Science and Technology, East China Normal University, Shanghai, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Mingtian",
"surname": "Tao",
"fullName": "Mingtian Tao",
"affiliation": "School of Computer Science and Technology, East China Normal University, Shanghai, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Yifei",
"surname": "Huang",
"fullName": "Yifei Huang",
"affiliation": "School of Computer Science and Technology, East China Normal University, Shanghai, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Changbo",
"surname": "Wang",
"fullName": "Changbo Wang",
"affiliation": "School of Computer Science and Technology, East China Normal University, Shanghai, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Chenhui",
"surname": "Li",
"fullName": "Chenhui Li",
"affiliation": "School of Computer Science and Technology, East China Normal University, Shanghai, China",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-12-01 00:00:00",
"pubType": "trans",
"pages": "1-16",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/icip/1997/8183/1/81831830",
"title": "Adaptive palette determination for color images based on Kohonen networks",
"doi": null,
"abstractUrl": "/proceedings-article/icip/1997/81831830/12OmNAnMuHl",
"parentPublication": {
"id": "proceedings/icip/1997/8183/1",
"title": "Image Processing, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bigmm/2016/2179/0/2179a266",
"title": "Texture Compression with Hierarchical Palette",
"doi": null,
"abstractUrl": "/proceedings-article/bigmm/2016/2179a266/12OmNzYeAKH",
"parentPublication": {
"id": "proceedings/bigmm/2016/2179/0",
"title": "2016 IEEE Second International Conference on Multimedia Big Data (BigMM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/10/ttg2013101746",
"title": "Perceptually Driven Visibility Optimization for Categorical Data Visualization",
"doi": null,
"abstractUrl": "/journal/tg/2013/10/ttg2013101746/13rRUwI5Ug7",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/06/07911336",
"title": "Color Orchestra: Ordering Color Palettes for Interpolation and Prediction",
"doi": null,
"abstractUrl": "/journal/tg/2018/06/07911336/13rRUxASu0R",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2008/06/ttg2008061739",
"title": "Color Design for Illustrative Visualization",
"doi": null,
"abstractUrl": "/journal/tg/2008/06/ttg2008061739/13rRUxE04tv",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/01/07539386",
"title": "Colorgorical: Creating discriminable and preferable color palettes for information visualization",
"doi": null,
"abstractUrl": "/journal/tg/2017/01/07539386/13rRUxlgy3M",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/01/07192709",
"title": "A Linguistic Approach to Categorical Color Assignment for Data Visualization",
"doi": null,
"abstractUrl": "/journal/tg/2016/01/07192709/13rRUyYjKaj",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2022/9007/0/900700a040",
"title": "Affective Color Palette Recommendations with Non-negative Tensor Factorization",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2022/900700a040/1KaH7ehIrGU",
"parentPublication": {
"id": "proceedings/iv/2022/9007/0",
"title": "2022 26th International Conference Information Visualisation (IV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2023/9346/0/934600d610",
"title": "Color Recommendation for Vector Graphic Documents based on Multi-Palette Representation",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2023/934600d610/1KxUnpzWb3q",
"parentPublication": {
"id": "proceedings/wacv/2023/9346/0",
"title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/01/08809846",
"title": "Estimating Color-Concept Associations from Image Statistics",
"doi": null,
"abstractUrl": "/journal/tg/2020/01/08809846/1cHEoEeTId2",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09968104",
"articleId": "1IKDek8SF0c",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09969571",
"articleId": "1IMidH7hZhC",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1IRiLbGuc7e",
"name": "ttg555501-09969167s1-supp1-3226218.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09969167s1-supp1-3226218.pdf",
"extension": "pdf",
"size": "5.23 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNvsDHDY",
"title": "Jan.",
"year": "2020",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": "26",
"label": "Jan.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1cHEoEeTId2",
"doi": "10.1109/TVCG.2019.2934536",
"abstract": "To interpret the meanings of colors in visualizations of categorical information, people must determine how distinct colors correspond to different concepts. This process is easier when assignments between colors and concepts in visualizations match people's expectations, making color palettes semantically interpretable. Efforts have been underway to optimize color palette design for semantic interpretablity, but this requires having good estimates of human color-concept associations. Obtaining these data from humans is costly, which motivates the need for automated methods. We developed and evaluated a new method for automatically estimating color-concept associations in a way that strongly correlates with human ratings. Building on prior studies using Google Images, our approach operates directly on Google Image search results without the need for humans in the loop. Specifically, we evaluated several methods for extracting raw pixel content of the images in order to best estimate color-concept associations obtained from human ratings. The most effective method extracted colors using a combination of cylindrical sectors and color categories in color space. We demonstrate that our approach can accurately estimate average human color-concept associations for different fruits using only a small set of images. The approach also generalizes moderately well to more complicated recycling-related concepts of objects that can appear in any color.",
"abstracts": [
{
"abstractType": "Regular",
"content": "To interpret the meanings of colors in visualizations of categorical information, people must determine how distinct colors correspond to different concepts. This process is easier when assignments between colors and concepts in visualizations match people's expectations, making color palettes semantically interpretable. Efforts have been underway to optimize color palette design for semantic interpretablity, but this requires having good estimates of human color-concept associations. Obtaining these data from humans is costly, which motivates the need for automated methods. We developed and evaluated a new method for automatically estimating color-concept associations in a way that strongly correlates with human ratings. Building on prior studies using Google Images, our approach operates directly on Google Image search results without the need for humans in the loop. Specifically, we evaluated several methods for extracting raw pixel content of the images in order to best estimate color-concept associations obtained from human ratings. The most effective method extracted colors using a combination of cylindrical sectors and color categories in color space. We demonstrate that our approach can accurately estimate average human color-concept associations for different fruits using only a small set of images. The approach also generalizes moderately well to more complicated recycling-related concepts of objects that can appear in any color.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "To interpret the meanings of colors in visualizations of categorical information, people must determine how distinct colors correspond to different concepts. This process is easier when assignments between colors and concepts in visualizations match people's expectations, making color palettes semantically interpretable. Efforts have been underway to optimize color palette design for semantic interpretablity, but this requires having good estimates of human color-concept associations. Obtaining these data from humans is costly, which motivates the need for automated methods. We developed and evaluated a new method for automatically estimating color-concept associations in a way that strongly correlates with human ratings. Building on prior studies using Google Images, our approach operates directly on Google Image search results without the need for humans in the loop. Specifically, we evaluated several methods for extracting raw pixel content of the images in order to best estimate color-concept associations obtained from human ratings. The most effective method extracted colors using a combination of cylindrical sectors and color categories in color space. We demonstrate that our approach can accurately estimate average human color-concept associations for different fruits using only a small set of images. The approach also generalizes moderately well to more complicated recycling-related concepts of objects that can appear in any color.",
"title": "Estimating Color-Concept Associations from Image Statistics",
"normalizedTitle": "Estimating Color-Concept Associations from Image Statistics",
"fno": "08809846",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Data Visualisation",
"Image Colour Analysis",
"Color Categories",
"Visualization",
"Human Color Concept Associations",
"Color Concept Associations",
"Google Image Search Results",
"Google Images",
"Human Ratings",
"Color Palette Design",
"Color Palettes",
"Image Statistics",
"Color Space",
"Image Color Analysis",
"Data Visualization",
"Databases",
"Google",
"Semantics",
"Cognition",
"Visualization",
"Visual Reasoning",
"Visual Communication",
"Visual Encoding",
"Color Perception",
"Color Cognition",
"Color Categories"
],
"authors": [
{
"givenName": "Ragini",
"surname": "Rathore",
"fullName": "Ragini Rathore",
"affiliation": "Computer Sciences and Wisconsin Institute for Discovery (WID), University of Wisconsin–Madison",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Zachary",
"surname": "Leggon",
"fullName": "Zachary Leggon",
"affiliation": "Biology and WIDUniversity of Wisconisn–Madison",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Laurent",
"surname": "Lessard",
"fullName": "Laurent Lessard",
"affiliation": "Electrical and Computer Engineering and WID, University of Wisconsin–Madison",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Karen B.",
"surname": "Schloss",
"fullName": "Karen B. Schloss",
"affiliation": "Psychology and WID, University of Wisconsin–Madison",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2020-01-01 00:00:00",
"pubType": "trans",
"pages": "1226-1235",
"year": "2020",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/ismar/2010/9343/0/05643580",
"title": "Color harmonization for Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2010/05643580/12OmNzaQod1",
"parentPublication": {
"id": "proceedings/ismar/2010/9343/0",
"title": "2010 IEEE International Symposium on Mixed and Augmented Reality",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2016/8942/0/8942a033",
"title": "Design Tool of Color Schemes on the CIELAB Space",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2016/8942a033/12OmNzb7Zv0",
"parentPublication": {
"id": "proceedings/iv/2016/8942/0",
"title": "2016 20th International Conference Information Visualisation (IV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/01/07192709",
"title": "A Linguistic Approach to Categorical Color Assignment for Data Visualization",
"doi": null,
"abstractUrl": "/journal/tg/2016/01/07192709/13rRUyYjKaj",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hpcc-dss-smartcity-dependsys/2021/9457/0/945700c349",
"title": "Reasoning and evolution of perceptions. The Influence of Color, on Brand's Logo in Malaysia",
"doi": null,
"abstractUrl": "/proceedings-article/hpcc-dss-smartcity-dependsys/2021/945700c349/1DNCU7ZhqWA",
"parentPublication": {
"id": "proceedings/hpcc-dss-smartcity-dependsys/2021/9457/0",
"title": "2021 IEEE 23rd Int Conf on High Performance Computing & Communications; 7th Int Conf on Data Science & Systems; 19th Int Conf on Smart City; 7th Int Conf on Dependability in Sensor, Cloud & Big Data Systems & Application (HPCC/DSS/SmartCity/DependSys)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/01/09904484",
"title": "Self-Supervised Color-Concept Association via Image Colorization",
"doi": null,
"abstractUrl": "/journal/tg/2023/01/09904484/1H1ggMqzJUQ",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/01/09905997",
"title": "Unifying Effects of Direct and Relational Associations for Visual Communication",
"doi": null,
"abstractUrl": "/journal/tg/2023/01/09905997/1H3ZWHY73by",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2022/9007/0/900700a040",
"title": "Affective Color Palette Recommendations with Non-negative Tensor Factorization",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2022/900700a040/1KaH7ehIrGU",
"parentPublication": {
"id": "proceedings/iv/2022/9007/0",
"title": "2022 26th International Conference Information Visualisation (IV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/scivis/2018/6882/0/08823597",
"title": "Color Interpolation for Non-Euclidean Color Spaces",
"doi": null,
"abstractUrl": "/proceedings-article/scivis/2018/08823597/1d5kylEtwxa",
"parentPublication": {
"id": "proceedings/scivis/2018/6882/0",
"title": "2018 IEEE Scientific Visualization Conference (SciVis)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/08/09293392",
"title": "A Perceptual Color-Matching Method for Examining Color Blending in Augmented Reality Head-Up Display Graphics",
"doi": null,
"abstractUrl": "/journal/tg/2022/08/09293392/1pyomiXbJQs",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/01/09552216",
"title": "Context Matters: A Theory of Semantic Discriminability for Perceptual Encoding Systems",
"doi": null,
"abstractUrl": "/journal/tg/2022/01/09552216/1xic1HOWGli",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "08805429",
"articleId": "1cG4w5XPNUQ",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08809832",
"articleId": "1cHEgg8WeNW",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1fe9tYEZMyI",
"name": "ttg202001-08809846s1.zip",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg202001-08809846s1.zip",
"extension": "zip",
"size": "4.33 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
}
|
{
"issue": {
"id": "1qL5hsvvVkc",
"title": "Feb.",
"year": "2021",
"issueNum": "02",
"idPrefix": "tg",
"pubType": "journal",
"volume": "27",
"label": "Feb.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1oeZWSkMqre",
"doi": "10.1109/TVCG.2020.3030434",
"abstract": "To interpret information visualizations, observers must determine how visual features map onto concepts. First and foremost, this ability depends on perceptual discriminability; observers must be able to see the difference between different colors for those colors to communicate different meanings. However, the ability to interpret visualizations also depends on semantic discriminability, the degree to which observers can infer a unique mapping between visual features and concepts, based on the visual features and concepts alone (i.e., without help from verbal cues such as legends or labels). Previous evidence suggested that observers were better at interpreting encoding systems that maximized semantic discriminability (maximizing association strength between assigned colors and concepts while minimizing association strength between unassigned colors and concepts), compared to a system that only maximized color-concept association strength. However, increasing semantic discriminability also resulted in increased perceptual distance, so it is unclear which factor was responsible for improved performance. In the present study, we conducted two experiments that tested for independent effects of semantic distance and perceptual distance on semantic discriminability of bar graph data visualizations. Perceptual distance was large enough to ensure colors were more than just noticeably different. We found that increasing semantic distance improved performance, independent of variation in perceptual distance, and when these two factors were uncorrelated, responses were dominated by semantic distance. These results have implications for navigating trade-offs in color palette design optimization for visual communication.",
"abstracts": [
{
"abstractType": "Regular",
"content": "To interpret information visualizations, observers must determine how visual features map onto concepts. First and foremost, this ability depends on perceptual discriminability; observers must be able to see the difference between different colors for those colors to communicate different meanings. However, the ability to interpret visualizations also depends on semantic discriminability, the degree to which observers can infer a unique mapping between visual features and concepts, based on the visual features and concepts alone (i.e., without help from verbal cues such as legends or labels). Previous evidence suggested that observers were better at interpreting encoding systems that maximized semantic discriminability (maximizing association strength between assigned colors and concepts while minimizing association strength between unassigned colors and concepts), compared to a system that only maximized color-concept association strength. However, increasing semantic discriminability also resulted in increased perceptual distance, so it is unclear which factor was responsible for improved performance. In the present study, we conducted two experiments that tested for independent effects of semantic distance and perceptual distance on semantic discriminability of bar graph data visualizations. Perceptual distance was large enough to ensure colors were more than just noticeably different. We found that increasing semantic distance improved performance, independent of variation in perceptual distance, and when these two factors were uncorrelated, responses were dominated by semantic distance. These results have implications for navigating trade-offs in color palette design optimization for visual communication.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "To interpret information visualizations, observers must determine how visual features map onto concepts. First and foremost, this ability depends on perceptual discriminability; observers must be able to see the difference between different colors for those colors to communicate different meanings. However, the ability to interpret visualizations also depends on semantic discriminability, the degree to which observers can infer a unique mapping between visual features and concepts, based on the visual features and concepts alone (i.e., without help from verbal cues such as legends or labels). Previous evidence suggested that observers were better at interpreting encoding systems that maximized semantic discriminability (maximizing association strength between assigned colors and concepts while minimizing association strength between unassigned colors and concepts), compared to a system that only maximized color-concept association strength. However, increasing semantic discriminability also resulted in increased perceptual distance, so it is unclear which factor was responsible for improved performance. In the present study, we conducted two experiments that tested for independent effects of semantic distance and perceptual distance on semantic discriminability of bar graph data visualizations. Perceptual distance was large enough to ensure colors were more than just noticeably different. We found that increasing semantic distance improved performance, independent of variation in perceptual distance, and when these two factors were uncorrelated, responses were dominated by semantic distance. These results have implications for navigating trade-offs in color palette design optimization for visual communication.",
"title": "Semantic Discriminability for Visual Communication",
"normalizedTitle": "Semantic Discriminability for Visual Communication",
"fno": "09239918",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Data Visualisation",
"Encoding",
"Graph Theory",
"Perceptual Discriminability",
"Semantic Discriminability",
"Visual Features",
"Encoding Systems",
"Association Strength",
"Perceptual Distance",
"Bar Graph Data Visualizations",
"Visual Communication",
"Information Visualizations",
"Semantic Distance",
"Semantics",
"Color",
"Visualization",
"Observers",
"Image Color Analysis",
"Data Visualization",
"Encoding",
"Visual Reasoning",
"Information Visualization",
"Visual Communication",
"Visual Encoding",
"Color Perception",
"Color Cognition"
],
"authors": [
{
"givenName": "Karen B.",
"surname": "Schloss",
"fullName": "Karen B. Schloss",
"affiliation": "Psychology and Wisconsin Institute for Discovery, University of Wisconsin-Madison",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Zachary",
"surname": "Leggon",
"fullName": "Zachary Leggon",
                "affiliation": "Biology and Wisconsin Institute for Discovery, University of Wisconsin-Madison",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Laurent",
"surname": "Lessard",
"fullName": "Laurent Lessard",
"affiliation": "Mechanical and Industrial Engineering, Northeastern University",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "02",
"pubDate": "2021-02-01 00:00:00",
"pubType": "trans",
"pages": "1022-1031",
"year": "2021",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/icme/2017/6067/0/08019384",
"title": "Graph-based color Gamut Mapping using neighbor metric",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2017/08019384/12OmNvkGW7U",
"parentPublication": {
"id": "proceedings/icme/2017/6067/0",
"title": "2017 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2011/348/0/06012090",
"title": "A handy calibrator for color vision of a human observer",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2011/06012090/12OmNzwHvq1",
"parentPublication": {
"id": "proceedings/icme/2011/348/0",
"title": "2011 IEEE International Conference on Multimedia and Expo",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2015/11/07164348",
"title": "Matching and Reaching Depth Judgments with Real and Augmented Reality Targets",
"doi": null,
"abstractUrl": "/journal/tg/2015/11/07164348/13rRUxZzAhH",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/01/07539386",
"title": "Colorgorical: Creating discriminable and preferable color palettes for information visualization",
"doi": null,
"abstractUrl": "/journal/tg/2017/01/07539386/13rRUxlgy3M",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2020/09/08704932",
"title": "Defining Image Memorability Using the Visual Memory Schema",
"doi": null,
"abstractUrl": "/journal/tp/2020/09/08704932/19HKQBoUMla",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/01/09905997",
"title": "Unifying Effects of Direct and Relational Associations for Visual Communication",
"doi": null,
"abstractUrl": "/journal/tg/2023/01/09905997/1H3ZWHY73by",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2022/9062/0/09956466",
"title": "FD-CAM: Improving Faithfulness and Discriminability of Visual Explanation for CNNs",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2022/09956466/1IHpYQO1b7a",
"parentPublication": {
"id": "proceedings/icpr/2022/9062/0",
"title": "2022 26th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/01/08809850",
"title": "Discriminability Tests for Visualization Effectiveness and Scalability",
"doi": null,
"abstractUrl": "/journal/tg/2020/01/08809850/1cHEkrFpU76",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800e796",
"title": "Learning to Observe: Approximating Human Perceptual Thresholds for Detection of Suprathreshold Image Transformations",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800e796/1m3nXBshA6A",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/01/09552216",
"title": "Context Matters: A Theory of Semantic Discriminability for Perceptual Encoding Systems",
"doi": null,
"abstractUrl": "/journal/tg/2022/01/09552216/1xic1HOWGli",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09241426",
"articleId": "1ogFZOCOF4k",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09222327",
"articleId": "1nTqMLwYD0A",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1qLgmF28czm",
"name": "ttg202102-09239918s1-supp1-3030434.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg202102-09239918s1-supp1-3030434.pdf",
"extension": "pdf",
"size": "275 kB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNyPQ4Dx",
"title": "Dec.",
"year": "2012",
"issueNum": "12",
"idPrefix": "tg",
"pubType": "journal",
"volume": "18",
"label": "Dec.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUyfKIHJ",
"doi": "10.1109/TVCG.2012.229",
"abstract": "Reading a visualization can involve a number of tasks such as extracting, comparing or aggregating numerical values. Yet, most of the charts that are published in newspapers, reports, books, and on the Web only support a subset of these tasks. In this paper we introduce graphical overlays—visual elements that are layered onto charts to facilitate a larger set of chart reading tasks. These overlays directly support the lower-level perceptual and cognitive processes that viewers must perform to read a chart. We identify five main types of overlays that support these processes; the overlays can provide (1) reference structures such as gridlines, (2) highlights such as outlines around important marks, (3) redundant encodings such as numerical data labels, (4) summary statistics such as the mean or max and (5) annotations such as descriptive text for context. We then present an automated system that applies user-chosen graphical overlays to existing chart bitmaps. Our approach is based on the insight that generating most of these graphical overlays only requires knowing the properties of the visual marks and axes that encode the data, but does not require access to the underlying data values. Thus, our system analyzes the chart bitmap to extract only the properties necessary to generate the desired overlay. We also discuss techniques for generating interactive overlays that provide additional controls to viewers. We demonstrate several examples of each overlay type for bar, pie and line charts.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Reading a visualization can involve a number of tasks such as extracting, comparing or aggregating numerical values. Yet, most of the charts that are published in newspapers, reports, books, and on the Web only support a subset of these tasks. In this paper we introduce graphical overlays—visual elements that are layered onto charts to facilitate a larger set of chart reading tasks. These overlays directly support the lower-level perceptual and cognitive processes that viewers must perform to read a chart. We identify five main types of overlays that support these processes; the overlays can provide (1) reference structures such as gridlines, (2) highlights such as outlines around important marks, (3) redundant encodings such as numerical data labels, (4) summary statistics such as the mean or max and (5) annotations such as descriptive text for context. We then present an automated system that applies user-chosen graphical overlays to existing chart bitmaps. Our approach is based on the insight that generating most of these graphical overlays only requires knowing the properties of the visual marks and axes that encode the data, but does not require access to the underlying data values. Thus, our system analyzes the chart bitmap to extract only the properties necessary to generate the desired overlay. We also discuss techniques for generating interactive overlays that provide additional controls to viewers. We demonstrate several examples of each overlay type for bar, pie and line charts.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Reading a visualization can involve a number of tasks such as extracting, comparing or aggregating numerical values. Yet, most of the charts that are published in newspapers, reports, books, and on the Web only support a subset of these tasks. In this paper we introduce graphical overlays—visual elements that are layered onto charts to facilitate a larger set of chart reading tasks. These overlays directly support the lower-level perceptual and cognitive processes that viewers must perform to read a chart. We identify five main types of overlays that support these processes; the overlays can provide (1) reference structures such as gridlines, (2) highlights such as outlines around important marks, (3) redundant encodings such as numerical data labels, (4) summary statistics such as the mean or max and (5) annotations such as descriptive text for context. We then present an automated system that applies user-chosen graphical overlays to existing chart bitmaps. Our approach is based on the insight that generating most of these graphical overlays only requires knowing the properties of the visual marks and axes that encode the data, but does not require access to the underlying data values. Thus, our system analyzes the chart bitmap to extract only the properties necessary to generate the desired overlay. We also discuss techniques for generating interactive overlays that provide additional controls to viewers. We demonstrate several examples of each overlay type for bar, pie and line charts.",
"title": "Graphical Overlays: Using Layered Elements to Aid Chart Reading",
"normalizedTitle": "Graphical Overlays: Using Layered Elements to Aid Chart Reading",
"fno": "ttg2012122631",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Visualization",
"Encoding",
"Image Color Analysis",
"Data Mining",
"Data Visualization",
"Bars",
"Market Research",
"Graph Comprehension",
"Visualization",
"Overlays",
"Graphical Perception"
],
"authors": [
{
"givenName": "Nicholas",
"surname": "Kong",
"fullName": "Nicholas Kong",
"affiliation": "University of California, Berkeley",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Maneesh",
"surname": "Agrawala",
"fullName": "Maneesh Agrawala",
"affiliation": "University of California, Berkeley",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "12",
"pubDate": "2012-12-01 00:00:00",
"pubType": "trans",
"pages": "2631-2638",
"year": "2012",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/enic/2016/3455/0/07838060",
"title": "Eyetracking Evaluation of Different Chart Types Used for Web-Based System Data Visualization",
"doi": null,
"abstractUrl": "/proceedings-article/enic/2016/07838060/12OmNwkzusn",
"parentPublication": {
"id": "proceedings/enic/2016/3455/0",
"title": "2016 Third European Network Intelligence Conference (ENIC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/03/07845717",
"title": "Converting Basic D3 Charts into Reusable Style Templates",
"doi": null,
"abstractUrl": "/journal/tg/2018/03/07845717/13rRUxYINfm",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/01/08440833",
"title": "SmartCues: A Multitouch Query Approach for Details-on-Demand through Dynamically Computed Overlays",
"doi": null,
"abstractUrl": "/journal/tg/2019/01/08440833/17D45Vw15wL",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/01/08440827",
"title": "Charticulator: Interactive Construction of Bespoke Chart Layouts",
"doi": null,
"abstractUrl": "/journal/tg/2019/01/08440827/17D45WYQJ6A",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2018/7202/0/720200a128",
"title": "Synthetic Chart Image Generator: An Application for Generating Chart Image Datasets",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2018/720200a128/17D45X0yjUm",
"parentPublication": {
"id": "proceedings/iv/2018/7202/0",
"title": "2018 22nd International Conference Information Visualisation (IV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2021/11/09085944",
"title": "Chart Mining: A Survey of Methods for Automated Chart Analysis",
"doi": null,
"abstractUrl": "/journal/tp/2021/11/09085944/1jE1Hu1xUzu",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/02/09293003",
"title": "Chartem: Reviving Chart Images with Data Embedding",
"doi": null,
"abstractUrl": "/journal/tg/2021/02/09293003/1pyonCyir8k",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2021/8808/0/09412144",
"title": "Hybrid Cascade Point Search Network for High Precision Bar Chart Component Detection",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2021/09412144/1tmhwCCzbUY",
"parentPublication": {
"id": "proceedings/icpr/2021/8808/0",
"title": "2020 25th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2021/8808/0/09412153",
"title": "Visual Style Extraction from Chart Images for Chart Restyling",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2021/09412153/1tmiHY12xy0",
"parentPublication": {
"id": "proceedings/icpr/2021/8808/0",
"title": "2020 25th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/01/09552881",
"title": "Modeling Just Noticeable Differences in Charts",
"doi": null,
"abstractUrl": "/journal/tg/2022/01/09552881/1xibXzMLm9i",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2012122621",
"articleId": "13rRUygBwhH",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2012122639",
"articleId": "13rRUwbs2b2",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
|
{
"issue": {
"id": "1J9y2mtpt3a",
"title": "Jan.",
"year": "2023",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": "29",
"label": "Jan.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1H1geE4olvG",
"doi": "10.1109/TVCG.2022.3209426",
"abstract": "We conduct a user study to quantify and compare user performance for a value comparison task using four bar chart designs, where the bars show the mean values of data loaded progressively and updated every second (progressive bar charts). Progressive visualization divides different stages of the visualization pipeline—data loading, processing, and visualization—into iterative animated steps to limit the latency when loading large amounts of data. An animated visualization appearing quickly, unfolding, and getting more accurate with time, enables users to make early decisions. However, intermediate mean estimates are computed only on partial data and may not have time to converge to the true means, potentially misleading users and resulting in incorrect decisions. To address this issue, we propose two new designs visualizing the history of values in progressive bar charts, in addition to the use of confidence intervals. We comparatively study four progressive bar chart designs: with/without confidence intervals, and using near-history representation with/without confidence intervals, on three realistic data distributions. We evaluate user performance based on the percentage of correct answers (accuracy), response time, and user confidence. Our results show that, overall, users can make early and accurate decisions with 92% accuracy using only 18% of the data, regardless of the design. We find that our proposed bar chart design with only near-history is comparable to bar charts with only confidence intervals in performance, and the qualitative feedback we received indicates a preference for designs with history.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We conduct a user study to quantify and compare user performance for a value comparison task using four bar chart designs, where the bars show the mean values of data loaded progressively and updated every second (progressive bar charts). Progressive visualization divides different stages of the visualization pipeline—data loading, processing, and visualization—into iterative animated steps to limit the latency when loading large amounts of data. An animated visualization appearing quickly, unfolding, and getting more accurate with time, enables users to make early decisions. However, intermediate mean estimates are computed only on partial data and may not have time to converge to the true means, potentially misleading users and resulting in incorrect decisions. To address this issue, we propose two new designs visualizing the history of values in progressive bar charts, in addition to the use of confidence intervals. We comparatively study four progressive bar chart designs: with/without confidence intervals, and using near-history representation with/without confidence intervals, on three realistic data distributions. We evaluate user performance based on the percentage of correct answers (accuracy), response time, and user confidence. Our results show that, overall, users can make early and accurate decisions with 92% accuracy using only 18% of the data, regardless of the design. We find that our proposed bar chart design with only near-history is comparable to bar charts with only confidence intervals in performance, and the qualitative feedback we received indicates a preference for designs with history.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We conduct a user study to quantify and compare user performance for a value comparison task using four bar chart designs, where the bars show the mean values of data loaded progressively and updated every second (progressive bar charts). Progressive visualization divides different stages of the visualization pipeline—data loading, processing, and visualization—into iterative animated steps to limit the latency when loading large amounts of data. An animated visualization appearing quickly, unfolding, and getting more accurate with time, enables users to make early decisions. However, intermediate mean estimates are computed only on partial data and may not have time to converge to the true means, potentially misleading users and resulting in incorrect decisions. To address this issue, we propose two new designs visualizing the history of values in progressive bar charts, in addition to the use of confidence intervals. We comparatively study four progressive bar chart designs: with/without confidence intervals, and using near-history representation with/without confidence intervals, on three realistic data distributions. We evaluate user performance based on the percentage of correct answers (accuracy), response time, and user confidence. Our results show that, overall, users can make early and accurate decisions with 92% accuracy using only 18% of the data, regardless of the design. We find that our proposed bar chart design with only near-history is comparable to bar charts with only confidence intervals in performance, and the qualitative feedback we received indicates a preference for designs with history.",
"title": "Studying Early Decision Making with Progressive Bar Charts",
"normalizedTitle": "Studying Early Decision Making with Progressive Bar Charts",
"fno": "09904487",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Bar Charts",
"Computer Animation",
"Data Visualisation",
"Decision Making",
"Human Factors",
"Confidence Intervals",
"Data Distributions",
"Data Loading",
"Data Processing",
"Data Visualization",
"Decision Making",
"Iterative Animated Steps",
"Progressive Bar Chart Designs",
"Progressive Visualization",
"Response Time",
"User Confidence",
"User Performance",
"Value Comparison Task",
"Bars",
"Data Visualization",
"Uncertainty",
"Decision Making",
"History",
"Visualization",
"Task Analysis",
"Progressive Visualization",
"Uncertainty",
"Bar Charts",
"Confidence Intervals"
],
"authors": [
{
"givenName": "Ameya",
"surname": "Patil",
"fullName": "Ameya Patil",
"affiliation": "University of Washington, Seattle, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Gaëlle",
"surname": "Richer",
"fullName": "Gaëlle Richer",
"affiliation": "Inria & Université Paris-Saclay, France",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Christopher",
"surname": "Jermaine",
"fullName": "Christopher Jermaine",
"affiliation": "Rice University, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Dominik",
"surname": "Moritz",
"fullName": "Dominik Moritz",
"affiliation": "Carnegie Mellon University, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jean-Daniel",
"surname": "Fekete",
"fullName": "Jean-Daniel Fekete",
"affiliation": "Inria & Université Paris-Saclay, France",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2023-01-01 00:00:00",
"pubType": "trans",
"pages": "407-417",
"year": "2023",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "trans/tg/2014/12/06876021",
"title": "Four Experiments on the Perception of Bar Charts",
"doi": null,
"abstractUrl": "/journal/tg/2014/12/06876021/13rRUNvgz9Q",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/12/06875990",
"title": "Multi-Charts for Comparative 3D Ensemble Visualization",
"doi": null,
"abstractUrl": "/journal/tg/2014/12/06875990/13rRUxYIMUZ",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2007/04/v0822",
"title": "Value-Cell Bar Charts for Visualizing Large Transaction Data Sets",
"doi": null,
"abstractUrl": "/journal/tg/2007/04/v0822/13rRUygBw71",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2002/03/v0255",
"title": "Hierarchical Pixel Bar Charts",
"doi": null,
"abstractUrl": "/journal/tg/2002/03/v0255/13rRUyuegh1",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2018/7202/0/720200a159",
"title": "Improving Perception Accuracy in Bar Charts with Internal Contrast and Framing Enhancements",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2018/720200a159/17D45WnnFWc",
"parentPublication": {
"id": "proceedings/iv/2018/7202/0",
"title": "2018 22nd International Conference Information Visualisation (IV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2022/9007/0/900700a067",
"title": "An Overview of the Design and Development for Dynamic and Physical Bar Charts",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2022/900700a067/1KaH61BvDWw",
"parentPublication": {
"id": "proceedings/iv/2022/9007/0",
"title": "2022 26th International Conference Information Visualisation (IV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2019/2838/0/283800a163",
"title": "Proposal and Evaluation of Textual Description Templates for Bar Charts Vocalization",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2019/283800a163/1cMFc4aDtWo",
"parentPublication": {
"id": "proceedings/iv/2019/2838/0",
"title": "2019 23rd International Conference Information Visualisation (IV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2019/2838/0/283800a151",
"title": "The Cost of Pie Charts",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2019/283800a151/1cMFcqwGM5q",
"parentPublication": {
"id": "proceedings/iv/2019/2838/0",
"title": "2019 23rd International Conference Information Visualisation (IV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/01/09552208",
"title": "Visual Arrangements of Bar Charts Influence Comparisons in Viewer Takeaways",
"doi": null,
"abstractUrl": "/journal/tg/2022/01/09552208/1xibWU97C8w",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/01/09552881",
"title": "Modeling Just Noticeable Differences in Charts",
"doi": null,
"abstractUrl": "/journal/tg/2022/01/09552881/1xibXzMLm9i",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09904490",
"articleId": "1H1gj9xTTG0",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09904446",
"articleId": "1H0GdhG1Ef6",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1J9yuGZMKrK",
"name": "ttg202301-09904487s1-supp1-3209426.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg202301-09904487s1-supp1-3209426.pdf",
"extension": "pdf",
"size": "2.77 MB",
"__typename": "WebExtraType"
},
{
"id": "1J9yuzmBqCc",
"name": "ttg202301-09904487s1-supp2-3209426.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg202301-09904487s1-supp2-3209426.mp4",
"extension": "mp4",
"size": "45 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNvsDHDY",
"title": "Jan.",
"year": "2020",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": "26",
"label": "Jan.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1cG4Az22lFe",
"doi": "10.1109/TVCG.2019.2934399",
"abstract": "Students who eat breakfast more frequently tend to have a higher grade point average. From this data, many people might confidently state that a before-school breakfast program would lead to higher grades. This is a reasoning error, because correlation does not necessarily indicate causation - X and Y can be correlated without one directly causing the other. While this error is pervasive, its prevalence might be amplified or mitigated by the way that the data is presented to a viewer. Across three crowdsourced experiments, we examined whether how simple data relations are presented would mitigate this reasoning error. The first experiment tested examples similar to the breakfast-GPA relation, varying in the plausibility of the causal link. We asked participants to rate their level of agreement that the relation was correlated, which they rated appropriately as high. However, participants also expressed high agreement with a causal interpretation of the data. Levels of support for the causal interpretation were not equally strong across visualization types: causality ratings were highest for text descriptions and bar graphs, but weaker for scatter plots. But is this effect driven by bar graphs aggregating data into two groups or by the visual encoding type? We isolated data aggregation versus visual encoding type and examined their individual effect on perceived causality. Overall, different visualization designs afford different cognitive reasoning affordances across the same data. High levels of data aggregation by graphs tend to be associated with higher perceived causality in data. Participants perceived line and dot visual encodings as more causal than bar encodings. Our results demonstrate how some visualization designs trigger stronger causal links while choosing others can help mitigate unwarranted perceptions of causality.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Students who eat breakfast more frequently tend to have a higher grade point average. From this data, many people might confidently state that a before-school breakfast program would lead to higher grades. This is a reasoning error, because correlation does not necessarily indicate causation - X and Y can be correlated without one directly causing the other. While this error is pervasive, its prevalence might be amplified or mitigated by the way that the data is presented to a viewer. Across three crowdsourced experiments, we examined whether how simple data relations are presented would mitigate this reasoning error. The first experiment tested examples similar to the breakfast-GPA relation, varying in the plausibility of the causal link. We asked participants to rate their level of agreement that the relation was correlated, which they rated appropriately as high. However, participants also expressed high agreement with a causal interpretation of the data. Levels of support for the causal interpretation were not equally strong across visualization types: causality ratings were highest for text descriptions and bar graphs, but weaker for scatter plots. But is this effect driven by bar graphs aggregating data into two groups or by the visual encoding type? We isolated data aggregation versus visual encoding type and examined their individual effect on perceived causality. Overall, different visualization designs afford different cognitive reasoning affordances across the same data. High levels of data aggregation by graphs tend to be associated with higher perceived causality in data. Participants perceived line and dot visual encodings as more causal than bar encodings. Our results demonstrate how some visualization designs trigger stronger causal links while choosing others can help mitigate unwarranted perceptions of causality.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Students who eat breakfast more frequently tend to have a higher grade point average. From this data, many people might confidently state that a before-school breakfast program would lead to higher grades. This is a reasoning error, because correlation does not necessarily indicate causation - X and Y can be correlated without one directly causing the other. While this error is pervasive, its prevalence might be amplified or mitigated by the way that the data is presented to a viewer. Across three crowdsourced experiments, we examined whether how simple data relations are presented would mitigate this reasoning error. The first experiment tested examples similar to the breakfast-GPA relation, varying in the plausibility of the causal link. We asked participants to rate their level of agreement that the relation was correlated, which they rated appropriately as high. However, participants also expressed high agreement with a causal interpretation of the data. Levels of support for the causal interpretation were not equally strong across visualization types: causality ratings were highest for text descriptions and bar graphs, but weaker for scatter plots. But is this effect driven by bar graphs aggregating data into two groups or by the visual encoding type? We isolated data aggregation versus visual encoding type and examined their individual effect on perceived causality. Overall, different visualization designs afford different cognitive reasoning affordances across the same data. High levels of data aggregation by graphs tend to be associated with higher perceived causality in data. Participants perceived line and dot visual encodings as more causal than bar encodings. Our results demonstrate how some visualization designs trigger stronger causal links while choosing others can help mitigate unwarranted perceptions of causality.",
"title": "Illusion of Causality in Visualized Data",
"normalizedTitle": "Illusion of Causality in Visualized Data",
"fno": "08805448",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Causality",
"Cognition",
"Data Visualisation",
"Educational Computing",
"Educational Institutions",
"Graph Theory",
"Causal Interpretation",
"Visualization Types",
"Causality Ratings",
"Bar Graphs",
"Data Aggregation",
"Bar Encodings",
"Data Visualization",
"Grade Point Average",
"Before School Breakfast Program",
"Crowdsourced Experiments",
"Text Descriptions",
"Correlation",
"Data Visualization",
"Bars",
"Task Analysis",
"Visualization",
"Cognition",
"Encoding",
"Information Visualization",
"Correlation And Causation",
"Visualization Design",
"Reasoning Affordance"
],
"authors": [
{
"givenName": "Cindy",
"surname": "Xiong",
"fullName": "Cindy Xiong",
"affiliation": "Northwestern University",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Joel",
"surname": "Shapiro",
"fullName": "Joel Shapiro",
"affiliation": "Northwestern University, Kellogg School of Management",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jessica",
"surname": "Hullman",
"fullName": "Jessica Hullman",
"affiliation": "Northwestern University",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Steven",
"surname": "Franconeri",
"fullName": "Steven Franconeri",
"affiliation": "Northwestern University",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2020-01-01 00:00:00",
"pubType": "trans",
"pages": "853-862",
"year": "2020",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/icpr/2014/5209/0/5209d546",
"title": "Bayesian Network Structure Learning Using Causality",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2014/5209d546/12OmNC8uRrb",
"parentPublication": {
"id": "proceedings/icpr/2014/5209/0",
"title": "2014 22nd International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pads/2002/1608/0/16080053",
"title": "Critical Causality in Distributed Virtual Environments",
"doi": null,
"abstractUrl": "/proceedings-article/pads/2002/16080053/12OmNzb7Zpc",
"parentPublication": {
"id": "proceedings/pads/2002/1608/0",
"title": "Proceedings 16th Workshop on Parallel and Distributed Simulation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08448287",
"title": "Spatially Perturbed Collision Sounds Attenuate Perceived Causality in 3D Launching Events",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08448287/13bd1fWcuDr",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vast/2017/3163/0/08585647",
"title": "Visual Causality Analysis Made Practical",
"doi": null,
"abstractUrl": "/proceedings-article/vast/2017/08585647/17D45VTRouU",
"parentPublication": {
"id": "proceedings/vast/2017/3163/0",
"title": "2017 IEEE Conference on Visual Analytics Science and Technology (VAST)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2018/6420/0/642000f648",
"title": "DVQA: Understanding Data Visualizations via Question Answering",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2018/642000f648/17D45WZZ7EU",
"parentPublication": {
"id": "proceedings/cvpr/2018/6420/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icse/2022/9221/0/922100a325",
"title": "Causality in Configurable Software Systems",
"doi": null,
"abstractUrl": "/proceedings-article/icse/2022/922100a325/1EmsmQoyGwE",
"parentPublication": {
"id": "proceedings/icse/2022/9221/0",
"title": "2022 IEEE/ACM 44th International Conference on Software Engineering (ICSE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bigcomp/2023/7578/0/757800a375",
"title": "Investigating Causality in Mobile Health Data through Deep Learning Models",
"doi": null,
"abstractUrl": "/proceedings-article/bigcomp/2023/757800a375/1LFLEtcpxf2",
"parentPublication": {
"id": "proceedings/bigcomp/2023/7578/0",
"title": "2023 IEEE International Conference on Big Data and Smart Computing (BigComp)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/01/08836120",
"title": "Measures of the Benefit of Direct Encoding of Data Deltas for Data Pair Relation Perception",
"doi": null,
"abstractUrl": "/journal/tg/2020/01/08836120/1dia2KVa7g4",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/02/09222294",
"title": "Visual Causality Analysis of Event Sequence Data",
"doi": null,
"abstractUrl": "/journal/tg/2021/02/09222294/1nTqOCPOdTq",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/02/09222357",
"title": "Once Upon A Time In Visualization: Understanding the Use of Textual Narratives for Causality",
"doi": null,
"abstractUrl": "/journal/tg/2021/02/09222357/1nTqwapYWYw",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "08805432",
"articleId": "1cG4MgXJw0U",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08827593",
"articleId": "1dgdXsKkxeo",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNvsDHDY",
"title": "Jan.",
"year": "2020",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": "26",
"label": "Jan.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1cG6vb0dTG0",
"doi": "10.1109/TVCG.2019.2934786",
"abstract": "Perceptual tasks in visualizations often involve comparisons. Of two sets of values depicted in two charts, which set had values that were the highest overall? Which had the widest range? Prior empirical work found that the performance on different visual comparison tasks (e.g., “biggest delta”, “biggest correlation”) varied widely across different combinations of marks and spatial arrangements. In this paper, we expand upon these combinations in an empirical evaluation of two new comparison tasks: the “biggest mean” and “biggest range” between two sets of values. We used a staircase procedure to titrate the difficulty of the data comparison to assess which arrangements produced the most precise comparisons for each task. We find visual comparisons of biggest mean and biggest range are supported by some chart arrangements more than others, and that this pattern is substantially different from the pattern for other tasks. To synthesize these dissonant findings, we argue that we must understand which features of a visualization are actually used by the human visual system to solve a given task. We call these perceptual proxies. For example, when comparing the means of two bar charts, the visual system might use a “Mean length” proxy that isolates the actual lengths of the bars and then constructs a true average across these lengths. Alternatively, it might use a “Hull Area” proxy that perceives an implied hull bounded by the bars of each chart and then compares the areas of these hulls. We propose a series of potential proxies across different tasks, marks, and spatial arrangements. Simple models of these proxies can be empirically evaluated for their explanatory power by matching their performance to human performance across these marks, arrangements, and tasks. We use this process to highlight candidates for perceptual proxies that might scale more broadly to explain performance in visual comparison.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Perceptual tasks in visualizations often involve comparisons. Of two sets of values depicted in two charts, which set had values that were the highest overall? Which had the widest range? Prior empirical work found that the performance on different visual comparison tasks (e.g., “biggest delta”, “biggest correlation”) varied widely across different combinations of marks and spatial arrangements. In this paper, we expand upon these combinations in an empirical evaluation of two new comparison tasks: the “biggest mean” and “biggest range” between two sets of values. We used a staircase procedure to titrate the difficulty of the data comparison to assess which arrangements produced the most precise comparisons for each task. We find visual comparisons of biggest mean and biggest range are supported by some chart arrangements more than others, and that this pattern is substantially different from the pattern for other tasks. To synthesize these dissonant findings, we argue that we must understand which features of a visualization are actually used by the human visual system to solve a given task. We call these perceptual proxies. For example, when comparing the means of two bar charts, the visual system might use a “Mean length” proxy that isolates the actual lengths of the bars and then constructs a true average across these lengths. Alternatively, it might use a “Hull Area” proxy that perceives an implied hull bounded by the bars of each chart and then compares the areas of these hulls. We propose a series of potential proxies across different tasks, marks, and spatial arrangements. Simple models of these proxies can be empirically evaluated for their explanatory power by matching their performance to human performance across these marks, arrangements, and tasks. We use this process to highlight candidates for perceptual proxies that might scale more broadly to explain performance in visual comparison.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Perceptual tasks in visualizations often involve comparisons. Of two sets of values depicted in two charts, which set had values that were the highest overall? Which had the widest range? Prior empirical work found that the performance on different visual comparison tasks (e.g., “biggest delta”, “biggest correlation”) varied widely across different combinations of marks and spatial arrangements. In this paper, we expand upon these combinations in an empirical evaluation of two new comparison tasks: the “biggest mean” and “biggest range” between two sets of values. We used a staircase procedure to titrate the difficulty of the data comparison to assess which arrangements produced the most precise comparisons for each task. We find visual comparisons of biggest mean and biggest range are supported by some chart arrangements more than others, and that this pattern is substantially different from the pattern for other tasks. To synthesize these dissonant findings, we argue that we must understand which features of a visualization are actually used by the human visual system to solve a given task. We call these perceptual proxies. For example, when comparing the means of two bar charts, the visual system might use a “Mean length” proxy that isolates the actual lengths of the bars and then constructs a true average across these lengths. Alternatively, it might use a “Hull Area” proxy that perceives an implied hull bounded by the bars of each chart and then compares the areas of these hulls. We propose a series of potential proxies across different tasks, marks, and spatial arrangements. Simple models of these proxies can be empirically evaluated for their explanatory power by matching their performance to human performance across these marks, arrangements, and tasks. We use this process to highlight candidates for perceptual proxies that might scale more broadly to explain performance in visual comparison.",
"title": "The Perceptual Proxies of Visual Comparison",
"normalizedTitle": "The Perceptual Proxies of Visual Comparison",
"fno": "08807320",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Data Visualisation",
"Human Visual System",
"Perceptual Proxies",
"Bar Charts",
"Mean Length Proxy",
"Hull Area Proxy",
"Perceptual Tasks",
"Data Comparison",
"Chart Arrangements",
"Visual Comparison Tasks",
"Visualizations",
"Visualization",
"Task Analysis",
"Bars",
"Correlation",
"Data Visualization",
"Visual Systems",
"Animation",
"Graphical Perception",
"Visual Perception",
"Visual Comparison",
"Crowdsourced Evaluation"
],
"authors": [
{
"givenName": "Nicole",
"surname": "Jardine",
"fullName": "Nicole Jardine",
"affiliation": "Cook County Assessor's Office, Northwestern University, Chicago",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Brian D.",
"surname": "Ondov",
"fullName": "Brian D. Ondov",
"affiliation": "National Institutes of Health, Bethesda, MD, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Niklas",
"surname": "Elmqvist",
"fullName": "Niklas Elmqvist",
"affiliation": "University of Maryland, College Park, MD, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Steven",
"surname": "Franconeri",
"fullName": "Steven Franconeri",
"affiliation": "Cook County Assessor's Office, Northwestern University, Chicago",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2020-01-01 00:00:00",
"pubType": "trans",
"pages": "1012-1021",
"year": "2020",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "trans/tg/2014/12/06875992",
"title": "EvoRiver: Visual Analysis of Topic Coopetition on Social Media",
"doi": null,
"abstractUrl": "/journal/tg/2014/12/06875992/13rRUxBa563",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/12/ttg2012122719",
"title": "Interaction Support for Visual Comparison Inspired by Natural Behavior",
"doi": null,
"abstractUrl": "/journal/tg/2012/12/ttg2012122719/13rRUxZRbo0",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/01/08440856",
"title": "Face to Face: Evaluating Visual Comparison",
"doi": null,
"abstractUrl": "/journal/tg/2019/01/08440856/17D45WnnFWl",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fie/2018/1174/0/08658790",
"title": "Visual Analytic Workflow to Understand Students’ Performance in Computer Science Courses",
"doi": null,
"abstractUrl": "/proceedings-article/fie/2018/08658790/18j9pOuyFt6",
"parentPublication": {
"id": "proceedings/fie/2018/1174/0",
"title": "2018 IEEE Frontiers in Education Conference (FIE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09978718",
"title": "The Risks of Ranking: Revisiting Graphical Perception to Model Individual Differences in Visualization Performance",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09978718/1IXUnbRdUEE",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vds/2022/5721/0/572100a001",
"title": "Case Study Comparison of Computational Notebook Platforms for Interactive Visual Analytics",
"doi": null,
"abstractUrl": "/proceedings-article/vds/2022/572100a001/1JezLhI4Vm8",
"parentPublication": {
"id": "proceedings/vds/2022/5721/0",
"title": "2022 IEEE Visualization in Data Science (VDS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vast/2010/9488/0/05651204",
"title": "VisWorks text and network visual analytics",
"doi": null,
"abstractUrl": "/proceedings-article/vast/2010/05651204/1eof34V7mPS",
"parentPublication": {
"id": "proceedings/vast/2010/9488/0",
"title": "2010 IEEE Symposium on Visual Analytics Science and Technology",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800k0757",
"title": "Visual Commonsense R-CNN",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800k0757/1m3nLjESQLu",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/02/09238508",
"title": "Revealing Perceptual Proxies with Adversarial Examples",
"doi": null,
"abstractUrl": "/journal/tg/2021/02/09238508/1oa15KNUtGg",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/01/09552208",
"title": "Visual Arrangements of Bar Charts Influence Comparisons in Viewer Takeaways",
"doi": null,
"abstractUrl": "/journal/tg/2022/01/09552208/1xibWU97C8w",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "08807224",
"articleId": "1cG6twVJ2HC",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08845772",
"articleId": "1dtMPLotlKg",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNvsDHDY",
"title": "Jan.",
"year": "2020",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": "26",
"label": "Jan.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1dia2KVa7g4",
"doi": "10.1109/TVCG.2019.2934801",
"abstract": "The power of data visualization is not to convey absolute values of individual data points, but to allow the exploration of relations (increases or decreases in a data value) among them. One approach to highlighting these relations is to explicitly encode the numeric differences (deltas) between data values. Because this approach removes the context of the individual data values, it is important to measure how much of a performance improvement it actually offers, especially across differences in encodings and tasks, to ensure that it is worth adding to a visualization design. Across 3 different tasks, we measured the increase in visual processing efficiency for judging the relations between pairs of data values, from when only the values were shown, to when the deltas between the values were explicitly encoded, across position and length visual feature encodings (and slope encodings in Experiments 1 & 2). In Experiment 1, the participant's task was to locate a pair of data values with a given relation (e.g., Find the `small bar to the left of a tall bar' pair) among pairs of the opposite relation, and we measured processing efficiency from the increase in response times as the number of pairs increased. In Experiment 2, the task was to judge which of two relation types was more prevalent in a briefly presented display of 10 data pairs (e.g., Are there more `small bar to the left of a tall bar' pairs or more `tall bar to the left of a small bar' pairs?). In the final experiment, the task was to estimate the average delta within a briefly presented display of 6 data pairs (e.g., What is the average bar height difference across all `small bar to the left of a tall bar' pairs?). Across all three experiments, visual processing of relations between data value pairs was significantly better when directly encoded as deltas rather than implicitly between individual data points, and varied substantially depending on the task (improvement ranged from 25% to 95%). \nConsidering the ubiquity of bar charts and dot plots, relation perception for individual data values is highly inefficient, and confirms the need for alternative designs that provide not only absolute values, but also direct encoding of critical relationships between those values.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The power of data visualization is not to convey absolute values of individual data points, but to allow the exploration of relations (increases or decreases in a data value) among them. One approach to highlighting these relations is to explicitly encode the numeric differences (deltas) between data values. Because this approach removes the context of the individual data values, it is important to measure how much of a performance improvement it actually offers, especially across differences in encodings and tasks, to ensure that it is worth adding to a visualization design. Across 3 different tasks, we measured the increase in visual processing efficiency for judging the relations between pairs of data values, from when only the values were shown, to when the deltas between the values were explicitly encoded, across position and length visual feature encodings (and slope encodings in Experiments 1 & 2). In Experiment 1, the participant's task was to locate a pair of data values with a given relation (e.g., Find the `small bar to the left of a tall bar' pair) among pairs of the opposite relation, and we measured processing efficiency from the increase in response times as the number of pairs increased. In Experiment 2, the task was to judge which of two relation types was more prevalent in a briefly presented display of 10 data pairs (e.g., Are there more `small bar to the left of a tall bar' pairs or more `tall bar to the left of a small bar' pairs?). In the final experiment, the task was to estimate the average delta within a briefly presented display of 6 data pairs (e.g., What is the average bar height difference across all `small bar to the left of a tall bar' pairs?). Across all three experiments, visual processing of relations between data value pairs was significantly better when directly encoded as deltas rather than implicitly between individual data points, and varied substantially depending on the task (improvement ranged from 25% to 95%). \nConsidering the ubiquity of bar charts and dot plots, relation perception for individual data values is highly inefficient, and confirms the need for alternative designs that provide not only absolute values, but also direct encoding of critical relationships between those values.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The power of data visualization is not to convey absolute values of individual data points, but to allow the exploration of relations (increases or decreases in a data value) among them. One approach to highlighting these relations is to explicitly encode the numeric differences (deltas) between data values. Because this approach removes the context of the individual data values, it is important to measure how much of a performance improvement it actually offers, especially across differences in encodings and tasks, to ensure that it is worth adding to a visualization design. Across 3 different tasks, we measured the increase in visual processing efficiency for judging the relations between pairs of data values, from when only the values were shown, to when the deltas between the values were explicitly encoded, across position and length visual feature encodings (and slope encodings in Experiments 1 & 2). In Experiment 1, the participant's task was to locate a pair of data values with a given relation (e.g., Find the `small bar to the left of a tall bar' pair) among pairs of the opposite relation, and we measured processing efficiency from the increase in response times as the number of pairs increased. In Experiment 2, the task was to judge which of two relation types was more prevalent in a briefly presented display of 10 data pairs (e.g., Are there more `small bar to the left of a tall bar' pairs or more `tall bar to the left of a small bar' pairs?). In the final experiment, the task was to estimate the average delta within a briefly presented display of 6 data pairs (e.g., What is the average bar height difference across all `small bar to the left of a tall bar' pairs?). \nAcross all three experiments, visual processing of relations between data value pairs was significantly better when directly encoded as deltas rather than implicitly between individual data points, and varied substantially depending on the task (improvement ranged from 25% to 95%). Considering the ubiquity of bar charts and dot plots, relation perception for individual data values is highly inefficient, and confirms the need for alternative designs that provide not only absolute values, but also direct encoding of critical relationships between those values.",
"title": "Measures of the Benefit of Direct Encoding of Data Deltas for Data Pair Relation Perception",
"normalizedTitle": "Measures of the Benefit of Direct Encoding of Data Deltas for Data Pair Relation Perception",
"fno": "08836120",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Data Handling",
"Data Visualisation",
"Visual Perception",
"Data Value Pairs",
"Average Bar Height Difference",
"Bar Pairs",
"Tall Bar Pair",
"Length Visual Feature Encodings",
"Individual Data Values",
"Numeric Differences",
"Individual Data Points",
"Data Visualization",
"Data Pair Relation Perception",
"Data Deltas",
"Direct Encoding",
"Encoding",
"Task Analysis",
"Bars",
"Visualization",
"Data Visualization",
"Time Factors",
"Information Visualization",
"Marks",
"Perception",
"Attention",
"Visual Comparison",
"Visual Search",
"Aggregation"
],
"authors": [
{
"givenName": "Christine",
"surname": "Nothelfer",
"fullName": "Christine Nothelfer",
"affiliation": "Northwestern University",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Steven",
"surname": "Franconeri",
"fullName": "Steven Franconeri",
"affiliation": "Northwestern University",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2020-01-01 00:00:00",
"pubType": "trans",
"pages": "311-320",
"year": "2020",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/iv/2018/7202/0/720200a159",
"title": "Improving Perception Accuracy in Bar Charts with Internal Contrast and Framing Enhancements",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2018/720200a159/17D45WnnFWc",
"parentPublication": {
"id": "proceedings/iv/2018/7202/0",
"title": "2018 22nd International Conference Information Visualisation (IV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/01/08443125",
"title": "Glanceable Visualization: Studies of Data Comparison Performance on Smartwatches",
"doi": null,
"abstractUrl": "/journal/tg/2019/01/08443125/17D45XDIXRv",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2022/02/09756627",
"title": "More Than Meets the Eye: A Closer Look at Encodings in Visualization",
"doi": null,
"abstractUrl": "/magazine/cg/2022/02/09756627/1CxvjdlL3TG",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/01/09904487",
"title": "Studying Early Decision Making with Progressive Bar Charts",
"doi": null,
"abstractUrl": "/journal/tg/2023/01/09904487/1H1geE4olvG",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/01/08805448",
"title": "Illusion of Causality in Visualized Data",
"doi": null,
"abstractUrl": "/journal/tg/2020/01/08805448/1cG4Az22lFe",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/01/08805427",
"title": "Biased Average Position Estimates in Line and Bar Graphs: Underestimation, Overestimation, and Perceptual Pull",
"doi": null,
"abstractUrl": "/journal/tg/2020/01/08805427/1cG4xtnomys",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2020/6553/0/09093592",
"title": "Figure Captioning with Relation Maps for Reasoning",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2020/09093592/1jPbhvKoICs",
"parentPublication": {
"id": "proceedings/wacv/2020/6553/0",
"title": "2020 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/02/09222047",
"title": "Truth or Square: Aspect Ratio Biases Recall of Position Encodings",
"doi": null,
"abstractUrl": "/journal/tg/2021/02/09222047/1nTqj3fbFXq",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/02/09288884",
"title": "No mark is an island: Precision and category repulsion biases in data reproductions",
"doi": null,
"abstractUrl": "/journal/tg/2021/02/09288884/1pq6f5VhVF6",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/01/09557878",
"title": "Rethinking the Ranks of Visual Channels",
"doi": null,
"abstractUrl": "/journal/tg/2022/01/09557878/1xquNQMVFCM",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "08805427",
"articleId": "1cG4xtnomys",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08794768",
"articleId": "1cr2ZlCC2xG",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1fe9k9PoY5a",
"name": "ttg202001-08836120s1.zip",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg202001-08836120s1.zip",
"extension": "zip",
"size": "2.12 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
}
|
{
"issue": {
"id": "1qL5hsvvVkc",
"title": "Feb.",
"year": "2021",
"issueNum": "02",
"idPrefix": "tg",
"pubType": "journal",
"volume": "27",
"label": "Feb.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1nTqj3fbFXq",
"doi": "10.1109/TVCG.2020.3030422",
"abstract": "Bar charts are among the most frequently used visualizations, in part because their position encoding leads them to convey data values precisely. Yet reproductions of single bars or groups of bars within a graph can be biased. Curiously, some previous work found that this bias resulted in an overestimation of reproduced data values, while other work found an underestimation. Across three empirical studies, we offer an explanation for these conflicting findings: this discrepancy is a consequence of the differing aspect ratios of the tested bar marks. Viewers are biased to remember a bar mark as being more similar to a prototypical square, leading to an overestimation of bars with a wide aspect ratio, and an underestimation of bars with a tall aspect ratio. Experiments 1 and 2 showed that the aspect ratio of the bar marks indeed influenced the direction of this bias. Experiment 3 confirmed that this pattern of misestimation bias was present for reproductions from memory, suggesting that this bias may arise when comparing values across sequential displays or views. We describe additional visualization designs that might be prone to this bias beyond bar charts (e.g., Mekko charts and treemaps), and speculate that other visual channels might hold similar biases toward prototypical values.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Bar charts are among the most frequently used visualizations, in part because their position encoding leads them to convey data values precisely. Yet reproductions of single bars or groups of bars within a graph can be biased. Curiously, some previous work found that this bias resulted in an overestimation of reproduced data values, while other work found an underestimation. Across three empirical studies, we offer an explanation for these conflicting findings: this discrepancy is a consequence of the differing aspect ratios of the tested bar marks. Viewers are biased to remember a bar mark as being more similar to a prototypical square, leading to an overestimation of bars with a wide aspect ratio, and an underestimation of bars with a tall aspect ratio. Experiments 1 and 2 showed that the aspect ratio of the bar marks indeed influenced the direction of this bias. Experiment 3 confirmed that this pattern of misestimation bias was present for reproductions from memory, suggesting that this bias may arise when comparing values across sequential displays or views. We describe additional visualization designs that might be prone to this bias beyond bar charts (e.g., Mekko charts and treemaps), and speculate that other visual channels might hold similar biases toward prototypical values.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Bar charts are among the most frequently used visualizations, in part because their position encoding leads them to convey data values precisely. Yet reproductions of single bars or groups of bars within a graph can be biased. Curiously, some previous work found that this bias resulted in an overestimation of reproduced data values, while other work found an underestimation. Across three empirical studies, we offer an explanation for these conflicting findings: this discrepancy is a consequence of the differing aspect ratios of the tested bar marks. Viewers are biased to remember a bar mark as being more similar to a prototypical square, leading to an overestimation of bars with a wide aspect ratio, and an underestimation of bars with a tall aspect ratio. Experiments 1 and 2 showed that the aspect ratio of the bar marks indeed influenced the direction of this bias. Experiment 3 confirmed that this pattern of misestimation bias was present for reproductions from memory, suggesting that this bias may arise when comparing values across sequential displays or views. We describe additional visualization designs that might be prone to this bias beyond bar charts (e.g., Mekko charts and treemaps), and speculate that other visual channels might hold similar biases toward prototypical values.",
"title": "Truth or Square: Aspect Ratio Biases Recall of Position Encodings",
"normalizedTitle": "Truth or Square: Aspect Ratio Biases Recall of Position Encodings",
"fno": "09222047",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Data Visualisation",
"Visual Perception",
"Bar Mark",
"Misestimation Bias",
"Bar Charts",
"Similar Biases",
"Prototypical Values",
"Position Encoding",
"Overestimation",
"Reproduced Data Values",
"Underestimation",
"Prototypical Square",
"Wide Aspect Ratio",
"Tall Aspect Ratio",
"Bar Marks",
"Bars",
"Image Color Analysis",
"Encoding",
"Visualization",
"Data Visualization",
"Estimation",
"Prototypes",
"Memory Biases",
"Position Estimation",
"Bar Charts",
"Aspect Ratio",
"Area"
],
"authors": [
{
"givenName": "Cristina R.",
"surname": "Ceja",
"fullName": "Cristina R. Ceja",
"affiliation": "Northwestern University",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Caitlyn M.",
"surname": "McColeman",
"fullName": "Caitlyn M. McColeman",
"affiliation": "Northwestern University",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Cindy",
"surname": "Xiong",
"fullName": "Cindy Xiong",
"affiliation": "Northwestern University",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Steven L.",
"surname": "Franconeri",
"fullName": "Steven L. Franconeri",
"affiliation": "Northwestern University",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "02",
"pubDate": "2021-02-01 00:00:00",
"pubType": "trans",
"pages": "1054-1062",
"year": "2021",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "trans/tg/2014/12/06876021",
"title": "Four Experiments on the Perception of Bar Charts",
"doi": null,
"abstractUrl": "/journal/tg/2014/12/06876021/13rRUNvgz9Q",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/01/07192667",
"title": "Visual Encodings of Temporal Uncertainty: A Comparative User Study",
"doi": null,
"abstractUrl": "/journal/tg/2016/01/07192667/13rRUwjGoLH",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/12/06875915",
"title": "Error Bars Considered Harmful: Exploring Alternate Encodings for Mean and Error",
"doi": null,
"abstractUrl": "/journal/tg/2014/12/06875915/13rRUxZ0o1B",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2002/03/v0255",
"title": "Hierarchical Pixel Bar Charts",
"doi": null,
"abstractUrl": "/journal/tg/2002/03/v0255/13rRUyuegh1",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sibgrapi/2018/9264/0/926400a142",
"title": "Extracting Visual Encodings from Map Chart Images with Color-Encoded Scalar Values",
"doi": null,
"abstractUrl": "/proceedings-article/sibgrapi/2018/926400a142/17D45WaTkiB",
"parentPublication": {
"id": "proceedings/sibgrapi/2018/9264/0",
"title": "2018 31st SIBGRAPI Conference on Graphics, Patterns and Images (SIBGRAPI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2018/7202/0/720200a159",
"title": "Improving Perception Accuracy in Bar Charts with Internal Contrast and Framing Enhancements",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2018/720200a159/17D45WnnFWc",
"parentPublication": {
"id": "proceedings/iv/2018/7202/0",
"title": "2018 22nd International Conference Information Visualisation (IV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/01/08443125",
"title": "Glanceable Visualization: Studies of Data Comparison Performance on Smartwatches",
"doi": null,
"abstractUrl": "/journal/tg/2019/01/08443125/17D45XDIXRv",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2022/02/09756627",
"title": "More Than Meets the Eye: A Closer Look at Encodings in Visualization",
"doi": null,
"abstractUrl": "/magazine/cg/2022/02/09756627/1CxvjdlL3TG",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2022/9007/0/900700a067",
"title": "An Overview of the Design and Development for Dynamic and Physical Bar Charts",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2022/900700a067/1KaH61BvDWw",
"parentPublication": {
"id": "proceedings/iv/2022/9007/0",
"title": "2022 26th International Conference Information Visualisation (IV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/01/08805427",
"title": "Biased Average Position Estimates in Line and Bar Graphs: Underestimation, Overestimation, and Perceptual Pull",
"doi": null,
"abstractUrl": "/journal/tg/2020/01/08805427/1cG4xtnomys",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09216559",
"articleId": "1nJsOQFe8A8",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09288884",
"articleId": "1pq6f5VhVF6",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
|
{
"issue": {
"id": "1qL5hsvvVkc",
"title": "Feb.",
"year": "2021",
"issueNum": "02",
"idPrefix": "tg",
"pubType": "journal",
"volume": "27",
"label": "Feb.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1oa15KNUtGg",
"doi": "10.1109/TVCG.2020.3030429",
"abstract": "Data visualizations convert numbers into visual marks so that our visual system can extract data from an image instead of raw numbers. Clearly, the visual system does not compute these values as a computer would, as an arithmetic mean or a correlation. Instead, it extracts these patterns using perceptual proxies; heuristic shortcuts of the visual marks, such as a center of mass or a shape envelope. Understanding which proxies people use would lead to more effective visualizations. We present the results of a series of crowdsourced experiments that measure how powerfully a set of candidate proxies can explain human performance when comparing the mean and range of pairs of data series presented as bar charts. We generated datasets where the correct answer-the series with the larger arithmetic mean or range-was pitted against an “adversarial” series that should be seen as larger if the viewer uses a particular candidate proxy. We used both Bayesian logistic regression models and a robust Bayesian mixed-effects linear model to measure how strongly each adversarial proxy could drive viewers to answer incorrectly and whether different individuals may use different proxies. Finally, we attempt to construct adversarial datasets from scratch, using an iterative crowdsourcing procedure to perform black-box optimization.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Data visualizations convert numbers into visual marks so that our visual system can extract data from an image instead of raw numbers. Clearly, the visual system does not compute these values as a computer would, as an arithmetic mean or a correlation. Instead, it extracts these patterns using perceptual proxies; heuristic shortcuts of the visual marks, such as a center of mass or a shape envelope. Understanding which proxies people use would lead to more effective visualizations. We present the results of a series of crowdsourced experiments that measure how powerfully a set of candidate proxies can explain human performance when comparing the mean and range of pairs of data series presented as bar charts. We generated datasets where the correct answer-the series with the larger arithmetic mean or range-was pitted against an “adversarial” series that should be seen as larger if the viewer uses a particular candidate proxy. We used both Bayesian logistic regression models and a robust Bayesian mixed-effects linear model to measure how strongly each adversarial proxy could drive viewers to answer incorrectly and whether different individuals may use different proxies. Finally, we attempt to construct adversarial datasets from scratch, using an iterative crowdsourcing procedure to perform black-box optimization.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Data visualizations convert numbers into visual marks so that our visual system can extract data from an image instead of raw numbers. Clearly, the visual system does not compute these values as a computer would, as an arithmetic mean or a correlation. Instead, it extracts these patterns using perceptual proxies; heuristic shortcuts of the visual marks, such as a center of mass or a shape envelope. Understanding which proxies people use would lead to more effective visualizations. We present the results of a series of crowdsourced experiments that measure how powerfully a set of candidate proxies can explain human performance when comparing the mean and range of pairs of data series presented as bar charts. We generated datasets where the correct answer-the series with the larger arithmetic mean or range-was pitted against an “adversarial” series that should be seen as larger if the viewer uses a particular candidate proxy. We used both Bayesian logistic regression models and a robust Bayesian mixed-effects linear model to measure how strongly each adversarial proxy could drive viewers to answer incorrectly and whether different individuals may use different proxies. Finally, we attempt to construct adversarial datasets from scratch, using an iterative crowdsourcing procedure to perform black-box optimization.",
"title": "Revealing Perceptual Proxies with Adversarial Examples",
"normalizedTitle": "Revealing Perceptual Proxies with Adversarial Examples",
"fno": "09238508",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Bayes Methods",
"Belief Networks",
"Data Analysis",
"Data Visualisation",
"Image Representation",
"Iterative Methods",
"Neural Nets",
"Regression Analysis",
"Iterative Crowdsourcing Procedure",
"Black Box Optimization",
"Adversarial Proxy",
"Robust Bayesian Mixed Effects",
"Bayesian Logistic Regression Models",
"Adversarial Series",
"Arithmetic Mean",
"Data Series",
"Candidate Proxies",
"Shape Envelope",
"Heuristic Shortcuts",
"Visual Marks",
"Data Visualizations",
"Adversarial Examples",
"Perceptual Proxies",
"Visualization",
"Data Visualization",
"Task Analysis",
"Bars",
"Visual Systems",
"Data Mining",
"Bayes Methods",
"Perceptual Proxies",
"Vision Science",
"Crowdsourced Evaluation"
],
"authors": [
{
"givenName": "Brian D.",
"surname": "Ondov",
"fullName": "Brian D. Ondov",
"affiliation": "National Institutes of Health in Bethesda, USA and University of Maryland, College Park, MD, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Fumeng",
"surname": "Yang",
"fullName": "Fumeng Yang",
"affiliation": "Brown University, Providence, RI, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Matthew",
"surname": "Kay",
"fullName": "Matthew Kay",
"affiliation": "University of Michigan, Ann Arbor, MI, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Niklas",
"surname": "Elmqvist",
"fullName": "Niklas Elmqvist",
"affiliation": "University of Maryland, College Park, MD, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Steven",
"surname": "Franconeri",
"fullName": "Steven Franconeri",
"affiliation": "Northwestern University, Evanston, IL, USA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "02",
"pubDate": "2021-02-01 00:00:00",
"pubType": "trans",
"pages": "1073-1083",
"year": "2021",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/icdm/2015/9504/0/9504a301",
"title": "A Unified Gradient Regularization Family for Adversarial Examples",
"doi": null,
"abstractUrl": "/proceedings-article/icdm/2015/9504a301/12OmNzdoMK8",
"parentPublication": {
"id": "proceedings/icdm/2015/9504/0",
"title": "2015 IEEE International Conference on Data Mining (ICDM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2010/06/ttg2010061009",
"title": "Laws of Attraction: From Perceptual Forces to Conceptual Similarity",
"doi": null,
"abstractUrl": "/journal/tg/2010/06/ttg2010061009/13rRUxBa5bQ",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200h486",
"title": "AdvDrop: Adversarial Attack to DNNs by Dropping Information",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200h486/1BmHdwJx5Xq",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iiai-aai/2022/9755/0/975500a001",
"title": "Adversarial Examples of Time Series Data based on Partial Perturbations",
"doi": null,
"abstractUrl": "/proceedings-article/iiai-aai/2022/975500a001/1GU74bfqASs",
"parentPublication": {
"id": "proceedings/iiai-aai/2022/9755/0",
"title": "2022 12th International Congress on Advanced Applied Informatics (IIAI-AAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/01/08805427",
"title": "Biased Average Position Estimates in Line and Bar Graphs: Underestimation, Overestimation, and Perceptual Pull",
"doi": null,
"abstractUrl": "/journal/tg/2020/01/08805427/1cG4xtnomys",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/01/08807320",
"title": "The Perceptual Proxies of Visual Comparison",
"doi": null,
"abstractUrl": "/journal/tg/2020/01/08807320/1cG6vb0dTG0",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2021/3864/0/09428316",
"title": "Undetectable Adversarial Examples Based on Microscopical Regularization",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2021/09428316/1uilHXBjdKg",
"parentPublication": {
"id": "proceedings/icme/2021/3864/0",
"title": "2021 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2021/0477/0/047700c713",
"title": "R-MNet: A Perceptual Adversarial Network for Image Inpainting",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2021/047700c713/1uqGuHq6N6U",
"parentPublication": {
"id": "proceedings/wacv/2021/0477/0",
"title": "2021 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/01/09557878",
"title": "Rethinking the Ranks of Visual Channels",
"doi": null,
"abstractUrl": "/journal/tg/2022/01/09557878/1xquNQMVFCM",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vis/2021/3335/0/333500a071",
"title": "Jurassic Mark: Inattentional Blindness for a Datasaurus Reveals that Visualizations are Explored, not Seen",
"doi": null,
"abstractUrl": "/proceedings-article/vis/2021/333500a071/1yXujc7Nwqc",
"parentPublication": {
"id": "proceedings/vis/2021/3335/0",
"title": "2021 IEEE Visualization Conference (VIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09288884",
"articleId": "1pq6f5VhVF6",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09222293",
"articleId": "1nTrGyTQ0Pm",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1qLdRDWfTBC",
"name": "ttg202102-09238508s1-supp1-3030429.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg202102-09238508s1-supp1-3030429.pdf",
"extension": "pdf",
"size": "406 kB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
}
|
{
"issue": {
"id": "1qL5hsvvVkc",
"title": "Feb.",
"year": "2021",
"issueNum": "02",
"idPrefix": "tg",
"pubType": "journal",
"volume": "27",
"label": "Feb.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1pq6f5VhVF6",
"doi": "10.1109/TVCG.2020.3030345",
"abstract": "Data visualization is powerful in large part because it facilitates visual extraction of values. Yet, existing measures of perceptual precision for data channels (e.g., position, length, orientation, etc.) are based largely on verbal reports of ratio judgments between two values (e.g., [7]). Verbal report conflates multiple sources of error beyond actual visual precision, introducing a ratio computation between these values and a requirement to translate that ratio to a verbal number. Here we observe raw measures of precision by eliminating both ratio computations and verbal reports; we simply ask participants to reproduce marks (a single bar or dot) to match a previously seen one. We manipulated whether the mark was initially presented (and later drawn) alone, paired with a reference (e.g. a second `100%' bar also present at test, or a y-axis for the dot), or integrated with the reference (merging that reference bar into a stacked bar graph, or placing the dot directly on the axis). Reproductions of smaller values were overestimated, and larger values were underestimated, suggesting systematic memory biases. Average reproduction error was around 10% of the actual value, regardless of whether the reproduction was done on a common baseline with the original. In the reference and (especially) the integrated conditions, responses were repulsed from an implicit midpoint of the reference mark, such that values above 50% were overestimated, and values below 50% were underestimated. This reproduction paradigm may serve within a new suite of more fundamental measures of the precision of graphical perception.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Data visualization is powerful in large part because it facilitates visual extraction of values. Yet, existing measures of perceptual precision for data channels (e.g., position, length, orientation, etc.) are based largely on verbal reports of ratio judgments between two values (e.g., [7]). Verbal report conflates multiple sources of error beyond actual visual precision, introducing a ratio computation between these values and a requirement to translate that ratio to a verbal number. Here we observe raw measures of precision by eliminating both ratio computations and verbal reports; we simply ask participants to reproduce marks (a single bar or dot) to match a previously seen one. We manipulated whether the mark was initially presented (and later drawn) alone, paired with a reference (e.g. a second `100%' bar also present at test, or a y-axis for the dot), or integrated with the reference (merging that reference bar into a stacked bar graph, or placing the dot directly on the axis). Reproductions of smaller values were overestimated, and larger values were underestimated, suggesting systematic memory biases. Average reproduction error was around 10% of the actual value, regardless of whether the reproduction was done on a common baseline with the original. In the reference and (especially) the integrated conditions, responses were repulsed from an implicit midpoint of the reference mark, such that values above 50% were overestimated, and values below 50% were underestimated. This reproduction paradigm may serve within a new suite of more fundamental measures of the precision of graphical perception.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Data visualization is powerful in large part because it facilitates visual extraction of values. Yet, existing measures of perceptual precision for data channels (e.g., position, length, orientation, etc.) are based largely on verbal reports of ratio judgments between two values (e.g., [7]). Verbal report conflates multiple sources of error beyond actual visual precision, introducing a ratio computation between these values and a requirement to translate that ratio to a verbal number. Here we observe raw measures of precision by eliminating both ratio computations and verbal reports; we simply ask participants to reproduce marks (a single bar or dot) to match a previously seen one. We manipulated whether the mark was initially presented (and later drawn) alone, paired with a reference (e.g. a second `100%' bar also present at test, or a y-axis for the dot), or integrated with the reference (merging that reference bar into a stacked bar graph, or placing the dot directly on the axis). Reproductions of smaller values were overestimated, and larger values were underestimated, suggesting systematic memory biases. Average reproduction error was around 10% of the actual value, regardless of whether the reproduction was done on a common baseline with the original. In the reference and (especially) the integrated conditions, responses were repulsed from an implicit midpoint of the reference mark, such that values above 50% were overestimated, and values below 50% were underestimated. This reproduction paradigm may serve within a new suite of more fundamental measures of the precision of graphical perception.",
"title": "No mark is an island: Precision and category repulsion biases in data reproductions",
"normalizedTitle": "No mark is an island: Precision and category repulsion biases in data reproductions",
"fno": "09288884",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Cognition",
"Data Visualisation",
"Visual Perception",
"Category Repulsion",
"Data Reproductions",
"Data Visualization",
"Visual Extraction",
"Perceptual Precision",
"Data Channels",
"Verbal Report",
"Ratio Judgments",
"Visual Precision",
"Ratio Computation",
"Verbal Number",
"Raw Measures",
"Reference Bar",
"Stacked Bar Graph",
"Smaller Values",
"Systematic Memory Biases",
"Average Reproduction Error",
"Reference Mark",
"Reproduction Paradigm",
"Bars",
"Visualization",
"Data Visualization",
"Task Analysis",
"Particle Measurements",
"Atmospheric Measurements",
"Semantics",
"Cognition And Perception",
"Graphical Perception",
"Perceptual Biases",
"Ratio Perception"
],
"authors": [
{
"givenName": "Caitlyn M.",
"surname": "McColeman",
"fullName": "Caitlyn M. McColeman",
"affiliation": "Northwestern University",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Lane",
"surname": "Harrison",
"fullName": "Lane Harrison",
"affiliation": "Worcester Polytechnic Institute",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Mi",
"surname": "Feng",
"fullName": "Mi Feng",
"affiliation": "Worcester Polytechnic Institute",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Steven",
"surname": "Franconeri",
"fullName": "Steven Franconeri",
"affiliation": "Northwestern University",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "02",
"pubDate": "2021-02-01 00:00:00",
"pubType": "trans",
"pages": "1063-1072",
"year": "2021",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/grc/2011/0372/0/06122649",
"title": "A method to dynamically estimate emphasizing points and degree by using verbal and nonverbal information and physiological indices",
"doi": null,
"abstractUrl": "/proceedings-article/grc/2011/06122649/12OmNwwuE18",
"parentPublication": {
"id": "proceedings/grc/2011/0372/0",
"title": "2011 IEEE International Conference on Granular Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/08/07968319",
"title": "Perceptual Biases in Font Size as a Data Encoding",
"doi": null,
"abstractUrl": "/journal/tg/2018/08/07968319/13rRUxBa5ns",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/01/09904487",
"title": "Studying Early Decision Making with Progressive Bar Charts",
"doi": null,
"abstractUrl": "/journal/tg/2023/01/09904487/1H1geE4olvG",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/05/10049649",
"title": "Comparing the Effects of Visual Realism on Size Perception in VR versus Real World Viewing through Physical and Verbal Judgments",
"doi": null,
"abstractUrl": "/journal/tg/2023/05/10049649/1KYolXflEWI",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2019/2838/0/283800a318",
"title": "Visual Interventions for Career and Life-Design: An Exploratory Experimental Study",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2019/283800a318/1cMFaOudORG",
"parentPublication": {
"id": "proceedings/iv/2019/2838/0",
"title": "2019 23rd International Conference Information Visualisation (IV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/01/08836120",
"title": "Measures of the Benefit of Direct Encoding of Data Deltas for Data Pair Relation Perception",
"doi": null,
"abstractUrl": "/journal/tg/2020/01/08836120/1dia2KVa7g4",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/02/09222047",
"title": "Truth or Square: Aspect Ratio Biases Recall of Position Encodings",
"doi": null,
"abstractUrl": "/journal/tg/2021/02/09222047/1nTqj3fbFXq",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/01/09557878",
"title": "Rethinking the Ranks of Visual Channels",
"doi": null,
"abstractUrl": "/journal/tg/2022/01/09557878/1xquNQMVFCM",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vis/2021/3335/0/333500a161",
"title": "Understanding the Effects of Visualizing Missing Values on Visual Data Exploration",
"doi": null,
"abstractUrl": "/proceedings-article/vis/2021/333500a161/1yXu9WURx2E",
"parentPublication": {
"id": "proceedings/vis/2021/3335/0",
"title": "2021 IEEE Visualization Conference (VIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/jcdl/2021/1770/0/177000a130",
"title": "Newsalyze: Effective Communication of Person-Targeting Biases in News Articles",
"doi": null,
"abstractUrl": "/proceedings-article/jcdl/2021/177000a130/1zJmV9QWbGo",
"parentPublication": {
"id": "proceedings/jcdl/2021/1770/0",
"title": "2021 ACM/IEEE Joint Conference on Digital Libraries (JCDL)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09222047",
"articleId": "1nTqj3fbFXq",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09238508",
"articleId": "1oa15KNUtGg",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNvsDHDY",
"title": "Jan.",
"year": "2020",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": "26",
"label": "Jan.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1cG66qf6MKs",
"doi": "10.1109/TVCG.2019.2934784",
"abstract": "Radial charts are generally considered less effective than linear charts. Perhaps the only exception is in visualizing periodical time-dependent data, which is believed to be naturally supported by the radial layout. It has been demonstrated that the drawbacks of radial charts outweigh the benefits of this natural mapping. Visualization of daily patterns, as a special case, has not been systematically evaluated using radial charts. In contrast to yearly or weekly recurrent trends, the analysis of daily patterns on a radial chart may benefit from our trained skill on reading radial clocks that are ubiquitous in our culture. In a crowd-sourced experiment with 92 non-expert users, we evaluated the accuracy, efficiency, and subjective ratings of radial and linear charts for visualizing daily traffic accident patterns. We systematically compared juxtaposed 12-hours variants and single 24-hours variants for both layouts in four low-level tasks and one high-level interpretation task. Our results show that over all tasks, the most elementary 24-hours linear bar chart is most accurate and efficient and is also preferred by the users. This provides strong evidence for the use of linear layouts - even for visualizing periodical daily patterns.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Radial charts are generally considered less effective than linear charts. Perhaps the only exception is in visualizing periodical time-dependent data, which is believed to be naturally supported by the radial layout. It has been demonstrated that the drawbacks of radial charts outweigh the benefits of this natural mapping. Visualization of daily patterns, as a special case, has not been systematically evaluated using radial charts. In contrast to yearly or weekly recurrent trends, the analysis of daily patterns on a radial chart may benefit from our trained skill on reading radial clocks that are ubiquitous in our culture. In a crowd-sourced experiment with 92 non-expert users, we evaluated the accuracy, efficiency, and subjective ratings of radial and linear charts for visualizing daily traffic accident patterns. We systematically compared juxtaposed 12-hours variants and single 24-hours variants for both layouts in four low-level tasks and one high-level interpretation task. Our results show that over all tasks, the most elementary 24-hours linear bar chart is most accurate and efficient and is also preferred by the users. This provides strong evidence for the use of linear layouts - even for visualizing periodical daily patterns.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Radial charts are generally considered less effective than linear charts. Perhaps the only exception is in visualizing periodical time-dependent data, which is believed to be naturally supported by the radial layout. It has been demonstrated that the drawbacks of radial charts outweigh the benefits of this natural mapping. Visualization of daily patterns, as a special case, has not been systematically evaluated using radial charts. In contrast to yearly or weekly recurrent trends, the analysis of daily patterns on a radial chart may benefit from our trained skill on reading radial clocks that are ubiquitous in our culture. In a crowd-sourced experiment with 92 non-expert users, we evaluated the accuracy, efficiency, and subjective ratings of radial and linear charts for visualizing daily traffic accident patterns. We systematically compared juxtaposed 12-hours variants and single 24-hours variants for both layouts in four low-level tasks and one high-level interpretation task. Our results show that over all tasks, the most elementary 24-hours linear bar chart is most accurate and efficient and is also preferred by the users. This provides strong evidence for the use of linear layouts - even for visualizing periodical daily patterns.",
"title": "A Comparison of Radial and Linear Charts for Visualizing Daily Patterns",
"normalizedTitle": "A Comparison of Radial and Linear Charts for Visualizing Daily Patterns",
"fno": "08807238",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Bar Charts",
"Crowdsourcing",
"Data Analysis",
"Data Visualisation",
"Pattern Recognition",
"Road Accidents",
"Road Traffic",
"Traffic Engineering Computing",
"Linear Charts",
"Radial Chart",
"Bar Chart",
"Daily Traffic Accident Pattern Visualization",
"Daily Pattern Analysis",
"Bars",
"Data Visualization",
"Layout",
"Clocks",
"Task Analysis",
"Time Series Analysis",
"Encoding",
"Radial Charts",
"Time Series Series Data",
"Daily Patterns",
"Crowd Sourced Experiment"
],
"authors": [
{
"givenName": "Manuela",
"surname": "Waldner",
"fullName": "Manuela Waldner",
"affiliation": "TU Wien",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Alexandra",
"surname": "Diehl",
"fullName": "Alexandra Diehl",
"affiliation": "University of Zurich",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Denis",
"surname": "Gračanin",
"fullName": "Denis Gračanin",
"affiliation": "Virginia Tech",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Rainer",
"surname": "Splechtna",
"fullName": "Rainer Splechtna",
"affiliation": "VRVis Research Center",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Claudio",
"surname": "Delrieux",
"fullName": "Claudio Delrieux",
"affiliation": "Electric and Computer Eng. Dept.Universidad Nacional del SUR and CONICET",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Krešimir",
"surname": "Matković",
"fullName": "Krešimir Matković",
"affiliation": "VRVis Research Center",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2020-01-01 00:00:00",
"pubType": "trans",
"pages": "1033-1042",
"year": "2020",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "trans/tg/2007/03/v0583",
"title": "A Radial Adaptation of the Sugiyama Framework for Visualizing Hierarchical Information",
"doi": null,
"abstractUrl": "/journal/tg/2007/03/v0583/13rRUyY28Ym",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2007/04/v0822",
"title": "Value-Cell Bar Charts for Visualizing Large Transaction Data Sets",
"doi": null,
"abstractUrl": "/journal/tg/2007/04/v0822/13rRUygBw71",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2018/7202/0/720200a159",
"title": "Improving Perception Accuracy in Bar Charts with Internal Contrast and Framing Enhancements",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2018/720200a159/17D45WnnFWc",
"parentPublication": {
"id": "proceedings/iv/2018/7202/0",
"title": "2018 22nd International Conference Information Visualisation (IV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/01/09904487",
"title": "Studying Early Decision Making with Progressive Bar Charts",
"doi": null,
"abstractUrl": "/journal/tg/2023/01/09904487/1H1geE4olvG",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aciiw/2022/5490/0/10086019",
"title": "Music Charts for Approximating Everyday Emotions: A Dataset of Daily Charts with Music Features from 106 Cities",
"doi": null,
"abstractUrl": "/proceedings-article/aciiw/2022/10086019/1M668FTwk5G",
"parentPublication": {
"id": "proceedings/aciiw/2022/5490/0",
"title": "2022 10th International Conference on Affective Computing and Intelligent Interaction Workshops and Demos (ACIIW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2019/2838/0/283800a151",
"title": "The Cost of Pie Charts",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2019/283800a151/1cMFcqwGM5q",
"parentPublication": {
"id": "proceedings/iv/2019/2838/0",
"title": "2019 23rd International Conference Information Visualisation (IV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tk/2021/11/08989823",
"title": "CrowdChart: Crowdsourced Data Extraction From Visualization Charts",
"doi": null,
"abstractUrl": "/journal/tk/2021/11/08989823/1hlpoAzss9i",
"parentPublication": {
"id": "trans/tk",
"title": "IEEE Transactions on Knowledge & Data Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icde/2020/2903/0/09101527",
"title": "Crowdsourcing-based Data Extraction from Visualization Charts",
"doi": null,
"abstractUrl": "/proceedings-article/icde/2020/09101527/1kaMJ95VHQk",
"parentPublication": {
"id": "proceedings/icde/2020/2903/0",
"title": "2020 IEEE 36th International Conference on Data Engineering (ICDE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/01/09552227",
"title": "Rotate or Wrap? Interactive Visualisations of Cyclical Data on Cylindrical or Toroidal Topologies",
"doi": null,
"abstractUrl": "/journal/tg/2022/01/09552227/1xibX4wTR8Q",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/01/09552881",
"title": "Modeling Just Noticeable Differences in Charts",
"doi": null,
"abstractUrl": "/journal/tg/2022/01/09552881/1xibXzMLm9i",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "08845772",
"articleId": "1dtMPLotlKg",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08812989",
"articleId": "1cOhCCWc1uU",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
|
{
"issue": {
"id": "1zBamVZHyne",
"title": "Jan.",
"year": "2022",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": "28",
"label": "Jan.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1y11cGSPuPC",
"doi": "10.1109/TVCG.2021.3114856",
"abstract": "Infographic bar charts have been widely adopted for communicating numerical information because of their attractiveness and memorability. However, these infographics are often created manually with general tools, such as PowerPoint and Adobe Illustrator, and merely composed of primitive visual elements, such as text blocks and shapes. With the absence of chart models, updating or reusing these infographics requires tedious and error-prone manual edits. In this paper, we propose a mixed-initiative approach to mitigate this pain point. On one hand, machines are adopted to perform precise and trivial operations, such as mapping numerical values to shape attributes and aligning shapes. On the other hand, we rely on humans to perform subjective and creative tasks, such as changing embellishments or approving the edits made by machines. We encapsulate our technique in a PowerPoint add-in prototype and demonstrate the effectiveness by applying our technique on a diverse set of infographic bar chart examples.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Infographic bar charts have been widely adopted for communicating numerical information because of their attractiveness and memorability. However, these infographics are often created manually with general tools, such as PowerPoint and Adobe Illustrator, and merely composed of primitive visual elements, such as text blocks and shapes. With the absence of chart models, updating or reusing these infographics requires tedious and error-prone manual edits. In this paper, we propose a mixed-initiative approach to mitigate this pain point. On one hand, machines are adopted to perform precise and trivial operations, such as mapping numerical values to shape attributes and aligning shapes. On the other hand, we rely on humans to perform subjective and creative tasks, such as changing embellishments or approving the edits made by machines. We encapsulate our technique in a PowerPoint add-in prototype and demonstrate the effectiveness by applying our technique on a diverse set of infographic bar chart examples.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Infographic bar charts have been widely adopted for communicating numerical information because of their attractiveness and memorability. However, these infographics are often created manually with general tools, such as PowerPoint and Adobe Illustrator, and merely composed of primitive visual elements, such as text blocks and shapes. With the absence of chart models, updating or reusing these infographics requires tedious and error-prone manual edits. In this paper, we propose a mixed-initiative approach to mitigate this pain point. On one hand, machines are adopted to perform precise and trivial operations, such as mapping numerical values to shape attributes and aligning shapes. On the other hand, we rely on humans to perform subjective and creative tasks, such as changing embellishments or approving the edits made by machines. We encapsulate our technique in a PowerPoint add-in prototype and demonstrate the effectiveness by applying our technique on a diverse set of infographic bar chart examples.",
"title": "A Mixed-Initiative Approach to Reusing Infographic Charts",
"normalizedTitle": "A Mixed-Initiative Approach to Reusing Infographic Charts",
"fno": "09585700",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Bars",
"Visualization",
"Tools",
"Shape",
"Data Mining",
"Semantics",
"Image Color Analysis",
"Infographics",
"Reusable Templates",
"Graphic Design",
"Automatic Visualization"
],
"authors": [
{
"givenName": "Weiwei",
"surname": "Cui",
"fullName": "Weiwei Cui",
"affiliation": "Microsoft Research Asia, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jinpeng",
"surname": "Wang",
"fullName": "Jinpeng Wang",
"affiliation": "Meituan, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "He",
"surname": "Huang",
"fullName": "He Huang",
"affiliation": "Microsoft Research Asia, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Yun",
"surname": "Wang",
"fullName": "Yun Wang",
"affiliation": "Microsoft Research Asia, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Chin-Yew",
"surname": "Lin",
"fullName": "Chin-Yew Lin",
"affiliation": "Microsoft Research Asia, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Haidong",
"surname": "Zhang",
"fullName": "Haidong Zhang",
"affiliation": "Microsoft Research Asia, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Dongmei",
"surname": "Zhang",
"fullName": "Dongmei Zhang",
"affiliation": "Microsoft Research Asia, China",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-01-01 00:00:00",
"pubType": "trans",
"pages": "173-183",
"year": "2022",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/icpr/1994/6270/2/00576979",
"title": "An extended-shadow-code based approach for off-line signature verification. I. Evaluation of the bar mask definition",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/1994/00576979/12OmNyQ7FNm",
"parentPublication": {
"id": "proceedings/icpr/1994/6270/2",
"title": "Proceedings of 12th International Conference on Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/03/07845717",
"title": "Converting Basic D3 Charts into Reusable Style Templates",
"doi": null,
"abstractUrl": "/journal/tg/2018/03/07845717/13rRUxYINfm",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/01/08440833",
"title": "SmartCues: A Multitouch Query Approach for Details-on-Demand through Dynamically Computed Overlays",
"doi": null,
"abstractUrl": "/journal/tg/2019/01/08440833/17D45Vw15wL",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/01/08807355",
"title": "ShapeWordle: Tailoring Wordles using Shape-aware Archimedean Spirals",
"doi": null,
"abstractUrl": "/journal/tg/2020/01/08807355/1cG6a6b0eys",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv-2/2019/2850/0/285000a042",
"title": "An Interactive Method for Visualising Physical Activity in Parks",
"doi": null,
"abstractUrl": "/proceedings-article/iv-2/2019/285000a042/1cMEPkhBBKw",
"parentPublication": {
"id": "proceedings/iv-2/2019/2850/0",
"title": "2019 23rd International Conference in Information Visualization – Part II",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vis/2019/4941/0/08933569",
"title": "Toward Interface Defaults for Vague Modifiers in Natural Language Interfaces for Visual Analysis",
"doi": null,
"abstractUrl": "/proceedings-article/vis/2019/08933569/1fTgHCV29i0",
"parentPublication": {
"id": "proceedings/vis/2019/4941/0",
"title": "2019 IEEE Visualization Conference (VIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/02/09222351",
"title": "Palettailor: Discriminable Colorization for Categorical Data",
"doi": null,
"abstractUrl": "/journal/tg/2021/02/09222351/1nTq353vBNS",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pacificvis/2021/3931/0/393100a031",
"title": "Parsing and Summarizing Infographics with Synthetically Trained Icon Detection",
"doi": null,
"abstractUrl": "/proceedings-article/pacificvis/2021/393100a031/1tTts9CdeyQ",
"parentPublication": {
"id": "proceedings/pacificvis/2021/3931/0",
"title": "2021 IEEE 14th Pacific Visualization Symposium (PacificVis)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2021/8808/0/09412507",
"title": "Anime Sketch Colorization by Component-based Matching using Deep Appearance Features and Graph Representation",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2021/09412507/1tmhNNPsnrG",
"parentPublication": {
"id": "proceedings/icpr/2021/8808/0",
"title": "2020 25th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/01/09552208",
"title": "Visual Arrangements of Bar Charts Influence Comparisons in Viewer Takeaways",
"doi": null,
"abstractUrl": "/journal/tg/2022/01/09552208/1xibWU97C8w",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09552449",
"articleId": "1xic65iQBoY",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09552930",
"articleId": "1xic4JnxG2k",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1zBaBdOFJo4",
"name": "ttg202201-09585700s1-supp1-3114856.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg202201-09585700s1-supp1-3114856.mp4",
"extension": "mp4",
"size": "17.2 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNwFid7w",
"title": "Jan.",
"year": "2019",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": "25",
"label": "Jan.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "17D45XDIXRv",
"doi": "10.1109/TVCG.2018.2865142",
"abstract": "We present the results of two perception studies to assess how quickly people can perform a simple data comparison task for small-scale visualizations on a smartwatch. The main goal of these studies is to extend our understanding of design constraints for smartwatch visualizations. Previous work has shown that a vast majority of smartwatch interactions last under 5 s. It is still unknown what people can actually perceive from visualizations during such short glances, in particular with such a limited display space of smartwatches. To shed light on this question, we conducted two perception studies that assessed the lower bounds of task time for a simple data comparison task. We tested three chart types common on smartwatches: bar charts, donut charts, and radial bar charts with three different data sizes: 7, 12, and 24 data values. In our first study, we controlled the differences of the two target bars to be compared, while the second study varied the difference randomly. For both studies, we found that participants performed the task on average in <;300 ms for the bar chart, <;220 ms for the donut chart, and in <; 1780 ms for the radial bar chart. Thresholds in the second study per chart type were on average 1.14-1.35× higher than in the first study. Our results show that bar and donut charts should be preferred on smartwatch displays when quick data comparisons are necessary.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We present the results of two perception studies to assess how quickly people can perform a simple data comparison task for small-scale visualizations on a smartwatch. The main goal of these studies is to extend our understanding of design constraints for smartwatch visualizations. Previous work has shown that a vast majority of smartwatch interactions last under 5 s. It is still unknown what people can actually perceive from visualizations during such short glances, in particular with such a limited display space of smartwatches. To shed light on this question, we conducted two perception studies that assessed the lower bounds of task time for a simple data comparison task. We tested three chart types common on smartwatches: bar charts, donut charts, and radial bar charts with three different data sizes: 7, 12, and 24 data values. In our first study, we controlled the differences of the two target bars to be compared, while the second study varied the difference randomly. For both studies, we found that participants performed the task on average in <;300 ms for the bar chart, <;220 ms for the donut chart, and in <; 1780 ms for the radial bar chart. Thresholds in the second study per chart type were on average 1.14-1.35× higher than in the first study. Our results show that bar and donut charts should be preferred on smartwatch displays when quick data comparisons are necessary.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We present the results of two perception studies to assess how quickly people can perform a simple data comparison task for small-scale visualizations on a smartwatch. The main goal of these studies is to extend our understanding of design constraints for smartwatch visualizations. Previous work has shown that a vast majority of smartwatch interactions last under 5 s. It is still unknown what people can actually perceive from visualizations during such short glances, in particular with such a limited display space of smartwatches. To shed light on this question, we conducted two perception studies that assessed the lower bounds of task time for a simple data comparison task. We tested three chart types common on smartwatches: bar charts, donut charts, and radial bar charts with three different data sizes: 7, 12, and 24 data values. In our first study, we controlled the differences of the two target bars to be compared, while the second study varied the difference randomly. For both studies, we found that participants performed the task on average in <;300 ms for the bar chart, <;220 ms for the donut chart, and in <; 1780 ms for the radial bar chart. Thresholds in the second study per chart type were on average 1.14-1.35× higher than in the first study. Our results show that bar and donut charts should be preferred on smartwatch displays when quick data comparisons are necessary.",
"title": "Glanceable Visualization: Studies of Data Comparison Performance on Smartwatches",
"normalizedTitle": "Glanceable Visualization: Studies of Data Comparison Performance on Smartwatches",
"fno": "08443125",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Computer Displays",
"Data Visualisation",
"Human Computer Interaction",
"Mobile Computing",
"User Interfaces",
"Visual Perception",
"Donut Chart",
"Radial Bar Chart",
"Chart Type",
"Smartwatch Displays",
"Glanceable Visualization",
"Data Comparison Performance",
"Perception Studies",
"Small Scale Visualizations",
"Smartwatch Interactions",
"Data Sizes",
"Smartwatch Visualization Design Constraints",
"Time 5 0 S",
"Data Visualization",
"Bars",
"Task Analysis",
"Visualization",
"Human Computer Interaction",
"Indexes",
"Glanceable Visualization",
"Smartwatch",
"Perception",
"Quantitative Evaluation",
"Data Comparison"
],
"authors": [
{
"givenName": "Tanja",
"surname": "Blascheck",
"fullName": "Tanja Blascheck",
"affiliation": "Inria",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Lonni",
"surname": "Besançon",
"fullName": "Lonni Besançon",
"affiliation": "Université Paris Saclay",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Anastasia",
"surname": "Bezerianos",
"fullName": "Anastasia Bezerianos",
"affiliation": "Université Paris SudInriaCNRSUniversité Paris Saclay",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Bongshin",
"surname": "Lee",
"fullName": "Bongshin Lee",
"affiliation": "Microsoft Research",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Petra",
"surname": "Isenberg",
"fullName": "Petra Isenberg",
"affiliation": "Inria",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2019-01-01 00:00:00",
"pubType": "trans",
"pages": "630-640",
"year": "2019",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/cvpr/2018/6420/0/642000f648",
"title": "DVQA: Understanding Data Visualizations via Question Answering",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2018/642000f648/17D45WZZ7EU",
"parentPublication": {
"id": "proceedings/cvpr/2018/6420/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/10/09802784",
"title": "Visualization in Motion: A Research Agenda and Two Evaluations",
"doi": null,
"abstractUrl": "/journal/tg/2022/10/09802784/1Eo1xk9vKuI",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/01/09904487",
"title": "Studying Early Decision Making with Progressive Bar Charts",
"doi": null,
"abstractUrl": "/journal/tg/2023/01/09904487/1H1geE4olvG",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/01/08807238",
"title": "A Comparison of Radial and Linear Charts for Visualizing Daily Patterns",
"doi": null,
"abstractUrl": "/journal/tg/2020/01/08807238/1cG66qf6MKs",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2019/2838/0/283800a163",
"title": "Proposal and Evaluation of Textual Description Templates for Bar Charts Vocalization",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2019/283800a163/1cMFc4aDtWo",
"parentPublication": {
"id": "proceedings/iv/2019/2838/0",
"title": "2019 23rd International Conference Information Visualisation (IV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2019/2838/0/283800a151",
"title": "The Cost of Pie Charts",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2019/283800a151/1cMFcqwGM5q",
"parentPublication": {
"id": "proceedings/iv/2019/2838/0",
"title": "2019 23rd International Conference Information Visualisation (IV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vis/2019/4941/0/08933718",
"title": "Visual Cues in Estimation of Part-To-Whole Comparisons",
"doi": null,
"abstractUrl": "/proceedings-article/vis/2019/08933718/1fTgJRMhWoM",
"parentPublication": {
"id": "proceedings/vis/2019/4941/0",
"title": "2019 IEEE Visualization Conference (VIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2021/8808/0/09412144",
"title": "Hybrid Cascade Point Search Network for High Precision Bar Chart Component Detection",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2021/09412144/1tmhwCCzbUY",
"parentPublication": {
"id": "proceedings/icpr/2021/8808/0",
"title": "2020 25th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/01/09552227",
"title": "Rotate or Wrap? Interactive Visualisations of Cyclical Data on Cylindrical or Toroidal Topologies",
"doi": null,
"abstractUrl": "/journal/tg/2022/01/09552227/1xibX4wTR8Q",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/01/09552881",
"title": "Modeling Just Noticeable Differences in Charts",
"doi": null,
"abstractUrl": "/journal/tg/2022/01/09552881/1xibXzMLm9i",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "08440812",
"articleId": "17D45WZZ7E3",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08440806",
"articleId": "17D45We0UEy",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "17ShDTYet37",
"name": "ttg201901-08443125s1.zip",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg201901-08443125s1.zip",
"extension": "zip",
"size": "3.11 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
}
|
{
"issue": {
"id": "1zBamVZHyne",
"title": "Jan.",
"year": "2022",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": "28",
"label": "Jan.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1xibXzMLm9i",
"doi": "10.1109/TVCG.2021.3114874",
"abstract": "One of the fundamental tasks in visualization is to compare two or more visual elements. However, it is often difficult to visually differentiate graphical elements encoding a small difference in value, such as the heights of similar bars in bar chart or angles of similar sections in pie chart. Perceptual laws can be used in order to model when and how we perceive this difference. In this work, we model the perception of Just Noticeable Differences (JNDs), the minimum difference in visual attributes that allow faithfully comparing similar elements, in charts. Specifically, we explore the relation between JNDs and two major visual variables: the intensity of visual elements and the distance between them, and study it in three charts: bar chart, pie chart and bubble chart. Through an empirical study, we identify main effects on JND for distance in bar charts, intensity in pie charts, and both distance and intensity in bubble charts. By fitting a linear mixed effects model, we model JND and find that JND grows as the exponential function of variables. We highlight several usage scenarios that make use of the JND modeling in which elements below the fitted JND are detected and enhanced with secondary visual cues for better discrimination.",
"abstracts": [
{
"abstractType": "Regular",
"content": "One of the fundamental tasks in visualization is to compare two or more visual elements. However, it is often difficult to visually differentiate graphical elements encoding a small difference in value, such as the heights of similar bars in bar chart or angles of similar sections in pie chart. Perceptual laws can be used in order to model when and how we perceive this difference. In this work, we model the perception of Just Noticeable Differences (JNDs), the minimum difference in visual attributes that allow faithfully comparing similar elements, in charts. Specifically, we explore the relation between JNDs and two major visual variables: the intensity of visual elements and the distance between them, and study it in three charts: bar chart, pie chart and bubble chart. Through an empirical study, we identify main effects on JND for distance in bar charts, intensity in pie charts, and both distance and intensity in bubble charts. By fitting a linear mixed effects model, we model JND and find that JND grows as the exponential function of variables. We highlight several usage scenarios that make use of the JND modeling in which elements below the fitted JND are detected and enhanced with secondary visual cues for better discrimination.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "One of the fundamental tasks in visualization is to compare two or more visual elements. However, it is often difficult to visually differentiate graphical elements encoding a small difference in value, such as the heights of similar bars in bar chart or angles of similar sections in pie chart. Perceptual laws can be used in order to model when and how we perceive this difference. In this work, we model the perception of Just Noticeable Differences (JNDs), the minimum difference in visual attributes that allow faithfully comparing similar elements, in charts. Specifically, we explore the relation between JNDs and two major visual variables: the intensity of visual elements and the distance between them, and study it in three charts: bar chart, pie chart and bubble chart. Through an empirical study, we identify main effects on JND for distance in bar charts, intensity in pie charts, and both distance and intensity in bubble charts. By fitting a linear mixed effects model, we model JND and find that JND grows as the exponential function of variables. We highlight several usage scenarios that make use of the JND modeling in which elements below the fitted JND are detected and enhanced with secondary visual cues for better discrimination.",
"title": "Modeling Just Noticeable Differences in Charts",
"normalizedTitle": "Modeling Just Noticeable Differences in Charts",
"fno": "09552881",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Visualization",
"Bars",
"Task Analysis",
"Fans",
"Correlation",
"Computational Modeling",
"Three Dimensional Displays",
"Visual Perception",
"Charts",
"Just Noticeable Difference",
"Modeling"
],
"authors": [
{
"givenName": "Min",
"surname": "Lu",
"fullName": "Min Lu",
"affiliation": "Shenzhen University, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Joel",
"surname": "Lanir",
"fullName": "Joel Lanir",
"affiliation": "The University of Haifa, Israel",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Chufeng",
"surname": "Wang",
"fullName": "Chufeng Wang",
"affiliation": "Shenzhen University, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Yucong",
"surname": "Yao",
"fullName": "Yucong Yao",
"affiliation": "Shenzhen University, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Wen",
"surname": "Zhang",
"fullName": "Wen Zhang",
"affiliation": "Shenzhen University, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Oliver",
"surname": "Deussen",
"fullName": "Oliver Deussen",
"affiliation": "University of Konstanz, Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Hui",
"surname": "Huang",
"fullName": "Hui Huang",
"affiliation": "Shenzhen University, China",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-01-01 00:00:00",
"pubType": "trans",
"pages": "718-726",
"year": "2022",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/fie/2014/3922/0/07043983",
"title": "Your data deserve better than pies and bars: An R graphics workshop for the timid",
"doi": null,
"abstractUrl": "/proceedings-article/fie/2014/07043983/12OmNz3bdDC",
"parentPublication": {
"id": "proceedings/fie/2014/3922/0",
"title": "2014 IEEE Frontiers in Education Conference (FIE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/11/07352354",
"title": "Just Noticeable Distortion Profile for Flat-Shaded 3D Mesh Surfaces",
"doi": null,
"abstractUrl": "/journal/tg/2016/11/07352354/13rRUwhHcJn",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/03/07845717",
"title": "Converting Basic D3 Charts into Reusable Style Templates",
"doi": null,
"abstractUrl": "/journal/tg/2018/03/07845717/13rRUxYINfm",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/01/09904487",
"title": "Studying Early Decision Making with Progressive Bar Charts",
"doi": null,
"abstractUrl": "/journal/tg/2023/01/09904487/1H1geE4olvG",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2022/9007/0/900700a067",
"title": "An Overview of the Design and Development for Dynamic and Physical Bar Charts",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2022/900700a067/1KaH61BvDWw",
"parentPublication": {
"id": "proceedings/iv/2022/9007/0",
"title": "2022 26th International Conference Information Visualisation (IV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/01/08807238",
"title": "A Comparison of Radial and Linear Charts for Visualizing Daily Patterns",
"doi": null,
"abstractUrl": "/journal/tg/2020/01/08807238/1cG66qf6MKs",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2019/2838/0/283800a151",
"title": "The Cost of Pie Charts",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2019/283800a151/1cMFcqwGM5q",
"parentPublication": {
"id": "proceedings/iv/2019/2838/0",
"title": "2019 23rd International Conference Information Visualisation (IV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vis/2019/4941/0/08933547",
"title": "Evidence for Area as the Primary Visual Cue in Pie Charts",
"doi": null,
"abstractUrl": "/proceedings-article/vis/2019/08933547/1fTgFhkepQk",
"parentPublication": {
"id": "proceedings/vis/2019/4941/0",
"title": "2019 IEEE Visualization Conference (VIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/01/09552208",
"title": "Visual Arrangements of Bar Charts Influence Comparisons in Viewer Takeaways",
"doi": null,
"abstractUrl": "/journal/tg/2022/01/09552208/1xibWU97C8w",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/01/09585700",
"title": "A Mixed-Initiative Approach to Reusing Infographic Charts",
"doi": null,
"abstractUrl": "/journal/tg/2022/01/09585700/1y11cGSPuPC",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09557878",
"articleId": "1xquNQMVFCM",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09552227",
"articleId": "1xibX4wTR8Q",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNx8fieY",
"title": "Aug.",
"year": "2016",
"issueNum": "08",
"idPrefix": "tk",
"pubType": "journal",
"volume": "28",
"label": "Aug.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUwfZC0E",
"doi": "10.1109/TKDE.2016.2557324",
"abstract": "Visualization provides a powerful means for data analysis. But to be practical, visual analytics tools must support smooth and flexible use of visualizations at a fast rate. This becomes increasingly onerous with the ever-increasing size of real-world datasets. First, large databases make interaction more difficult once query response time exceeds several seconds. Second, any attempt to show all data points will overload the visualization, resulting in chaos that will only confuse the user. Over the last few years, substantial effort has been put into addressing both of these issues and many innovative solutions have been proposed. Indeed, data visualization is a topic that is too large to be addressed in a single survey paper. Thus, we restrict our attention here to interactive visualization of large data sets. Our focus then is skewed in a natural way towards query processing problem—provided by an underlying database system—rather than to the actual data visualization problem.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Visualization provides a powerful means for data analysis. But to be practical, visual analytics tools must support smooth and flexible use of visualizations at a fast rate. This becomes increasingly onerous with the ever-increasing size of real-world datasets. First, large databases make interaction more difficult once query response time exceeds several seconds. Second, any attempt to show all data points will overload the visualization, resulting in chaos that will only confuse the user. Over the last few years, substantial effort has been put into addressing both of these issues and many innovative solutions have been proposed. Indeed, data visualization is a topic that is too large to be addressed in a single survey paper. Thus, we restrict our attention here to interactive visualization of large data sets. Our focus then is skewed in a natural way towards query processing problem—provided by an underlying database system—rather than to the actual data visualization problem.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Visualization provides a powerful means for data analysis. But to be practical, visual analytics tools must support smooth and flexible use of visualizations at a fast rate. This becomes increasingly onerous with the ever-increasing size of real-world datasets. First, large databases make interaction more difficult once query response time exceeds several seconds. Second, any attempt to show all data points will overload the visualization, resulting in chaos that will only confuse the user. Over the last few years, substantial effort has been put into addressing both of these issues and many innovative solutions have been proposed. Indeed, data visualization is a topic that is too large to be addressed in a single survey paper. Thus, we restrict our attention here to interactive visualization of large data sets. Our focus then is skewed in a natural way towards query processing problem—provided by an underlying database system—rather than to the actual data visualization problem.",
"title": "Interactive Visualization of Large Data Sets",
"normalizedTitle": "Interactive Visualization of Large Data Sets",
"fno": "07457691",
"hasPdf": true,
"idPrefix": "tk",
"keywords": [
"Data Visualization",
"Visualization",
"Data Analysis",
"Scalability",
"Visual Databases",
"Big Data",
"Database Visualization",
"Data Aggregation",
"Data Indexing",
"Interactive Visualization"
],
"authors": [
{
"givenName": "Parke",
"surname": "Godfrey",
"fullName": "Parke Godfrey",
"affiliation": "Department of Electrical Engineering and Computer Science, York University, Toronto, ON, Canada",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jarek",
"surname": "Gryz",
"fullName": "Jarek Gryz",
"affiliation": "Department of Electrical Engineering and Computer Science, York University, Toronto, ON, Canada",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Piotr",
"surname": "Lasek",
"fullName": "Piotr Lasek",
"affiliation": "York University, Canada",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "08",
"pubDate": "2016-08-01 00:00:00",
"pubType": "trans",
"pages": "2142-2157",
"year": "2016",
"issn": "1041-4347",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/ieee-vis/2005/2766/0/27660022",
"title": "Query-Driven Visualization of Large Data Sets",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/2005/27660022/12OmNvAAtDg",
"parentPublication": {
"id": "proceedings/ieee-vis/2005/2766/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icde/2016/2020/0/07498287",
"title": "Visualization-aware sampling for very large databases",
"doi": null,
"abstractUrl": "/proceedings-article/icde/2016/07498287/12OmNx5GTXU",
"parentPublication": {
"id": "proceedings/icde/2016/2020/0",
"title": "2016 IEEE 32nd International Conference on Data Engineering (ICDE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2007/2907/0/04301413",
"title": "Glyph-based Overviews of Large Datasets in Structural Bioinformatics",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2007/04301413/12OmNxUdv9T",
"parentPublication": {
"id": "proceedings/iv/2007/2907/0",
"title": "11th International Conference Information Visualization - Supplements (IV '07)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-vis/2005/2766/0/01532792",
"title": "Query-driven visualization of large data sets",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/2005/01532792/12OmNzYNN5Q",
"parentPublication": {
"id": "proceedings/ieee-vis/2005/2766/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/co/2013/05/mco2013050058",
"title": "Improving Healthcare with Interactive Visualization",
"doi": null,
"abstractUrl": "/magazine/co/2013/05/mco2013050058/13rRUwgyOgw",
"parentPublication": {
"id": "mags/co",
"title": "Computer",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2007/03/v0494",
"title": "Value and Relation Display: Interactive Visual Exploration of Large Data Sets with Hundreds of Dimensions",
"doi": null,
"abstractUrl": "/journal/tg/2007/03/v0494/13rRUyYSWsK",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icde/2019/7474/0/747400b594",
"title": "AID: An Adaptive Image Data Index for Interactive Multilevel Visualization",
"doi": null,
"abstractUrl": "/proceedings-article/icde/2019/747400b594/1aDT05zEpAk",
"parentPublication": {
"id": "proceedings/icde/2019/7474/0",
"title": "2019 IEEE 35th International Conference on Data Engineering (ICDE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/02/09217931",
"title": "A Structured Review of Data Management Technology for Interactive Visualization and Analysis",
"doi": null,
"abstractUrl": "/journal/tg/2021/02/09217931/1nL7qZXi89O",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmcce/2020/2314/0/231400c361",
"title": "VTGeo: A Visualization Tool for Geospatial Data",
"doi": null,
"abstractUrl": "/proceedings-article/icmcce/2020/231400c361/1tzz9WNDCxi",
"parentPublication": {
"id": "proceedings/icmcce/2020/2314/0",
"title": "2020 5th International Conference on Mechanical, Control and Computer Engineering (ICMCCE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/01/09552893",
"title": "DIEL: Interactive Visualization Beyond the Here and Now",
"doi": null,
"abstractUrl": "/journal/tg/2022/01/09552893/1xic1S53KPS",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "07448405",
"articleId": "13rRUxZ0o1Z",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07454756",
"articleId": "13rRUxAAT85",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNvGPE8n",
"title": "Jan.",
"year": "2016",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": "22",
"label": "Jan.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUx0gev9",
"doi": "10.1109/TVCG.2015.2467091",
"abstract": "We present Reactive Vega, a system architecture that provides the first robust and comprehensive treatment of declarative visual and interaction design for data visualization. Starting from a single declarative specification, Reactive Vega constructs a dataflow graph in which input data, scene graph elements, and interaction events are all treated as first-class streaming data sources. To support expressive interactive visualizations that may involve time-varying scalar, relational, or hierarchical data, Reactive Vega's dataflow graph can dynamically re-write itself at runtime by extending or pruning branches in a data-driven fashion. We discuss both compile- and run-time optimizations applied within Reactive Vega, and share the results of benchmark studies that indicate superior interactive performance to both D3 and the original, non-reactive Vega system.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We present Reactive Vega, a system architecture that provides the first robust and comprehensive treatment of declarative visual and interaction design for data visualization. Starting from a single declarative specification, Reactive Vega constructs a dataflow graph in which input data, scene graph elements, and interaction events are all treated as first-class streaming data sources. To support expressive interactive visualizations that may involve time-varying scalar, relational, or hierarchical data, Reactive Vega's dataflow graph can dynamically re-write itself at runtime by extending or pruning branches in a data-driven fashion. We discuss both compile- and run-time optimizations applied within Reactive Vega, and share the results of benchmark studies that indicate superior interactive performance to both D3 and the original, non-reactive Vega system.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We present Reactive Vega, a system architecture that provides the first robust and comprehensive treatment of declarative visual and interaction design for data visualization. Starting from a single declarative specification, Reactive Vega constructs a dataflow graph in which input data, scene graph elements, and interaction events are all treated as first-class streaming data sources. To support expressive interactive visualizations that may involve time-varying scalar, relational, or hierarchical data, Reactive Vega's dataflow graph can dynamically re-write itself at runtime by extending or pruning branches in a data-driven fashion. We discuss both compile- and run-time optimizations applied within Reactive Vega, and share the results of benchmark studies that indicate superior interactive performance to both D3 and the original, non-reactive Vega system.",
"title": "Reactive Vega: A Streaming Dataflow Architecture for Declarative Interactive Visualization",
"normalizedTitle": "Reactive Vega: A Streaming Dataflow Architecture for Declarative Interactive Visualization",
"fno": "07192704",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Data Visualization",
"Visualization",
"Data Models",
"Encoding",
"Indexes",
"Runtime",
"Computer Architecture",
"Streaming Data",
"Information Visualization",
"Systems",
"Toolkits",
"Declarative Specification",
"Optimization",
"Interaction",
"Streaming Data",
"Information Visualization",
"Systems",
"Toolkits",
"Declarative Specification",
"Optimization",
"Interaction"
],
"authors": [
{
"givenName": "Arvind",
"surname": "Satyanarayan",
"fullName": "Arvind Satyanarayan",
"affiliation": ", Stanford University",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Ryan",
"surname": "Russell",
"fullName": "Ryan Russell",
"affiliation": ", University of Washington",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jane",
"surname": "Hoffswell",
"fullName": "Jane Hoffswell",
"affiliation": ", University of Washington",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jeffrey",
"surname": "Heer",
"fullName": "Jeffrey Heer",
"affiliation": ", University of Washington",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2016-01-01 00:00:00",
"pubType": "trans",
"pages": "659-668",
"year": "2016",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/dfm/2012/4954/0/4954a018",
"title": "DFScala: High Level Dataflow Support for Scala",
"doi": null,
"abstractUrl": "/proceedings-article/dfm/2012/4954a018/12OmNBtl1qa",
"parentPublication": {
"id": "proceedings/dfm/2012/4954/0",
"title": "2012 Data-Flow Execution Models for Extreme Scale Computing (DFM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vl/1993/3970/0/00269556",
"title": "Control flow in a dataflow language",
"doi": null,
"abstractUrl": "/proceedings-article/vl/1993/00269556/12OmNrEL2Av",
"parentPublication": {
"id": "proceedings/vl/1993/3970/0",
"title": "Proceedings 1993 IEEE Symposium on Visual Languages",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dfm/2014/8095/0/8095a038",
"title": "DFGR an Intermediate Graph Representation for Macro-Dataflow Programs",
"doi": null,
"abstractUrl": "/proceedings-article/dfm/2014/8095a038/12OmNrNh0QL",
"parentPublication": {
"id": "proceedings/dfm/2014/8095/0",
"title": "2014 Fourth Workshop on Data-Flow Execution Models for Extreme Scale Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wicsa/2004/2172/0/21720211",
"title": "Reactive Types for Dataflow-Oriented Software Architectures",
"doi": null,
"abstractUrl": "/proceedings-article/wicsa/2004/21720211/12OmNzGlRGG",
"parentPublication": {
"id": "proceedings/wicsa/2004/2172/0",
"title": "Software Architecture, Working IEEE/IFIP Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/01/07539624",
"title": "Vega-Lite: A Grammar of Interactive Graphics",
"doi": null,
"abstractUrl": "/journal/tg/2017/01/07539624/13rRUIJuxvn",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/01/08440831",
"title": "<italic>KnowledgePearls</italic>: Provenance-Based Visualization Retrieval",
"doi": null,
"abstractUrl": "/journal/tg/2019/01/08440831/17D45Wc1ILJ",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/01/08440080",
"title": "Design Exposition with Literate Visualization",
"doi": null,
"abstractUrl": "/journal/tg/2019/01/08440080/17D45XoXP4o",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/01/09914804",
"title": "Animated Vega-Lite: Unifying Animation with a Grammar of Interactive Graphics",
"doi": null,
"abstractUrl": "/journal/tg/2023/01/09914804/1Hmgc5h7Clq",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvrv/2017/2636/0/263600a347",
"title": "Parametric Design of Visual Simulation Based on Vega Prime",
"doi": null,
"abstractUrl": "/proceedings-article/icvrv/2017/263600a347/1ap5C07iOdO",
"parentPublication": {
"id": "proceedings/icvrv/2017/2636/0",
"title": "2017 International Conference on Virtual Reality and Visualization (ICVRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ldav/2020/8468/0/846800a001",
"title": "DIVA: A Declarative and Reactive Language for in situ Visualization",
"doi": null,
"abstractUrl": "/proceedings-article/ldav/2020/846800a001/1pZ0TBE8Yjm",
"parentPublication": {
"id": "proceedings/ldav/2020/8468/0",
"title": "2020 IEEE 10th Symposium on Large Data Analysis and Visualization (LDAV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "07192728",
"articleId": "13rRUILLkDU",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07192658",
"articleId": "13rRUxBrGh3",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "17ShDTXFgIx",
"name": "ttg201601-07192704s1.zip",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg201601-07192704s1.zip",
"extension": "zip",
"size": "5.97 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNvsDHDY",
"title": "Jan.",
"year": "2020",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": "26",
"label": "Jan.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1cHE2tYwF7a",
"doi": "10.1109/TVCG.2019.2934537",
"abstract": "We present P5, a web-based visualization toolkit that combines declarative visualization grammar and GPU computing for progressive data analysis and visualization. To interactively analyze and explore big data, progressive analytics and visualization methods have recently emerged. Progressive visualizations of incrementally refining results have the advantages of allowing users to steer the analysis process and make early decisions. P5 leverages declarative grammar for specifying visualization designs and exploits GPU computing to accelerate progressive data processing and rendering. The declarative specifications can be modified during progressive processing to create different visualizations for analyzing the intermediate results. To enable user interactions for progressive data analysis, P5 utilizes the GPU to automatically aggregate and index data based on declarative interaction specifications to facilitate effective interactive visualization. We demonstrate the effectiveness and usefulness of P5 through a variety of example applications and several performance benchmark tests.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We present P5, a web-based visualization toolkit that combines declarative visualization grammar and GPU computing for progressive data analysis and visualization. To interactively analyze and explore big data, progressive analytics and visualization methods have recently emerged. Progressive visualizations of incrementally refining results have the advantages of allowing users to steer the analysis process and make early decisions. P5 leverages declarative grammar for specifying visualization designs and exploits GPU computing to accelerate progressive data processing and rendering. The declarative specifications can be modified during progressive processing to create different visualizations for analyzing the intermediate results. To enable user interactions for progressive data analysis, P5 utilizes the GPU to automatically aggregate and index data based on declarative interaction specifications to facilitate effective interactive visualization. We demonstrate the effectiveness and usefulness of P5 through a variety of example applications and several performance benchmark tests.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We present P5, a web-based visualization toolkit that combines declarative visualization grammar and GPU computing for progressive data analysis and visualization. To interactively analyze and explore big data, progressive analytics and visualization methods have recently emerged. Progressive visualizations of incrementally refining results have the advantages of allowing users to steer the analysis process and make early decisions. P5 leverages declarative grammar for specifying visualization designs and exploits GPU computing to accelerate progressive data processing and rendering. The declarative specifications can be modified during progressive processing to create different visualizations for analyzing the intermediate results. To enable user interactions for progressive data analysis, P5 utilizes the GPU to automatically aggregate and index data based on declarative interaction specifications to facilitate effective interactive visualization. We demonstrate the effectiveness and usefulness of P5 through a variety of example applications and several performance benchmark tests.",
"title": "P5: Portable Progressive Parallel Processing Pipelines for Interactive Data Analysis and Visualization",
"normalizedTitle": "P5: Portable Progressive Parallel Processing Pipelines for Interactive Data Analysis and Visualization",
"fno": "08809730",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Big Data",
"Data Analysis",
"Data Visualisation",
"Formal Specification",
"Interactive Systems",
"Internet",
"Parallel Processing",
"Rendering Computer Graphics",
"Portable Progressive Parallel Processing Pipelines",
"Interactive Data Analysis",
"Declarative Visualization Grammar",
"GPU Computing",
"Progressive Data Analysis",
"Big Data",
"Rendering",
"Declarative Interaction Specifications",
"Interactive Visualization",
"Web Based Visualization Toolkit",
"P 5 Toolkit",
"Progressive Data Visualization",
"Data Visualization",
"Graphics Processing Units",
"Parallel Processing",
"Data Analysis",
"Grammar",
"Libraries",
"Big Data",
"Information Visualization",
"Progressive Analytics",
"Visualization Software",
"GPU Computing",
"Data Exploration"
],
"authors": [
{
"givenName": "Jianping Kelvin",
"surname": "Li",
"fullName": "Jianping Kelvin Li",
"affiliation": "University of California, Davis",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Kwan-Liu",
"surname": "Ma",
"fullName": "Kwan-Liu Ma",
"affiliation": "University of California, Davis",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2020-01-01 00:00:00",
"pubType": "trans",
"pages": "1151-1160",
"year": "2020",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/ipdpsw/2016/3682/0/3682b038",
"title": "Coupling LAMMPS and the vl3 Framework for Co-Visualization of Atomistic Simulations",
"doi": null,
"abstractUrl": "/proceedings-article/ipdpsw/2016/3682b038/12OmNBa2iCC",
"parentPublication": {
"id": "proceedings/ipdpsw/2016/3682/0",
"title": "2016 IEEE International Parallel and Distributed Processing Symposium Workshops (IPDPSW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvrv/2015/7673/0/7673a260",
"title": "Linking Visualization and Scientific Understanding through Interactive Rendering of Large-Scale Data in Parallel Environment",
"doi": null,
"abstractUrl": "/proceedings-article/icvrv/2015/7673a260/12OmNyRPgJQ",
"parentPublication": {
"id": "proceedings/icvrv/2015/7673/0",
"title": "2015 International Conference on Virtual Reality and Visualization (ICVRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/03/08468065",
"title": "P4: Portable Parallel Processing Pipelines for Interactive Information Visualization",
"doi": null,
"abstractUrl": "/journal/tg/2020/03/08468065/13HFz2XZAUp",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2013/05/mcg2013050026",
"title": "Fast, Progressive Loading of Binary-Encoded Declarative-3D Web Content",
"doi": null,
"abstractUrl": "/magazine/cg/2013/05/mcg2013050026/13rRUwwJWBk",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/12/06875936",
"title": "Interactive Progressive Visualization with Space-Time Error Control",
"doi": null,
"abstractUrl": "/journal/tg/2014/12/06875936/13rRUxZRbo1",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/09/09079657",
"title": "Distributed Interactive Visualization Using GPU-Optimized Spark",
"doi": null,
"abstractUrl": "/journal/tg/2021/09/09079657/1jmVbp8XqZa",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icde/2020/2903/0/09101184",
"title": "Interactive Cleaning for Progressive Visualization through Composite Questions",
"doi": null,
"abstractUrl": "/proceedings-article/icde/2020/09101184/1kaMEgRXmlq",
"parentPublication": {
"id": "proceedings/icde/2020/2903/0",
"title": "2020 IEEE 36th International Conference on Data Engineering (ICDE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ldav/2020/8468/0/846800a027",
"title": "Interactive Visualization of Terascale Data in the Browser: Fact or Fiction?",
"doi": null,
"abstractUrl": "/proceedings-article/ldav/2020/846800a027/1pZ0TITco5q",
"parentPublication": {
"id": "proceedings/ldav/2020/8468/0",
"title": "2020 IEEE 10th Symposium on Large Data Analysis and Visualization (LDAV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/09/09320596",
"title": "Impact of Cognitive Biases on Progressive Visualization",
"doi": null,
"abstractUrl": "/journal/tg/2022/09/09320596/1qkwDU8VvW0",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/01/09552893",
"title": "DIEL: Interactive Visualization Beyond the Here and Now",
"doi": null,
"abstractUrl": "/journal/tg/2022/01/09552893/1xic1S53KPS",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "08809833",
"articleId": "1cHEinBcEAo",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08807303",
"articleId": "1cG6axGog24",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
|
{
"issue": {
"id": "1qL5hsvvVkc",
"title": "Feb.",
"year": "2021",
"issueNum": "02",
"idPrefix": "tg",
"pubType": "journal",
"volume": "27",
"label": "Feb.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1olDLxl43Qc",
"doi": "10.1109/TVCG.2020.3030453",
"abstract": "We present P6, a declarative language for building high performance visual analytics systems through its support for specifying and integrating machine learning and interactive visualization methods. As data analysis methods based on machine learning and artificial intelligence continue to advance, a visual analytics solution can leverage these methods for better exploiting large and complex data. However, integrating machine learning methods with interactive visual analysis is challenging. Existing declarative programming libraries and toolkits for visualization lack support for coupling machine learning methods. By providing a declarative language for visual analytics, P6 can empower more developers to create visual analytics applications that combine machine learning and visualization methods for data analysis and problem solving. Through a variety of example applications, we demonstrate P6's capabilities and show the benefits of using declarative specifications to build visual analytics systems. We also identify and discuss the research opportunities and challenges for declarative visual analytics.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We present P6, a declarative language for building high performance visual analytics systems through its support for specifying and integrating machine learning and interactive visualization methods. As data analysis methods based on machine learning and artificial intelligence continue to advance, a visual analytics solution can leverage these methods for better exploiting large and complex data. However, integrating machine learning methods with interactive visual analysis is challenging. Existing declarative programming libraries and toolkits for visualization lack support for coupling machine learning methods. By providing a declarative language for visual analytics, P6 can empower more developers to create visual analytics applications that combine machine learning and visualization methods for data analysis and problem solving. Through a variety of example applications, we demonstrate P6's capabilities and show the benefits of using declarative specifications to build visual analytics systems. We also identify and discuss the research opportunities and challenges for declarative visual analytics.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We present P6, a declarative language for building high performance visual analytics systems through its support for specifying and integrating machine learning and interactive visualization methods. As data analysis methods based on machine learning and artificial intelligence continue to advance, a visual analytics solution can leverage these methods for better exploiting large and complex data. However, integrating machine learning methods with interactive visual analysis is challenging. Existing declarative programming libraries and toolkits for visualization lack support for coupling machine learning methods. By providing a declarative language for visual analytics, P6 can empower more developers to create visual analytics applications that combine machine learning and visualization methods for data analysis and problem solving. Through a variety of example applications, we demonstrate P6's capabilities and show the benefits of using declarative specifications to build visual analytics systems. We also identify and discuss the research opportunities and challenges for declarative visual analytics.",
"title": "P6: A Declarative Language for Integrating Machine Learning in Visual Analytics",
"normalizedTitle": "P6: A Declarative Language for Integrating Machine Learning in Visual Analytics",
"fno": "09246282",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Data Visualisation",
"Learning Artificial Intelligence",
"P 6",
"Declarative Language",
"High Performance Visual Analytics Systems",
"Interactive Visualization Methods",
"Data Analysis Methods",
"Machine Learning",
"Complex Data",
"Interactive Visual Analysis",
"Declarative Programming Libraries",
"Coupling Machine",
"Visual Analytics Applications",
"Declarative Specifications",
"Declarative Visual Analytics",
"Visual Analytics",
"Machine Learning",
"Data Visualization",
"Libraries",
"Python",
"Data Analysis",
"Visual Analytics",
"Interactive Visualization",
"Machine Learning",
"Toolkits",
"Declarative Specification"
],
"authors": [
{
"givenName": "Jianping Kelvin",
"surname": "Li",
"fullName": "Jianping Kelvin Li",
"affiliation": "University of California, Davis",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Kwan-Liu",
"surname": "Ma",
"fullName": "Kwan-Liu Ma",
"affiliation": "University of California, Davis",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "02",
"pubDate": "2021-02-01 00:00:00",
"pubType": "trans",
"pages": "380-389",
"year": "2021",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/vast/2012/4752/0/06400514",
"title": "Big data exploration through visual analytics",
"doi": null,
"abstractUrl": "/proceedings-article/vast/2012/06400514/12OmNC3XhwY",
"parentPublication": {
"id": "proceedings/vast/2012/4752/0",
"title": "2012 IEEE Conference on Visual Analytics Science and Technology (VAST 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vl/1995/7045/0/70450126",
"title": "Integrating algorithm animation into a declarative visual programming language",
"doi": null,
"abstractUrl": "/proceedings-article/vl/1995/70450126/12OmNwErpKe",
"parentPublication": {
"id": "proceedings/vl/1995/7045/0",
"title": "Visual Languages, IEEE Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hpcc/2016/4297/0/07828556",
"title": "Agile Visual Analytics in Data Science Systems",
"doi": null,
"abstractUrl": "/proceedings-article/hpcc/2016/07828556/12OmNyPQ4Qw",
"parentPublication": {
"id": "proceedings/hpcc/2016/4297/0",
"title": "2016 IEEE 18th International Conference on High-Performance Computing and Communications, IEEE 14th International Conference on Smart City, and IEEE 2nd International Conference on Data Science and Systems (HPCC/SmartCity/DSS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isspit/2013/4796/0/06781845",
"title": "KnowYourColors: Visual dashboards for blood metrics and healthcare analytics",
"doi": null,
"abstractUrl": "/proceedings-article/isspit/2013/06781845/12OmNzlly1J",
"parentPublication": {
"id": "proceedings/isspit/2013/4796/0",
"title": "2013 IEEE International Symposium on Signal Processing and Information Technology (ISSPIT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2017/0831/0/0831a281",
"title": "Visual Analytics Solutions as ‘off-the-Shelf’ Libraries",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2017/0831a281/12OmNzmLxRg",
"parentPublication": {
"id": "proceedings/iv/2017/0831/0",
"title": "2017 21st International Conference Information Visualisation (IV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/co/2013/08/mco2013080090",
"title": "Bixplorer: Visual Analytics with Biclusters",
"doi": null,
"abstractUrl": "/magazine/co/2013/08/mco2013080090/13rRUwcAqvs",
"parentPublication": {
"id": "mags/co",
"title": "Computer",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2017/01/mcg2017010100",
"title": "Sampling for Scalable Visual Analytics",
"doi": null,
"abstractUrl": "/magazine/cg/2017/01/mcg2017010100/13rRUxBa5pi",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/co/2013/07/mco2013070030",
"title": "Visual Analytics Support for Intelligence Analysis",
"doi": null,
"abstractUrl": "/magazine/co/2013/07/mco2013070030/13rRUxD9h0P",
"parentPublication": {
"id": "mags/co",
"title": "Computer",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2009/02/mcg2009020084",
"title": "Demystifying Visual Analytics",
"doi": null,
"abstractUrl": "/magazine/cg/2009/02/mcg2009020084/13rRUy3gn3z",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2020/9134/0/913400a360",
"title": "Comparison of Full-text Articles and Abstracts for Visual Trend Analytics through Natural Language Processing",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2020/913400a360/1rSRdbyf9bG",
"parentPublication": {
"id": "proceedings/iv/2020/9134/0",
"title": "2020 24th International Conference Information Visualisation (IV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09222342",
"articleId": "1nTqOo5NR3G",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09222086",
"articleId": "1nTrpup4LZe",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNAle6QG",
"title": "March",
"year": "2014",
"issueNum": "03",
"idPrefix": "tg",
"pubType": "journal",
"volume": "20",
"label": "March",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUIIVlcJ",
"doi": "10.1109/TVCG.2013.242",
"abstract": "Internet users are very familiar with the results of a search query displayed as a ranked list of snippets. Each textual snippet shows a content summary of the referred document (or webpage) and a link to it. This display has many advantages, for example, it affords easy navigation and is straightforward to interpret. Nonetheless, any user of search engines could possibly report some experience of disappointment with this metaphor. Indeed, it has limitations in particular situations, as it fails to provide an overview of the document collection retrieved. Moreover, depending on the nature of the query for example, it may be too general, or ambiguous, or ill expressed the desired information may be poorly ranked, or results may contemplate varied topics. Several search tasks would be easier if users were shown an overview of the returned documents, organized so as to reflect how related they are, content wise. We propose a visualization technique to display the results of web queries aimed at overcoming such limitations. It combines the neighborhood preservation capability of multidimensional projections with the familiar snippet-based representation by employing a multidimensional projection to derive two-dimensional layouts of the query search results that preserve text similarity relations, or neighborhoods. Similarity is computed by applying the cosine similarity over a \"bag-of-words\" vector representation of the collection built from the snippets. If the snippets are displayed directly according to the derived layout, they will overlap considerably, producing a poor visualization. We overcome this problem by defining an energy functional that considers both the overlapping among snippets and the preservation of the neighborhood structure as given in the projected layout. Minimizing this energy functional provides a neighborhood preserving two-dimensional arrangement of the textual snippets with minimum overlap. The resulting visualization conveys both a global view of the query results and visual groupings that reflect related results, as illustrated in several examples shown.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Internet users are very familiar with the results of a search query displayed as a ranked list of snippets. Each textual snippet shows a content summary of the referred document (or webpage) and a link to it. This display has many advantages, for example, it affords easy navigation and is straightforward to interpret. Nonetheless, any user of search engines could possibly report some experience of disappointment with this metaphor. Indeed, it has limitations in particular situations, as it fails to provide an overview of the document collection retrieved. Moreover, depending on the nature of the query for example, it may be too general, or ambiguous, or ill expressed the desired information may be poorly ranked, or results may contemplate varied topics. Several search tasks would be easier if users were shown an overview of the returned documents, organized so as to reflect how related they are, content wise. We propose a visualization technique to display the results of web queries aimed at overcoming such limitations. It combines the neighborhood preservation capability of multidimensional projections with the familiar snippet-based representation by employing a multidimensional projection to derive two-dimensional layouts of the query search results that preserve text similarity relations, or neighborhoods. Similarity is computed by applying the cosine similarity over a \"bag-of-words\" vector representation of the collection built from the snippets. If the snippets are displayed directly according to the derived layout, they will overlap considerably, producing a poor visualization. We overcome this problem by defining an energy functional that considers both the overlapping among snippets and the preservation of the neighborhood structure as given in the projected layout. Minimizing this energy functional provides a neighborhood preserving two-dimensional arrangement of the textual snippets with minimum overlap. The resulting visualization conveys both a global view of the query results and visual groupings that reflect related results, as illustrated in several examples shown.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Internet users are very familiar with the results of a search query displayed as a ranked list of snippets. Each textual snippet shows a content summary of the referred document (or webpage) and a link to it. This display has many advantages, for example, it affords easy navigation and is straightforward to interpret. Nonetheless, any user of search engines could possibly report some experience of disappointment with this metaphor. Indeed, it has limitations in particular situations, as it fails to provide an overview of the document collection retrieved. Moreover, depending on the nature of the query for example, it may be too general, or ambiguous, or ill expressed the desired information may be poorly ranked, or results may contemplate varied topics. Several search tasks would be easier if users were shown an overview of the returned documents, organized so as to reflect how related they are, content wise. We propose a visualization technique to display the results of web queries aimed at overcoming such limitations. It combines the neighborhood preservation capability of multidimensional projections with the familiar snippet-based representation by employing a multidimensional projection to derive two-dimensional layouts of the query search results that preserve text similarity relations, or neighborhoods. Similarity is computed by applying the cosine similarity over a \"bag-of-words\" vector representation of the collection built from the snippets. If the snippets are displayed directly according to the derived layout, they will overlap considerably, producing a poor visualization. We overcome this problem by defining an energy functional that considers both the overlapping among snippets and the preservation of the neighborhood structure as given in the projected layout. Minimizing this energy functional provides a neighborhood preserving two-dimensional arrangement of the textual snippets with minimum overlap. The resulting visualization conveys both a global view of the query results and visual groupings that reflect related results, as illustrated in several examples shown.",
"title": "Similarity Preserving Snippet-Based Visualization of Web Search Results",
"normalizedTitle": "Similarity Preserving Snippet-Based Visualization of Web Search Results",
"fno": "ttg2014030457",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Visualization",
"Layout",
"Search Engines",
"Vectors",
"Optimization",
"Web Pages",
"Navigation",
"Web Search Visualization",
"Multidimensional Projection"
],
"authors": [
{
"givenName": "Erick",
"surname": "Gomez-Nieto",
"fullName": "Erick Gomez-Nieto",
"affiliation": "Inst. de Cienc. Mat. e de Comput. (ICMC), Univ. de Sao Paulo, Sao Carlos, Brazil and San Pablo Catholic University, Peru",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Frizzi",
"surname": "San Roman",
"fullName": "Frizzi San Roman",
"affiliation": "Inst. de Cienc. Mat. e de Comput. (ICMC), Univ. de Sao Paulo, Sao Carlos, Brazil",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Paulo",
"surname": "Pagliosa",
"fullName": "Paulo Pagliosa",
"affiliation": "Fac. de Comput., Univ. Fed. de Mato Grosso do Sul, Campo Grande, Brazil",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Wallace",
"surname": "Casaca",
"fullName": "Wallace Casaca",
"affiliation": "Inst. de Cienc. Mat. e de Comput. (ICMC), Univ. de Sao Paulo, Sao Carlos, Brazil",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Elias S.",
"surname": "Helou",
"fullName": "Elias S. Helou",
"affiliation": "Inst. de Cienc. Mat. e de Comput. (ICMC), Univ. de Sao Paulo, Sao Carlos, Brazil",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Maria Cristina F.",
"surname": "de Oliveira",
"fullName": "Maria Cristina F. de Oliveira",
"affiliation": "Inst. de Cienc. Mat. e de Comput. (ICMC), Univ. de Sao Paulo, Sao Carlos, Brazil",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Luis Gustavo",
"surname": "Nonato",
"fullName": "Luis Gustavo Nonato",
"affiliation": "Inst. de Cienc. Mat. e de Comput. (ICMC), Univ. de Sao Paulo, Sao Carlos, Brazil",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "03",
"pubDate": "2014-03-01 00:00:00",
"pubType": "trans",
"pages": "457-470",
"year": "2014",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/icsme/2017/0992/0/0992a628",
"title": "NLP2Code: Code Snippet Content Assist via Natural Language Tasks",
"doi": null,
"abstractUrl": "/proceedings-article/icsme/2017/0992a628/12OmNA14Agr",
"parentPublication": {
"id": "proceedings/icsme/2017/0992/0",
"title": "2017 IEEE International Conference on Software Maintenance and Evolution (ICSME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wi-iat/2014/4143/1/4143a254",
"title": "Summarizing Search Results with Community-Based Question Answering",
"doi": null,
"abstractUrl": "/proceedings-article/wi-iat/2014/4143a254/12OmNxb5hw9",
"parentPublication": {
"id": "wi-iat/2014/4143/1",
"title": "2014 IEEE/WIC/ACM International Joint Conferences on Web Intelligence (WI) and Intelligent Agent Technologies (IAT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/c5/2011/390/0/05936691",
"title": "Slide KWIC: Snippet Generation for Browsing Slides Based on Conceptual Relationship and Presentational Structure",
"doi": null,
"abstractUrl": "/proceedings-article/c5/2011/05936691/12OmNyKJixD",
"parentPublication": {
"id": "proceedings/c5/2011/390/0",
"title": "2011 Ninth International Conference on Creating, Connecting and Collaborating through Computing (C5)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpads/2011/4576/0/4576a252",
"title": "Fast Snippet Generation Based on CPU-GPU Hybrid System",
"doi": null,
"abstractUrl": "/proceedings-article/icpads/2011/4576a252/12OmNyqiaMI",
"parentPublication": {
"id": "proceedings/icpads/2011/4576/0",
"title": "Parallel and Distributed Systems, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iiai-aai/2012/2719/0/06337190",
"title": "Extraction of Relevant Snippets from Web Pages Using Hybrid Features",
"doi": null,
"abstractUrl": "/proceedings-article/iiai-aai/2012/06337190/12OmNyv7mnc",
"parentPublication": {
"id": "proceedings/iiai-aai/2012/2719/0",
"title": "2012 IIAI International Conference on Advanced Applied Informatics (IIAIAAI 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/taai/2017/4203/0/4203a104",
"title": "Introduction of Sparkline as Snippet to Context Search Engine Result Page",
"doi": null,
"abstractUrl": "/proceedings-article/taai/2017/4203a104/12OmNzmclVd",
"parentPublication": {
"id": "proceedings/taai/2017/4203/0",
"title": "2017 Conference on Technologies and Applications of Artificial Intelligence (TAAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2018/6420/0/642000b169",
"title": "Video Person Re-identification with Competitive Snippet-Similarity Aggregation and Co-attentive Snippet Embedding",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2018/642000b169/17D45WKWnHW",
"parentPublication": {
"id": "proceedings/cvpr/2018/6420/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2023/9346/0/934600f426",
"title": "One-Shot Doc Snippet Detection: Powering Search in Document Beyond Text",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2023/934600f426/1L8qtCPG6as",
"parentPublication": {
"id": "proceedings/wacv/2023/9346/0",
"title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tk/2023/02/09477056",
"title": "BANDAR: Benchmarking Snippet Generation Algorithms for (RDF) Dataset Search",
"doi": null,
"abstractUrl": "/journal/tk/2023/02/09477056/1v2M06rH6lW",
"parentPublication": {
"id": "trans/tk",
"title": "IEEE Transactions on Knowledge & Data Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900q6005",
"title": "CoLA: Weakly-Supervised Temporal Action Localization with Snippet Contrastive Learning",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900q6005/1yeItLSVgUU",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2014030447",
"articleId": "13rRUxjQyvj",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2014030471",
"articleId": "13rRUxBrGgY",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "17ShDTXWRPb",
"name": "ttg2014030457s1.wmv",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg2014030457s1.wmv",
"extension": "wmv",
"size": "40.1 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.