data
dict |
|---|
{
"issue": {
"id": "12OmNyp9Mqu",
"title": "Jan.",
"year": "2014",
"issueNum": "01",
"idPrefix": "tp",
"pubType": "journal",
"volume": "36",
"label": "Jan.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUxYIMWo",
"doi": "10.1109/TPAMI.2014.7",
"abstract": "The two factors that make TPAMI a wonderful journal are largely immune to disruption by a change of editors. Our community is a fertile source of exciting intellectual creations and scientific discoveries, and this factor ensures there are fine papers for the journal to publish. The other factor is the large community of volunteers who find and promote strong papers. The journal owes a great deal to the tremendous efforts, skill, and professionalism of the Associate Editors in Chief (AEICs). In 2011, TPAMI received 944 submissions, of which 171 were accepted. On average, from submission to first decision took 4.8 months, to accept took 10 months, to online publication 11.1 months, and to paper 17.5 months. Note that these numbers are not cumulative. In 2012, TPAMI received 1,033 submissions, of which 166 have thus far been accepted. On average, from submission to first decision took 3.6 months, to accept took 7.8 months, to online publication 8.4 months, and to paper 14.6 months. You can see the effect of the extra pages that Ramin organized. Figures for 2013 are not yet in, but as of September there were 703 submissions, of which 14 had already been accepted. These statistics suggest that the journal is generally efficient at handling papers.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The two factors that make TPAMI a wonderful journal are largely immune to disruption by a change of editors. Our community is a fertile source of exciting intellectual creations and scientific discoveries, and this factor ensures there are fine papers for the journal to publish. The other factor is the large community of volunteers who find and promote strong papers. The journal owes a great deal to the tremendous efforts, skill, and professionalism of the Associate Editors in Chief (AEICs). In 2011, TPAMI received 944 submissions, of which 171 were accepted. On average, from submission to first decision took 4.8 months, to accept took 10 months, to online publication 11.1 months, and to paper 17.5 months. Note that these numbers are not cumulative. In 2012, TPAMI received 1,033 submissions, of which 166 have thus far been accepted. On average, from submission to first decision took 3.6 months, to accept took 7.8 months, to online publication 8.4 months, and to paper 14.6 months. You can see the effect of the extra pages that Ramin organized. Figures for 2013 are not yet in, but as of September there were 703 submissions, of which 14 had already been accepted. These statistics suggest that the journal is generally efficient at handling papers.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The two factors that make TPAMI a wonderful journal are largely immune to disruption by a change of editors. Our community is a fertile source of exciting intellectual creations and scientific discoveries, and this factor ensures there are fine papers for the journal to publish. The other factor is the large community of volunteers who find and promote strong papers. The journal owes a great deal to the tremendous efforts, skill, and professionalism of the Associate Editors in Chief (AEICs). In 2011, TPAMI received 944 submissions, of which 171 were accepted. On average, from submission to first decision took 4.8 months, to accept took 10 months, to online publication 11.1 months, and to paper 17.5 months. Note that these numbers are not cumulative. In 2012, TPAMI received 1,033 submissions, of which 166 have thus far been accepted. On average, from submission to first decision took 3.6 months, to accept took 7.8 months, to online publication 8.4 months, and to paper 14.6 months. You can see the effect of the extra pages that Ramin organized. Figures for 2013 are not yet in, but as of September there were 703 submissions, of which 14 had already been accepted. These statistics suggest that the journal is generally efficient at handling papers.",
"title": "Editorial: State of the Journal",
"normalizedTitle": "Editorial: State of the Journal",
"fno": "ttp2014010001",
"hasPdf": true,
"idPrefix": "tp",
"keywords": [],
"authors": [
{
"givenName": "David A.",
"surname": "Forsyth",
"fullName": "David A. Forsyth",
"affiliation": null,
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": true,
"isOpenAccess": true,
"issueNum": "01",
"pubDate": "2014-01-01 00:00:00",
"pubType": "trans",
"pages": "1-1",
"year": "2014",
"issn": "0162-8828",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "trans/tg/2014/01/ttg2014010001",
"title": "State of the Journal",
"doi": null,
"abstractUrl": "/journal/tg/2014/01/ttg2014010001/13rRUEgarsI",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2017/01/07765164",
"title": "State of the Journal",
"doi": null,
"abstractUrl": "/journal/tp/2017/01/07765164/13rRUwj7cql",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tk/2014/01/ttk2014010001",
"title": "Editorial [State of the Transactions]",
"doi": null,
"abstractUrl": "/journal/tk/2014/01/ttk2014010001/13rRUx0PqpT",
"parentPublication": {
"id": "trans/tk",
"title": "IEEE Transactions on Knowledge & Data Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2013/01/ttp2013010001",
"title": "Farewell state of the journal",
"doi": null,
"abstractUrl": "/journal/tp/2013/01/ttp2013010001/13rRUx0xPjl",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/bd/2017/01/07865885",
"title": "State of the Journal Editorial",
"doi": null,
"abstractUrl": "/journal/bd/2017/01/07865885/13rRUxC0SxQ",
"parentPublication": {
"id": "trans/bd",
"title": "IEEE Transactions on Big Data",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tc/2012/02/ttc2012020145",
"title": "State of the Journal",
"doi": null,
"abstractUrl": "/journal/tc/2012/02/ttc2012020145/13rRUxcbnBM",
"parentPublication": {
"id": "trans/tc",
"title": "IEEE Transactions on Computers",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ta/2013/01/tta2013010001",
"title": "Editorial: State of the Journal",
"doi": null,
"abstractUrl": "/journal/ta/2013/01/tta2013010001/13rRUyY28WG",
"parentPublication": {
"id": "trans/ta",
"title": "IEEE Transactions on Affective Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/bd/2019/01/08654019",
"title": "State of the Journal",
"doi": null,
"abstractUrl": "/journal/bd/2019/01/08654019/180h18pgnbG",
"parentPublication": {
"id": "trans/bd",
"title": "IEEE Transactions on Big Data",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2020/02/08952831",
"title": "State of the Journal Editorial",
"doi": null,
"abstractUrl": "/journal/tp/2020/02/08952831/1gqpWPrYFsA",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2021/04/09370089",
"title": "State of the Journal Editorial",
"doi": null,
"abstractUrl": "/journal/tp/2021/04/09370089/1rHavOpA8ow",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttp20140100c2",
"articleId": "13rRUy3xY3O",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttp2014010002",
"articleId": "13rRUx0xPV0",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNvjgWIL",
"title": "June",
"year": "2014",
"issueNum": "06",
"idPrefix": "tg",
"pubType": "journal",
"volume": "20",
"label": "June",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUy3xY2Q",
"doi": "10.1109/TVCG.2014.2310791",
"abstract": "The success of a journal relies heavily on the quality of submissions and of their reviews. The latter is primarily the work and efforts of the associate editors and the anonymous reviewers. The dedication of associate editors and of external reviewers is essential to the continuing growth of the journal. To continue recognizing these \"unsung heroes\" who drive the scientific peer review process for IEEE Transactions on Visualization and Computer Graphics (TVCG), it is my pleasure to announce the 2013 Best Associate Editor Award and the 2013 Best Reviewer Award. Three associate editors (AEs) for are recognized their dedication and hard work in 2013: Shi-Min Hu, Alla Sheffer, and Shigeo Takahashi. They handled a large number of submissions efficiently with the quickest turnaround (averaging less than 50 days) and provided consistently high-quality, thoughtful AE summary to the authors. In recognizing their distinguished service to the IEEE TVCG, the 2013 TVCG Best Associate Editor Award goes to Shi-Min Hu, Alla Sheffer, and Shigeo Takahashi.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The success of a journal relies heavily on the quality of submissions and of their reviews. The latter is primarily the work and efforts of the associate editors and the anonymous reviewers. The dedication of associate editors and of external reviewers is essential to the continuing growth of the journal. To continue recognizing these \"unsung heroes\" who drive the scientific peer review process for IEEE Transactions on Visualization and Computer Graphics (TVCG), it is my pleasure to announce the 2013 Best Associate Editor Award and the 2013 Best Reviewer Award. Three associate editors (AEs) for are recognized their dedication and hard work in 2013: Shi-Min Hu, Alla Sheffer, and Shigeo Takahashi. They handled a large number of submissions efficiently with the quickest turnaround (averaging less than 50 days) and provided consistently high-quality, thoughtful AE summary to the authors. In recognizing their distinguished service to the IEEE TVCG, the 2013 TVCG Best Associate Editor Award goes to Shi-Min Hu, Alla Sheffer, and Shigeo Takahashi.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The success of a journal relies heavily on the quality of submissions and of their reviews. The latter is primarily the work and efforts of the associate editors and the anonymous reviewers. The dedication of associate editors and of external reviewers is essential to the continuing growth of the journal. To continue recognizing these \"unsung heroes\" who drive the scientific peer review process for IEEE Transactions on Visualization and Computer Graphics (TVCG), it is my pleasure to announce the 2013 Best Associate Editor Award and the 2013 Best Reviewer Award. Three associate editors (AEs) for are recognized their dedication and hard work in 2013: Shi-Min Hu, Alla Sheffer, and Shigeo Takahashi. They handled a large number of submissions efficiently with the quickest turnaround (averaging less than 50 days) and provided consistently high-quality, thoughtful AE summary to the authors. In recognizing their distinguished service to the IEEE TVCG, the 2013 TVCG Best Associate Editor Award goes to Shi-Min Hu, Alla Sheffer, and Shigeo Takahashi.",
"title": "Editor's Note [2013 Best Associate Editor Award & 2013 Best Reviewer Award]",
"normalizedTitle": "Editor's Note [2013 Best Associate Editor Award & 2013 Best Reviewer Award]",
"fno": "06805680",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Awards"
],
"authors": [
{
"givenName": "Ming C.",
"surname": "Lin",
"fullName": "Ming C. Lin",
"affiliation": null,
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": false,
"isOpenAccess": true,
"issueNum": "06",
"pubDate": "2014-06-01 00:00:00",
"pubType": "trans",
"pages": "822-822",
"year": "2014",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [],
"adjacentArticles": {
"previous": null,
"next": {
"fno": "06807545",
"articleId": "13rRUwhHcQT",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNAolH1g",
"title": "Feb.",
"year": "2020",
"issueNum": "02",
"idPrefix": "tp",
"pubType": "journal",
"volume": "42",
"label": "Feb.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1gqpWPrYFsA",
"doi": "10.1109/TPAMI.2019.2958194",
"abstract": "I would like to take this opportunity to bring our readership up to date on the state of the journal. My yearly editorial usually appears in January of each year, but I've delayed it a month so that I can announce our largest cohort yet of new associate editors. By the time this editorial appears, I will have started my fourth year in the role and first of my two-year reappointment. It's once again been a very good year, with our impact factor leaping from 9.455 (2017) to 17.30 (2018), establishing IEEE Transactions on Pattern Analysis and Machine Intelligence as the top-ranked journal in all of computer science. Moreover, our submissions are up from last year; as of 13 Nov 2019, we had 1,101 submissions, compared to 992 as of Nov 13, 2018. For papers accepted in 2018, the average time from submission to first decision is 3.8 months, while the average time from submission to publication on Xplore is 10.8 months. While both metrics reflect an improvement over last year, I'm still short of my target. As I mentioned last year, with the increasing emphasis that our community places on conference papers, I'd ideally like to get the time from submission to online publication down to 6-7 months, which is comparable to the time from conference paper submission to conference paper presentation. Over the past two years, a big part of my strategy to reduce time to acceptance and publication is to appoint more Associate Editors (AEs), which will reduce the workload per AE, hopefully allowing our AEs to focus their energy on fewer papers and shepherd them more efficiently. 
I’m pleased to announce a new cohort of 39 Associate Editors that have joined since January, 2019: Amr Ahmed, Xiang Bai, Dima Damen, Kosta Derpanis, Giovanni Farinella, Ryan Farrell, Yasu Furukawa, Jim Glass, Andras Gyorgy, Tim Hospedales, Brian Kingsbury, Ajay Kumar, Simon Lacoste-Julien, Lihong Li, Ce Liu, Tie-Yan Liu, Wei Liu, Chen Change Loy, Michael Maire, Deyu Meng, Vlad Morariu, Cheng Soon Ong, John Paisley, Thomas Pock, Liva Ralaivola, Xiaofen Ren, Irina Rish, Amit K. Roy-Chowdhury, Yaser Sheikh, Suvrit Sra, Ping Tan, Christian Theobalt, Radu Timofte, Lorenzo Torresani, Chong Wang, Jue Wang, Richard Wildes, Christian Wolf, and Lei Zhang. Professional biographies are presented for these individuals.",
"abstracts": [
{
"abstractType": "Regular",
"content": "I would like to take this opportunity to bring our readership up to date on the state of the journal. My yearly editorial usually appears in January of each year, but I've delayed it a month so that I can announce our largest cohort yet of new associate editors. By the time this editorial appears, I will have started my fourth year in the role and first of my two-year reappointment. It's once again been a very good year, with our impact factor leaping from 9.455 (2017) to 17.30 (2018), establishing IEEE Transactions on Pattern Analysis and Machine Intelligence as the top-ranked journal in all of computer science. Moreover, our submissions are up from last year; as of 13 Nov 2019, we had 1,101 submissions, compared to 992 as of Nov 13, 2018. For papers accepted in 2018, the average time from submission to first decision is 3.8 months, while the average time from submission to publication on Xplore is 10.8 months. While both metrics reflect an improvement over last year, I'm still short of my target. As I mentioned last year, with the increasing emphasis that our community places on conference papers, I'd ideally like to get the time from submission to online publication down to 6-7 months, which is comparable to the time from conference paper submission to conference paper presentation. Over the past two years, a big part of my strategy to reduce time to acceptance and publication is to appoint more Associate Editors (AEs), which will reduce the workload per AE, hopefully allowing our AEs to focus their energy on fewer papers and shepherd them more efficiently. 
I’m pleased to announce a new cohort of 39 Associate Editors that have joined since January, 2019: Amr Ahmed, Xiang Bai, Dima Damen, Kosta Derpanis, Giovanni Farinella, Ryan Farrell, Yasu Furukawa, Jim Glass, Andras Gyorgy, Tim Hospedales, Brian Kingsbury, Ajay Kumar, Simon Lacoste-Julien, Lihong Li, Ce Liu, Tie-Yan Liu, Wei Liu, Chen Change Loy, Michael Maire, Deyu Meng, Vlad Morariu, Cheng Soon Ong, John Paisley, Thomas Pock, Liva Ralaivola, Xiaofen Ren, Irina Rish, Amit K. Roy-Chowdhury, Yaser Sheikh, Suvrit Sra, Ping Tan, Christian Theobalt, Radu Timofte, Lorenzo Torresani, Chong Wang, Jue Wang, Richard Wildes, Christian Wolf, and Lei Zhang. Professional biographies are presented for these individuals.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "I would like to take this opportunity to bring our readership up to date on the state of the journal. My yearly editorial usually appears in January of each year, but I've delayed it a month so that I can announce our largest cohort yet of new associate editors. By the time this editorial appears, I will have started my fourth year in the role and first of my two-year reappointment. It's once again been a very good year, with our impact factor leaping from 9.455 (2017) to 17.30 (2018), establishing IEEE Transactions on Pattern Analysis and Machine Intelligence as the top-ranked journal in all of computer science. Moreover, our submissions are up from last year; as of 13 Nov 2019, we had 1,101 submissions, compared to 992 as of Nov 13, 2018. For papers accepted in 2018, the average time from submission to first decision is 3.8 months, while the average time from submission to publication on Xplore is 10.8 months. While both metrics reflect an improvement over last year, I'm still short of my target. As I mentioned last year, with the increasing emphasis that our community places on conference papers, I'd ideally like to get the time from submission to online publication down to 6-7 months, which is comparable to the time from conference paper submission to conference paper presentation. Over the past two years, a big part of my strategy to reduce time to acceptance and publication is to appoint more Associate Editors (AEs), which will reduce the workload per AE, hopefully allowing our AEs to focus their energy on fewer papers and shepherd them more efficiently. 
I’m pleased to announce a new cohort of 39 Associate Editors that have joined since January, 2019: Amr Ahmed, Xiang Bai, Dima Damen, Kosta Derpanis, Giovanni Farinella, Ryan Farrell, Yasu Furukawa, Jim Glass, Andras Gyorgy, Tim Hospedales, Brian Kingsbury, Ajay Kumar, Simon Lacoste-Julien, Lihong Li, Ce Liu, Tie-Yan Liu, Wei Liu, Chen Change Loy, Michael Maire, Deyu Meng, Vlad Morariu, Cheng Soon Ong, John Paisley, Thomas Pock, Liva Ralaivola, Xiaofen Ren, Irina Rish, Amit K. Roy-Chowdhury, Yaser Sheikh, Suvrit Sra, Ping Tan, Christian Theobalt, Radu Timofte, Lorenzo Torresani, Chong Wang, Jue Wang, Richard Wildes, Christian Wolf, and Lei Zhang. Professional biographies are presented for these individuals.",
"title": "State of the Journal Editorial",
"normalizedTitle": "State of the Journal Editorial",
"fno": "08952831",
"hasPdf": true,
"idPrefix": "tp",
"keywords": [],
"authors": [
{
"givenName": "Sven",
"surname": "Dickinson",
"fullName": "Sven Dickinson",
"affiliation": null,
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": true,
"isOpenAccess": true,
"issueNum": "02",
"pubDate": "2020-02-01 00:00:00",
"pubType": "trans",
"pages": "253-260",
"year": "2020",
"issn": "0162-8828",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "trans/tg/2014/01/ttg2014010001",
"title": "State of the Journal",
"doi": null,
"abstractUrl": "/journal/tg/2014/01/ttg2014010001/13rRUEgarsI",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/td/2018/01/08173510",
"title": "State of the Journal",
"doi": null,
"abstractUrl": "/journal/td/2018/01/08173510/13rRUIJuxv3",
"parentPublication": {
"id": "trans/td",
"title": "IEEE Transactions on Parallel & Distributed Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ta/2012/01/tta2012010001",
"title": "Editorial: State of the Journal",
"doi": null,
"abstractUrl": "/journal/ta/2012/01/tta2012010001/13rRUwbaqT5",
"parentPublication": {
"id": "trans/ta",
"title": "IEEE Transactions on Affective Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/td/2014/01/ttd2014010001",
"title": "State of the Journal",
"doi": null,
"abstractUrl": "/journal/td/2014/01/ttd2014010001/13rRUwd9CFL",
"parentPublication": {
"id": "trans/td",
"title": "IEEE Transactions on Parallel & Distributed Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2014/01/ttp2014010001",
"title": "Editorial: State of the Journal",
"doi": null,
"abstractUrl": "/journal/tp/2014/01/ttp2014010001/13rRUxYIMWo",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tc/2012/02/ttc2012020145",
"title": "State of the Journal",
"doi": null,
"abstractUrl": "/journal/tc/2012/02/ttc2012020145/13rRUxcbnBM",
"parentPublication": {
"id": "trans/tc",
"title": "IEEE Transactions on Computers",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ta/2013/01/tta2013010001",
"title": "Editorial: State of the Journal",
"doi": null,
"abstractUrl": "/journal/ta/2013/01/tta2013010001/13rRUyY28WG",
"parentPublication": {
"id": "trans/ta",
"title": "IEEE Transactions on Affective Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tk/2016/01/07347536",
"title": "State of the Journal Editorial",
"doi": null,
"abstractUrl": "/journal/tk/2016/01/07347536/13rRUyfKIIj",
"parentPublication": {
"id": "trans/tk",
"title": "IEEE Transactions on Knowledge & Data Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/si/2020/01/08945475",
"title": "Editorial on the Opening of the New Editorial Year—The State of the IEEE Transactions on Very Large Scale Integration (VLSI) Systems",
"doi": null,
"abstractUrl": "/journal/si/2020/01/08945475/1gbtZxspGUM",
"parentPublication": {
"id": "trans/si",
"title": "IEEE Transactions on Very Large Scale Integration (VLSI) Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2021/04/09370089",
"title": "State of the Journal Editorial",
"doi": null,
"abstractUrl": "/journal/tp/2021/04/09370089/1rHavOpA8ow",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "08952814",
"articleId": "1gqq0HI0hqg",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08468042",
"articleId": "13HFzhLGKr3",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNAnuTs6",
"title": "Nov.-Dec.",
"year": "2013",
"issueNum": "06",
"idPrefix": "tb",
"pubType": "journal",
"volume": "10",
"label": "Nov.-Dec.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUILLku4",
"doi": "10.1109/TCBB.2013.189",
"abstract": "This special section includes a selection of papers presented at the Eighth International Symposium on Bioinformatics Research and Application (ISBRA), which was held in Dallas, Texas, on 21-23 May 2012. The ISBRA symposium provides a forum for the exchange of ideas and results among researchers, developers, and practitioners working on all aspects of bioinformatics and computational biology and their applications. In 2012, 66 papers were submitted in response to the call for papers, out of which 26 papers appeared in the ISBRA proceedings published as volume 7292 of Springer Verlag's Lecture Notes in Bioinformatics series. Extended versions of nine symposium papers were invited and accepted for publication in this special section following a rigorous review process. The selected papers cover a broad range of bioinformatics topics, including biological networks, computational complexity of problems in structural biology and genomics, and phylogenetic inference and analysis. Herein, we briefly introduce each of them.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This special section includes a selection of papers presented at the Eighth International Symposium on Bioinformatics Research and Application (ISBRA), which was held in Dallas, Texas, on 21-23 May 2012. The ISBRA symposium provides a forum for the exchange of ideas and results among researchers, developers, and practitioners working on all aspects of bioinformatics and computational biology and their applications. In 2012, 66 papers were submitted in response to the call for papers, out of which 26 papers appeared in the ISBRA proceedings published as volume 7292 of Springer Verlag's Lecture Notes in Bioinformatics series. Extended versions of nine symposium papers were invited and accepted for publication in this special section following a rigorous review process. The selected papers cover a broad range of bioinformatics topics, including biological networks, computational complexity of problems in structural biology and genomics, and phylogenetic inference and analysis. Herein, we briefly introduce each of them.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This special section includes a selection of papers presented at the Eighth International Symposium on Bioinformatics Research and Application (ISBRA), which was held in Dallas, Texas, on 21-23 May 2012. The ISBRA symposium provides a forum for the exchange of ideas and results among researchers, developers, and practitioners working on all aspects of bioinformatics and computational biology and their applications. In 2012, 66 papers were submitted in response to the call for papers, out of which 26 papers appeared in the ISBRA proceedings published as volume 7292 of Springer Verlag's Lecture Notes in Bioinformatics series. Extended versions of nine symposium papers were invited and accepted for publication in this special section following a rigorous review process. The selected papers cover a broad range of bioinformatics topics, including biological networks, computational complexity of problems in structural biology and genomics, and phylogenetic inference and analysis. Herein, we briefly introduce each of them.",
"title": "Guest Editors' introduction to the special section on bioinformatics research and applications",
"normalizedTitle": "Guest Editors' introduction to the special section on bioinformatics research and applications",
"fno": "ttb2013061345",
"hasPdf": true,
"idPrefix": "tb",
"keywords": [
"Special Issues And Sections",
"Research And Development",
"Bioinformatics",
"Meetings",
"Informatics"
],
"authors": [
{
"givenName": "Ion I.",
"surname": "Mandoiu",
"fullName": "Ion I. Mandoiu",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jianxin",
"surname": "Wang",
"fullName": "Jianxin Wang",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "Alexander",
"surname": "Zelikovsky",
"fullName": "Alexander Zelikovsky",
"affiliation": null,
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": false,
"isOpenAccess": true,
"issueNum": "06",
"pubDate": "2013-11-01 00:00:00",
"pubType": "trans",
"pages": "1345-1346",
"year": "2013",
"issn": "1545-5963",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [],
"adjacentArticles": {
"previous": {
"fno": "ttb20130600c2",
"articleId": "13rRUxbTMxe",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttb2013061347",
"articleId": "13rRUxlgxMU",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNvjgWIL",
"title": "June",
"year": "2014",
"issueNum": "06",
"idPrefix": "tg",
"pubType": "journal",
"volume": "20",
"label": "June",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUwhHcQT",
"doi": "10.1109/TVCG.2014.2310532",
"abstract": "The IEEE International Symposium on Mixed and Augmented Reality continues to be the leading venue for publishing the latest Mixed andAugmented Reality research, applications and technologies. This special section presents significantly extended versions of the four best papers of the IEEE ISMAR 2012 proceedings. These papers demonstrate the wide range of topics in Augmented Reality research. IEEE ISMAR 2012 had 143 submissions; each paper was reviewed by at least four experts in the field. An international program committee of 15 AR experts invited reviewers, led discussions, invited a rebuttal by the paper authors and prepared a consensus review. To select the final papers for publication, an in-person two-day PC meeting was held, where each paper was discussed, resulting in an overall acceptance rate of 27%. In an additional selection process, an independent Award Committee reviewed the 10 best ranked submissions again to determine the awards for Best Paper and Honorable Mention. For this special section, the authors of the award papers were invited to submit an extended version of their conference paper, with a clear focus on additional content that expands the scientific contribution of the original conference paper. A standard TVCG reviewing cycle was initiated and all four papers required multiple revisions and reviews.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The IEEE International Symposium on Mixed and Augmented Reality continues to be the leading venue for publishing the latest Mixed andAugmented Reality research, applications and technologies. This special section presents significantly extended versions of the four best papers of the IEEE ISMAR 2012 proceedings. These papers demonstrate the wide range of topics in Augmented Reality research. IEEE ISMAR 2012 had 143 submissions; each paper was reviewed by at least four experts in the field. An international program committee of 15 AR experts invited reviewers, led discussions, invited a rebuttal by the paper authors and prepared a consensus review. To select the final papers for publication, an in-person two-day PC meeting was held, where each paper was discussed, resulting in an overall acceptance rate of 27%. In an additional selection process, an independent Award Committee reviewed the 10 best ranked submissions again to determine the awards for Best Paper and Honorable Mention. For this special section, the authors of the award papers were invited to submit an extended version of their conference paper, with a clear focus on additional content that expands the scientific contribution of the original conference paper. A standard TVCG reviewing cycle was initiated and all four papers required multiple revisions and reviews.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The IEEE International Symposium on Mixed and Augmented Reality continues to be the leading venue for publishing the latest Mixed andAugmented Reality research, applications and technologies. This special section presents significantly extended versions of the four best papers of the IEEE ISMAR 2012 proceedings. These papers demonstrate the wide range of topics in Augmented Reality research. IEEE ISMAR 2012 had 143 submissions; each paper was reviewed by at least four experts in the field. An international program committee of 15 AR experts invited reviewers, led discussions, invited a rebuttal by the paper authors and prepared a consensus review. To select the final papers for publication, an in-person two-day PC meeting was held, where each paper was discussed, resulting in an overall acceptance rate of 27%. In an additional selection process, an independent Award Committee reviewed the 10 best ranked submissions again to determine the awards for Best Paper and Honorable Mention. For this special section, the authors of the award papers were invited to submit an extended version of their conference paper, with a clear focus on additional content that expands the scientific contribution of the original conference paper. A standard TVCG reviewing cycle was initiated and all four papers required multiple revisions and reviews.",
"title": "Guest Editor's Introduction: Special Section on the International Symposium on Mixed and Augmented Reality 2012",
"normalizedTitle": "Guest Editor's Introduction: Special Section on the International Symposium on Mixed and Augmented Reality 2012",
"fno": "06807545",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Special Issues And Sections",
"Augmented Reality",
"Three Dimensional Displays",
"Real Time Systems",
"Meetings"
],
"authors": [
{
"givenName": "Maribeth",
"surname": "Gandy",
"fullName": "Maribeth Gandy",
"affiliation": "Interactive Media Technology Center, and the Media, Institute for People and Technology,",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Kiyoshi",
"surname": "Kiyokawa",
"fullName": "Kiyoshi Kiyokawa",
"affiliation": "Cybermedia Center, Osaka University, Toyonaka Educational Research Center 517, 1-32 Machikaneyama, Toyonaka, Japan",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Gerhard",
"surname": "Reitmayr",
"fullName": "Gerhard Reitmayr",
"affiliation": "Technische Unversitaet Graz, Inffeldgasse 16c, 2. Floor, Room ID02058, 8010 Graz, Austria",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": true,
"isOpenAccess": true,
"issueNum": "06",
"pubDate": "2014-06-01 00:00:00",
"pubType": "trans",
"pages": "823-824",
"year": "2014",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "trans/tg/2013/06/ttg2013060898",
"title": "Guest Editors' Introduction: Special Section on the IEEE Pacific Visualization Symposium 2012",
"doi": null,
"abstractUrl": "/journal/tg/2013/06/ttg2013060898/13rRUNvgziD",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2015/08/07138667",
"title": "Guest Editors’ Introduction: Special Section on the IEEE Pacific Visualization Symposium 2014",
"doi": null,
"abstractUrl": "/journal/tg/2015/08/07138667/13rRUwI5Ugf",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2011/01/ttg2011010001",
"title": "Guest Editor's Introduction Special Section on the Virtual Reality Conference (VR)",
"doi": null,
"abstractUrl": "/journal/tg/2011/01/ttg2011010001/13rRUwIF6l4",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/01/ttg2014010002",
"title": "Guest Editor's Introduction: Special Section on the ACM SIGGRAPH/Eurographics Symposium on Computer Animation (SCA)",
"doi": null,
"abstractUrl": "/journal/tg/2014/01/ttg2014010002/13rRUwfZC0h",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2015/05/07067526",
"title": "Guest Editor's Introduction to the Special Section on the International Symposium on Mixed and Augmented Reality 2013",
"doi": null,
"abstractUrl": "/journal/tg/2015/05/07067526/13rRUwwaKta",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tm/2013/08/ttm2013081470",
"title": "Guest Editorial: Special Section on Outstanding Papers from MobiSys 2012",
"doi": null,
"abstractUrl": "/journal/tm/2013/08/ttm2013081470/13rRUxAATh6",
"parentPublication": {
"id": "trans/tm",
"title": "IEEE Transactions on Mobile Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/11/08053887",
"title": "Message from the Editor-in-Chief and from the Associate Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2017/11/08053887/13rRUxBa56a",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/07/ttg2013071076",
"title": "Guest Editors' Introduction: Special Section on the IEEE Conference on Visual Analytics Science and Technology (VAST)",
"doi": null,
"abstractUrl": "/journal/tg/2013/07/ttg2013071076/13rRUxOdD2D",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/11/09927176",
"title": "Message from the ISMAR 2022 Science and Technology Journal Program Chairs and TVCG Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2022/11/09927176/1HGJ8mlD3S8",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/11/09591492",
"title": "Message from the ISMAR 2021 Science and Technology Journal Program Chairs and TVCG Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2021/11/09591492/1y2FvGMxBuM",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "06805680",
"articleId": "13rRUy3xY2Q",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "06636302",
"articleId": "13rRUwcAqqj",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNzXFovR",
"title": "November",
"year": "2011",
"issueNum": "11",
"idPrefix": "tg",
"pubType": "journal",
"volume": "17",
"label": "November",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUxNW1Zj",
"doi": "10.1109/TVCG.2010.271",
"abstract": "We often interact with fluids in our daily life, either through tools such as when holding a glass of water or directly with our body when we swim or we wash our hands. Multimodal interactions with virtual fluids would greatly improve the simulations realism, particularly through haptic interaction. However, achieving realistic, stable, and real-time force feedback from fluids is particularly challenging. In this work, we propose a novel approach that allows real-time six Degrees of Freedom (DoF) haptic interaction with fluids of variable viscosity. Our haptic rendering technique, based on a Smoothed-Particle Hydrodynamics physical model, provides a realistic haptic feedback through physically based forces. 6DoF haptic interaction with fluids is made possible thanks to a new coupling scheme and a unified particle model, allowing the use of arbitrary-shaped rigid bodies. Particularly, fluid containers can be created to hold fluid and hence transmit to the user force feedback coming from fluid stirring, pouring, shaking, and scooping, to name a few. Moreover, we adapted an existing visual rendering algorithm to meet the frame rate requirements of the haptic algorithms. We evaluate and illustrate the main features of our approach through different scenarios, highlighting the 6DoF haptic feedback and the use of containers.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We often interact with fluids in our daily life, either through tools such as when holding a glass of water or directly with our body when we swim or we wash our hands. Multimodal interactions with virtual fluids would greatly improve the simulations realism, particularly through haptic interaction. However, achieving realistic, stable, and real-time force feedback from fluids is particularly challenging. In this work, we propose a novel approach that allows real-time six Degrees of Freedom (DoF) haptic interaction with fluids of variable viscosity. Our haptic rendering technique, based on a Smoothed-Particle Hydrodynamics physical model, provides a realistic haptic feedback through physically based forces. 6DoF haptic interaction with fluids is made possible thanks to a new coupling scheme and a unified particle model, allowing the use of arbitrary-shaped rigid bodies. Particularly, fluid containers can be created to hold fluid and hence transmit to the user force feedback coming from fluid stirring, pouring, shaking, and scooping, to name a few. Moreover, we adapted an existing visual rendering algorithm to meet the frame rate requirements of the haptic algorithms. We evaluate and illustrate the main features of our approach through different scenarios, highlighting the 6DoF haptic feedback and the use of containers.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We often interact with fluids in our daily life, either through tools such as when holding a glass of water or directly with our body when we swim or we wash our hands. Multimodal interactions with virtual fluids would greatly improve the simulations realism, particularly through haptic interaction. However, achieving realistic, stable, and real-time force feedback from fluids is particularly challenging. In this work, we propose a novel approach that allows real-time six Degrees of Freedom (DoF) haptic interaction with fluids of variable viscosity. Our haptic rendering technique, based on a Smoothed-Particle Hydrodynamics physical model, provides a realistic haptic feedback through physically based forces. 6DoF haptic interaction with fluids is made possible thanks to a new coupling scheme and a unified particle model, allowing the use of arbitrary-shaped rigid bodies. Particularly, fluid containers can be created to hold fluid and hence transmit to the user force feedback coming from fluid stirring, pouring, shaking, and scooping, to name a few. Moreover, we adapted an existing visual rendering algorithm to meet the frame rate requirements of the haptic algorithms. We evaluate and illustrate the main features of our approach through different scenarios, highlighting the 6DoF haptic feedback and the use of containers.",
"title": "Six Degrees-of-Freedom Haptic Interaction with Fluids",
"normalizedTitle": "Six Degrees-of-Freedom Haptic Interaction with Fluids",
"fno": "ttg2011111714",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"6 Do F Haptic Interaction",
"Computational Fluid Dynamics",
"Smoothed Particle Hydrodynamics",
"Rigid Bodies"
],
"authors": [
{
"givenName": "Gabriel",
"surname": "Cirio",
"fullName": "Gabriel Cirio",
"affiliation": "IRISA/INRIA, Campus Universitaire de Beaulieu, Rennes",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Maud",
"surname": "Marchal",
"fullName": "Maud Marchal",
"affiliation": "INRIA/IRISA/INSA, IRISA/INRIA, Campus Universitaire de Beaulieu, Rennes",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Sébastien",
"surname": "Hillaire",
"fullName": "Sébastien Hillaire",
"affiliation": "INRIA/IRISA/Orange Labs, IRISA/INRIA Rennes, Campus Universitaire de Beaulieu, Rennes",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Anatole",
"surname": "Lécuyer",
"fullName": "Anatole Lécuyer",
"affiliation": "INRIA, Rennes",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "11",
"pubDate": "2011-11-01 00:00:00",
"pubType": "trans",
"pages": "1714-1727",
"year": "2011",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2011/0039/0/05759449",
"title": "\"Tap, squeeze and stir\" the virtual world: Touching the different states of matter through 6DoF haptic interaction",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2011/05759449/12OmNBWi6JL",
"parentPublication": {
"id": "proceedings/vr/2011/0039/0",
"title": "2011 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aihas/1994/6440/0/00390502",
"title": "Fluids in a distributed interactive simulation",
"doi": null,
"abstractUrl": "/proceedings-article/aihas/1994/00390502/12OmNC4wtKv",
"parentPublication": {
"id": "proceedings/aihas/1994/6440/0",
"title": "Fifth Annual Conference on AI, and Planning in High Autonomy Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/haptics/2003/1890/0/18900010",
"title": "Haptic Displays Based on Magnetorheological Fluids: Design, Realization and Ppsychophysical Validation",
"doi": null,
"abstractUrl": "/proceedings-article/haptics/2003/18900010/12OmNqBKTOL",
"parentPublication": {
"id": "proceedings/haptics/2003/1890/0",
"title": "Haptic Interfaces for Virtual Environment and Teleoperator Systems, International Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2006/0224/0/02240191",
"title": "A Six Degree-of-Freedom God-Object Method for Haptic Display of Rigid Bodies",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2006/02240191/12OmNqGitZq",
"parentPublication": {
"id": "proceedings/vr/2006/0224/0",
"title": "IEEE Virtual Reality Conference (VR 2006)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2008/2047/0/04476595",
"title": "Real-Time 3D Fluid Interaction with a Haptic User Interface",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2008/04476595/12OmNvxKu1U",
"parentPublication": {
"id": "proceedings/3dui/2008/2047/0",
"title": "2008 IEEE Symposium on 3D User Interfaces",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cerma/2011/4563/0/4563a391",
"title": "CUDA-enabled Particle-Based 3D Fluid Haptic Simulation",
"doi": null,
"abstractUrl": "/proceedings-article/cerma/2011/4563a391/12OmNzlly2C",
"parentPublication": {
"id": "proceedings/cerma/2011/4563/0",
"title": "Electronics, Robotics and Automotive Mechanics Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/th/2015/01/06983631",
"title": "Simulating Sharp Geometric Features in Six Degrees-of-Freedom Haptic Rendering",
"doi": null,
"abstractUrl": "/journal/th/2015/01/06983631/13rRUEgarjB",
"parentPublication": {
"id": "trans/th",
"title": "IEEE Transactions on Haptics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/th/2009/01/tth2009010015",
"title": "Data-Driven Haptic Rendering—From Viscous Fluids to Visco-Elastic Solids",
"doi": null,
"abstractUrl": "/journal/th/2009/01/tth2009010015/13rRUxAASW2",
"parentPublication": {
"id": "trans/th",
"title": "IEEE Transactions on Haptics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2007/03/mcg2007030090",
"title": "A Precomputed Approach for Real-Time Haptic Interaction with Fluids",
"doi": null,
"abstractUrl": "/magazine/cg/2007/03/mcg2007030090/13rRUxjyX6p",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/th/2016/02/07412781",
"title": "Six Degree-of-Freedom Haptic Simulation of Probing Dental Caries Within a Narrow Oral Cavity",
"doi": null,
"abstractUrl": "/journal/th/2016/02/07412781/13rRUzpzeBh",
"parentPublication": {
"id": "trans/th",
"title": "IEEE Transactions on Haptics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2011111702",
"articleId": "13rRUxC0SEe",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2011111728",
"articleId": "13rRUwIF6l5",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "17ShDTXWRNi",
"name": "ttg2011111714s1.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg2011111714s1.mp4",
"extension": "mp4",
"size": "34.3 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNAGepWJ",
"title": "Oct.",
"year": "2015",
"issueNum": "10",
"idPrefix": "tg",
"pubType": "journal",
"volume": "21",
"label": "Oct.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUxlgy3K",
"doi": "10.1109/TVCG.2015.2449303",
"abstract": "We propose a new method to simulate large scale water phenomena by combining particle, 3D grid and height field methods. In contrast to most hybrid approaches that use particles to simulate foam and spray only, we also represent the bulk of water near the surface with both particles and a grid depending on the regions of interest and switch between those two representations during the course of the simulation. For the coupling we leverage the recent idea of tracking the water surface with a density field in grid based methods. Combining particles and a grid simulation then amounts to adding the density field of the particles and the one stored on the grid. For open scenes, we simulate the water outside of the 3D grid domain by solving the Shallow Water Equations on a height field. We propose new methods to couple these two domains such that waves travel naturally across the border. We demonstrate the effectiveness of our approach in various scenarios including a whale breaching simulation, all running in real-time or at interactive rates.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We propose a new method to simulate large scale water phenomena by combining particle, 3D grid and height field methods. In contrast to most hybrid approaches that use particles to simulate foam and spray only, we also represent the bulk of water near the surface with both particles and a grid depending on the regions of interest and switch between those two representations during the course of the simulation. For the coupling we leverage the recent idea of tracking the water surface with a density field in grid based methods. Combining particles and a grid simulation then amounts to adding the density field of the particles and the one stored on the grid. For open scenes, we simulate the water outside of the 3D grid domain by solving the Shallow Water Equations on a height field. We propose new methods to couple these two domains such that waves travel naturally across the border. We demonstrate the effectiveness of our approach in various scenarios including a whale breaching simulation, all running in real-time or at interactive rates.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We propose a new method to simulate large scale water phenomena by combining particle, 3D grid and height field methods. In contrast to most hybrid approaches that use particles to simulate foam and spray only, we also represent the bulk of water near the surface with both particles and a grid depending on the regions of interest and switch between those two representations during the course of the simulation. For the coupling we leverage the recent idea of tracking the water surface with a density field in grid based methods. Combining particles and a grid simulation then amounts to adding the density field of the particles and the one stored on the grid. For open scenes, we simulate the water outside of the 3D grid domain by solving the Shallow Water Equations on a height field. We propose new methods to couple these two domains such that waves travel naturally across the border. We demonstrate the effectiveness of our approach in various scenarios including a whale breaching simulation, all running in real-time or at interactive rates.",
"title": "Coupling 3D Eulerian, Heightfield and Particle Methods for Interactive Simulation of Large Scale Liquid Phenomena",
"normalizedTitle": "Coupling 3D Eulerian, Heightfield and Particle Methods for Interactive Simulation of Large Scale Liquid Phenomena",
"fno": "07132780",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Three Dimensional Displays",
"Liquids",
"Mathematical Model",
"Computational Modeling",
"Solid Modeling",
"Couplings",
"Interpolation",
"Physics Based Animation",
"Fluid Simulation"
],
"authors": [
{
"givenName": "Nuttapong",
"surname": "Chentanez",
"fullName": "Nuttapong Chentanez",
"affiliation": ", NVIDIA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Matthias",
"surname": "Muller",
"fullName": "Matthias Muller",
"affiliation": ", NVIDIA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Tae-Yong",
"surname": "Kim",
"fullName": "Tae-Yong Kim",
"affiliation": ", NVIDIA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "10",
"pubDate": "2015-10-01 00:00:00",
"pubType": "trans",
"pages": "1116-1128",
"year": "2015",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/cw/2016/2303/0/2303a235",
"title": "Individual Time-Stepping for Rigid-Fluid Coupling of Particle Based Fluids",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2016/2303a235/12OmNvT2oZL",
"parentPublication": {
"id": "proceedings/cw/2016/2303/0",
"title": "2016 International Conference on Cyberworlds (CW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cad-graphics/2013/2576/0/06815003",
"title": "Synthesizing Solid-Induced Turbulence for Particle-Based Fluids",
"doi": null,
"abstractUrl": "/proceedings-article/cad-graphics/2013/06815003/12OmNzlD9EF",
"parentPublication": {
"id": "proceedings/cad-graphics/2013/2576/0",
"title": "2013 International Conference on Computer-Aided Design and Computer Graphics (CAD/Graphics)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/01/ttg2014010017",
"title": "Mass-Conserving Eulerian Liquid Simulation",
"doi": null,
"abstractUrl": "/journal/tg/2014/01/ttg2014010017/13rRUB7a1fQ",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2004/02/v0164",
"title": "The Lattice-Boltzmann Method for Simulating Gaseous Phenomena",
"doi": null,
"abstractUrl": "/journal/tg/2004/02/v0164/13rRUwh80H2",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2015/01/mcg2015010046",
"title": "Simulating Drops Settling in a Still Liquid",
"doi": null,
"abstractUrl": "/magazine/cg/2015/01/mcg2015010046/13rRUwjXZMj",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/03/07845705",
"title": "Animating Wall-Bounded Turbulent Smoke via Filament-Mesh Particle-Particle Method",
"doi": null,
"abstractUrl": "/journal/tg/2018/03/07845705/13rRUxBJhvD",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2008/04/ttg2008040797",
"title": "Two-Way Coupled SPH and Particle Level Set Fluid Simulation",
"doi": null,
"abstractUrl": "/journal/tg/2008/04/ttg2008040797/13rRUxE04tu",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wcmeim/2019/5045/0/504500a323",
"title": "Simulation of Liquid Atomization in Cyclone Atomizer Based on VOF-DPM Coupling Model",
"doi": null,
"abstractUrl": "/proceedings-article/wcmeim/2019/504500a323/1hHLqjKLWkU",
"parentPublication": {
"id": "proceedings/wcmeim/2019/5045/0",
"title": "2019 2nd World Conference on Mechanical Engineering and Intelligent Manufacturing (WCMEIM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2020/5608/0/09089636",
"title": "Multiple-scale Simulation Method for Liquid with Trapped Air under Particle-based Framework",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2020/09089636/1jIx9StwsnK",
"parentPublication": {
"id": "proceedings/vr/2020/5608/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/12/09524524",
"title": "Simulating Multi-Scale, Granular Materials and Their Transitions With a Hybrid Euler-Lagrange Solver",
"doi": null,
"abstractUrl": "/journal/tg/2021/12/09524524/1wpqubOKAne",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "07164346",
"articleId": "13rRUzpzeB6",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07130660",
"articleId": "13rRUy3gn7z",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "17ShDTXWRVd",
"name": "ttg201510-07132780s1.zip",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg201510-07132780s1.zip",
"extension": "zip",
"size": "94.4 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
}
|
{
"issue": {
"id": "1rvygosnS8M",
"title": "April",
"year": "2021",
"issueNum": "04",
"idPrefix": "tg",
"pubType": "journal",
"volume": "27",
"label": "April",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1e9h4gzgnmw",
"doi": "10.1109/TVCG.2019.2947437",
"abstract": "We propose a novel implicit density projection approach for hybrid Eulerian/Lagrangian methods like FLIP and APIC to enforce volume conservation of incompressible liquids. Our approach is able to robustly recover from highly degenerate configurations and incorporates volume-conserving boundary handling. A problem of the standard divergence-free pressure solver is that it only has a differential view on density changes. Numerical volume errors, which occur due to large time steps and the limited accuracy of pressure projections, are invisible to the solver and cannot be corrected. Moreover, these errors accumulate over time and can lead to drastic volume changes, especially in long-running simulations or interactive scenarios. Therefore, we introduce a novel method that enforces constant density throughout the fluid. The density itself is tracked via the particles of the hybrid Eulerian/Lagrangian simulation algorithm. To achieve constant density, we use the continuous mass conservation law to derive a pressure Poisson equation which also takes density deviations into account. It can be discretized with standard approaches and easily implemented into existing code by extending the regular pressure solver. Our method enables us to relax the strict time step and solver accuracy requirements of a regular solver, leading to significantly higher performance. Moreover, our approach is able to push fluid particles out of solid obstacles without losing volume and generates more uniform particle distributions, which makes frequent particle resampling unnecessary. We compare the proposed method to standard FLIP and APIC and to previous volume correction approaches in several simulations and demonstrate significant improvements in terms of incompressibility, visual realism, and computational performance.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We propose a novel implicit density projection approach for hybrid Eulerian/Lagrangian methods like FLIP and APIC to enforce volume conservation of incompressible liquids. Our approach is able to robustly recover from highly degenerate configurations and incorporates volume-conserving boundary handling. A problem of the standard divergence-free pressure solver is that it only has a differential view on density changes. Numerical volume errors, which occur due to large time steps and the limited accuracy of pressure projections, are invisible to the solver and cannot be corrected. Moreover, these errors accumulate over time and can lead to drastic volume changes, especially in long-running simulations or interactive scenarios. Therefore, we introduce a novel method that enforces constant density throughout the fluid. The density itself is tracked via the particles of the hybrid Eulerian/Lagrangian simulation algorithm. To achieve constant density, we use the continuous mass conservation law to derive a pressure Poisson equation which also takes density deviations into account. It can be discretized with standard approaches and easily implemented into existing code by extending the regular pressure solver. Our method enables us to relax the strict time step and solver accuracy requirements of a regular solver, leading to significantly higher performance. Moreover, our approach is able to push fluid particles out of solid obstacles without losing volume and generates more uniform particle distributions, which makes frequent particle resampling unnecessary. We compare the proposed method to standard FLIP and APIC and to previous volume correction approaches in several simulations and demonstrate significant improvements in terms of incompressibility, visual realism, and computational performance.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We propose a novel implicit density projection approach for hybrid Eulerian/Lagrangian methods like FLIP and APIC to enforce volume conservation of incompressible liquids. Our approach is able to robustly recover from highly degenerate configurations and incorporates volume-conserving boundary handling. A problem of the standard divergence-free pressure solver is that it only has a differential view on density changes. Numerical volume errors, which occur due to large time steps and the limited accuracy of pressure projections, are invisible to the solver and cannot be corrected. Moreover, these errors accumulate over time and can lead to drastic volume changes, especially in long-running simulations or interactive scenarios. Therefore, we introduce a novel method that enforces constant density throughout the fluid. The density itself is tracked via the particles of the hybrid Eulerian/Lagrangian simulation algorithm. To achieve constant density, we use the continuous mass conservation law to derive a pressure Poisson equation which also takes density deviations into account. It can be discretized with standard approaches and easily implemented into existing code by extending the regular pressure solver. Our method enables us to relax the strict time step and solver accuracy requirements of a regular solver, leading to significantly higher performance. Moreover, our approach is able to push fluid particles out of solid obstacles without losing volume and generates more uniform particle distributions, which makes frequent particle resampling unnecessary. We compare the proposed method to standard FLIP and APIC and to previous volume correction approaches in several simulations and demonstrate significant improvements in terms of incompressibility, visual realism, and computational performance.",
"title": "Implicit Density Projection for Volume Conserving Liquids",
"normalizedTitle": "Implicit Density Projection for Volume Conserving Liquids",
"fno": "08869736",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Computational Fluid Dynamics",
"Flow Simulation",
"Poisson Equation",
"Two Phase Flow",
"Strict Time Step",
"Regular Solver",
"Fluid Particles",
"Previous Volume Correction",
"Volume Conserving",
"Novel Implicit Density Projection Approach",
"Volume Conservation",
"Incompressible Liquids",
"Highly Degenerate Configurations",
"Volume Conserving Boundary Handling",
"Standard Divergence Free Pressure Solver",
"Differential View",
"Density Changes",
"Numerical Volume Errors",
"Time Steps",
"Pressure Projections",
"Drastic Volume Changes",
"Enforces Constant Density",
"Continuous Mass Conservation Law",
"Pressure Poisson Equation",
"Density Deviations",
"Regular Pressure Solver",
"Solid Modeling",
"Computational Modeling",
"Liquids",
"Standards",
"Poisson Equations",
"Visualization",
"Solids",
"Fluid Simulation",
"Volume Conservation",
"FLIP",
"APIC"
],
"authors": [
{
"givenName": "Tassilo",
"surname": "Kugelstadt",
"fullName": "Tassilo Kugelstadt",
"affiliation": "RWTH Aachen University, Aachen, Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Andreas",
"surname": "Longva",
"fullName": "Andreas Longva",
"affiliation": "RWTH Aachen University, Aachen, Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Nils",
"surname": "Thuerey",
"fullName": "Nils Thuerey",
"affiliation": "Technical University of Munich, München, Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jan",
"surname": "Bender",
"fullName": "Jan Bender",
"affiliation": "RWTH Aachen University, Aachen, Germany",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "04",
"pubDate": "2021-04-01 00:00:00",
"pubType": "trans",
"pages": "2385-2395",
"year": "2021",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/hipcw/2016/5773/0/07837054",
"title": "Sub Cooled Boiling: Validation by Using Different CFD Models",
"doi": null,
"abstractUrl": "/proceedings-article/hipcw/2016/07837054/12OmNrGsDnH",
"parentPublication": {
"id": "proceedings/hipcw/2016/5773/0",
"title": "2016 IEEE 23rd International Conference on High-Performance Computing: Workshops (HiPCW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cgiv/2016/0811/0/0811a012",
"title": "Surface Tension and Wettability Modeling for Flowing Liquids",
"doi": null,
"abstractUrl": "/proceedings-article/cgiv/2016/0811a012/12OmNyuya8j",
"parentPublication": {
"id": "proceedings/cgiv/2016/0811/0",
"title": "2016 13th International Conference on Computer Graphics, Imaging and Visualization (CGiV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/01/ttg2014010017",
"title": "Mass-Conserving Eulerian Liquid Simulation",
"doi": null,
"abstractUrl": "/journal/tg/2014/01/ttg2014010017/13rRUB7a1fQ",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/11/07364293",
"title": "Solving the Fluid Pressure Poisson Equation Using Multigrid—Evaluation and Improvements",
"doi": null,
"abstractUrl": "/journal/tg/2016/11/07364293/13rRUwvBy8Y",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/03/ttg2014030426",
"title": "Implicit Incompressible SPH",
"doi": null,
"abstractUrl": "/journal/tg/2014/03/ttg2014030426/13rRUxYrbMg",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/10/06747389",
"title": "Large-Scale Liquid Simulation on Adaptive Hexahedral Grids",
"doi": null,
"abstractUrl": "/journal/tg/2014/10/06747389/13rRUxYrbMj",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/08/06171181",
"title": "A Multigrid Fluid Pressure Solver Handling Separating Solid Boundary Conditions",
"doi": null,
"abstractUrl": "/journal/tg/2012/08/06171181/13rRUxlgxTi",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/03/08478400",
"title": "A Novel CNN-Based Poisson Solver for Fluid Simulation",
"doi": null,
"abstractUrl": "/journal/tg/2020/03/08478400/141AnpAbeCg",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/10/09123549",
"title": "Implicit Frictional Boundary Handling for SPH",
"doi": null,
"abstractUrl": "/journal/tg/2020/10/09123549/1kTxv3ChLeE",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/12/09524524",
"title": "Simulating Multi-Scale, Granular Materials and Their Transitions With a Hybrid Euler-Lagrange Solver",
"doi": null,
"abstractUrl": "/journal/tg/2021/12/09524524/1wpqubOKAne",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09288641",
"articleId": "1pq6982P34Q",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08886386",
"articleId": "1ewvxnLTZgk",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1rvyjqj9c9W",
"name": "ttg202104-08869736s1-tvcgfinal.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg202104-08869736s1-tvcgfinal.mp4",
"extension": "mp4",
"size": "208 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNrYlmwq",
"title": "March/April",
"year": "2007",
"issueNum": "02",
"idPrefix": "tg",
"pubType": "journal",
"volume": "13",
"label": "March/April",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUwfZC07",
"doi": "10.1109/TVCG.2007.39",
"abstract": "Abstract—We propose a novel approach to fracturing (and denting) brittle materials. To avoid the computational burden imposed by the stringent time step restrictions of explicit methods or with solving nonlinear systems of equations for implicit methods, we treat the material as a fully rigid body in the limit of infinite stiffness. In addition to a triangulated surface mesh and level set volume for collisions, each rigid body is outfitted with a tetrahedral mesh upon which finite element analysis can be carried out to provide a stress map for fracture criteria. We demonstrate that the commonly used stress criteria can lead to arbitrary fracture (especially for stiff materials) and instead propose the notion of a time averaged stress directly into the FEM analysis. When objects fracture, the virtual node algorithm provides new triangle and tetrahedral meshes in a straightforward and robust fashion. Although each new rigid body can be rasterized to obtain a new level set, small shards can be difficult to accurately resolve. Therefore, we propose a novel collision handling technique for treating both rigid bodies and rigid body thin shells represented by only a triangle mesh.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Abstract—We propose a novel approach to fracturing (and denting) brittle materials. To avoid the computational burden imposed by the stringent time step restrictions of explicit methods or with solving nonlinear systems of equations for implicit methods, we treat the material as a fully rigid body in the limit of infinite stiffness. In addition to a triangulated surface mesh and level set volume for collisions, each rigid body is outfitted with a tetrahedral mesh upon which finite element analysis can be carried out to provide a stress map for fracture criteria. We demonstrate that the commonly used stress criteria can lead to arbitrary fracture (especially for stiff materials) and instead propose the notion of a time averaged stress directly into the FEM analysis. When objects fracture, the virtual node algorithm provides new triangle and tetrahedral meshes in a straightforward and robust fashion. Although each new rigid body can be rasterized to obtain a new level set, small shards can be difficult to accurately resolve. Therefore, we propose a novel collision handling technique for treating both rigid bodies and rigid body thin shells represented by only a triangle mesh.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Abstract—We propose a novel approach to fracturing (and denting) brittle materials. To avoid the computational burden imposed by the stringent time step restrictions of explicit methods or with solving nonlinear systems of equations for implicit methods, we treat the material as a fully rigid body in the limit of infinite stiffness. In addition to a triangulated surface mesh and level set volume for collisions, each rigid body is outfitted with a tetrahedral mesh upon which finite element analysis can be carried out to provide a stress map for fracture criteria. We demonstrate that the commonly used stress criteria can lead to arbitrary fracture (especially for stiff materials) and instead propose the notion of a time averaged stress directly into the FEM analysis. When objects fracture, the virtual node algorithm provides new triangle and tetrahedral meshes in a straightforward and robust fashion. Although each new rigid body can be rasterized to obtain a new level set, small shards can be difficult to accurately resolve. Therefore, we propose a novel collision handling technique for treating both rigid bodies and rigid body thin shells represented by only a triangle mesh.",
"title": "Fracturing Rigid Materials",
"normalizedTitle": "Fracturing Rigid Materials",
"fno": "v0370",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Finite Element Methods",
"Level Set",
"Stress",
"Surface Cracks",
"Deformable Models",
"Nonlinear Systems",
"Robustness",
"Nonlinear Equations",
"Surface Treatment",
"Glass",
"Fracture",
"Rigid Bodies",
"Finite Element Analysis"
],
"authors": [
{
"givenName": "Zhaosheng",
"surname": "Bao",
"fullName": "Zhaosheng Bao",
"affiliation": "IEEE",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jeong-Mo",
"surname": "Hong",
"fullName": "Jeong-Mo Hong",
"affiliation": "IEEE",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Joseph",
"surname": "Teran",
"fullName": "Joseph Teran",
"affiliation": "IEEE",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Ronald",
"surname": "Fedkiw",
"fullName": "Ronald Fedkiw",
"affiliation": "IEEE",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "02",
"pubDate": "2007-03-01 00:00:00",
"pubType": "trans",
"pages": "370-378",
"year": "2007",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/ectc/2017/6315/0/07999990",
"title": "Toughening Underfills by Stress-Absorbing Core-Shell Fillers",
"doi": null,
"abstractUrl": "/proceedings-article/ectc/2017/07999990/12OmNAq3hvK",
"parentPublication": {
"id": "proceedings/ectc/2017/6315/0",
"title": "2017 IEEE 67th Electronic Components and Technology Conference (ECTC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2015/8391/0/8391d083",
"title": "Robust Non-rigid Motion Tracking and Surface Reconstruction Using L0 Regularization",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2015/8391d083/12OmNB9KHwl",
"parentPublication": {
"id": "proceedings/iccv/2015/8391/0",
"title": "2015 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ca/2001/7237/0/00982388",
"title": "Merging deformable and rigid body mechanics simulation",
"doi": null,
"abstractUrl": "/proceedings-article/ca/2001/00982388/12OmNqIhFMw",
"parentPublication": {
"id": "proceedings/ca/2001/7237/0",
"title": "Proceedings Computer Animation 2001. Fourteenth Conference on Computer Animation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mmbia/1996/7367/0/73670104",
"title": "Deformations Incorporating Rigid Structures",
"doi": null,
"abstractUrl": "/proceedings-article/mmbia/1996/73670104/12OmNx19k4j",
"parentPublication": {
"id": "proceedings/mmbia/1996/7367/0",
"title": "Mathematical Methods in Biomedical Image Analysis, IEEE Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isdea/2014/4261/0/4261a954",
"title": "Study on Fracture Mechanism of Walnut Shell According to Brittle Fracture Area",
"doi": null,
"abstractUrl": "/proceedings-article/isdea/2014/4261a954/12OmNx76TPw",
"parentPublication": {
"id": "proceedings/isdea/2014/4261/0",
"title": "2014 Fifth International Conference on Intelligent Systems Design and Engineering Applications (ISDEA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2007/0905/0/04161009",
"title": "Balanced Hierarchies for Collision Detection between Fracturing Objects",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2007/04161009/12OmNyQYttV",
"parentPublication": {
"id": "proceedings/vr/2007/0905/0",
"title": "2007 IEEE Virtual Reality Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dmdcm/2011/4413/0/4413a049",
"title": "Rigid Body Simulation with Local Fracturing Effects",
"doi": null,
"abstractUrl": "/proceedings-article/dmdcm/2011/4413a049/12OmNyv7mu5",
"parentPublication": {
"id": "proceedings/dmdcm/2011/4413/0",
"title": "Digital Media and Digital Content Management, Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/05/07888591",
"title": "Robust Non-Rigid Motion Tracking and Surface Reconstruction Using Z_$L_0$_Z Regularization",
"doi": null,
"abstractUrl": "/journal/tg/2018/05/07888591/13rRUILtJqX",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/01/ttg2014010030",
"title": "Fast Collision Detection for Fracturing Rigid Bodies",
"doi": null,
"abstractUrl": "/journal/tg/2014/01/ttg2014010030/13rRUxNW1TU",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icedme/2020/8145/0/09122218",
"title": "Cracking Failure Analysis on Waste Heat Boiler Tube",
"doi": null,
"abstractUrl": "/proceedings-article/icedme/2020/09122218/1kRSDLbv7OM",
"parentPublication": {
"id": "proceedings/icedme/2020/8145/0",
"title": "2020 3rd International Conference on Electron Device and Mechanical Engineering (ICEDME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "v0357",
"articleId": "13rRUxlgxOc",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "v0379",
"articleId": "13rRUwkfAZb",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNqJZgIg",
"title": "May/June",
"year": "2006",
"issueNum": "03",
"idPrefix": "tg",
"pubType": "journal",
"volume": "12",
"label": "May/June",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUx0xPTI",
"doi": "10.1109/TVCG.2006.48",
"abstract": "Abstract—We propose a novel approach for dynamically simulating articulated rigid bodies undergoing frequent and unpredictable contact and collision. In order to leverage existing algorithms for nonconvex bodies, multiple collisions, large contact groups, stacking, etc., we use maximal rather than generalized coordinates and take an impulse-based approach that allows us to treat articulation, contact, and collision in a unified manner. Traditional constraint handling methods are subject to drift, and we propose a novel prestabilization method that does not require tunable potentially stiff parameters as does Baumgarte stabilization. This differs from poststabilization in that we compute allowable trajectories before moving the rigid bodies to their new positions, instead of correcting them after the fact when it can be difficult to incorporate the effects of contact and collision. A poststabilization technique is used for momentum and angular momentum. Our approach works with any black box method for specifying valid joint constraints and no special considerations are required for arbitrary closed loops or branching. Moreover, our implementation is linear both in the number of bodies and in the number of auxiliary contact and collision constraints, unlike many other methods that are linear in the number of bodies, but not in the number of auxiliary constraints.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Abstract—We propose a novel approach for dynamically simulating articulated rigid bodies undergoing frequent and unpredictable contact and collision. In order to leverage existing algorithms for nonconvex bodies, multiple collisions, large contact groups, stacking, etc., we use maximal rather than generalized coordinates and take an impulse-based approach that allows us to treat articulation, contact, and collision in a unified manner. Traditional constraint handling methods are subject to drift, and we propose a novel prestabilization method that does not require tunable potentially stiff parameters as does Baumgarte stabilization. This differs from poststabilization in that we compute allowable trajectories before moving the rigid bodies to their new positions, instead of correcting them after the fact when it can be difficult to incorporate the effects of contact and collision. A poststabilization technique is used for momentum and angular momentum. Our approach works with any black box method for specifying valid joint constraints and no special considerations are required for arbitrary closed loops or branching. Moreover, our implementation is linear both in the number of bodies and in the number of auxiliary contact and collision constraints, unlike many other methods that are linear in the number of bodies, but not in the number of auxiliary constraints.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Abstract—We propose a novel approach for dynamically simulating articulated rigid bodies undergoing frequent and unpredictable contact and collision. In order to leverage existing algorithms for nonconvex bodies, multiple collisions, large contact groups, stacking, etc., we use maximal rather than generalized coordinates and take an impulse-based approach that allows us to treat articulation, contact, and collision in a unified manner. Traditional constraint handling methods are subject to drift, and we propose a novel prestabilization method that does not require tunable potentially stiff parameters as does Baumgarte stabilization. This differs from poststabilization in that we compute allowable trajectories before moving the rigid bodies to their new positions, instead of correcting them after the fact when it can be difficult to incorporate the effects of contact and collision. A poststabilization technique is used for momentum and angular momentum. Our approach works with any black box method for specifying valid joint constraints and no special considerations are required for arbitrary closed loops or branching. Moreover, our implementation is linear both in the number of bodies and in the number of auxiliary contact and collision constraints, unlike many other methods that are linear in the number of bodies, but not in the number of auxiliary constraints.",
"title": "Dynamic Simulation of Articulated Rigid Bodies with Contact and Collision",
"normalizedTitle": "Dynamic Simulation of Articulated Rigid Bodies with Contact and Collision",
"fno": "v0365",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Computer Graphics",
"Stacking",
"Animation",
"Robot Kinematics",
"Robotic Assembly",
"Application Software",
"Computational Modeling",
"Iterative Algorithms",
"Humans",
"Games",
"Kinematics And Dynamics",
"Computer Graphics",
"Physically Based Modeling",
"Animation"
],
"authors": [
{
"givenName": "Rachel",
"surname": "Weinstein",
"fullName": "Rachel Weinstein",
"affiliation": "Dept. of Comput. Sci., Stanford Univ., CA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Joseph",
"surname": "Teran",
"fullName": "Joseph Teran",
"affiliation": "Dept. of Comput. Sci., Stanford Univ., CA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Ron",
"surname": "Fedkiw",
"fullName": "Ron Fedkiw",
"affiliation": "Dept. of Comput. Sci., Stanford Univ., CA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "03",
"pubDate": "2006-05-01 00:00:00",
"pubType": "trans",
"pages": "365-374",
"year": "2006",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2006/0224/0/02240191",
"title": "A Six Degree-of-Freedom God-Object Method for Haptic Display of Rigid Bodies",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2006/02240191/12OmNqGitZq",
"parentPublication": {
"id": "proceedings/vr/2006/0224/0",
"title": "IEEE Virtual Reality Conference (VR 2006)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/robot/1991/2163/0/00131661",
"title": "Collision avoidance for two SCARA robots",
"doi": null,
"abstractUrl": "/proceedings-article/robot/1991/00131661/12OmNyq0zMW",
"parentPublication": {
"id": "proceedings/robot/1991/2163/0",
"title": "Proceedings. 1991 IEEE International Conference on Robotics and Automation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pg/2000/0868/0/08680328",
"title": "Collision Detection for Clothed Human Animation",
"doi": null,
"abstractUrl": "/proceedings-article/pg/2000/08680328/12OmNz4SOAO",
"parentPublication": {
"id": "proceedings/pg/2000/0868/0",
"title": "Computer Graphics and Applications, Pacific Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ca/1994/6240/0/00324012",
"title": "Global methods for simulating contacting flexible bodies",
"doi": null,
"abstractUrl": "/proceedings-article/ca/1994/00324012/12OmNzahbS3",
"parentPublication": {
"id": "proceedings/ca/1994/6240/0",
"title": "Proceedings of Computer Animation '94",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/robot/1988/0852/0/00012197",
"title": "On the spatial motion of a rigid body with line contact",
"doi": null,
"abstractUrl": "/proceedings-article/robot/1988/00012197/12OmNzmLxL0",
"parentPublication": {
"id": "proceedings/robot/1988/0852/0",
"title": "Proceedings. 1988 IEEE International Conference on Robotics and Automation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2010/01/ttg2010010070",
"title": "Fluid Simulation with Articulated Bodies",
"doi": null,
"abstractUrl": "/journal/tg/2010/01/ttg2010010070/13rRUxDqS8f",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/01/ttg2014010030",
"title": "Fast Collision Detection for Fracturing Rigid Bodies",
"doi": null,
"abstractUrl": "/journal/tg/2014/01/ttg2014010030/13rRUxNW1TU",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/1995/03/mcg1995030063",
"title": "Interactive Simulation of Solid Rigid Bodies",
"doi": null,
"abstractUrl": "/magazine/cg/1995/03/mcg1995030063/13rRUy3gn2z",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2004/02/v0189",
"title": "A Fast Impulsive Contact Suite for Rigid Body Simulation",
"doi": null,
"abstractUrl": "/journal/tg/2004/02/v0189/13rRUygT7mK",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/06/ttg2013060991",
"title": "Interpenetration Free Simulation of Thin Shell Rigid Bodies",
"doi": null,
"abstractUrl": "/journal/tg/2013/06/ttg2013060991/13rRUygT7yb",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "v0353",
"articleId": "13rRUIJuxpo",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "v0375",
"articleId": "13rRUxZ0o1m",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNAlvHDC",
"title": "June",
"year": "2013",
"issueNum": "06",
"idPrefix": "tg",
"pubType": "journal",
"volume": "19",
"label": "June",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUygT7yb",
"doi": "10.1109/TVCG.2012.179",
"abstract": "We propose a new algorithm for rigid body simulation that guarantees each body is in an interpenetration free state, both increasing the accuracy and robustness of the simulation as well as alleviating the need for ad hoc methods to separate bodies for subsequent simulation and rendering. We cleanly separate collision and contact resolution such that objects move and collide in the first step, with resting contact handled in the second step. The first step of our algorithm guarantees that each time step produces geometry that does not intersect or overlap by using an approximation to the continuous collision detection (and response) problem and, thus, is amenable to thin shells and degenerately flat objects moving at high speeds. In addition, we introduce a novel fail-safe that allows us to resolve all interpenetration without iterating to convergence. Since the first step guarantees a noninterfering state for the geometry, in the second step we propose a contact model for handling thin shells in proximity considering only the instantaneous locations at the ends of the time step.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We propose a new algorithm for rigid body simulation that guarantees each body is in an interpenetration free state, both increasing the accuracy and robustness of the simulation as well as alleviating the need for ad hoc methods to separate bodies for subsequent simulation and rendering. We cleanly separate collision and contact resolution such that objects move and collide in the first step, with resting contact handled in the second step. The first step of our algorithm guarantees that each time step produces geometry that does not intersect or overlap by using an approximation to the continuous collision detection (and response) problem and, thus, is amenable to thin shells and degenerately flat objects moving at high speeds. In addition, we introduce a novel fail-safe that allows us to resolve all interpenetration without iterating to convergence. Since the first step guarantees a noninterfering state for the geometry, in the second step we propose a contact model for handling thin shells in proximity considering only the instantaneous locations at the ends of the time step.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We propose a new algorithm for rigid body simulation that guarantees each body is in an interpenetration free state, both increasing the accuracy and robustness of the simulation as well as alleviating the need for ad hoc methods to separate bodies for subsequent simulation and rendering. We cleanly separate collision and contact resolution such that objects move and collide in the first step, with resting contact handled in the second step. The first step of our algorithm guarantees that each time step produces geometry that does not intersect or overlap by using an approximation to the continuous collision detection (and response) problem and, thus, is amenable to thin shells and degenerately flat objects moving at high speeds. In addition, we introduce a novel fail-safe that allows us to resolve all interpenetration without iterating to convergence. Since the first step guarantees a noninterfering state for the geometry, in the second step we propose a contact model for handling thin shells in proximity considering only the instantaneous locations at the ends of the time step.",
"title": "Interpenetration Free Simulation of Thin Shell Rigid Bodies",
"normalizedTitle": "Interpenetration Free Simulation of Thin Shell Rigid Bodies",
"fno": "ttg2013060991",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Geometry",
"Dynamics",
"Vectors",
"Computational Modeling",
"Robustness",
"Level Set",
"Approximation Methods",
"Thin Shells",
"Computer Graphics",
"Rigid Bodies"
],
"authors": [
{
"givenName": "R. E.",
"surname": "English",
"fullName": "R. E. English",
"affiliation": "Comput. Sci. Dept., Stanford Univ., Stanford, CA, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "M.",
"surname": "Lentine",
"fullName": "M. Lentine",
"affiliation": "Lucas Arts, San Francisco, CA, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "R.",
"surname": "Fedkiw",
"fullName": "R. Fedkiw",
"affiliation": "Ind. Light + Magic, San Francisco, CA, USA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "06",
"pubDate": "2013-06-01 00:00:00",
"pubType": "trans",
"pages": "991-1004",
"year": "2013",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2006/0224/0/02240191",
"title": "A Six Degree-of-Freedom God-Object Method for Haptic Display of Rigid Bodies",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2006/02240191/12OmNqGitZq",
"parentPublication": {
"id": "proceedings/vr/2006/0224/0",
"title": "IEEE Virtual Reality Conference (VR 2006)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/robot/1992/2720/0/00220016",
"title": "Dynamics of rigid bodies undergoing multiple frictional contacts",
"doi": null,
"abstractUrl": "/proceedings-article/robot/1992/00220016/12OmNqIhG70",
"parentPublication": {
"id": "proceedings/robot/1992/2720/0",
"title": "Proceedings 1992 IEEE International Conference on Robotics and Automation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/csie/2009/3507/2/3507b522",
"title": "Non-rigid Medical Image Registration Based on the Thin-Plate Spline Algorithm",
"doi": null,
"abstractUrl": "/proceedings-article/csie/2009/3507b522/12OmNrnJ6JI",
"parentPublication": {
"id": "proceedings/csie/2009/3507/2",
"title": "Computer Science and Information Engineering, World Congress on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iros/1995/7108/2/71082305",
"title": "Fast dynamic simulation of rigid and deformable objects",
"doi": null,
"abstractUrl": "/proceedings-article/iros/1995/71082305/12OmNvEhg21",
"parentPublication": {
"id": "proceedings/iros/1995/7108/2",
"title": "Intelligent Robots and Systems, IEEE/RSJ International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cdciem/2011/4350/0/4350a868",
"title": "Influence of Multiple Cutouts on the Buckling of Large-Scale Thin-Walled Cylindrical Shells of Desulphurizing Tower under Wind Loading",
"doi": null,
"abstractUrl": "/proceedings-article/cdciem/2011/4350a868/12OmNz4SOAV",
"parentPublication": {
"id": "proceedings/cdciem/2011/4350/0",
"title": "Computer Distributed Control and Intelligent Environmental Monitoring, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2011/06/ttg2011060822",
"title": "Orientation-Preserving Rod Elements for Real-Time Thin-Shell Simulation",
"doi": null,
"abstractUrl": "/journal/tg/2011/06/ttg2011060822/13rRUEgarBp",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2007/02/v0370",
"title": "Fracturing Rigid Materials",
"doi": null,
"abstractUrl": "/journal/tg/2007/02/v0370/13rRUwfZC07",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/08/06200365",
"title": "Robust Interactive Collision Handling between Tools and Thin Volumetric Objects",
"doi": null,
"abstractUrl": "/journal/tg/2012/08/06200365/13rRUyp7tWV",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2013060978",
"articleId": "13rRUwjGoLE",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2013061005",
"articleId": "13rRUNvgz9M",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "17ShDTYesOb",
"name": "ttg2013060991s1.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg2013060991s1.mp4",
"extension": "mp4",
"size": "47.5 MB",
"__typename": "WebExtraType"
},
{
"id": "17ShDTYesOa",
"name": "ttg2013060991s2.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg2013060991s2.mp4",
"extension": "mp4",
"size": "7.37 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNAnuTsb",
"title": "July",
"year": "2016",
"issueNum": "07",
"idPrefix": "tg",
"pubType": "journal",
"volume": "22",
"label": "July",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUB7a1fV",
"doi": "10.1109/TVCG.2015.2502583",
"abstract": "Occlusion presents a major challenge in visualizing 3D flow and tensor fields using streamlines. Displaying too many streamlines creates a dense visualization filled with occluded structures, but displaying too few streams risks losing important features. We propose a new streamline exploration approach by visually manipulating the cluttered streamlines by pulling visible layers apart and revealing the hidden structures underneath. This paper presents a customized view-dependent deformation algorithm and an interactive visualization tool to minimize visual clutter in 3D vector and tensor fields. The algorithm is able to maintain the overall integrity of the fields and expose previously hidden structures. Our system supports both mouse and direct-touch interactions to manipulate the viewing perspectives and visualize the streamlines in depth. By using a lens metaphor of different shapes to select the transition zone of the targeted area interactively, the users can move their focus and examine the vector or tensor field freely.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Occlusion presents a major challenge in visualizing 3D flow and tensor fields using streamlines. Displaying too many streamlines creates a dense visualization filled with occluded structures, but displaying too few streams risks losing important features. We propose a new streamline exploration approach by visually manipulating the cluttered streamlines by pulling visible layers apart and revealing the hidden structures underneath. This paper presents a customized view-dependent deformation algorithm and an interactive visualization tool to minimize visual clutter in 3D vector and tensor fields. The algorithm is able to maintain the overall integrity of the fields and expose previously hidden structures. Our system supports both mouse and direct-touch interactions to manipulate the viewing perspectives and visualize the streamlines in depth. By using a lens metaphor of different shapes to select the transition zone of the targeted area interactively, the users can move their focus and examine the vector or tensor field freely.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Occlusion presents a major challenge in visualizing 3D flow and tensor fields using streamlines. Displaying too many streamlines creates a dense visualization filled with occluded structures, but displaying too few streams risks losing important features. We propose a new streamline exploration approach by visually manipulating the cluttered streamlines by pulling visible layers apart and revealing the hidden structures underneath. This paper presents a customized view-dependent deformation algorithm and an interactive visualization tool to minimize visual clutter in 3D vector and tensor fields. The algorithm is able to maintain the overall integrity of the fields and expose previously hidden structures. Our system supports both mouse and direct-touch interactions to manipulate the viewing perspectives and visualize the streamlines in depth. By using a lens metaphor of different shapes to select the transition zone of the targeted area interactively, the users can move their focus and examine the vector or tensor field freely.",
"title": "View-Dependent Streamline Deformation and Exploration",
"normalizedTitle": "View-Dependent Streamline Deformation and Exploration",
"fno": "07332955",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Lenses",
"Context",
"Streaming Media",
"Three Dimensional Displays",
"Visualization",
"Shape",
"Deformable Models",
"Occlusion",
"Flow Visualization",
"Streamline",
"White Matter Tracts",
"Focus Context",
"Deformation",
"Occlusion",
"Flow Visualization",
"Streamline",
"White Matter Tracts",
"Focus Context",
"Deformation"
],
"authors": [
{
"givenName": "Xin",
"surname": "Tong",
"fullName": "Xin Tong",
"affiliation": "Department of Computer Science and Engineering, The Ohio State University, Columbus, OH",
"__typename": "ArticleAuthorType"
},
{
"givenName": "John",
"surname": "Edwards",
"fullName": "John Edwards",
"affiliation": "Scientific Computing and Imaging Institute, University of Utah, Salt Lake City, UT",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Chun-Ming",
"surname": "Chen",
"fullName": "Chun-Ming Chen",
"affiliation": "Department of Computer Science and Engineering, The Ohio State University, Columbus, OH",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Han-Wei",
"surname": "Shen",
"fullName": "Han-Wei Shen",
"affiliation": "Department of Computer Science and Engineering, The Ohio State University, Columbus, OH",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Chris R.",
"surname": "Johnson",
"fullName": "Chris R. Johnson",
"affiliation": "Scientific Computing and Imaging Institute, University of Utah, Salt Lake City, UT",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Pak Chung",
"surname": "Wong",
"fullName": "Pak Chung Wong",
"affiliation": "Pacific Northwest National Laboratory, Richland, WA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "07",
"pubDate": "2016-07-01 00:00:00",
"pubType": "trans",
"pages": "1788-1801",
"year": "2016",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/pacificvis/2011/935/0/05742376",
"title": "View point evaluation and streamline filtering for flow visualization",
"doi": null,
"abstractUrl": "/proceedings-article/pacificvis/2011/05742376/12OmNqyDjoV",
"parentPublication": {
"id": "proceedings/pacificvis/2011/935/0",
"title": "2011 IEEE Pacific Visualization Symposium (PacificVis)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pacificvis/2013/4797/0/06596153",
"title": "Exploring vector fields with distribution-based streamline analysis",
"doi": null,
"abstractUrl": "/proceedings-article/pacificvis/2013/06596153/12OmNvAiSjV",
"parentPublication": {
"id": "proceedings/pacificvis/2013/4797/0",
"title": "2013 IEEE Pacific Visualization Symposium (PacificVis)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pacificvis/2008/1966/0/04475462",
"title": "Illustrative Streamline Placement and Visualization",
"doi": null,
"abstractUrl": "/proceedings-article/pacificvis/2008/04475462/12OmNwoPty5",
"parentPublication": {
"id": "proceedings/pacificvis/2008/1966/0",
"title": "IEEE Pacific Visualization Symposium 2008",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-vis/2002/7498/0/7498zheng",
"title": "Volume Deformation For Tensor Visualization",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/2002/7498zheng/12OmNxA3YXe",
"parentPublication": {
"id": "proceedings/ieee-vis/2002/7498/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pacificvis/2015/6879/0/07156349",
"title": "Interactive streamline exploration and manipulation using deformation",
"doi": null,
"abstractUrl": "/proceedings-article/pacificvis/2015/07156349/12OmNznkJUv",
"parentPublication": {
"id": "proceedings/pacificvis/2015/6879/0",
"title": "2015 IEEE Pacific Visualization Symposium (PacificVis)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/03/ttg2012030407",
"title": "Streamline Embedding for 3D Vector Field Exploration",
"doi": null,
"abstractUrl": "/journal/tg/2012/03/ttg2012030407/13rRUwInvsM",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2010/06/ttg2010061578",
"title": "View-Dependent Streamlines for 3D Vector Fields",
"doi": null,
"abstractUrl": "/journal/tg/2010/06/ttg2010061578/13rRUxASuGd",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/08/06025348",
"title": "Hierarchical Streamline Bundles",
"doi": null,
"abstractUrl": "/journal/tg/2012/08/06025348/13rRUyY28Yt",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/07/ttg2013071185",
"title": "Parallel Streamline Placement for 2D Flow Fields",
"doi": null,
"abstractUrl": "/journal/tg/2013/07/ttg2013071185/13rRUyfbwqG",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/06/09234714",
"title": "WYSIWYG Design of Hypnotic Line Art",
"doi": null,
"abstractUrl": "/journal/tg/2022/06/09234714/1o6IGahpX6o",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "07478595",
"articleId": "13rRUEgarsL",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07390081",
"articleId": "13rRUxly8XJ",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNyvY9o5",
"title": "February",
"year": "2011",
"issueNum": "02",
"idPrefix": "tg",
"pubType": "journal",
"volume": "17",
"label": "February",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUx0xPTN",
"doi": "10.1109/TVCG.2010.34",
"abstract": "The growing sizes of volumetric data sets pose a great challenge for interactive visualization. In this paper, we present a feature-preserving data reduction and focus+context visualization method based on transfer function driven, continuous voxel repositioning and resampling techniques. Rendering reduced data can enhance interactivity. Focus+context visualization can show details of selected features in context on display devices with limited resolution. Our method utilizes the input transfer function to assign importance values to regularly partitioned regions of the volume data. According to user interaction, it can then magnify regions corresponding to the features of interest while compressing the rest by deforming the 3D mesh. The level of data reduction achieved is significant enough to improve overall efficiency. By using continuous deformation, our method avoids the need to smooth the transition between low and high-resolution regions as often required by multiresolution methods. Furthermore, it is particularly attractive for focus+context visualization of multiple features. We demonstrate the effectiveness and efficiency of our method with several volume data sets from medical applications and scientific simulations.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The growing sizes of volumetric data sets pose a great challenge for interactive visualization. In this paper, we present a feature-preserving data reduction and focus+context visualization method based on transfer function driven, continuous voxel repositioning and resampling techniques. Rendering reduced data can enhance interactivity. Focus+context visualization can show details of selected features in context on display devices with limited resolution. Our method utilizes the input transfer function to assign importance values to regularly partitioned regions of the volume data. According to user interaction, it can then magnify regions corresponding to the features of interest while compressing the rest by deforming the 3D mesh. The level of data reduction achieved is significant enough to improve overall efficiency. By using continuous deformation, our method avoids the need to smooth the transition between low and high-resolution regions as often required by multiresolution methods. Furthermore, it is particularly attractive for focus+context visualization of multiple features. We demonstrate the effectiveness and efficiency of our method with several volume data sets from medical applications and scientific simulations.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The growing sizes of volumetric data sets pose a great challenge for interactive visualization. In this paper, we present a feature-preserving data reduction and focus+context visualization method based on transfer function driven, continuous voxel repositioning and resampling techniques. Rendering reduced data can enhance interactivity. Focus+context visualization can show details of selected features in context on display devices with limited resolution. Our method utilizes the input transfer function to assign importance values to regularly partitioned regions of the volume data. According to user interaction, it can then magnify regions corresponding to the features of interest while compressing the rest by deforming the 3D mesh. The level of data reduction achieved is significant enough to improve overall efficiency. By using continuous deformation, our method avoids the need to smooth the transition between low and high-resolution regions as often required by multiresolution methods. Furthermore, it is particularly attractive for focus+context visualization of multiple features. We demonstrate the effectiveness and efficiency of our method with several volume data sets from medical applications and scientific simulations.",
"title": "Feature-Preserving Volume Data Reduction and Focus+Context Visualization",
"normalizedTitle": "Feature-Preserving Volume Data Reduction and Focus+Context Visualization",
"fno": "ttg2011020171",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Data Reduction",
"Focus Context Visualization",
"Interactive Visualization",
"Mesh Deformation",
"Transfer Functions",
"Volume Rendering"
],
"authors": [
{
"givenName": "Yu-Shuen",
"surname": "Wang",
"fullName": "Yu-Shuen Wang",
"affiliation": "National Cheng Kung University, Tainan",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Chaoli",
"surname": "Wang",
"fullName": "Chaoli Wang",
"affiliation": "Michigan Technological University, Houghton",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Tong-Yee",
"surname": "Lee",
"fullName": "Tong-Yee Lee",
"affiliation": "National Cheng Kung University, Tainan",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Kwan-Liu",
"surname": "Ma",
"fullName": "Kwan-Liu Ma",
"affiliation": "University of California, Davis, Davis",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "02",
"pubDate": "2011-02-01 00:00:00",
"pubType": "trans",
"pages": "171-181",
"year": "2011",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/icis/2005/2296/0/22960573",
"title": "Visualizing Hierarchical Information Using a New Focus+Context Method",
"doi": null,
"abstractUrl": "/proceedings-article/icis/2005/22960573/12OmNAJVcDe",
"parentPublication": {
"id": "proceedings/icis/2005/2296/0",
"title": "Proceedings. Fourth Annual ACIS International Conference on Computer and Information Science",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-infovis/2000/0804/0/08040085",
"title": "Redefining the Focus and Context of Focus+Context Visualizations",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-infovis/2000/08040085/12OmNB0nW9E",
"parentPublication": {
"id": "proceedings/ieee-infovis/2000/0804/0",
"title": "Information Visualization, IEEE Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-vis/2004/8788/0/87880385",
"title": "The VesselGlyph: Focus & Context Visualization in CT-Angiography",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/2004/87880385/12OmNqFJhQU",
"parentPublication": {
"id": "proceedings/ieee-vis/2004/8788/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isda/2008/3382/1/3382a368",
"title": "Intelligent Focus+Context Volume Visualization",
"doi": null,
"abstractUrl": "/proceedings-article/isda/2008/3382a368/12OmNy5R3sk",
"parentPublication": {
"id": "proceedings/isda/2008/3382/1",
"title": "Intelligent Systems Design and Applications, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cadgraphics/2011/4497/0/4497a389",
"title": "2.5D Focus+Context Map Visualization",
"doi": null,
"abstractUrl": "/proceedings-article/cadgraphics/2011/4497a389/12OmNyKrHen",
"parentPublication": {
"id": "proceedings/cadgraphics/2011/4497/0",
"title": "Computer-Aided Design and Computer Graphics, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-vis/2005/2766/0/27660047",
"title": "The Magic Volume Lens: An Interactive Focus+Context Technique for Volume Rendering",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/2005/27660047/12OmNzmLxM5",
"parentPublication": {
"id": "proceedings/ieee-vis/2005/2766/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/01/ttg2014010042",
"title": "A Deformation Framework for Focus+Context Flow Visualization",
"doi": null,
"abstractUrl": "/journal/tg/2014/01/ttg2014010042/13rRUwjGoG2",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2006/05/v0893",
"title": "Outlier-Preserving Focus+Context Visualization in Parallel Coordinates",
"doi": null,
"abstractUrl": "/journal/tg/2006/05/v0893/13rRUx0xPmS",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/11/ttg2012111928",
"title": "Conformal Magnifier: A Focus+Context Technique with Local Shape Preservation",
"doi": null,
"abstractUrl": "/journal/tg/2012/11/ttg2012111928/13rRUxNmPDQ",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cs/2012/05/mcs2012050063",
"title": "Distance-Based Focus + Context Models for Exploring Large Volumetric Medical Datasets",
"doi": null,
"abstractUrl": "/magazine/cs/2012/05/mcs2012050063/13rRUytF44W",
"parentPublication": {
"id": "mags/cs",
"title": "Computing in Science & Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2011020159",
"articleId": "13rRUwcAqqb",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2011020182",
"articleId": "13rRUyfKIHF",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "17ShDTXFgOf",
"name": "ttg2011020171s1.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg2011020171s1.mp4",
"extension": "mp4",
"size": "42.9 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNrFBPWq",
"title": "September-October",
"year": "2006",
"issueNum": "05",
"idPrefix": "tg",
"pubType": "journal",
"volume": "12",
"label": "September-October",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUx0xPmS",
"doi": "10.1109/TVCG.2006.170",
"abstract": "Focus+context visualization integrates a visually accentuated representation of selected data items in focus (more details, more opacity, etc.) with a visually deemphasized representation of the rest of the data, i.e., the context. The role of context visualization is to provide an overview of the data for improved user orientation and improved navigation. A good overview comprises the representation of both outliers and trends. Up to now, however, context visualization not really treated outliers sufficiently. In this paper we present a new approach to focus+context visualization in parallel coordinates which is truthful to outliers in the sense that small-scale features are detected before visualization and then treated specially during context visualization. Generally, we present a solution which enables context visualization at several levels of abstraction, both for the representation of outliers and trends. We introduce outlier detection and context generation to parallel coordinates on the basis of a binned data representation. This leads to an output-oriented visualization approach which means that only those parts of the visualization process are executed which actually affect the final rendering. Accordingly, the performance of this solution is much more dependent on the visualization size than on the data size which makes it especially interesting for large datasets. Previous approaches are outperformed, the new solution was successfully applied to datasets with up to 3 million data records and up to 50 dimensions.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Focus+context visualization integrates a visually accentuated representation of selected data items in focus (more details, more opacity, etc.) with a visually deemphasized representation of the rest of the data, i.e., the context. The role of context visualization is to provide an overview of the data for improved user orientation and improved navigation. A good overview comprises the representation of both outliers and trends. Up to now, however, context visualization not really treated outliers sufficiently. In this paper we present a new approach to focus+context visualization in parallel coordinates which is truthful to outliers in the sense that small-scale features are detected before visualization and then treated specially during context visualization. Generally, we present a solution which enables context visualization at several levels of abstraction, both for the representation of outliers and trends. We introduce outlier detection and context generation to parallel coordinates on the basis of a binned data representation. This leads to an output-oriented visualization approach which means that only those parts of the visualization process are executed which actually affect the final rendering. Accordingly, the performance of this solution is much more dependent on the visualization size than on the data size which makes it especially interesting for large datasets. Previous approaches are outperformed, the new solution was successfully applied to datasets with up to 3 million data records and up to 50 dimensions.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Focus+context visualization integrates a visually accentuated representation of selected data items in focus (more details, more opacity, etc.) with a visually deemphasized representation of the rest of the data, i.e., the context. The role of context visualization is to provide an overview of the data for improved user orientation and improved navigation. A good overview comprises the representation of both outliers and trends. Up to now, however, context visualization not really treated outliers sufficiently. In this paper we present a new approach to focus+context visualization in parallel coordinates which is truthful to outliers in the sense that small-scale features are detected before visualization and then treated specially during context visualization. Generally, we present a solution which enables context visualization at several levels of abstraction, both for the representation of outliers and trends. We introduce outlier detection and context generation to parallel coordinates on the basis of a binned data representation. This leads to an output-oriented visualization approach which means that only those parts of the visualization process are executed which actually affect the final rendering. Accordingly, the performance of this solution is much more dependent on the visualization size than on the data size which makes it especially interesting for large datasets. Previous approaches are outperformed, the new solution was successfully applied to datasets with up to 3 million data records and up to 50 dimensions.",
"title": "Outlier-Preserving Focus+Context Visualization in Parallel Coordinates",
"normalizedTitle": "Outlier-Preserving Focus+Context Visualization in Parallel Coordinates",
"fno": "v0893",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Data Structures",
"Data Visualisation",
"Feature Extraction",
"Rendering Computer Graphics",
"Outlier Detection",
"Context Visualization",
"Parallel Coordinate",
"Focus Visualization",
"Data Representation",
"Small Scale Feature Detection",
"Data Abstraction",
"Output Oriented Visualization Approach",
"Rendering Technique",
"Data Visualization",
"Humans",
"Navigation",
"Computer Vision",
"Visual System",
"Information Processing",
"Concurrent Computing",
"Jamming",
"Multidimensional Systems",
"Costs",
"Parallel Coordinates",
"Focus Context Visualization",
"Outliers Trends",
"Large Data Visualization"
],
"authors": [
{
"givenName": "Matej",
"surname": "Novotny",
"fullName": "Matej Novotny",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "Helwig",
"surname": "Hauser",
"fullName": "Helwig Hauser",
"affiliation": "IEEE",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "05",
"pubDate": "2006-09-01 00:00:00",
"pubType": "trans",
"pages": "893-900",
"year": "2006",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/hicss/2012/4525/0/4525b835",
"title": "A Focus + Context Technique for Visualizing a Document Collection",
"doi": null,
"abstractUrl": "/proceedings-article/hicss/2012/4525b835/12OmNA14A63",
"parentPublication": {
"id": "proceedings/hicss/2012/4525/0",
"title": "2012 45th Hawaii International Conference on System Sciences",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-infovis/2000/0804/0/08040085",
"title": "Redefining the Focus and Context of Focus+Context Visualizations",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-infovis/2000/08040085/12OmNB0nW9E",
"parentPublication": {
"id": "proceedings/ieee-infovis/2000/0804/0",
"title": "Information Visualization, IEEE Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cgiv/2005/2392/0/23920162",
"title": "DualView: A Focus+Context Technique for Navigating Large Graphs",
"doi": null,
"abstractUrl": "/proceedings-article/cgiv/2005/23920162/12OmNBBQZqd",
"parentPublication": {
"id": "proceedings/cgiv/2005/2392/0",
"title": "International Conference on Computer Graphics, Imaging and Visualization (CGIV'05)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-infovis/1999/0431/0/04310053",
"title": "A Framework for Focus+Context Visualization",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-infovis/1999/04310053/12OmNBhpS0L",
"parentPublication": {
"id": "proceedings/ieee-infovis/1999/0431/0",
"title": "Information Visualization, IEEE Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-vis/2002/7498/0/7498jayarama",
"title": "A Radial Focus+Context Visualization for Multi-Dimensional Functions",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/2002/7498jayarama/12OmNCw3z94",
"parentPublication": {
"id": "proceedings/ieee-vis/2002/7498/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isda/2008/3382/1/3382a368",
"title": "Intelligent Focus+Context Volume Visualization",
"doi": null,
"abstractUrl": "/proceedings-article/isda/2008/3382a368/12OmNy5R3sk",
"parentPublication": {
"id": "proceedings/isda/2008/3382/1",
"title": "Intelligent Systems Design and Applications, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cadgraphics/2011/4497/0/4497a389",
"title": "2.5D Focus+Context Map Visualization",
"doi": null,
"abstractUrl": "/proceedings-article/cadgraphics/2011/4497a389/12OmNyKrHen",
"parentPublication": {
"id": "proceedings/cadgraphics/2011/4497/0",
"title": "Computer-Aided Design and Computer Graphics, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2011/02/ttg2011020171",
"title": "Feature-Preserving Volume Data Reduction and Focus+Context Visualization",
"doi": null,
"abstractUrl": "/journal/tg/2011/02/ttg2011020171/13rRUx0xPTN",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2011/12/ttg2011122249",
"title": "Context-Preserving Visual Links",
"doi": null,
"abstractUrl": "/journal/tg/2011/12/ttg2011122249/13rRUxYrbUD",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "v0885",
"articleId": "13rRUyuegoZ",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "v0901",
"articleId": "13rRUyoPSOX",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNyq0zFB",
"title": "May",
"year": "2016",
"issueNum": "05",
"idPrefix": "tg",
"pubType": "journal",
"volume": "22",
"label": "May",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUyft7D5",
"doi": "10.1109/TVCG.2015.2443804",
"abstract": "Occlusions are a severe bottleneck for the visualization of large and complex datasets. Conventional images only show dataset elements to which there is a direct line of sight, which significantly limits the information bandwidth of the visualization. Multiperspective visualization is a powerful approach for alleviating occlusions to show more than what is visible from a single viewpoint. However, constructing and rendering multiperspective visualizations is challenging. We present a framework for designing multiperspective focus+context visualizations with great flexibility by manipulating the underlying camera model. The focus region viewpoint is adapted to alleviate occlusions. The framework supports multiperspective visualization in three scenarios. In a first scenario, the viewpoint is altered independently for individual image regions to avoid occlusions. In a second scenario, conventional input images are connected into a multiperspective image. In a third scenario, one or several data subsets of interest (i.e., targets) are visualized where they would be seen in the absence of occluders, as the user navigates or the targets move. The multiperspective images are rendered at interactive rates, leveraging the camera model's fast projection operation. We demonstrate the framework on terrain, urban, and molecular biology geometric datasets, as well as on volume rendered density datasets.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Occlusions are a severe bottleneck for the visualization of large and complex datasets. Conventional images only show dataset elements to which there is a direct line of sight, which significantly limits the information bandwidth of the visualization. Multiperspective visualization is a powerful approach for alleviating occlusions to show more than what is visible from a single viewpoint. However, constructing and rendering multiperspective visualizations is challenging. We present a framework for designing multiperspective focus+context visualizations with great flexibility by manipulating the underlying camera model. The focus region viewpoint is adapted to alleviate occlusions. The framework supports multiperspective visualization in three scenarios. In a first scenario, the viewpoint is altered independently for individual image regions to avoid occlusions. In a second scenario, conventional input images are connected into a multiperspective image. In a third scenario, one or several data subsets of interest (i.e., targets) are visualized where they would be seen in the absence of occluders, as the user navigates or the targets move. The multiperspective images are rendered at interactive rates, leveraging the camera model's fast projection operation. We demonstrate the framework on terrain, urban, and molecular biology geometric datasets, as well as on volume rendered density datasets.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Occlusions are a severe bottleneck for the visualization of large and complex datasets. Conventional images only show dataset elements to which there is a direct line of sight, which significantly limits the information bandwidth of the visualization. Multiperspective visualization is a powerful approach for alleviating occlusions to show more than what is visible from a single viewpoint. However, constructing and rendering multiperspective visualizations is challenging. We present a framework for designing multiperspective focus+context visualizations with great flexibility by manipulating the underlying camera model. The focus region viewpoint is adapted to alleviate occlusions. The framework supports multiperspective visualization in three scenarios. In a first scenario, the viewpoint is altered independently for individual image regions to avoid occlusions. In a second scenario, conventional input images are connected into a multiperspective image. In a third scenario, one or several data subsets of interest (i.e., targets) are visualized where they would be seen in the absence of occluders, as the user navigates or the targets move. The multiperspective images are rendered at interactive rates, leveraging the camera model's fast projection operation. We demonstrate the framework on terrain, urban, and molecular biology geometric datasets, as well as on volume rendered density datasets.",
"title": "Multiperspective Focus+Context Visualization",
"normalizedTitle": "Multiperspective Focus+Context Visualization",
"fno": "07120994",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Cameras",
"Data Visualization",
"Context",
"Image Segmentation",
"Rendering Computer Graphics",
"Solid Modeling",
"Three Dimensional Displays",
"Focus Context",
"Occlusion Management",
"Camera Models",
"Multiperspective Visualization",
"Interactive Visualization",
"Focus Context",
"Occlusion Management",
"Camera Models",
"Multiperspective Visualization",
"Interactive Visualization"
],
"authors": [
{
"givenName": "Meng-Lin",
"surname": "Wu",
"fullName": "Meng-Lin Wu",
"affiliation": ", Department of Computer Science, Purdue University, West Lafayette, IN",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Voicu",
"surname": "Popescu",
"fullName": "Voicu Popescu",
"affiliation": ", Department of Computer Science, Purdue University, West Lafayette, IN",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "05",
"pubDate": "2016-05-01 00:00:00",
"pubType": "trans",
"pages": "1555-1567",
"year": "2016",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/ieee-vis/2004/8788/0/87880385",
"title": "The VesselGlyph: Focus & Context Visualization in CT-Angiography",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/2004/87880385/12OmNqFJhQU",
"parentPublication": {
"id": "proceedings/ieee-vis/2004/8788/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isda/2008/3382/1/3382a368",
"title": "Intelligent Focus+Context Volume Visualization",
"doi": null,
"abstractUrl": "/proceedings-article/isda/2008/3382a368/12OmNy5R3sk",
"parentPublication": {
"id": "proceedings/isda/2008/3382/1",
"title": "Intelligent Systems Design and Applications, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2011/12/ttg2011122183",
"title": "The FLOWLENS: A Focus-and-Context Visualization Approach for Exploration of Blood Flow in Cerebral Aneurysms",
"doi": null,
"abstractUrl": "/journal/tg/2011/12/ttg2011122183/13rRUx0xPIB",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2010/06/ttg2010061235",
"title": "A Curved Ray Camera for Handling Occlusions through Continuous Multiperspective Visualization",
"doi": null,
"abstractUrl": "/journal/tg/2010/06/ttg2010061235/13rRUxBa5bR",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/12/06876031",
"title": "Predicate-Based Focus-and-Context Visualization for 3D Ultrasound",
"doi": null,
"abstractUrl": "/journal/tg/2014/12/06876031/13rRUynHujd",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/12/08123949",
"title": "Efficient VR and AR Navigation Through Multiperspective Occlusion Management",
"doi": null,
"abstractUrl": "/journal/tg/2018/12/08123949/14H4WNoi7Yc",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/percomw/2011/938/0/05766878",
"title": "Distributed context-aware visualization",
"doi": null,
"abstractUrl": "/proceedings-article/percomw/2011/05766878/17D45XoXP49",
"parentPublication": {
"id": "proceedings/percomw/2011/938/0",
"title": "2011 IEEE International Conference on Pervasive Computing and Communications Workshops (PerCom Workshops 2011). PerCom-Workshops 2011: 2011 IEEE International Conference on Pervasive Computing and Communications Workshops (PERCOM Workshops 2011)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/09/09332290",
"title": "Quantifiable Fine-Grain Occlusion Removal Assistance for Efficient VR Exploration",
"doi": null,
"abstractUrl": "/journal/tg/2022/09/09332290/1qzsRxXpW4o",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/12/09429918",
"title": "The Impact of Focus and Context Visualization Techniques on Depth Perception in Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/journal/tg/2022/12/09429918/1txPs5wi56E",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sibgrapi/2021/2354/0/235400a025",
"title": "A New Focus+Context Visualization Technique for Inspecting Black Oil Reservoir Models",
"doi": null,
"abstractUrl": "/proceedings-article/sibgrapi/2021/235400a025/1zurn3PI1dC",
"parentPublication": {
"id": "proceedings/sibgrapi/2021/2354/0",
"title": "2021 34th SIBGRAPI Conference on Graphics, Patterns and Images (SIBGRAPI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "07127036",
"articleId": "13rRUILc8fe",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07127016",
"articleId": "13rRUxBa5c3",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "17ShDTXWRJr",
"name": "ttg201605-07120994s1.zip",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg201605-07120994s1.zip",
"extension": "zip",
"size": "95.6 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNCm7Bxu",
"title": "July",
"year": "2011",
"issueNum": "07",
"idPrefix": "tg",
"pubType": "journal",
"volume": "17",
"label": "July",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUEgarBq",
"doi": "10.1109/TVCG.2010.119",
"abstract": "Euler diagrams are effective tools for visualizing set intersections. They have a large number of application areas ranging from statistical data analysis to software engineering. However, the automated generation of Euler diagrams has never been easy: given an abstract description of a required Euler diagram, it is computationally expensive to generate the diagram. Moreover, the generated diagrams represent sets by polygons, sometimes with quite irregular shapes that make the diagrams less comprehensible. In this paper, we address these two issues by developing the theory of piercings, where we define single piercing curves and double piercing curves. We prove that if a diagram can be built inductively by successively adding piercing curves under certain constraints, then it can be drawn with circles, which are more esthetically pleasing than arbitrary polygons. The theory of piercings is developed at the abstract level. In addition, we present a Java implementation that, given an inductively pierced abstract description, generates an Euler diagram consisting only of circles within polynomial time.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Euler diagrams are effective tools for visualizing set intersections. They have a large number of application areas ranging from statistical data analysis to software engineering. However, the automated generation of Euler diagrams has never been easy: given an abstract description of a required Euler diagram, it is computationally expensive to generate the diagram. Moreover, the generated diagrams represent sets by polygons, sometimes with quite irregular shapes that make the diagrams less comprehensible. In this paper, we address these two issues by developing the theory of piercings, where we define single piercing curves and double piercing curves. We prove that if a diagram can be built inductively by successively adding piercing curves under certain constraints, then it can be drawn with circles, which are more esthetically pleasing than arbitrary polygons. The theory of piercings is developed at the abstract level. In addition, we present a Java implementation that, given an inductively pierced abstract description, generates an Euler diagram consisting only of circles within polynomial time.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Euler diagrams are effective tools for visualizing set intersections. They have a large number of application areas ranging from statistical data analysis to software engineering. However, the automated generation of Euler diagrams has never been easy: given an abstract description of a required Euler diagram, it is computationally expensive to generate the diagram. Moreover, the generated diagrams represent sets by polygons, sometimes with quite irregular shapes that make the diagrams less comprehensible. In this paper, we address these two issues by developing the theory of piercings, where we define single piercing curves and double piercing curves. We prove that if a diagram can be built inductively by successively adding piercing curves under certain constraints, then it can be drawn with circles, which are more esthetically pleasing than arbitrary polygons. The theory of piercings is developed at the abstract level. In addition, we present a Java implementation that, given an inductively pierced abstract description, generates an Euler diagram consisting only of circles within polynomial time.",
"title": "Drawing Euler Diagrams with Circles: The Theory of Piercings",
"normalizedTitle": "Drawing Euler Diagrams with Circles: The Theory of Piercings",
"fno": "ttg2011071020",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Automated Diagram Drawing",
"Euler Diagrams",
"Diagrammatic Reasoning",
"Information Visualization"
],
"authors": [
{
"givenName": "Gem",
"surname": "Stapleton",
"fullName": "Gem Stapleton",
"affiliation": "University of Brighton, Brighton",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Leishi",
"surname": "Zhang",
"fullName": "Leishi Zhang",
"affiliation": "University of Kent, Kent",
"__typename": "ArticleAuthorType"
},
{
"givenName": "John",
"surname": "Howse",
"fullName": "John Howse",
"affiliation": "University of Brighton, Brighton",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Peter",
"surname": "Rodgers",
"fullName": "Peter Rodgers",
"affiliation": "University of Kent, Kent",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "07",
"pubDate": "2011-07-01 00:00:00",
"pubType": "trans",
"pages": "1020-1032",
"year": "2011",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/vl/2000/0840/0/08400119",
"title": "Projections in Venn-Euler Diagrams",
"doi": null,
"abstractUrl": "/proceedings-article/vl/2000/08400119/12OmNB8Cj3l",
"parentPublication": {
"id": "proceedings/vl/2000/0840/0",
"title": "Visual Languages, IEEE Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2008/3268/0/3268a594",
"title": "Visualise Undrawable Euler Diagrams",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2008/3268a594/12OmNBOllkb",
"parentPublication": {
"id": "proceedings/iv/2008/3268/0",
"title": "2008 12th International Conference Information Visualisation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2003/1988/0/19880272",
"title": "Layout Metrics for Euler Diagrams",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2003/19880272/12OmNvD8RBs",
"parentPublication": {
"id": "proceedings/iv/2003/1988/0",
"title": "Proceedings on Seventh International Conference on Information Visualization, 2003. IV 2003.",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vlhcc/2011/1246/0/06070401",
"title": "Drawing Euler diagrams with circles and ellipses",
"doi": null,
"abstractUrl": "/proceedings-article/vlhcc/2011/06070401/12OmNvpew49",
"parentPublication": {
"id": "proceedings/vlhcc/2011/1246/0",
"title": "2011 IEEE Symposium on Visual Languages and Human-Centric Computing (VL/HCC 2011)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vlhcc/2011/1246/0/06070382",
"title": "SketchSet: Creating Euler diagrams using pen or mouse",
"doi": null,
"abstractUrl": "/proceedings-article/vlhcc/2011/06070382/12OmNx965CA",
"parentPublication": {
"id": "proceedings/vlhcc/2011/1246/0",
"title": "2011 IEEE Symposium on Visual Languages and Human-Centric Computing (VL/HCC 2011)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2007/2900/0/29000771",
"title": "Evaluating the Comprehension of Euler Diagrams",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2007/29000771/12OmNxjjEhx",
"parentPublication": {
"id": "proceedings/iv/2007/2900/0",
"title": "2007 11th International Conference Information Visualization (IV '07)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2008/3268/0/3268a585",
"title": "Embedding Wellformed Euler Diagrams",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2008/3268a585/12OmNyuya3M",
"parentPublication": {
"id": "proceedings/iv/2008/3268/0",
"title": "2008 12th International Conference Information Visualisation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/07/05999665",
"title": "Wellformedness Properties in Euler Diagrams: Which Should Be Used?",
"doi": null,
"abstractUrl": "/journal/tg/2012/07/05999665/13rRUILLkvo",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2010/06/ttg2010061090",
"title": "Untangling Euler Diagrams",
"doi": null,
"abstractUrl": "/journal/tg/2010/06/ttg2010061090/13rRUILtJm3",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2011/01/ttg2011010088",
"title": "Inductively Generating Euler Diagrams",
"doi": null,
"abstractUrl": "/journal/tg/2011/01/ttg2011010088/13rRUNvgziB",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2011071007",
"articleId": "13rRUIM2VBC",
"__typename": "AdjacentArticleType"
},
"next": null,
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNzFdtc6",
"title": "November/December",
"year": "2010",
"issueNum": "06",
"idPrefix": "tg",
"pubType": "journal",
"volume": "16",
"label": "November/December",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUILtJm3",
"doi": "10.1109/TVCG.2010.210",
"abstract": "In many common data analysis scenarios the data elements are logically grouped into sets. Venn and Euler style diagrams are a common visual representation of such set membership where the data elements are represented by labels or glyphs and sets are indicated by boundaries surrounding their members. Generating such diagrams automatically such that set regions do not intersect unless the corresponding sets have a non-empty intersection is a difficult problem. Further, it may be impossible in some cases if regions are required to be continuous and convex. Several approaches exist to draw such set regions using more complex shapes, however, the resulting diagrams can be difficult to interpret. In this paper we present two novel approaches for simplifying a complex collection of intersecting sets into a strict hierarchy that can be more easily automatically arranged and drawn (Figure 1). In the first approach, we use compact rectangular shapes for drawing each set, attempting to improve the readability of the set intersections. In the second approach, we avoid drawing intersecting set regions by duplicating elements belonging to multiple sets. We compared both of our techniques to the traditional non-convex region technique using five readability tasks. Our results show that the compact rectangular shapes technique was often preferred by experimental subjects even though the use of duplications dramatically improves the accuracy and performance time for most of our tasks. In addition to general set representation our techniques are also applicable to visualization of networks with intersecting clusters of nodes",
"abstracts": [
{
"abstractType": "Regular",
"content": "In many common data analysis scenarios the data elements are logically grouped into sets. Venn and Euler style diagrams are a common visual representation of such set membership where the data elements are represented by labels or glyphs and sets are indicated by boundaries surrounding their members. Generating such diagrams automatically such that set regions do not intersect unless the corresponding sets have a non-empty intersection is a difficult problem. Further, it may be impossible in some cases if regions are required to be continuous and convex. Several approaches exist to draw such set regions using more complex shapes, however, the resulting diagrams can be difficult to interpret. In this paper we present two novel approaches for simplifying a complex collection of intersecting sets into a strict hierarchy that can be more easily automatically arranged and drawn (Figure 1). In the first approach, we use compact rectangular shapes for drawing each set, attempting to improve the readability of the set intersections. In the second approach, we avoid drawing intersecting set regions by duplicating elements belonging to multiple sets. We compared both of our techniques to the traditional non-convex region technique using five readability tasks. Our results show that the compact rectangular shapes technique was often preferred by experimental subjects even though the use of duplications dramatically improves the accuracy and performance time for most of our tasks. In addition to general set representation our techniques are also applicable to visualization of networks with intersecting clusters of nodes",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In many common data analysis scenarios the data elements are logically grouped into sets. Venn and Euler style diagrams are a common visual representation of such set membership where the data elements are represented by labels or glyphs and sets are indicated by boundaries surrounding their members. Generating such diagrams automatically such that set regions do not intersect unless the corresponding sets have a non-empty intersection is a difficult problem. Further, it may be impossible in some cases if regions are required to be continuous and convex. Several approaches exist to draw such set regions using more complex shapes, however, the resulting diagrams can be difficult to interpret. In this paper we present two novel approaches for simplifying a complex collection of intersecting sets into a strict hierarchy that can be more easily automatically arranged and drawn (Figure 1). In the first approach, we use compact rectangular shapes for drawing each set, attempting to improve the readability of the set intersections. In the second approach, we avoid drawing intersecting set regions by duplicating elements belonging to multiple sets. We compared both of our techniques to the traditional non-convex region technique using five readability tasks. Our results show that the compact rectangular shapes technique was often preferred by experimental subjects even though the use of duplications dramatically improves the accuracy and performance time for most of our tasks. In addition to general set representation our techniques are also applicable to visualization of networks with intersecting clusters of nodes",
"title": "Untangling Euler Diagrams",
"normalizedTitle": "Untangling Euler Diagrams",
"fno": "ttg2010061090",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Information Visualization",
"Euler Diagrams",
"Set Visualization",
"Graph Visualization"
],
"authors": [
{
"givenName": "Nathalie Henry",
"surname": "Riche",
"fullName": "Nathalie Henry Riche",
"affiliation": "Microsoft Research",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Tim",
"surname": "Dwyer",
"fullName": "Tim Dwyer",
"affiliation": "Microsoft Corporation Microsoft Corporation",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "06",
"pubDate": "2010-11-01 00:00:00",
"pubType": "trans",
"pages": "1090-1099",
"year": "2010",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/iv/2008/3268/0/3268a594",
"title": "Visualise Undrawable Euler Diagrams",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2008/3268a594/12OmNBOllkb",
"parentPublication": {
"id": "proceedings/iv/2008/3268/0",
"title": "2008 12th International Conference Information Visualisation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2009/3733/0/3733a673",
"title": "An Heuristic for the Construction of Intersection Graphs",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2009/3733a673/12OmNrY3LCy",
"parentPublication": {
"id": "proceedings/iv/2009/3733/0",
"title": "2009 13th International Conference Information Visualisation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2003/1988/0/19880272",
"title": "Layout Metrics for Euler Diagrams",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2003/19880272/12OmNvD8RBs",
"parentPublication": {
"id": "proceedings/iv/2003/1988/0",
"title": "Proceedings on Seventh International Conference on Information Visualization, 2003. IV 2003.",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2007/2900/0/29000771",
"title": "Evaluating the Comprehension of Euler Diagrams",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2007/29000771/12OmNxjjEhx",
"parentPublication": {
"id": "proceedings/iv/2007/2900/0",
"title": "2007 11th International Conference Information Visualization (IV '07)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2008/3268/0/3268a585",
"title": "Embedding Wellformed Euler Diagrams",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2008/3268a585/12OmNyuya3M",
"parentPublication": {
"id": "proceedings/iv/2008/3268/0",
"title": "2008 12th International Conference Information Visualisation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icgciot/2015/7910/0/07380712",
"title": "Spherule diagrams: A matrix-based set visualization compared with Euler diagrams",
"doi": null,
"abstractUrl": "/proceedings-article/icgciot/2015/07380712/12OmNyvGyfY",
"parentPublication": {
"id": "proceedings/icgciot/2015/7910/0",
"title": "2015 International Conference on Green Computing and Internet of Things (ICGCIoT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2011/07/ttg2011071020",
"title": "Drawing Euler Diagrams with Circles: The Theory of Piercings",
"doi": null,
"abstractUrl": "/journal/tg/2011/07/ttg2011071020/13rRUEgarBq",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/07/05999665",
"title": "Wellformedness Properties in Euler Diagrams: Which Should Be Used?",
"doi": null,
"abstractUrl": "/journal/tg/2012/07/05999665/13rRUILLkvo",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2011/01/ttg2011010088",
"title": "Inductively Generating Euler Diagrams",
"doi": null,
"abstractUrl": "/journal/tg/2011/01/ttg2011010088/13rRUNvgziB",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/06/10081433",
"title": "LinSets.zip: Compressing Linear Set Diagrams",
"doi": null,
"abstractUrl": "/journal/tg/2023/06/10081433/1LRbR78bpDy",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2010061082",
"articleId": "13rRUx0xPmX",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2010061100",
"articleId": "13rRUxD9gXB",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "17ShDTYet0g",
"name": "ttg2010061090s1.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg2010061090s1.mp4",
"extension": "mp4",
"size": "3.35 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNyv7moM",
"title": "Jan.",
"year": "2014",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": "20",
"label": "Jan.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUwInvB6",
"doi": "10.1109/TVCG.2013.90",
"abstract": "We propose an approach for verification of volume rendering correctness based on an analysis of the volume rendering integral, the basis of most DVR algorithms. With respect to the most common discretization of this continuous model (Riemann summation), we make assumptions about the impact of parameter changes on the rendered results and derive convergence curves describing the expected behavior. Specifically, we progressively refine the number of samples along the ray, the grid size, and the pixel size, and evaluate how the errors observed during refinement compare against the expected approximation errors. We derive the theoretical foundations of our verification approach, explain how to realize it in practice, and discuss its limitations. We also report the errors identified by our approach when applied to two publicly available volume rendering packages.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We propose an approach for verification of volume rendering correctness based on an analysis of the volume rendering integral, the basis of most DVR algorithms. With respect to the most common discretization of this continuous model (Riemann summation), we make assumptions about the impact of parameter changes on the rendered results and derive convergence curves describing the expected behavior. Specifically, we progressively refine the number of samples along the ray, the grid size, and the pixel size, and evaluate how the errors observed during refinement compare against the expected approximation errors. We derive the theoretical foundations of our verification approach, explain how to realize it in practice, and discuss its limitations. We also report the errors identified by our approach when applied to two publicly available volume rendering packages.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We propose an approach for verification of volume rendering correctness based on an analysis of the volume rendering integral, the basis of most DVR algorithms. With respect to the most common discretization of this continuous model (Riemann summation), we make assumptions about the impact of parameter changes on the rendered results and derive convergence curves describing the expected behavior. Specifically, we progressively refine the number of samples along the ray, the grid size, and the pixel size, and evaluate how the errors observed during refinement compare against the expected approximation errors. We derive the theoretical foundations of our verification approach, explain how to realize it in practice, and discuss its limitations. We also report the errors identified by our approach when applied to two publicly available volume rendering packages.",
"title": "Verifying Volume Rendering Using Discretization Error Analysis",
"normalizedTitle": "Verifying Volume Rendering Using Discretization Error Analysis",
"fno": "ttg2014010140",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Rendering Computer Graphics",
"Volume Measurements",
"Error Analysis",
"Testing",
"Discretization Errors",
"Volume Rendering",
"Verifiable Visualization",
"Verification"
],
"authors": [
{
"givenName": "Tiago",
"surname": "Etiene",
"fullName": "Tiago Etiene",
"affiliation": "Sch. of Comput., Univ. of Utah, Salt Lake City, UT, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Daniel",
"surname": "Jonsson",
"fullName": "Daniel Jonsson",
"affiliation": "Campus Norrkoping, Linkopings Univ., Norrkoping, Sweden",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Timo",
"surname": "Ropinski",
"fullName": "Timo Ropinski",
"affiliation": "Campus Norrkoping, Linkopings Univ., Norrkoping, Sweden",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Carlos",
"surname": "Scheidegger",
"fullName": "Carlos Scheidegger",
"affiliation": "AT&T Labs.-Res., Florham Park, NJ, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Joao L. D.",
"surname": "Comba",
"fullName": "Joao L. D. Comba",
"affiliation": "Univ. Fed. do Rio Grande do Sul, Porto Alegre, Brazil",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Luis Gustavo",
"surname": "Nonato",
"fullName": "Luis Gustavo Nonato",
"affiliation": "Depto Mat. Aplic. e Estatistica-ICMC/USP, Univ. de Sao Paulo, Sao Carlos, Brazil",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Robert M.",
"surname": "Kirby",
"fullName": "Robert M. Kirby",
"affiliation": "Sch. of Comput., Univ. of Utah, Salt Lake City, UT, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Anders",
"surname": "Ynnerman",
"fullName": "Anders Ynnerman",
"affiliation": "Campus Norrkoping, Linkopings Univ., Norrkoping, Sweden",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Claudio T.",
"surname": "Silva",
"fullName": "Claudio T. Silva",
"affiliation": "Center for Urban Sci. & Progress, New York Univ., Brooklyn, NY, USA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2014-01-01 00:00:00",
"pubType": "trans",
"pages": "140-154",
"year": "2014",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/ieee-vis/2003/2030/0/20300038",
"title": "Acceleration Techniques for GPU-based Volume Rendering",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/2003/20300038/12OmNC2xhD8",
"parentPublication": {
"id": "proceedings/ieee-vis/2003/2030/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/esiat/2009/3682/2/3682b575",
"title": "Rapid Texture-based Volume Rendering",
"doi": null,
"abstractUrl": "/proceedings-article/esiat/2009/3682b575/12OmNx7G5VW",
"parentPublication": {
"id": "esiat/2009/3682/2",
"title": "Environmental Science and Information Application Technology, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sibgrapi/2015/7962/0/7962a017",
"title": "Accurate Volume Rendering Based on Adaptive Numerical Integration",
"doi": null,
"abstractUrl": "/proceedings-article/sibgrapi/2015/7962a017/12OmNxaNGjy",
"parentPublication": {
"id": "proceedings/sibgrapi/2015/7962/0",
"title": "2015 28th SIBGRAPI Conference on Graphics, Patterns and Images (SIBGRAPI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-vis/2005/2766/0/27660038",
"title": "Scale-Invariant Volume Rendering",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/2005/27660038/12OmNxb5hu0",
"parentPublication": {
"id": "proceedings/ieee-vis/2005/2766/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-vis/2000/6478/0/64780039",
"title": "Two-Level Volume Rendering-Fusing MIP and DVR",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/2000/64780039/12OmNxzMnWP",
"parentPublication": {
"id": "proceedings/ieee-vis/2000/6478/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-vis/1995/7187/0/71870011",
"title": "Interactive Maximum Projection Volume Rendering",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/1995/71870011/12OmNzZmZv2",
"parentPublication": {
"id": "proceedings/ieee-vis/1995/7187/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/12/ttg2012122364",
"title": "Historygrams: Enabling Interactive Global Illumination in Direct Volume Rendering using Photon Mapping",
"doi": null,
"abstractUrl": "/journal/tg/2012/12/ttg2012122364/13rRUyYjK5h",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2007/06/v1600",
"title": "Transform Coding for Hardware-accelerated Volume Rendering",
"doi": null,
"abstractUrl": "/journal/tg/2007/06/v1600/13rRUyeTVhV",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/12/ttg2012122335",
"title": "Fuzzy Volume Rendering",
"doi": null,
"abstractUrl": "/journal/tg/2012/12/ttg2012122335/13rRUyeTVi0",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/td/2007/01/04020508",
"title": "Hypergraph-Partitioning-Based Remapping Models for Image-Space-Parallel Direct Volume Rendering of Unstructured Grids",
"doi": null,
"abstractUrl": "/journal/td/2007/01/04020508/13rRUygT7eL",
"parentPublication": {
"id": "trans/td",
"title": "IEEE Transactions on Parallel & Distributed Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2014010125",
"articleId": "13rRUxNEqPT",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2014010155",
"articleId": "13rRUyYSWkZ",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "17ShDTXFgDB",
"name": "ttg2014010140s2.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg2014010140s2.pdf",
"extension": "pdf",
"size": "313 kB",
"__typename": "WebExtraType"
},
{
"id": "17ShDTXFgDA",
"name": "ttg2014010140s1.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg2014010140s1.pdf",
"extension": "pdf",
"size": "49.5 kB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNwJPMX5",
"title": "Dec.",
"year": "2011",
"issueNum": "12",
"idPrefix": "tg",
"pubType": "journal",
"volume": "17",
"label": "Dec.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUwkxc5o",
"doi": "10.1109/TVCG.2011.198",
"abstract": "Direct volume rendering has become a popular method for visualizing volumetric datasets. Even though computers are continually getting faster, it remains a challenge to incorporate sophisticated illumination models into direct volume rendering while maintaining interactive frame rates. In this paper, we present a novel approach for advanced illumination in direct volume rendering based on GPU ray-casting. Our approach features directional soft shadows taking scattering into account, ambient occlusion and color bleeding effects while achieving very competitive frame rates. In particular, multiple dynamic lights and interactive transfer function changes are fully supported. Commonly, direct volume rendering is based on a very simplified discrete version of the original volume rendering integral, including the development of the original exponential extinction into a-blending. In contrast to a-blending forming a product when sampling along a ray, the original exponential extinction coefficient is an integral and its discretization a Riemann sum. The fact that it is a sum can cleverly be exploited to implement volume lighting effects, i.e. soft directional shadows, ambient occlusion and color bleeding. We will show how this can be achieved and how it can be implemented on the GPU.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Direct volume rendering has become a popular method for visualizing volumetric datasets. Even though computers are continually getting faster, it remains a challenge to incorporate sophisticated illumination models into direct volume rendering while maintaining interactive frame rates. In this paper, we present a novel approach for advanced illumination in direct volume rendering based on GPU ray-casting. Our approach features directional soft shadows taking scattering into account, ambient occlusion and color bleeding effects while achieving very competitive frame rates. In particular, multiple dynamic lights and interactive transfer function changes are fully supported. Commonly, direct volume rendering is based on a very simplified discrete version of the original volume rendering integral, including the development of the original exponential extinction into a-blending. In contrast to a-blending forming a product when sampling along a ray, the original exponential extinction coefficient is an integral and its discretization a Riemann sum. The fact that it is a sum can cleverly be exploited to implement volume lighting effects, i.e. soft directional shadows, ambient occlusion and color bleeding. We will show how this can be achieved and how it can be implemented on the GPU.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Direct volume rendering has become a popular method for visualizing volumetric datasets. Even though computers are continually getting faster, it remains a challenge to incorporate sophisticated illumination models into direct volume rendering while maintaining interactive frame rates. In this paper, we present a novel approach for advanced illumination in direct volume rendering based on GPU ray-casting. Our approach features directional soft shadows taking scattering into account, ambient occlusion and color bleeding effects while achieving very competitive frame rates. In particular, multiple dynamic lights and interactive transfer function changes are fully supported. Commonly, direct volume rendering is based on a very simplified discrete version of the original volume rendering integral, including the development of the original exponential extinction into a-blending. In contrast to a-blending forming a product when sampling along a ray, the original exponential extinction coefficient is an integral and its discretization a Riemann sum. The fact that it is a sum can cleverly be exploited to implement volume lighting effects, i.e. soft directional shadows, ambient occlusion and color bleeding. We will show how this can be achieved and how it can be implemented on the GPU.",
"title": "Extinction-Based Shading and Illumination in GPU Volume Ray-Casting",
"normalizedTitle": "Extinction-Based Shading and Illumination in GPU Volume Ray-Casting",
"fno": "ttg2011121795",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Volume Rendering",
"Shadows",
"Ambient Occlusion",
"GPU Ray Casting",
"Exponential Extinction"
],
"authors": [
{
"givenName": "Philipp",
"surname": "Schlegel",
"fullName": "Philipp Schlegel",
"affiliation": "University of Zurich",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Maxim",
"surname": "Makhinya",
"fullName": "Maxim Makhinya",
"affiliation": "University of Zurich",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Renato",
"surname": "Pajarola",
"fullName": "Renato Pajarola",
"affiliation": "University of Zurich",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "12",
"pubDate": "2011-12-01 00:00:00",
"pubType": "trans",
"pages": "1795-1802",
"year": "2011",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/mvhi/2010/4009/0/4009a468",
"title": "An Accelerative Ray Casting Algorithm Based on Crossing-Area Technique",
"doi": null,
"abstractUrl": "/proceedings-article/mvhi/2010/4009a468/12OmNArbG2a",
"parentPublication": {
"id": "proceedings/mvhi/2010/4009/0",
"title": "Machine Vision and Human-machine Interface, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/rt/2006/0693/0/04061557",
"title": "Ray Casting of Trimmed NURBS Surfaces on the GPU",
"doi": null,
"abstractUrl": "/proceedings-article/rt/2006/04061557/12OmNBNM8TN",
"parentPublication": {
"id": "proceedings/rt/2006/0693/0",
"title": "IEEE Symposium on Interactive Ray Tracing 2006",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vv/1998/9180/0/91800055",
"title": "Adaptive Perspective Ray Casting",
"doi": null,
"abstractUrl": "/proceedings-article/vv/1998/91800055/12OmNBRsVxg",
"parentPublication": {
"id": "proceedings/vv/1998/9180/0",
"title": "Volume Visualization and Graphics, IEEE Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ldav/2014/5215/0/07013200",
"title": "Cache-aware sampling strategies for texture-based ray casting on GPU",
"doi": null,
"abstractUrl": "/proceedings-article/ldav/2014/07013200/12OmNxWcH5i",
"parentPublication": {
"id": "proceedings/ldav/2014/5215/0",
"title": "2014 IEEE 4th Symposium on Large Data Analysis and Visualization (LDAV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/frontiers/1995/6965/0/69650238",
"title": "An optimal parallel algorithm for volume ray casting",
"doi": null,
"abstractUrl": "/proceedings-article/frontiers/1995/69650238/12OmNxisQY8",
"parentPublication": {
"id": "proceedings/frontiers/1995/6965/0",
"title": "Frontiers of Massively Parallel Processing, Symposium on the",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/01/07539653",
"title": "Direct Multifield Volume Ray Casting of Fiber Surfaces",
"doi": null,
"abstractUrl": "/journal/tg/2017/01/07539653/13rRUB6Sq0C",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/11/06851204",
"title": "Study of a Ray Casting Technique for the Visualization of Deformable Volumes",
"doi": null,
"abstractUrl": "/journal/tg/2014/11/06851204/13rRUEgarBv",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/07/07470264",
"title": "Extinction-Optimized Volume Illumination",
"doi": null,
"abstractUrl": "/journal/tg/2017/07/07470264/13rRUwI5TR3",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2009/06/ttg2009061571",
"title": "Volume Ray Casting with Peak Finding and Differential Sampling",
"doi": null,
"abstractUrl": "/journal/tg/2009/06/ttg2009061571/13rRUxBa55W",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccgiv/2022/9250/0/925000a183",
"title": "Ellipsoidal ray casting algorithm",
"doi": null,
"abstractUrl": "/proceedings-article/iccgiv/2022/925000a183/1LxfqGjszTi",
"parentPublication": {
"id": "proceedings/iccgiv/2022/9250/0",
"title": "2022 2nd International Conference on Computer Graphics, Image and Virtualization (ICCGIV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2011121785",
"articleId": "13rRUNvyaeW",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2011121803",
"articleId": "13rRUzphDxV",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "17ShDTXnFv1",
"name": "ttg2011121795s1.zip",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg2011121795s1.zip",
"extension": "zip",
"size": "37.7 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNyPQ4Dx",
"title": "Dec.",
"year": "2012",
"issueNum": "12",
"idPrefix": "tg",
"pubType": "journal",
"volume": "18",
"label": "Dec.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUxD9h56",
"doi": "10.1109/TVCG.2012.218",
"abstract": "This paper presents the Element Visualizer (ElVis), a new, open-source scientific visualization system for use with highorder finite element solutions to PDEs in three dimensions. This system is designed to minimize visualization errors of these types of fields by querying the underlying finite element basis functions (e.g., high-order polynomials) directly, leading to pixel-exact representations of solutions and geometry. The system interacts with simulation data through runtime plugins, which only require users to implement a handful of operations fundamental to finite element solvers. The data in turn can be visualized through the use of cut surfaces, contours, isosurfaces, and volume rendering. These visualization algorithms are implemented using NVIDIA’s OptiX GPU-based ray-tracing engine, which provides accelerated ray traversal of the high-order geometry, and CUDA, which allows for effective parallel evaluation of the visualization algorithms. The direct interface between ElVis and the underlying data differentiates it from existing visualization tools. Current tools assume the underlying data is composed of linear primitives; high-order data must be interpolated with linear functions as a result. In this work, examples drawn from aerodynamic simulations-high-order discontinuous Galerkin finite element solutions of aerodynamic flows in particular-will demonstrate the superiority of ElVis’ pixel-exact approach when compared with traditional linear-interpolation methods. Such methods can introduce a number of inaccuracies in the resulting visualization, making it unclear if visual artifacts are genuine to the solution data or if these artifacts are the result of interpolation errors. Linear methods additionally cannot properly visualize curved geometries (elements or boundaries) which can greatly inhibit developers’ debugging efforts. 
As we will show, pixel-exact visualization exhibits none of these issues, removing the visualization scheme as a source of uncertainty for engineers using ElVis.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper presents the Element Visualizer (ElVis), a new, open-source scientific visualization system for use with highorder finite element solutions to PDEs in three dimensions. This system is designed to minimize visualization errors of these types of fields by querying the underlying finite element basis functions (e.g., high-order polynomials) directly, leading to pixel-exact representations of solutions and geometry. The system interacts with simulation data through runtime plugins, which only require users to implement a handful of operations fundamental to finite element solvers. The data in turn can be visualized through the use of cut surfaces, contours, isosurfaces, and volume rendering. These visualization algorithms are implemented using NVIDIA’s OptiX GPU-based ray-tracing engine, which provides accelerated ray traversal of the high-order geometry, and CUDA, which allows for effective parallel evaluation of the visualization algorithms. The direct interface between ElVis and the underlying data differentiates it from existing visualization tools. Current tools assume the underlying data is composed of linear primitives; high-order data must be interpolated with linear functions as a result. In this work, examples drawn from aerodynamic simulations-high-order discontinuous Galerkin finite element solutions of aerodynamic flows in particular-will demonstrate the superiority of ElVis’ pixel-exact approach when compared with traditional linear-interpolation methods. Such methods can introduce a number of inaccuracies in the resulting visualization, making it unclear if visual artifacts are genuine to the solution data or if these artifacts are the result of interpolation errors. Linear methods additionally cannot properly visualize curved geometries (elements or boundaries) which can greatly inhibit developers’ debugging efforts. 
As we will show, pixel-exact visualization exhibits none of these issues, removing the visualization scheme as a source of uncertainty for engineers using ElVis.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper presents the Element Visualizer (ElVis), a new, open-source scientific visualization system for use with highorder finite element solutions to PDEs in three dimensions. This system is designed to minimize visualization errors of these types of fields by querying the underlying finite element basis functions (e.g., high-order polynomials) directly, leading to pixel-exact representations of solutions and geometry. The system interacts with simulation data through runtime plugins, which only require users to implement a handful of operations fundamental to finite element solvers. The data in turn can be visualized through the use of cut surfaces, contours, isosurfaces, and volume rendering. These visualization algorithms are implemented using NVIDIA’s OptiX GPU-based ray-tracing engine, which provides accelerated ray traversal of the high-order geometry, and CUDA, which allows for effective parallel evaluation of the visualization algorithms. The direct interface between ElVis and the underlying data differentiates it from existing visualization tools. Current tools assume the underlying data is composed of linear primitives; high-order data must be interpolated with linear functions as a result. In this work, examples drawn from aerodynamic simulations-high-order discontinuous Galerkin finite element solutions of aerodynamic flows in particular-will demonstrate the superiority of ElVis’ pixel-exact approach when compared with traditional linear-interpolation methods. Such methods can introduce a number of inaccuracies in the resulting visualization, making it unclear if visual artifacts are genuine to the solution data or if these artifacts are the result of interpolation errors. Linear methods additionally cannot properly visualize curved geometries (elements or boundaries) which can greatly inhibit developers’ debugging efforts. 
As we will show, pixel-exact visualization exhibits none of these issues, removing the visualization scheme as a source of uncertainty for engineers using ElVis.",
"title": "ElVis: A System for the Accurate and Interactive Visualization of High-Order Finite Element Solutions",
"normalizedTitle": "ElVis: A System for the Accurate and Interactive Visualization of High-Order Finite Element Solutions",
"fno": "ttg2012122325",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Finite Element Methods",
"Isosurfaces",
"Polynomials",
"Geometry",
"Rendering Computer Graphics",
"Data Models",
"Isosurfaces",
"High Order Finite Elements",
"Spectral Hp Elements",
"Discontinuous Galerkin",
"Fluid Flow Simulation",
"Cut Surface Extraction",
"Contours"
],
"authors": [
{
"givenName": "Blake",
"surname": "Nelson",
"fullName": "Blake Nelson",
"affiliation": "University of Utah",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Eric",
"surname": "Liu",
"fullName": "Eric Liu",
"affiliation": "MIT",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Robert M.",
"surname": "Kirby",
"fullName": "Robert M. Kirby",
"affiliation": "University of Utah",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Robert",
"surname": "Haimes",
"fullName": "Robert Haimes",
"affiliation": "MIT",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "12",
"pubDate": "2012-12-01 00:00:00",
"pubType": "trans",
"pages": "2325-2334",
"year": "2012",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/ieee-vis/2004/8788/0/87880425",
"title": "Pixel-Exact Rendering of Spacetime Finite Element Solutions",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/2004/87880425/12OmNBezSF4",
"parentPublication": {
"id": "proceedings/ieee-vis/2004/8788/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/case/2009/3728/0/3728a537",
"title": "H1-Galerkin Mixed Finite Element Method for the Viscoelasticity Type Equation",
"doi": null,
"abstractUrl": "/proceedings-article/case/2009/3728a537/12OmNBr4eA3",
"parentPublication": {
"id": "proceedings/case/2009/3728/0",
"title": "2009 IITA International Conference on Control, Automation and Systems Engineering, CASE 2009",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/visual/1991/2245/0/00175823",
"title": "Applying 3D visualization techniques to finite element analysis",
"doi": null,
"abstractUrl": "/proceedings-article/visual/1991/00175823/12OmNqBKTSD",
"parentPublication": {
"id": "proceedings/visual/1991/2245/0",
"title": "1991 Proceeding Visualization",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icinis/2012/4855/0/4855a092",
"title": "An Efficient Adaptive Finite Element Analysis of Heterogeneous Materials Based on Centroid Material",
"doi": null,
"abstractUrl": "/proceedings-article/icinis/2012/4855a092/12OmNwcCIQA",
"parentPublication": {
"id": "proceedings/icinis/2012/4855/0",
"title": "Intelligent Networks and Intelligent Systems, International Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccis/2010/4270/0/4270a281",
"title": "Lagrange-Galerkin Discontinuous Finite Element Methods for the Navier-Stokes Equations",
"doi": null,
"abstractUrl": "/proceedings-article/iccis/2010/4270a281/12OmNxGALdu",
"parentPublication": {
"id": "proceedings/iccis/2010/4270/0",
"title": "2010 International Conference on Computational and Information Sciences",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sc/1993/4340/0/01263415",
"title": "A massively parallel adaptive finite element method with dynamic load balancing",
"doi": null,
"abstractUrl": "/proceedings-article/sc/1993/01263415/12OmNyv7mng",
"parentPublication": {
"id": "proceedings/sc/1993/4340/0",
"title": "SC Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/01/ttg2014010070",
"title": "GPU-Based Volume Visualization from High-Order Finite Element Fields",
"doi": null,
"abstractUrl": "/journal/tg/2014/01/ttg2014010070/13rRUEgs2M1",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2007/05/v1015",
"title": "Particle Systems for Efficient and Accurate High-Order Finite Element Visualization",
"doi": null,
"abstractUrl": "/journal/tg/2007/05/v1015/13rRUIIVlcE",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2006/01/v0114",
"title": "Ray-Tracing Polymorphic Multidomain Spectral/hp Elements for Isosurface Rendering",
"doi": null,
"abstractUrl": "/journal/tg/2006/01/v0114/13rRUIM2VBx",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2011/12/ttg2011121803",
"title": "GPU-Based Interactive Cut-Surface Extraction From High-Order Finite Element Fields",
"doi": null,
"abstractUrl": "/journal/tg/2011/12/ttg2011121803/13rRUzphDxV",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2012122315",
"articleId": "13rRUxYIMUY",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2012122335",
"articleId": "13rRUyeTVi0",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNxeutf3",
"title": "Jan.",
"year": "2013",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": "19",
"label": "Jan.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUxASuGj",
"doi": "10.1109/TVCG.2012.106",
"abstract": "We study the relationship between the noise in the vertex coordinates of a triangle mesh and normal noise. First, we compute in closed form the expectation for the angle θ between the new and the old normal when uniform noise is added to a single vertex of a triangle. Next, we propose and experimentally validate an approximation and lower and upper bounds for θ when uniform noise is added to all three vertices of the triangle. In all cases, for small amounts of spatial noise that do not severely distort the mesh, there is a linear correlation between θ and simple functions of the heights of the triangles and thus, θ can be computed efficiently. The addition of uniform spatial noise to a mesh can be seen as a dithered quantization of its vertices. We use the obtained linear correlations between spatial and normal noise to compute the level of dithered quantization of the mesh vertices when a tolerance for the average normal distortion is given.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We study the relationship between the noise in the vertex coordinates of a triangle mesh and normal noise. First, we compute in closed form the expectation for the angle θ between the new and the old normal when uniform noise is added to a single vertex of a triangle. Next, we propose and experimentally validate an approximation and lower and upper bounds for θ when uniform noise is added to all three vertices of the triangle. In all cases, for small amounts of spatial noise that do not severely distort the mesh, there is a linear correlation between θ and simple functions of the heights of the triangles and thus, θ can be computed efficiently. The addition of uniform spatial noise to a mesh can be seen as a dithered quantization of its vertices. We use the obtained linear correlations between spatial and normal noise to compute the level of dithered quantization of the mesh vertices when a tolerance for the average normal distortion is given.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We study the relationship between the noise in the vertex coordinates of a triangle mesh and normal noise. First, we compute in closed form the expectation for the angle θ between the new and the old normal when uniform noise is added to a single vertex of a triangle. Next, we propose and experimentally validate an approximation and lower and upper bounds for θ when uniform noise is added to all three vertices of the triangle. In all cases, for small amounts of spatial noise that do not severely distort the mesh, there is a linear correlation between θ and simple functions of the heights of the triangles and thus, θ can be computed efficiently. The addition of uniform spatial noise to a mesh can be seen as a dithered quantization of its vertices. We use the obtained linear correlations between spatial and normal noise to compute the level of dithered quantization of the mesh vertices when a tolerance for the average normal distortion is given.",
"title": "Linear Correlations between Spatial and Normal Noise in Triangle Meshes",
"normalizedTitle": "Linear Correlations between Spatial and Normal Noise in Triangle Meshes",
"fno": "ttg2013010045",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Mesh Generation",
"Approximation Theory",
"Computational Geometry",
"Average Normal Distortion",
"Linear Correlations",
"Spatial Noise",
"Normal Noise",
"Triangle Meshes",
"Uniform Noise",
"Dithered Vertex Quantization",
"Mesh Vertices",
"Noise",
"Quantization",
"Degradation",
"Upper Bound",
"Linear Approximation",
"Rendering Computer Graphics",
"Normal Noise",
"Mesh Generation",
"Approximation Theory",
"Computational Geometry",
"Average Normal Distortion",
"Linear Correlations",
"Spatial Noise",
"Normal Noise",
"Triangle Meshes",
"Uniform Noise",
"Dithered Vertex Quantization",
"Mesh Vertices",
"Noise",
"Quantization",
"Degradation",
"Upper Bound",
"Linear Approximation",
"Rendering Computer Graphics",
"Vertex Quantization",
"Triangle Mesh"
],
"authors": [
{
"givenName": null,
"surname": "Ying Yang",
"fullName": "Ying Yang",
"affiliation": "Sch. of Eng. & Comput. Sci., Durham Univ., Durham, UK",
"__typename": "ArticleAuthorType"
},
{
"givenName": "N.",
"surname": "Peyerimhoff",
"fullName": "N. Peyerimhoff",
"affiliation": "Dept. of Math. Sci., Durham Univ., Durham, UK",
"__typename": "ArticleAuthorType"
},
{
"givenName": "I.",
"surname": "Ivrissimtzis",
"fullName": "I. Ivrissimtzis",
"affiliation": "Sch. of Eng. & Comput. Sci., Durham Univ., Durham, UK",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2013-01-01 00:00:00",
"pubType": "trans",
"pages": "45-55",
"year": "2013",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/cmsp/2011/4356/1/4356a184",
"title": "Consistent Mesh Segmentation Using Protrusion Function and Graph Cut",
"doi": null,
"abstractUrl": "/proceedings-article/cmsp/2011/4356a184/12OmNBW0vCO",
"parentPublication": {
"id": "proceedings/cmsp/2011/4356/1",
"title": "Multimedia and Signal Processing, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccsit/2008/3308/0/3308a669",
"title": "A Hybrid Approach to Surface Segmentation of Sparse Triangle Meshes",
"doi": null,
"abstractUrl": "/proceedings-article/iccsit/2008/3308a669/12OmNBp52IP",
"parentPublication": {
"id": "proceedings/iccsit/2008/3308/0",
"title": "2008 International Conference on Computer Science and Information Technology",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dcc/1994/5637/0/00305946",
"title": "On lattice quantization noise",
"doi": null,
"abstractUrl": "/proceedings-article/dcc/1994/00305946/12OmNvm6VFl",
"parentPublication": {
"id": "proceedings/dcc/1994/5637/0",
"title": "Proceedings of IEEE Data Compression Conference (DCC'94)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdh/2012/4899/0/4899a432",
"title": "2D Shape Manipulation Using Equilateral Triangle Mesh",
"doi": null,
"abstractUrl": "/proceedings-article/icdh/2012/4899a432/12OmNxETa4O",
"parentPublication": {
"id": "proceedings/icdh/2012/4899/0",
"title": "4th International Conference on Digital Home (ICDH)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sibgrapi/2012/4829/0/4829a110",
"title": "ESQ: Editable SQuad Representation for Triangle Meshes",
"doi": null,
"abstractUrl": "/proceedings-article/sibgrapi/2012/4829a110/12OmNxR5UPg",
"parentPublication": {
"id": "proceedings/sibgrapi/2012/4829/0",
"title": "2012 25th SIBGRAPI Conference on Graphics, Patterns and Images",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sibgrapi/2009/3813/0/3813a072",
"title": "Surface Reconstruction: An Improved Marching Triangle Algorithm for Scalar and Vector Implicit Field Representations",
"doi": null,
"abstractUrl": "/proceedings-article/sibgrapi/2009/3813a072/12OmNy2Jt55",
"parentPublication": {
"id": "proceedings/sibgrapi/2009/3813/0",
"title": "2009 XXII Brazilian Symposium on Computer Graphics and Image Processing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vg/2005/26/0/01500538",
"title": "Robust generation of signed distance fields from triangle meshes",
"doi": null,
"abstractUrl": "/proceedings-article/vg/2005/01500538/12OmNzT7Otj",
"parentPublication": {
"id": "proceedings/vg/2005/26/0",
"title": "Volume Graphics 2005",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/1999/01/v0047",
"title": "Edgebreaker: Connectivity Compression for Triangle Meshes",
"doi": null,
"abstractUrl": "/journal/tg/1999/01/v0047/13rRUILLkve",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2000/01/v0079",
"title": "Compressed Progressive Meshes",
"doi": null,
"abstractUrl": "/journal/tg/2000/01/v0079/13rRUwhpBNZ",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/1998/02/v0145",
"title": "Constructing Hierarchies for Triangle Meshes",
"doi": null,
"abstractUrl": "/journal/tg/1998/02/v0145/13rRUy0qnGc",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2013010030",
"articleId": "13rRUwfZC0g",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2013010056",
"articleId": "13rRUygT7sD",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNzn38Jg",
"title": "Nov.",
"year": "2014",
"issueNum": "11",
"idPrefix": "tp",
"pubType": "journal",
"volume": "36",
"label": "Nov.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUxYrbVR",
"doi": "10.1109/TPAMI.2014.2316828",
"abstract": "3D object recognition in cluttered scenes is a rapidly growing research area. Based on the used types of features, 3D object recognition methods can broadly be divided into two categories-global or local feature based methods. Intensive research has been done on local surface feature based methods as they are more robust to occlusion and clutter which are frequently present in a real-world scene. This paper presents a comprehensive survey of existing local surface feature based 3D object recognition methods. These methods generally comprise three phases: 3D keypoint detection, local surface feature description, and surface matching. This paper covers an extensive literature survey of each phase of the process. It also enlists a number of popular and contemporary databases together with their relevant attributes.",
"abstracts": [
{
"abstractType": "Regular",
"content": "3D object recognition in cluttered scenes is a rapidly growing research area. Based on the used types of features, 3D object recognition methods can broadly be divided into two categories-global or local feature based methods. Intensive research has been done on local surface feature based methods as they are more robust to occlusion and clutter which are frequently present in a real-world scene. This paper presents a comprehensive survey of existing local surface feature based 3D object recognition methods. These methods generally comprise three phases: 3D keypoint detection, local surface feature description, and surface matching. This paper covers an extensive literature survey of each phase of the process. It also enlists a number of popular and contemporary databases together with their relevant attributes.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "3D object recognition in cluttered scenes is a rapidly growing research area. Based on the used types of features, 3D object recognition methods can broadly be divided into two categories-global or local feature based methods. Intensive research has been done on local surface feature based methods as they are more robust to occlusion and clutter which are frequently present in a real-world scene. This paper presents a comprehensive survey of existing local surface feature based 3D object recognition methods. These methods generally comprise three phases: 3D keypoint detection, local surface feature description, and surface matching. This paper covers an extensive literature survey of each phase of the process. It also enlists a number of popular and contemporary databases together with their relevant attributes.",
"title": "3D Object Recognition in Cluttered Scenes with Local Surface Features: A Survey",
"normalizedTitle": "3D Object Recognition in Cluttered Scenes with Local Surface Features: A Survey",
"fno": "06787078",
"hasPdf": true,
"idPrefix": "tp",
"keywords": [
"Feature Extraction",
"Image Matching",
"Object Detection",
"Object Recognition",
"Cluttered Scenes",
"Local Surface Features",
"3 D Object Recognition Methods",
"Local Feature Based Methods",
"Global Feature Based Methods",
"3 D Keypoint Detection",
"Local Surface Feature Description",
"Surface Matching",
"Contemporary Databases",
"Three Dimensional Displays",
"Object Recognition",
"Feature Extraction",
"Shape",
"Databases",
"Smoothing Methods",
"Robustness",
"3 D Object Recognition",
"Keypoint Detection",
"Feature Description",
"Range Image",
"Local Feature"
],
"authors": [
{
"givenName": "Yulan",
"surname": "Guo",
"fullName": "Yulan Guo",
"affiliation": "College of Electronic Science and Engineering, National University of Defense Technology, Changsha, Hunan, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Mohammed",
"surname": "Bennamoun",
"fullName": "Mohammed Bennamoun",
"affiliation": "School of Computer Science and Software Engineering, The University of Western Australia, Perth, WA, Australia",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Ferdous",
"surname": "Sohel",
"fullName": "Ferdous Sohel",
"affiliation": "School of Computer Science and Software Engineering, The University of Western Australia, Perth, WA, Australia",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Min",
"surname": "Lu",
"fullName": "Min Lu",
"affiliation": "College of Electronic Science and Engineering, National University of Defense Technology, Changsha, Hunan, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jianwei",
"surname": "Wan",
"fullName": "Jianwei Wan",
"affiliation": "College of Electronic Science and Engineering, National University of Defense Technology, Changsha, Hunan, China",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "11",
"pubDate": "2014-11-01 00:00:00",
"pubType": "trans",
"pages": "2270-2287",
"year": "2014",
"issn": "0162-8828",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/cgiv/2009/3789/0/3789a250",
"title": "Surface Mesh Segmentation Using Local Geometry",
"doi": null,
"abstractUrl": "/proceedings-article/cgiv/2009/3789a250/12OmNAL3B9E",
"parentPublication": {
"id": "proceedings/cgiv/2009/3789/0",
"title": "2009 Sixth International Conference on Computer Graphics, Imaging and Visualization",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2013/3022/0/3022a638",
"title": "A Novel Local Surface Description for Automatic 3D Object Recognition in Low Resolution Cluttered Scenes",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2013/3022a638/12OmNCuDzvl",
"parentPublication": {
"id": "proceedings/iccvw/2013/3022/0",
"title": "2013 IEEE International Conference on Computer Vision Workshops (ICCVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2015/8391/0/8391c273",
"title": "Dense Image Registration and Deformable Surface Reconstruction in Presence of Occlusions and Minimal Texture",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2015/8391c273/12OmNvqmUEa",
"parentPublication": {
"id": "proceedings/iccv/2015/8391/0",
"title": "2015 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2014/5118/0/5118c957",
"title": "Robust 3D Features for Matching between Distorted Range Scans Captured by Moving Systems",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2014/5118c957/12OmNy5hRg0",
"parentPublication": {
"id": "proceedings/cvpr/2014/5118/0",
"title": "2014 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2013/2840/0/2840c144",
"title": "Support Surface Prediction in Indoor Scenes",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2013/2840c144/12OmNzRqdJl",
"parentPublication": {
"id": "proceedings/iccv/2013/2840/0",
"title": "2013 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2012/2216/0/06460425",
"title": "Area-weighted surface normals for 3D object recognition",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2012/06460425/12OmNzZEApJ",
"parentPublication": {
"id": "proceedings/icpr/2012/2216/0",
"title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/08/06035704",
"title": "Inference-Based Surface Reconstruction of Cluttered Environments",
"doi": null,
"abstractUrl": "/journal/tg/2012/08/06035704/13rRUwj7cp9",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2018/3788/0/08545603",
"title": "Advancing Surface Feature Encoding and Matching for More Accurate 3D Biometric Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2018/08545603/17D45VtKitX",
"parentPublication": {
"id": "proceedings/icpr/2018/3788/0",
"title": "2018 24th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2018/3788/0/08546202",
"title": "Hybrid 3D Surface Description with Global Frames and Local Signatures of Histograms",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2018/08546202/17D45WrVg1L",
"parentPublication": {
"id": "proceedings/icpr/2018/3788/0",
"title": "2018 24th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2021/2688/0/268800a859",
"title": "Residual Geometric Feature Transform Network for 3D Surface Super-Resolution",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2021/268800a859/1zWEeQ2Ft72",
"parentPublication": {
"id": "proceedings/3dv/2021/2688/0",
"title": "2021 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "06787114",
"articleId": "13rRUygT7gB",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "06684145",
"articleId": "13rRUxNmPFd",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNqHItJa",
"title": "May",
"year": "2012",
"issueNum": "05",
"idPrefix": "tp",
"pubType": "journal",
"volume": "34",
"label": "May",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUxZRbpi",
"doi": "10.1109/TPAMI.2011.206",
"abstract": "We present a method for real-time 3D object instance detection that does not require a time-consuming training stage, and can handle untextured objects. At its core, our approach is a novel image representation for template matching designed to be robust to small image transformations. This robustness is based on spread image gradient orientations and allows us to test only a small subset of all possible pixel locations when parsing the image, and to represent a 3D object with a limited set of templates. In addition, we demonstrate that if a dense depth sensor is available we can extend our approach for an even better performance also taking 3D surface normal orientations into account. We show how to take advantage of the architecture of modern computers to build an efficient but very discriminant representation of the input images that can be used to consider thousands of templates in real time. We demonstrate in many experiments on real data that our method is much faster and more robust with respect to background clutter than current state-of-the-art methods.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We present a method for real-time 3D object instance detection that does not require a time-consuming training stage, and can handle untextured objects. At its core, our approach is a novel image representation for template matching designed to be robust to small image transformations. This robustness is based on spread image gradient orientations and allows us to test only a small subset of all possible pixel locations when parsing the image, and to represent a 3D object with a limited set of templates. In addition, we demonstrate that if a dense depth sensor is available we can extend our approach for an even better performance also taking 3D surface normal orientations into account. We show how to take advantage of the architecture of modern computers to build an efficient but very discriminant representation of the input images that can be used to consider thousands of templates in real time. We demonstrate in many experiments on real data that our method is much faster and more robust with respect to background clutter than current state-of-the-art methods.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We present a method for real-time 3D object instance detection that does not require a time-consuming training stage, and can handle untextured objects. At its core, our approach is a novel image representation for template matching designed to be robust to small image transformations. This robustness is based on spread image gradient orientations and allows us to test only a small subset of all possible pixel locations when parsing the image, and to represent a 3D object with a limited set of templates. In addition, we demonstrate that if a dense depth sensor is available we can extend our approach for an even better performance also taking 3D surface normal orientations into account. We show how to take advantage of the architecture of modern computers to build an efficient but very discriminant representation of the input images that can be used to consider thousands of templates in real time. We demonstrate in many experiments on real data that our method is much faster and more robust with respect to background clutter than current state-of-the-art methods.",
"title": "Gradient Response Maps for Real-Time Detection of Textureless Objects",
"normalizedTitle": "Gradient Response Maps for Real-Time Detection of Textureless Objects",
"fno": "06042881",
"hasPdf": true,
"idPrefix": "tp",
"keywords": [
"Object Detection",
"Computer Vision",
"Gradient Methods",
"Image Matching",
"Image Representation",
"3 D Surface Normal Orientation",
"Gradient Response Map",
"Real Time Detection",
"Textureless Object",
"3 D Object Instance Detection",
"Image Representation",
"Template Matching",
"Spread Image Gradient Orientation",
"Dense Depth Sensor",
"Robustness",
"Real Time Systems",
"Three Dimensional Displays",
"Image Edge Detection",
"Training",
"Clutter",
"Transforms",
"Multimodality Template Matching",
"Computer Vision",
"Real Time Detection And Object Recognition",
"Tracking"
],
"authors": [
{
"givenName": "P.",
"surname": "Sturm",
"fullName": "P. Sturm",
"affiliation": "STEEP Team, INRIA Grenoble-Rhone-Alpes, St. Ismier, France",
"__typename": "ArticleAuthorType"
},
{
"givenName": "S.",
"surname": "Ilic",
"fullName": "S. Ilic",
"affiliation": "Dept. of Comput.-Aided Med. Procedures (CAMP), Tech. Univ. Munchen, Garching, Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "C.",
"surname": "Cagniart",
"fullName": "C. Cagniart",
"affiliation": "Dept. of Comput.-Aided Med. Procedures (CAMP), Tech. Univ. Munchen, Garching, Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "S.",
"surname": "Hinterstoisser",
"fullName": "S. Hinterstoisser",
"affiliation": "Dept. of Comput.-Aided Med. Procedures (CAMP), Tech. Univ. Munchen, Garching, Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "N.",
"surname": "Navab",
"fullName": "N. Navab",
"affiliation": "Dept. of Comput.-Aided Med. Procedures (CAMP), Tech. Univ. Munchen, Garching, Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "P.",
"surname": "Fua",
"fullName": "P. Fua",
"affiliation": "Comput. Vision Lab. (CVLAB), Ecole Polytech. Federate de Lausane, Lausanne, Switzerland",
"__typename": "ArticleAuthorType"
},
{
"givenName": "V.",
"surname": "Lepetit",
"fullName": "V. Lepetit",
"affiliation": "Comput. Vision Lab. (CVLAB), Ecole Polytech. Federate de Lausane, Lausanne, Switzerland",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "05",
"pubDate": "2012-05-01 00:00:00",
"pubType": "trans",
"pages": "876-888",
"year": "2012",
"issn": "0162-8828",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/iccv/2011/1101/0/06126326",
"title": "Multimodal templates for real-time detection of texture-less objects in heavily cluttered scenes",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2011/06126326/12OmNBlFQTT",
"parentPublication": {
"id": "proceedings/iccv/2011/1101/0",
"title": "2011 IEEE International Conference on Computer Vision (ICCV 2011)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2010/4109/0/4109c378",
"title": "Robust Fourier-Based Image Alignment with Gradient Complex Image",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2010/4109c378/12OmNCcbEhx",
"parentPublication": {
"id": "proceedings/icpr/2010/4109/0",
"title": "Pattern Recognition, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2017/0457/0/0457a398",
"title": "Amodal Detection of 3D Objects: Inferring 3D Bounding Boxes from 2D Ones in RGB-Depth Images",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2017/0457a398/12OmNvAiSEn",
"parentPublication": {
"id": "proceedings/cvpr/2017/0457/0",
"title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2010/6984/0/05539908",
"title": "Dominant orientation templates for real-time detection of texture-less objects",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2010/05539908/12OmNvFpEAi",
"parentPublication": {
"id": "proceedings/cvpr/2010/6984/0",
"title": "2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cis/2011/4584/0/4584b238",
"title": "The Multispectral Image Edge Detection Based on Clifford Gradient",
"doi": null,
"abstractUrl": "/proceedings-article/cis/2011/4584b238/12OmNybfqYX",
"parentPublication": {
"id": "proceedings/cis/2011/4584/0",
"title": "2011 Seventh International Conference on Computational Intelligence and Security",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2014/5118/0/5118c505",
"title": "Unsupervised Learning of Dictionaries of Hierarchical Compositional Models",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2014/5118c505/12OmNyprnrB",
"parentPublication": {
"id": "proceedings/cvpr/2014/5118/0",
"title": "2014 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2011/0394/0/05995385",
"title": "Aggregating gradient distributions into intensity orders: A novel local image descriptor",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2011/05995385/12OmNzTYBVR",
"parentPublication": {
"id": "proceedings/cvpr/2011/0394/0",
"title": "CVPR 2011",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2012/1226/0/295P2C33",
"title": "Learning 3D object templates by hierarchical quantization of geometry and appearance spaces",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2012/295P2C33/12OmNzZEArF",
"parentPublication": {
"id": "proceedings/cvpr/2012/1226/0",
"title": "2012 IEEE Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2002/04/i0495",
"title": "Geometric Probing of Dense Range Data",
"doi": null,
"abstractUrl": "/journal/tp/2002/04/i0495/13rRUxBa57a",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2019/4803/0/480300k0412",
"title": "TAPA-MVS: Textureless-Aware PAtchMatch Multi-View Stereo",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2019/480300k0412/1hQqq0e4nGU",
"parentPublication": {
"id": "proceedings/iccv/2019/4803/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "06035718",
"articleId": "13rRUwbs1TH",
"__typename": "AdjacentArticleType"
},
"next": null,
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNzV70s0",
"title": "May",
"year": "2015",
"issueNum": "05",
"idPrefix": "tg",
"pubType": "journal",
"volume": "21",
"label": "May",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUwghd9a",
"doi": "10.1109/TVCG.2014.2385056",
"abstract": "Text readability with augmented reality head-worn displays is critical and at present time, there are no standard guidelines to follow. The readability depends mainly on background lighting, display technology (i.e., OST: optical see-through or VST: video see-through), and text style (e.g., plain text, outline or billboard). In this work, we addressed the readability limits for industrial activities. We experimented the effects of two background illuminances levels (1,000 lx for very fine basic industrial tasks and 4,000 lx for fine machining), two commercially available head-worn display technologies, variable outline widths and contrast polarity of text. We analyzed the performance of 12 subjects by collecting about 3,400 measurements using a specific test application and followed by qualitative interviews. With high illuminances, VST performed better than OST, regardless of contrast polarity and outline width. We found that negative contrast polarity is preferable with VST, and that just a minimum outline (1 px) around black text is optimal. On the contrary, positive contrast polarity should be used with OST and outline is not effective. Therefore, we evaluated the usage limits of the OST by sampling its contrast sensitivity function.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Text readability with augmented reality head-worn displays is critical and at present time, there are no standard guidelines to follow. The readability depends mainly on background lighting, display technology (i.e., OST: optical see-through or VST: video see-through), and text style (e.g., plain text, outline or billboard). In this work, we addressed the readability limits for industrial activities. We experimented the effects of two background illuminances levels (1,000 lx for very fine basic industrial tasks and 4,000 lx for fine machining), two commercially available head-worn display technologies, variable outline widths and contrast polarity of text. We analyzed the performance of 12 subjects by collecting about 3,400 measurements using a specific test application and followed by qualitative interviews. With high illuminances, VST performed better than OST, regardless of contrast polarity and outline width. We found that negative contrast polarity is preferable with VST, and that just a minimum outline (1 px) around black text is optimal. On the contrary, positive contrast polarity should be used with OST and outline is not effective. Therefore, we evaluated the usage limits of the OST by sampling its contrast sensitivity function.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Text readability with augmented reality head-worn displays is critical and at present time, there are no standard guidelines to follow. The readability depends mainly on background lighting, display technology (i.e., OST: optical see-through or VST: video see-through), and text style (e.g., plain text, outline or billboard). In this work, we addressed the readability limits for industrial activities. We experimented the effects of two background illuminances levels (1,000 lx for very fine basic industrial tasks and 4,000 lx for fine machining), two commercially available head-worn display technologies, variable outline widths and contrast polarity of text. We analyzed the performance of 12 subjects by collecting about 3,400 measurements using a specific test application and followed by qualitative interviews. With high illuminances, VST performed better than OST, regardless of contrast polarity and outline width. We found that negative contrast polarity is preferable with VST, and that just a minimum outline (1 px) around black text is optimal. On the contrary, positive contrast polarity should be used with OST and outline is not effective. Therefore, we evaluated the usage limits of the OST by sampling its contrast sensitivity function.",
"title": "Effect of Text Outline and Contrast Polarity on AR Text Readability in Industrial Lighting",
"normalizedTitle": "Effect of Text Outline and Contrast Polarity on AR Text Readability in Industrial Lighting",
"fno": "06994851",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Lighting",
"Image Color Analysis",
"Color",
"Cameras",
"Visualization",
"Standards",
"Optical Sensors",
"Contrast Sensitivity Function",
"Augmented Reality",
"Industrial Lighting",
"Optical See Through",
"Video See Through",
"Contrast Sensitivity Function",
"Augmented Reality",
"Industrial Lighting",
"Optical See Through",
"Video See Through"
],
"authors": [
{
"givenName": "Michele",
"surname": "Gattullo",
"fullName": "Michele Gattullo",
"affiliation": "Department of Mechanics, Mathematics and Management, Bari, Italy",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Antonio Emmanuele",
"surname": "Uva",
"fullName": "Antonio Emmanuele Uva",
"affiliation": "Department of Mechanics, Mathematics and Management, Bari, Italy",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Michele",
"surname": "Fiorentino",
"fullName": "Michele Fiorentino",
"affiliation": "Department of Mechanics, Mathematics and Management, Bari, Italy",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Giuseppe",
"surname": "Monno",
"fullName": "Giuseppe Monno",
"affiliation": "Department of Mechanics, Mathematics and Management, Bari, Italy",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "05",
"pubDate": "2015-05-01 00:00:00",
"pubType": "trans",
"pages": "638-651",
"year": "2015",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/das/2014/3244/0/3244a262",
"title": "A Seed-Based Segmentation Method for Scene Text Extraction",
"doi": null,
"abstractUrl": "/proceedings-article/das/2014/3244a262/12OmNqIhG12",
"parentPublication": {
"id": "proceedings/das/2014/3244/0",
"title": "2014 11th IAPR International Workshop on Document Analysis Systems (DAS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2017/6327/0/6327a052",
"title": "[POSTER] Hybrid Video/Optical See-Through HMD",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2017/6327a052/12OmNy4r3Ph",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2017/6327/0",
"title": "2017 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2017/1034/0/1034d023",
"title": "Global and Local Contrast Adaptive Enhancement for Non-uniform Illumination Color Images",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2017/1034d023/12OmNzwHvp4",
"parentPublication": {
"id": "proceedings/iccvw/2017/1034/0",
"title": "2017 IEEE International Conference on Computer Vision Workshop (ICCVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2015/11/07165643",
"title": "Semi-Parametric Color Reproduction Method for Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/journal/tg/2015/11/07165643/13rRUILtJzB",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/11/08007218",
"title": "Occlusion Leak Compensation for Optical See-Through Displays Using a Single-Layer Transmissive Spatial Light Modulator",
"doi": null,
"abstractUrl": "/journal/tg/2017/11/08007218/13rRUxcbnHi",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/05/08676153",
"title": "Light Attenuation Display: Subtractive See-Through Near-Eye Display via Spatial Color Filtering",
"doi": null,
"abstractUrl": "/journal/tg/2019/05/08676153/18LFbQfp6x2",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2020/8508/0/850800a649",
"title": "Comparing World and Screen Coordinate Systems in Optical See-Through Head-Mounted Displays for Text Readability while Walking",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2020/850800a649/1pysvKFdazS",
"parentPublication": {
"id": "proceedings/ismar/2020/8508/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/12/09463728",
"title": "Color Contrast Enhanced Rendering for Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/journal/tg/2022/12/09463728/1uFxo1ImlpK",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/12/09490310",
"title": "Shedding Light on Cast Shadows: An Investigation of Perceived Ground Contact in AR and VR",
"doi": null,
"abstractUrl": "/journal/tg/2022/12/09490310/1vmGThNh9jq",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2021/1298/0/129800a115",
"title": "Perceived Transparency in Optical See-Through Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2021/129800a115/1yeQLPBHFBe",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2021/1298/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "06991551",
"articleId": "13rRUNvyakP",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07006805",
"articleId": "13rRUxYIMV0",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNBCZnUj",
"title": "Mar.-Apr.",
"year": "2015",
"issueNum": "02",
"idPrefix": "cg",
"pubType": "magazine",
"volume": "35",
"label": "Mar.-Apr.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUxjQyxG",
"doi": "10.1109/MCG.2015.36",
"abstract": "In the Industrie 4.0 vision, the creation of leading-edge options for interaction between people and technology occupies a key role. In this context, augmented reality (AR) is one of the most suitable solutions. However, it is still not ready to be effectively used in industry. A crucial problem is the legibility of text seen through AR head-worn displays (HWDs). AR interface designers have no standard guidelines to follow, especially for these devices. Literature and anecdotal evidence suggest that legibility depends mainly on background, display technology (that is, see-through optical or video HWDs), and text style (for example, plain text, outline, or billboard). Furthermore, there are constraints to consider in industrial environments, such as standard color-coding practices and workplace lighting. The authors examine aspects affecting text legibility with an emphasis on deriving guidelines to support AR interface designers. Their results suggest that enhancing text contrast via software, along with using the outline or billboard style, is an effective practice to improve legibility in many situations. If one text style is needed for both types of HWD, their results suggest that colored billboards (with neutral white text) are effective. When color coding is not mandatory, white text and blue billboard are more effective than other styles tested.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In the Industrie 4.0 vision, the creation of leading-edge options for interaction between people and technology occupies a key role. In this context, augmented reality (AR) is one of the most suitable solutions. However, it is still not ready to be effectively used in industry. A crucial problem is the legibility of text seen through AR head-worn displays (HWDs). AR interface designers have no standard guidelines to follow, especially for these devices. Literature and anecdotal evidence suggest that legibility depends mainly on background, display technology (that is, see-through optical or video HWDs), and text style (for example, plain text, outline, or billboard). Furthermore, there are constraints to consider in industrial environments, such as standard color-coding practices and workplace lighting. The authors examine aspects affecting text legibility with an emphasis on deriving guidelines to support AR interface designers. Their results suggest that enhancing text contrast via software, along with using the outline or billboard style, is an effective practice to improve legibility in many situations. If one text style is needed for both types of HWD, their results suggest that colored billboards (with neutral white text) are effective. When color coding is not mandatory, white text and blue billboard are more effective than other styles tested.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In the Industrie 4.0 vision, the creation of leading-edge options for interaction between people and technology occupies a key role. In this context, augmented reality (AR) is one of the most suitable solutions. However, it is still not ready to be effectively used in industry. A crucial problem is the legibility of text seen through AR head-worn displays (HWDs). AR interface designers have no standard guidelines to follow, especially for these devices. Literature and anecdotal evidence suggest that legibility depends mainly on background, display technology (that is, see-through optical or video HWDs), and text style (for example, plain text, outline, or billboard). Furthermore, there are constraints to consider in industrial environments, such as standard color-coding practices and workplace lighting. The authors examine aspects affecting text legibility with an emphasis on deriving guidelines to support AR interface designers. Their results suggest that enhancing text contrast via software, along with using the outline or billboard style, is an effective practice to improve legibility in many situations. If one text style is needed for both types of HWD, their results suggest that colored billboards (with neutral white text) are effective. When color coding is not mandatory, white text and blue billboard are more effective than other styles tested.",
"title": "Legibility in Industrial AR: Text Style, Color Coding, and Illuminance",
"normalizedTitle": "Legibility in Industrial AR: Text Style, Color Coding, and Illuminance",
"fno": "mcg2015020052",
"hasPdf": true,
"idPrefix": "cg",
"keywords": [
"Augmented Reality",
"Human Computer Interaction",
"Manufacturing",
"Style Guides",
"Mixed Reality",
"Computer Graphics",
"Augmented Reality",
"Head Worn Displays",
"Industrie 4 0",
"Human Computer Interface",
"Style Guides",
"Vision I O",
"Usability Engineering"
],
"authors": [
{
"givenName": "Michele",
"surname": "Gattullo",
"fullName": "Michele Gattullo",
"affiliation": "Polytechnic Institute of Bari",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Antonio E.",
"surname": "Uva",
"fullName": "Antonio E. Uva",
"affiliation": "Polytechnic Institute of Bari",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Michele",
"surname": "Fiorentino",
"fullName": "Michele Fiorentino",
"affiliation": "Polytechnic Institute of Bari",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Joseph L.",
"surname": "Gabbard",
"fullName": "Joseph L. Gabbard",
"affiliation": "Virginia Tech",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "02",
"pubDate": "2015-03-01 00:00:00",
"pubType": "mags",
"pages": "52-61",
"year": "2015",
"issn": "0272-1716",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/ozchi/1996/7525/0/75250094",
"title": "Testing a User Interface Style Guide",
"doi": null,
"abstractUrl": "/proceedings-article/ozchi/1996/75250094/12OmNAGNCev",
"parentPublication": {
"id": "proceedings/ozchi/1996/7525/0",
"title": "Proceedings Sixth Australian Conference on Computer-Human Interaction",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdar/2001/1263/0/12630997",
"title": "Prediction of Handwriting Legibility",
"doi": null,
"abstractUrl": "/proceedings-article/icdar/2001/12630997/12OmNqJZgM0",
"parentPublication": {
"id": "proceedings/icdar/2001/1263/0",
"title": "Proceedings of Sixth International Conference on Document Analysis and Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2014/6184/0/06948436",
"title": "[Poster] Ongoing development of a user-centered, AR testbed in industry",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2014/06948436/12OmNs59JLY",
"parentPublication": {
"id": "proceedings/ismar/2014/6184/0",
"title": "2014 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2013/4795/0/06549411",
"title": "Early steps towards understanding text legibility in handheld augmented reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2013/06549411/12OmNy6HQV1",
"parentPublication": {
"id": "proceedings/vr/2013/4795/0",
"title": "2013 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2009/02/ttg2009020193",
"title": "Comprehensible Visualization for Augmented Reality",
"doi": null,
"abstractUrl": "/journal/tg/2009/02/ttg2009020193/13rRUxASuGc",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2008/04/mcg2008040040",
"title": "Toward Next-Gen Mobile AR Games",
"doi": null,
"abstractUrl": "/magazine/cg/2008/04/mcg2008040040/13rRUxASujW",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/01/ttg2014010125",
"title": "Text Readability in Head-Worn Displays: Color and Style Optimization in Video versus Optical See-Through Devices",
"doi": null,
"abstractUrl": "/journal/tg/2014/01/ttg2014010125/13rRUxNEqPT",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2020/5608/0/09089433",
"title": "Glanceable AR: Evaluating Information Access Methods for Head-Worn Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2020/09089433/1jIxf3ZEs0w",
"parentPublication": {
"id": "proceedings/vr/2020/5608/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090496",
"title": "The influence of text rotation, font and distance on legibility in VR",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090496/1jIxn3eRfnq",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvrv/2020/0497/0/049700a379",
"title": "AR Creator: A Mobile Application of Logic Education Based on AR",
"doi": null,
"abstractUrl": "/proceedings-article/icvrv/2020/049700a379/1vg86AfolmE",
"parentPublication": {
"id": "proceedings/icvrv/2020/0497/0",
"title": "2020 International Conference on Virtual Reality and Visualization (ICVRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "mcg2015020041",
"articleId": "13rRUwhpBQ7",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "mcg2015020062",
"articleId": "13rRUynZ5qk",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNAle6Qx",
"title": "November/December",
"year": "2007",
"issueNum": "06",
"idPrefix": "tg",
"pubType": "journal",
"volume": "13",
"label": "November/December",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUyeTVhV",
"doi": "10.1109/TVCG.2007.70606",
"abstract": "Hardware-accelerated volume rendering using the GPU is now the standard approach for real-time volume rendering, although limited graphics memory can present a problem when rendering large volume data sets. Volumetric compression in which the decompression is coupled to rendering has been shown to be an effective solution to this problem; however, most existing techniques were developed in the context of software volume rendering, and all but the simplest approaches are prohibitive in a real-time hardware-accelerated volume rendering context. In this paper we present a novel block-based transform coding scheme designed specifically with real-time volume rendering in mind, such that the decompression is fast without sacrificing compression quality. This is made possible by consolidating the inverse transform with dequantization in such a way as to allow most of the reprojection to be precomputed. Furthermore, we take advantage of the freedom afforded by off-line compression in order to optimize the encoding as much as possible while hiding this complexity from the decoder. In this context we develop a new block classification scheme which allows us to preserve perceptually important features in the compression. The result of this work is an asymmetric transform coding scheme that allows very large volumes to be compressed and then decompressed in real-time while rendering on the GPU.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Hardware-accelerated volume rendering using the GPU is now the standard approach for real-time volume rendering, although limited graphics memory can present a problem when rendering large volume data sets. Volumetric compression in which the decompression is coupled to rendering has been shown to be an effective solution to this problem; however, most existing techniques were developed in the context of software volume rendering, and all but the simplest approaches are prohibitive in a real-time hardware-accelerated volume rendering context. In this paper we present a novel block-based transform coding scheme designed specifically with real-time volume rendering in mind, such that the decompression is fast without sacrificing compression quality. This is made possible by consolidating the inverse transform with dequantization in such a way as to allow most of the reprojection to be precomputed. Furthermore, we take advantage of the freedom afforded by off-line compression in order to optimize the encoding as much as possible while hiding this complexity from the decoder. In this context we develop a new block classification scheme which allows us to preserve perceptually important features in the compression. The result of this work is an asymmetric transform coding scheme that allows very large volumes to be compressed and then decompressed in real-time while rendering on the GPU.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Hardware-accelerated volume rendering using the GPU is now the standard approach for real-time volume rendering, although limited graphics memory can present a problem when rendering large volume data sets. Volumetric compression in which the decompression is coupled to rendering has been shown to be an effective solution to this problem; however, most existing techniques were developed in the context of software volume rendering, and all but the simplest approaches are prohibitive in a real-time hardware-accelerated volume rendering context. In this paper we present a novel block-based transform coding scheme designed specifically with real-time volume rendering in mind, such that the decompression is fast without sacrificing compression quality. This is made possible by consolidating the inverse transform with dequantization in such a way as to allow most of the reprojection to be precomputed. Furthermore, we take advantage of the freedom afforded by off-line compression in order to optimize the encoding as much as possible while hiding this complexity from the decoder. In this context we develop a new block classification scheme which allows us to preserve perceptually important features in the compression. The result of this work is an asymmetric transform coding scheme that allows very large volumes to be compressed and then decompressed in real-time while rendering on the GPU.",
"title": "Transform Coding for Hardware-accelerated Volume Rendering",
"normalizedTitle": "Transform Coding for Hardware-accelerated Volume Rendering",
"fno": "v1600",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Volume Compression",
"Compressed Volume Rendering",
"Transform Coding",
"Hardware Accelerated Volume Rendering"
],
"authors": [
{
"givenName": "Nathaniel",
"surname": "Fout",
"fullName": "Nathaniel Fout",
"affiliation": "IEEE",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Kwan-Liu",
"surname": "Ma",
"fullName": "Kwan-Liu Ma",
"affiliation": "IEEE",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "06",
"pubDate": "2007-11-01 00:00:00",
"pubType": "trans",
"pages": "1600-1607",
"year": "2007",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/icvrv/2011/4602/0/4602a205",
"title": "A Smart Compression Scheme for GPU-Accelerated Volume Rendering of Time-Varying Data",
"doi": null,
"abstractUrl": "/proceedings-article/icvrv/2011/4602a205/12OmNrnJ6SL",
"parentPublication": {
"id": "proceedings/icvrv/2011/4602/0",
"title": "2011 International Conference on Virtual Reality and Visualization",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iscid/2008/3311/2/3311b030",
"title": "Real-time Medical Image Volume Rendering Based on GPU Accelerated Method",
"doi": null,
"abstractUrl": "/proceedings-article/iscid/2008/3311b030/12OmNvjgWRZ",
"parentPublication": {
"id": "proceedings/iscid/2008/3311/2",
"title": "2008 International Symposium on Computational Intelligence and Design",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vv/2004/8781/0/87810025",
"title": "Transfer Function Based Adaptive Decompression for Volume Rendering of Large Medical Data Sets",
"doi": null,
"abstractUrl": "/proceedings-article/vv/2004/87810025/12OmNwI8c9C",
"parentPublication": {
"id": "proceedings/vv/2004/8781/0",
"title": "Volume Visualization and Graphics, IEEE Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dcc/1997/7761/0/77610271",
"title": "Fast and Compact Volume Rendering in the Compressed Transform Domain",
"doi": null,
"abstractUrl": "/proceedings-article/dcc/1997/77610271/12OmNy1SFBD",
"parentPublication": {
"id": "proceedings/dcc/1997/7761/0",
"title": "Data Compression Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-vis/1997/8262/0/82620191",
"title": "Accelerated volume rendering using homogeneous region encoding",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/1997/82620191/12OmNzVoBzB",
"parentPublication": {
"id": "proceedings/ieee-vis/1997/8262/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pg/1999/0293/0/02930147",
"title": "An Efficient Wavelet-Based Compression Method for Volume Rendering",
"doi": null,
"abstractUrl": "/proceedings-article/pg/1999/02930147/12OmNzZmZif",
"parentPublication": {
"id": "proceedings/pg/1999/0293/0",
"title": "Computer Graphics and Applications, Pacific Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-vis/1995/7187/0/71870011",
"title": "Interactive Maximum Projection Volume Rendering",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/1995/71870011/12OmNzZmZv2",
"parentPublication": {
"id": "proceedings/ieee-vis/1995/7187/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2007/06/04376192",
"title": "Transform Coding for Hardware-accelerated Volume Rendering",
"doi": null,
"abstractUrl": "/journal/tg/2007/06/04376192/13rRUxC0SW3",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/1995/01/v0029",
"title": "Volume Rendering of DCT-Based Compressed 3D Scalar Data",
"doi": null,
"abstractUrl": "/journal/tg/1995/01/v0029/13rRUxZRbnP",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/12/ttg2012122335",
"title": "Fuzzy Volume Rendering",
"doi": null,
"abstractUrl": "/journal/tg/2012/12/ttg2012122335/13rRUyeTVi0",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "v1592",
"articleId": "13rRUygBw72",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "04376206",
"articleId": "13rRUyuegh4",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNC36tS6",
"title": "January",
"year": "2007",
"issueNum": "01",
"idPrefix": "td",
"pubType": "journal",
"volume": "18",
"label": "January",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUygT7eL",
"doi": "10.1109/TPDS.2007.253277",
"abstract": "In this work, image-space-parallel direct volume rendering (DVR) of unstructured grids is investigated for distributed-memory architectures. A hypergraph-partitioning-based model is proposed for the adaptive screen partitioning problem in this context. The proposed model aims to balance the rendering loads of processors while trying to minimize the amount of data replication. In the parallel DVR framework we adopted, each data primitive is statically owned by its home processor, which is responsible from replicating its primitives on other processors. Two appropriate remapping models are proposed by enhancing the above model for use within this framework. These two remapping models aim to minimize the total volume of communication in data replication while balancing the rendering loads of processors. Based on the proposed models, a parallel DVR algorithm is developed. The experiments conducted on a PC cluster show that the proposed remapping models achieve better speedup values compared to the remapping models previously suggested for image-space-parallel DVR",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this work, image-space-parallel direct volume rendering (DVR) of unstructured grids is investigated for distributed-memory architectures. A hypergraph-partitioning-based model is proposed for the adaptive screen partitioning problem in this context. The proposed model aims to balance the rendering loads of processors while trying to minimize the amount of data replication. In the parallel DVR framework we adopted, each data primitive is statically owned by its home processor, which is responsible from replicating its primitives on other processors. Two appropriate remapping models are proposed by enhancing the above model for use within this framework. These two remapping models aim to minimize the total volume of communication in data replication while balancing the rendering loads of processors. Based on the proposed models, a parallel DVR algorithm is developed. The experiments conducted on a PC cluster show that the proposed remapping models achieve better speedup values compared to the remapping models previously suggested for image-space-parallel DVR",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this work, image-space-parallel direct volume rendering (DVR) of unstructured grids is investigated for distributed-memory architectures. A hypergraph-partitioning-based model is proposed for the adaptive screen partitioning problem in this context. The proposed model aims to balance the rendering loads of processors while trying to minimize the amount of data replication. In the parallel DVR framework we adopted, each data primitive is statically owned by its home processor, which is responsible from replicating its primitives on other processors. Two appropriate remapping models are proposed by enhancing the above model for use within this framework. These two remapping models aim to minimize the total volume of communication in data replication while balancing the rendering loads of processors. Based on the proposed models, a parallel DVR algorithm is developed. The experiments conducted on a PC cluster show that the proposed remapping models achieve better speedup values compared to the remapping models previously suggested for image-space-parallel DVR",
"title": "Hypergraph-Partitioning-Based Remapping Models for Image-Space-Parallel Direct Volume Rendering of Unstructured Grids",
"normalizedTitle": "Hypergraph-Partitioning-Based Remapping Models for Image-Space-Parallel Direct Volume Rendering of Unstructured Grids",
"fno": "04020508",
"hasPdf": true,
"idPrefix": "td",
"keywords": [
"Resource Allocation",
"Distributed Memory Systems",
"Ray Tracing",
"Rendering Computer Graphics",
"Ray Casting",
"Hypergraph Partitioning Based Remapping Models",
"Image Space Parallel Direct Volume Rendering",
"Unstructured Grids",
"Distributed Memory Architectures",
"Adaptive Screen Partitioning Problem",
"Rendering Load Balancing",
"Data Replication",
"PC Cluster",
"Rendering Computer Graphics",
"Sampling Methods",
"Pixel",
"Context Modeling",
"Clustering Algorithms",
"Partitioning Algorithms",
"Casting",
"Data Analysis",
"Analytical Models",
"Acceleration",
"Remapping",
"Direct Volume Rendering",
"Unstructured Grids",
"Ray Casting",
"Image Space Parallelization",
"Hypergraph Partitioning",
"Screen Partitioning"
],
"authors": [
{
"givenName": "Berkant Barla",
"surname": "Cambazoglu",
"fullName": "Berkant Barla Cambazoglu",
"affiliation": "Computer Engineering Department, Bilkent University, Ankara, Turkey",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Cevdet",
"surname": "Aykanat",
"fullName": "Cevdet Aykanat",
"affiliation": "Computer Engineering Department, Bilkent University, Ankara, Turkey",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2007-01-01 00:00:00",
"pubType": "trans",
"pages": "3-16",
"year": "2007",
"issn": "1045-9219",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/icvrv/2011/4602/0/4602a158",
"title": "An Adaptive Sampling Based Parallel Volume Rendering Algorithm",
"doi": null,
"abstractUrl": "/proceedings-article/icvrv/2011/4602a158/12OmNxE2mHp",
"parentPublication": {
"id": "proceedings/icvrv/2011/4602/0",
"title": "2011 International Conference on Virtual Reality and Visualization",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-vis/2005/2766/0/01532796",
"title": "Interactive rendering of large unstructured grids using dynamic level-of-detail",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/2005/01532796/12OmNyvY9ut",
"parentPublication": {
"id": "proceedings/ieee-vis/2005/2766/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dagstuhl/1997/0503/0/05030137",
"title": "Ray-Based Data Level Comparisons of Direct Volume Rendering Algorithms",
"doi": null,
"abstractUrl": "/proceedings-article/dagstuhl/1997/05030137/12OmNzvQHVf",
"parentPublication": {
"id": "proceedings/dagstuhl/1997/0503/0",
"title": "Dagstuhl '97 - Scientific Visualization Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/td/2007/01/l0003",
"title": "Hypergraph-Partitioning-Based Remapping Models for Image-Space-Parallel Direct Volume Rendering of Unstructured Grids",
"doi": null,
"abstractUrl": "/journal/td/2007/01/l0003/13rRUIJcWl5",
"parentPublication": {
"id": "trans/td",
"title": "IEEE Transactions on Parallel & Distributed Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2010/06/ttg2010061505",
"title": "Direct Interval Volume Visualization",
"doi": null,
"abstractUrl": "/journal/tg/2010/06/ttg2010061505/13rRUxcsYLN",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/06/08576679",
"title": "Perceptually Validated Cross-Renderer Analytical BRDF Parameter Remapping",
"doi": null,
"abstractUrl": "/journal/tg/2020/06/08576679/17D45XreC6e",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dagstuhl/1997/0503/0/01423111",
"title": "Ray-Based Data Level Comparisons of Direct Volume Rendering Algorithms",
"doi": null,
"abstractUrl": "/proceedings-article/dagstuhl/1997/01423111/1h0N3aF69Lq",
"parentPublication": {
"id": "proceedings/dagstuhl/1997/0503/0",
"title": "Dagstuhl '97 - Scientific Visualization Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/02/09224194",
"title": "Direct Volume Rendering with Nonparametric Models of Uncertainty",
"doi": null,
"abstractUrl": "/journal/tg/2021/02/09224194/1nV71j9G3yo",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/07/09258424",
"title": "Real-Time Denoising of Volumetric Path Tracing for Direct Volume Rendering",
"doi": null,
"abstractUrl": "/journal/tg/2022/07/09258424/1oHhYFwKrM4",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vis/2020/8014/0/801400a106",
"title": "FAVR - Accelerating Direct Volume Rendering for Virtual RealitySystems",
"doi": null,
"abstractUrl": "/proceedings-article/vis/2020/801400a106/1qRNBEWTyEw",
"parentPublication": {
"id": "proceedings/vis/2020/8014/0",
"title": "2020 IEEE Visualization Conference (VIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": null,
"next": {
"fno": "04020510",
"articleId": "13rRUzpzeAx",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNyq0zFI",
"title": "May",
"year": "2020",
"issueNum": "05",
"idPrefix": "tg",
"pubType": "journal",
"volume": "26",
"label": "May",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1iFLKo4ODvO",
"doi": "10.1109/TVCG.2020.2978971",
"abstract": "Welcome to the 9th <italic>IEEE Transactions on Visualization and Computer Graphics (TVCG</italic>) special issue on IEEE Virtual Reality and 3D User Interfaces. This volume contains a total of 29 full papers selected for and presented at the IEEE Conference on Virtual Reality and 3D User Interfaces (IEEE VR 2020) held in Atlanta, United States on March 22–26, 2020. Founded in 1993, IEEE VR has a long tradition as the premier venue where new research results in the field of Virtual Reality (VR) are presented. With the emergence of VR as a major technology in a diverse set of fields, such as entertainment, education, data analytics, artificial intelligence, medicine, construction, training, and many others, the papers presented at IEEE VR and published in the <italic>IEEE TVCG VR</italic> special issue mark a major highlight of the year.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Welcome to the 9th <italic>IEEE Transactions on Visualization and Computer Graphics (TVCG</italic>) special issue on IEEE Virtual Reality and 3D User Interfaces. This volume contains a total of 29 full papers selected for and presented at the IEEE Conference on Virtual Reality and 3D User Interfaces (IEEE VR 2020) held in Atlanta, United States on March 22–26, 2020. Founded in 1993, IEEE VR has a long tradition as the premier venue where new research results in the field of Virtual Reality (VR) are presented. With the emergence of VR as a major technology in a diverse set of fields, such as entertainment, education, data analytics, artificial intelligence, medicine, construction, training, and many others, the papers presented at IEEE VR and published in the <italic>IEEE TVCG VR</italic> special issue mark a major highlight of the year.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Welcome to the 9th IEEE Transactions on Visualization and Computer Graphics (TVCG) special issue on IEEE Virtual Reality and 3D User Interfaces. This volume contains a total of 29 full papers selected for and presented at the IEEE Conference on Virtual Reality and 3D User Interfaces (IEEE VR 2020) held in Atlanta, United States on March 22–26, 2020. Founded in 1993, IEEE VR has a long tradition as the premier venue where new research results in the field of Virtual Reality (VR) are presented. With the emergence of VR as a major technology in a diverse set of fields, such as entertainment, education, data analytics, artificial intelligence, medicine, construction, training, and many others, the papers presented at IEEE VR and published in the IEEE TVCG VR special issue mark a major highlight of the year.",
"title": "Introducing the IEEE Virtual Reality 2020 Special Issue",
"normalizedTitle": "Introducing the IEEE Virtual Reality 2020 Special Issue",
"fno": "09052628",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Special Issues And Sections",
"Virtual Reality",
"Meetings"
],
"authors": [
{
"givenName": "Klaus",
"surname": "Mueller",
"fullName": "Klaus Mueller",
"affiliation": "Stony Brook University",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Doug",
"surname": "Bowman",
"fullName": "Doug Bowman",
"affiliation": "Virginia Tech, USA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": true,
"isOpenAccess": true,
"issueNum": "05",
"pubDate": "2020-05-01 00:00:00",
"pubType": "trans",
"pages": "iv-v",
"year": "2020",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "trans/tg/2014/04/ttg2014040vi",
"title": "Message from the Paper Chairs and Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2014/04/ttg2014040vi/13rRUwI5Ug9",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2011/01/ttg2011010001",
"title": "Guest Editor's Introduction Special Section on the Virtual Reality Conference (VR)",
"doi": null,
"abstractUrl": "/journal/tg/2011/01/ttg2011010001/13rRUwIF6l4",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/11/08053887",
"title": "Message from the Editor-in-Chief and from the Associate Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2017/11/08053887/13rRUxBa56a",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/05/08676185",
"title": "Introducing the IEEE Virtual Reality 2019 Special Issue",
"doi": null,
"abstractUrl": "/journal/tg/2019/05/08676185/18NkgxdV8sM",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/05/09754285",
"title": "IEEE VR 2022 Introducing the Special Issue",
"doi": null,
"abstractUrl": "/journal/tg/2022/05/09754285/1CpcIar9LS8",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/05/09754286",
"title": "IEEE VR 2022 Message from the Journal Paper Chairs and Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2022/05/09754286/1Cpd7Bwusk8",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/11/08855105",
"title": "Message from the ISMAR 2019 Science and Technology Program Chairs and <italic>TVCG</italic> Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2019/11/08855105/1dNHma690d2",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/12/09254194",
"title": "Message from the ISMAR 2020 Science and Technology Program Chairs and <italic>TVCG</italic> Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2020/12/09254194/1oDXMHvn1aU",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/05/09405571",
"title": "Introducing the IEEE Virtual Reality 2021 Special Issue",
"doi": null,
"abstractUrl": "/journal/tg/2021/05/09405571/1sP18PmVuQU",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/11/09591492",
"title": "Message from the ISMAR 2021 Science and Technology Journal Program Chairs and TVCG Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2021/11/09591492/1y2FvGMxBuM",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09052077",
"articleId": "1iE6Nu1neNi",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09052630",
"articleId": "1iFLLHpsBfW",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNzZEAy6",
"title": "July/August",
"year": "2002",
"issueNum": "04",
"idPrefix": "cg",
"pubType": "magazine",
"volume": "22",
"label": "July/August",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUNvyanm",
"doi": "10.1109/MCG.2002.1016697",
"abstract": "In this paper we present a statistical learning algorithm for synthesizing new random instances of a sound texture given an example of such a texture as input. A large class of natural and artificial sounds such as rain, waterfall, traffic noises, people babble, machine noises, etc., can be regarded as sound textures--sound signals that are approximately stationary at some scale. Treating the input sound texture as a sample of a stochastic process, we construct a tree representing a hierarchical wavelet transform of the signal. From this tree, new random trees are generated by learning and sampling the conditional probabilities of the paths in the original tree. Transformation of these random trees back into signals results in new sound textures that closely resemble the sonic impression of the original sound source but without exactly repeating it. Applications of this method are abundant and include, for example, automatic generation of sound effects, creative musical and sonic manipulations, and virtual reality sonification. Examples are visually demonstrated in the paper and acoustically demonstrated in an accompanying web site.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this paper we present a statistical learning algorithm for synthesizing new random instances of a sound texture given an example of such a texture as input. A large class of natural and artificial sounds such as rain, waterfall, traffic noises, people babble, machine noises, etc., can be regarded as sound textures--sound signals that are approximately stationary at some scale. Treating the input sound texture as a sample of a stochastic process, we construct a tree representing a hierarchical wavelet transform of the signal. From this tree, new random trees are generated by learning and sampling the conditional probabilities of the paths in the original tree. Transformation of these random trees back into signals results in new sound textures that closely resemble the sonic impression of the original sound source but without exactly repeating it. Applications of this method are abundant and include, for example, automatic generation of sound effects, creative musical and sonic manipulations, and virtual reality sonification. Examples are visually demonstrated in the paper and acoustically demonstrated in an accompanying web site.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this paper we present a statistical learning algorithm for synthesizing new random instances of a sound texture given an example of such a texture as input. A large class of natural and artificial sounds such as rain, waterfall, traffic noises, people babble, machine noises, etc., can be regarded as sound textures--sound signals that are approximately stationary at some scale. Treating the input sound texture as a sample of a stochastic process, we construct a tree representing a hierarchical wavelet transform of the signal. From this tree, new random trees are generated by learning and sampling the conditional probabilities of the paths in the original tree. Transformation of these random trees back into signals results in new sound textures that closely resemble the sonic impression of the original sound source but without exactly repeating it. Applications of this method are abundant and include, for example, automatic generation of sound effects, creative musical and sonic manipulations, and virtual reality sonification. Examples are visually demonstrated in the paper and acoustically demonstrated in an accompanying web site.",
"title": "Synthesizing Sound Textures through Wavelet Tree Learning",
"normalizedTitle": "Synthesizing Sound Textures through Wavelet Tree Learning",
"fno": "mcg2002040038",
"hasPdf": true,
"idPrefix": "cg",
"keywords": [],
"authors": [
{
"givenName": "Shlomo",
"surname": "Dubnov",
"fullName": "Shlomo Dubnov",
"affiliation": "Ben-Gurion University",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Ziv",
"surname": "Bar-Joseph",
"fullName": "Ziv Bar-Joseph",
"affiliation": "Massachussets Institute of Technology",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Ran",
"surname": "El-Yaniv",
"fullName": "Ran El-Yaniv",
"affiliation": "Technion-Israel Institute of Technology",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Dani",
"surname": "Lischinski",
"fullName": "Dani Lischinski",
"affiliation": "Hebrew University of Jerusalem",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Michael",
"surname": "Werman",
"fullName": "Michael Werman",
"affiliation": "Hebrew University of Jerusalem",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": false,
"isOpenAccess": false,
"issueNum": "04",
"pubDate": "2002-07-01 00:00:00",
"pubType": "mags",
"pages": "38-48",
"year": "2002",
"issn": "0272-1716",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [],
"adjacentArticles": {
"previous": {
"fno": "mcg2002040028",
"articleId": "13rRUyogGCA",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "mcg2002040049",
"articleId": "13rRUwciPf5",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNANBZkr",
"title": "Jan.-Mar.",
"year": "2015",
"issueNum": "01",
"idPrefix": "mu",
"pubType": "magazine",
"volume": "22",
"label": "Jan.-Mar.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUwd9CIo",
"doi": "10.1109/MMUL.2015.14",
"abstract": "The audio feedback resulting from object interaction provides information about the material of the surface and about one's own motor behavior. With the current developments in interactive sonification, it's now possible to digitally change this audio feedback, making the use of interactive sonification a compelling approach to shape tactile surface interactions. Here, the authors present a prototype for a sonic interactive surface, capable of delivering surface tapping sounds in real time when triggered by users' taps on a real surface or on an imagined "virtual" surface. In this system, the delivered audio feedback can be varied so that the tapping sounds correspond to different applied strengths during tapping. The authors also propose a multidimensional measurement approach to evaluate user experiences of multimodal interactive systems. They evaluated their system by looking at the effect of the altered tapping sounds on emotional action-related responses, the users' interactions with the surface, and perceived surface hardness. Results show the influence of the sonification of tapping at all levels: emotional, behavioral, and perceptual. These results have implications on the design of interactive sonification displays and tangible auditory interfaces aiming to change perceived and subsequent motor behavior as well as perceived material properties.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The audio feedback resulting from object interaction provides information about the material of the surface and about one's own motor behavior. With the current developments in interactive sonification, it's now possible to digitally change this audio feedback, making the use of interactive sonification a compelling approach to shape tactile surface interactions. Here, the authors present a prototype for a sonic interactive surface, capable of delivering surface tapping sounds in real time when triggered by users' taps on a real surface or on an imagined "virtual" surface. In this system, the delivered audio feedback can be varied so that the tapping sounds correspond to different applied strengths during tapping. The authors also propose a multidimensional measurement approach to evaluate user experiences of multimodal interactive systems. They evaluated their system by looking at the effect of the altered tapping sounds on emotional action-related responses, the users' interactions with the surface, and perceived surface hardness. Results show the influence of the sonification of tapping at all levels: emotional, behavioral, and perceptual. These results have implications on the design of interactive sonification displays and tangible auditory interfaces aiming to change perceived and subsequent motor behavior as well as perceived material properties.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The audio feedback resulting from object interaction provides information about the material of the surface and about one's own motor behavior. With the current developments in interactive sonification, it's now possible to digitally change this audio feedback, making the use of interactive sonification a compelling approach to shape tactile surface interactions. Here, the authors present a prototype for a sonic interactive surface, capable of delivering surface tapping sounds in real time when triggered by users' taps on a real surface or on an imagined \"virtual\" surface. In this system, the delivered audio feedback can be varied so that the tapping sounds correspond to different applied strengths during tapping. The authors also propose a multidimensional measurement approach to evaluate user experiences of multimodal interactive systems. They evaluated their system by looking at the effect of the altered tapping sounds on emotional action-related responses, the users' interactions with the surface, and perceived surface hardness. Results show the influence of the sonification of tapping at all levels: emotional, behavioral, and perceptual. These results have implications on the design of interactive sonification displays and tangible auditory interfaces aiming to change perceived and subsequent motor behavior as well as perceived material properties.",
"title": "Sonification of Surface Tapping Changes Behavior, Surface Perception, and Emotion",
"normalizedTitle": "Sonification of Surface Tapping Changes Behavior, Surface Perception, and Emotion",
"fno": "mmu2015010048",
"hasPdf": true,
"idPrefix": "mu",
"keywords": [
"Audio Signal Processing",
"Behavioural Sciences Computing",
"Emotion Recognition",
"Interactive Systems",
"Interactive Surface Tapping Sonification",
"Surface Perception",
"Emotion",
"Audio Feedback",
"Compelling Approach",
"Tactile Surface Interactions",
"Motor Behavior",
"Sonic Interactive Surface",
"Surface Tapping Sound Delivery",
"Multidimensional Measurement Approach",
"Emotional Action Related Responses",
"Perceived Surface Hardness",
"Tangible Auditory Interfaces",
"Perceived Material Properties",
"Accelerometers",
"Interactive Systems",
"Real Time Systems",
"1 F Noise",
"Auditory System",
"Headphones",
"Sonification",
"Auditory Systems",
"Multimedia Communication",
"Virtual Reality",
"Emotion Recognition",
"Multimedia",
"Interactive Sonification",
"Multimodal Interfaces",
"Interaction Styles",
"Surface Interaction",
"Virtual Surface",
"User Experience",
"Multimodal Interactive System",
"Emotional Design"
],
"authors": [
{
"givenName": "Ana",
"surname": "Tajadura-Jiménez",
"fullName": "Ana Tajadura-Jiménez",
"affiliation": "UCLIC, University College London",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Nadia",
"surname": "Bianchi-Berthouze",
"fullName": "Nadia Bianchi-Berthouze",
"affiliation": "UCLIC, University College London",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Enrico",
"surname": "Furfaro",
"fullName": "Enrico Furfaro",
"affiliation": "WeLoveDigital",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Frédéric",
"surname": "Bevilacqua",
"fullName": "Frédéric Bevilacqua",
"affiliation": "STMS Lab IRCAM-CNRS-UPMC, Paris",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2015-01-01 00:00:00",
"pubType": "mags",
"pages": "48-57",
"year": "2015",
"issn": "1070-986X",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/sive/2014/5781/0/07006288",
"title": "Reproducible sonification for virtual navigation",
"doi": null,
"abstractUrl": "/proceedings-article/sive/2014/07006288/12OmNAtaS0G",
"parentPublication": {
"id": "proceedings/sive/2014/5781/0",
"title": "2014 IEEE VR Workshop: Sonic Interaction in Virtual Environments (SIVE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpc/2009/3998/0/05090035",
"title": "Sonification design guidelines to enhance program comprehension",
"doi": null,
"abstractUrl": "/proceedings-article/icpc/2009/05090035/12OmNCcbE84",
"parentPublication": {
"id": "proceedings/icpc/2009/3998/0",
"title": "2009 IEEE 17th International Conference on Program Comprehension (ICPC 2009)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/mu/2015/01/mmu2015010041",
"title": "Designing an Interactive Audio Interface for Climate Science",
"doi": null,
"abstractUrl": "/magazine/mu/2015/01/mmu2015010041/13rRUwdrdMQ",
"parentPublication": {
"id": "mags/mu",
"title": "IEEE MultiMedia",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/mu/2005/02/u2036",
"title": "HCI Design and Interactive Sonification for Fingers and Ears",
"doi": null,
"abstractUrl": "/magazine/mu/2005/02/u2036/13rRUxC0STl",
"parentPublication": {
"id": "mags/mu",
"title": "IEEE MultiMedia",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/mu/2005/02/u2026",
"title": "Interactive Sonification of Choropleth Maps",
"doi": null,
"abstractUrl": "/magazine/mu/2005/02/u2026/13rRUxjQyrR",
"parentPublication": {
"id": "mags/mu",
"title": "IEEE MultiMedia",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/mu/2015/01/mmu2015010058",
"title": "Interactive Sonification in Rowing: Acoustic Feedback for On-Water Training",
"doi": null,
"abstractUrl": "/magazine/mu/2015/01/mmu2015010058/13rRUxjyX13",
"parentPublication": {
"id": "mags/mu",
"title": "IEEE MultiMedia",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/mu/2005/02/u2053",
"title": "Movement Sonification: Effects on Perception and Action",
"doi": null,
"abstractUrl": "/magazine/mu/2005/02/u2053/13rRUytF46p",
"parentPublication": {
"id": "mags/mu",
"title": "IEEE MultiMedia",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/mu/2015/01/mmu2015010074",
"title": "Sonic Trampoline: How Audio Feedback Impacts the User's Experience of Jumping",
"doi": null,
"abstractUrl": "/magazine/mu/2015/01/mmu2015010074/13rRUytnsTK",
"parentPublication": {
"id": "mags/mu",
"title": "IEEE MultiMedia",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cw/2022/6814/0/681400a159",
"title": "Feasibility Study on Interactive Geometry Sonification",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2022/681400a159/1I6ROHRhemc",
"parentPublication": {
"id": "proceedings/cw/2022/6814/0",
"title": "2022 International Conference on Cyberworlds (CW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090531",
"title": "Immersive sonification of protein surface",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090531/1jIxzEw3bb2",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "mmu2015010041",
"articleId": "13rRUwdrdMQ",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "mmu2015010058",
"articleId": "13rRUxjyX13",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNAHW0IR",
"title": "April/June",
"year": "2005",
"issueNum": "02",
"idPrefix": "mu",
"pubType": "magazine",
"volume": "12",
"label": "April/June",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUxjQyrR",
"doi": "10.1109/MMUL.2005.28",
"abstract": "Interactive sonification systems can make georeferenced data accessible to people with vision impairments. The authors compare methods for using sound to encode georeferenced data patterns and for navigating maps.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Interactive sonification systems can make georeferenced data accessible to people with vision impairments. The authors compare methods for using sound to encode georeferenced data patterns and for navigating maps.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Interactive sonification systems can make georeferenced data accessible to people with vision impairments. The authors compare methods for using sound to encode georeferenced data patterns and for navigating maps.",
"title": "Interactive Sonification of Choropleth Maps",
"normalizedTitle": "Interactive Sonification of Choropleth Maps",
"fno": "u2026",
"hasPdf": true,
"idPrefix": "mu",
"keywords": [
"Auditory Non Speech Feedback",
"Evaluation",
"Interaction Style",
"Sound",
"User Interfaces",
"Universal Usability"
],
"authors": [
{
"givenName": "Haixia",
"surname": "Zhao",
"fullName": "Haixia Zhao",
"affiliation": "University of Maryland",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Benjamin K.",
"surname": "Smith",
"fullName": "Benjamin K. Smith",
"affiliation": "University of Maryland",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Kent",
"surname": "Norman",
"fullName": "Kent Norman",
"affiliation": "University of Maryland",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Catherine",
"surname": "Plaisant",
"fullName": "Catherine Plaisant",
"affiliation": "University of Maryland",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Ben",
"surname": "Shneiderman",
"fullName": "Ben Shneiderman",
"affiliation": "University of Maryland",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "02",
"pubDate": "2005-04-01 00:00:00",
"pubType": "mags",
"pages": "26-35",
"year": "2005",
"issn": "1070-986X",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/iv/2004/2177/0/21770865",
"title": "A Path Based Model for Sonification",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2004/21770865/12OmNyv7m5V",
"parentPublication": {
"id": "proceedings/iv/2004/2177/0",
"title": "Proceedings. Eighth International Conference on Information Visualisation, 2004. IV 2004.",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/mu/2015/01/mmu2015010048",
"title": "Sonification of Surface Tapping Changes Behavior, Surface Perception, and Emotion",
"doi": null,
"abstractUrl": "/magazine/mu/2015/01/mmu2015010048/13rRUwd9CIo",
"parentPublication": {
"id": "mags/mu",
"title": "IEEE MultiMedia",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/mu/2005/02/u2036",
"title": "HCI Design and Interactive Sonification for Fingers and Ears",
"doi": null,
"abstractUrl": "/magazine/mu/2005/02/u2036/13rRUxC0STl",
"parentPublication": {
"id": "mags/mu",
"title": "IEEE MultiMedia",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/mu/2015/01/mmu2015010058",
"title": "Interactive Sonification in Rowing: Acoustic Feedback for On-Water Training",
"doi": null,
"abstractUrl": "/magazine/mu/2015/01/mmu2015010058/13rRUxjyX13",
"parentPublication": {
"id": "mags/mu",
"title": "IEEE MultiMedia",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/11/08007327",
"title": "SonifEye: Sonification of Visual Information Using Physical Modeling Sound Synthesis",
"doi": null,
"abstractUrl": "/journal/tg/2017/11/08007327/13rRUyft7D7",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aivr/2021/3225/0/322500a251",
"title": "AIive: Interactive Visualization and Sonification of Neural Networks in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/aivr/2021/322500a251/1zxLxmIs3xm",
"parentPublication": {
"id": "proceedings/aivr/2021/3225/0",
"title": "2021 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": null,
"next": {
"fno": "u2036",
"articleId": "13rRUxC0STl",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNBh8gYt",
"title": "July/August",
"year": "1999",
"issueNum": "04",
"idPrefix": "cs",
"pubType": "magazine",
"volume": "1",
"label": "July/August",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUy08MzR",
"doi": "10.1109/5992.774840",
"abstract": "This article describes a collaborative project between researchers in the Mathematics and Computer Science Division at Argonne National Laboratory and the Computer Music Project of the University of Illinois at Urbana-Champaign. The project focuses on the use of sound for the exploration and analysis of complex data sets in scientific computing. The article addresses digital sound synthesis in the context of DIASS (Digital Instrument for Additive Sound Synthesis) and sound visualization in a virtual-reality environment by means of M4CAVE. It describes the procedures and preliminary results of some experiments in scientific sonification and sound visualization.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This article describes a collaborative project between researchers in the Mathematics and Computer Science Division at Argonne National Laboratory and the Computer Music Project of the University of Illinois at Urbana-Champaign. The project focuses on the use of sound for the exploration and analysis of complex data sets in scientific computing. The article addresses digital sound synthesis in the context of DIASS (Digital Instrument for Additive Sound Synthesis) and sound visualization in a virtual-reality environment by means of M4CAVE. It describes the procedures and preliminary results of some experiments in scientific sonification and sound visualization.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This article describes a collaborative project between researchers in the Mathematics and Computer Science Division at Argonne National Laboratory and the Computer Music Project of the University of Illinois at Urbana-Champaign. The project focuses on the use of sound for the exploration and analysis of complex data sets in scientific computing. The article addresses digital sound synthesis in the context of DIASS (Digital Instrument for Additive Sound Synthesis) and sound visualization in a virtual-reality environment by means of M4CAVE. It describes the procedures and preliminary results of some experiments in scientific sonification and sound visualization.",
"title": "Data Sonification and Sound Visualization",
"normalizedTitle": "Data Sonification and Sound Visualization",
"fno": "c4048",
"hasPdf": true,
"idPrefix": "cs",
"keywords": [],
"authors": [
{
"givenName": "Hans G.",
"surname": "Kaper",
"fullName": "Hans G. Kaper",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "Elizabeth",
"surname": "Wiebel",
"fullName": "Elizabeth Wiebel",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "Sever",
"surname": "Tipei",
"fullName": "Sever Tipei",
"affiliation": null,
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": false,
"isOpenAccess": false,
"issueNum": "04",
"pubDate": "1999-07-01 00:00:00",
"pubType": "mags",
"pages": "48-58",
"year": "1999",
"issn": "1521-9615",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [],
"adjacentArticles": {
"previous": {
"fno": "c4036",
"articleId": "13rRUx0xPwA",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "c4059",
"articleId": "13rRUxk89fb",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
|
{
"issue": {
"id": "1L8lujshfos",
"title": "Jan.-March",
"year": "2023",
"issueNum": "01",
"idPrefix": "ta",
"pubType": "journal",
"volume": "14",
"label": "Jan.-March",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1oIW8klCOiY",
"doi": "10.1109/TAFFC.2020.3038137",
"abstract": "Interpersonal touch is critical for social-emotional development and presents a powerful modality for communicating emotions. Virtual agents of the future could capitalize on touch to establish social bonds with humans and facilitate cooperation in virtual reality (VR). We studied whether the emotional expression of a virtual agent would affect the way humans touch the agent. Participants were asked to hold a pressure-sensing tube presented as the agent’s arm in VR. Upon seeing the agent’s emotional expression change, participants briefly squeezed the arm. The effect of emotional expressions on affective state was measured using self-reported valence and arousal as well as physiology-based indices. Onset, duration, and intensity of the squeeze were recorded to examine the haptic responses. Emotional expression of agents affected squeeze intensity and duration through changes in emotional perception and experience. Haptic responses may thus provide an implicit measure of persons’ experience towards their virtual companion.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Interpersonal touch is critical for social-emotional development and presents a powerful modality for communicating emotions. Virtual agents of the future could capitalize on touch to establish social bonds with humans and facilitate cooperation in virtual reality (VR). We studied whether the emotional expression of a virtual agent would affect the way humans touch the agent. Participants were asked to hold a pressure-sensing tube presented as the agent’s arm in VR. Upon seeing the agent’s emotional expression change, participants briefly squeezed the arm. The effect of emotional expressions on affective state was measured using self-reported valence and arousal as well as physiology-based indices. Onset, duration, and intensity of the squeeze were recorded to examine the haptic responses. Emotional expression of agents affected squeeze intensity and duration through changes in emotional perception and experience. Haptic responses may thus provide an implicit measure of persons’ experience towards their virtual companion.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Interpersonal touch is critical for social-emotional development and presents a powerful modality for communicating emotions. Virtual agents of the future could capitalize on touch to establish social bonds with humans and facilitate cooperation in virtual reality (VR). We studied whether the emotional expression of a virtual agent would affect the way humans touch the agent. Participants were asked to hold a pressure-sensing tube presented as the agent’s arm in VR. Upon seeing the agent’s emotional expression change, participants briefly squeezed the arm. The effect of emotional expressions on affective state was measured using self-reported valence and arousal as well as physiology-based indices. Onset, duration, and intensity of the squeeze were recorded to examine the haptic responses. Emotional expression of agents affected squeeze intensity and duration through changes in emotional perception and experience. Haptic responses may thus provide an implicit measure of persons’ experience towards their virtual companion.",
"title": "Touching Virtual Humans: Haptic Responses Reveal the Emotional Impact of Affective Agents",
"normalizedTitle": "Touching Virtual Humans: Haptic Responses Reveal the Emotional Impact of Affective Agents",
"fno": "09258960",
"hasPdf": true,
"idPrefix": "ta",
"keywords": [
"Emotion Recognition",
"Haptic Interfaces",
"Human Computer Interaction",
"Virtual Reality",
"Affective Agents",
"Affective State",
"Communicating Emotions",
"Emotional Expression Change",
"Emotional Impact",
"Emotional Perception",
"Haptic Responses",
"Interpersonal Touch",
"Social Bonds",
"Social Emotional Development",
"Squeeze Intensity",
"Virtual Agent",
"Virtual Companion",
"Virtual Humans",
"Virtual Reality",
"VR",
"Haptic Interfaces",
"Physiology",
"Virtual Reality",
"Mobile Handsets",
"Libraries",
"Games",
"Computer Science",
"Affective Interaction",
"Haptic Response",
"Virtual Reality",
"Physiology",
"Emotional Expression",
"Virtual Agent"
],
"authors": [
{
"givenName": "Imtiaj",
"surname": "Ahmed",
"fullName": "Imtiaj Ahmed",
"affiliation": "Department of Computer Science, University of Helsinki, Helsinki, Finland",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Ville J.",
"surname": "Harjunen",
"fullName": "Ville J. Harjunen",
"affiliation": "Department of Psychology and Logopedics, University of Helsinki, Helsinki, Finland",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Giulio",
"surname": "Jacucci",
"fullName": "Giulio Jacucci",
"affiliation": "Department of Computer Science, University of Helsinki, Helsinki, Finland",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Niklas",
"surname": "Ravaja",
"fullName": "Niklas Ravaja",
"affiliation": "Department of Psychology and Logopedics, University of Helsinki, Helsinki, Finland",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Tuukka",
"surname": "Ruotsalo",
"fullName": "Tuukka Ruotsalo",
"affiliation": "Department of Computer Science, University of Helsinki, Helsinki, Finland",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Michiel M.",
"surname": "Spapé",
"fullName": "Michiel M. Spapé",
"affiliation": "Department of Psychology and Logopedics, University of Helsinki, Helsinki, Finland",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": true,
"isOpenAccess": true,
"issueNum": "01",
"pubDate": "2023-01-01 00:00:00",
"pubType": "trans",
"pages": "331-342",
"year": "2023",
"issn": "1949-3045",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/ism/2010/4217/0/4217a228",
"title": "Bridging the Gap between Virtual and Real World by Bringing an Interpersonal Haptic Communication System in Second Life",
"doi": null,
"abstractUrl": "/proceedings-article/ism/2010/4217a228/12OmNB9t6wX",
"parentPublication": {
"id": "proceedings/ism/2010/4217/0",
"title": "2010 IEEE International Symposium on Multimedia",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/acii/2015/9953/0/07344656",
"title": "A warm touch of affect?",
"doi": null,
"abstractUrl": "/proceedings-article/acii/2015/07344656/12OmNBIWXCi",
"parentPublication": {
"id": "proceedings/acii/2015/9953/0",
"title": "2015 International Conference on Affective Computing and Intelligent Interaction (ACII)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sive/2015/1969/0/07361291",
"title": "Touching sounds: audio virtual surfaces",
"doi": null,
"abstractUrl": "/proceedings-article/sive/2015/07361291/12OmNBzAck6",
"parentPublication": {
"id": "proceedings/sive/2015/1969/0",
"title": "2015 IEEE 2nd VR Workshop on Sonic Interactions for Virtual Environments (SIVE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/acii/2017/0563/0/08273612",
"title": "Emotional responses of vibrotactile-thermal stimuli: Effects of constant-temperature thermal stimuli",
"doi": null,
"abstractUrl": "/proceedings-article/acii/2017/08273612/12OmNqMPfQu",
"parentPublication": {
"id": "proceedings/acii/2017/0563/0",
"title": "2017 Seventh International Conference on Affective Computing and Intelligent Interaction (ACII)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2009/3943/0/04811019",
"title": "Virtual Humans That Touch Back: Enhancing Nonverbal Communication with Virtual Humans through Bidirectional Touch",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2009/04811019/12OmNwMXnsz",
"parentPublication": {
"id": "proceedings/vr/2009/3943/0",
"title": "2009 IEEE Virtual Reality Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ta/2010/01/tta2010010060",
"title": "Empathic Touch by Relational Agents",
"doi": null,
"abstractUrl": "/journal/ta/2010/01/tta2010010060/13rRUwI5Ujg",
"parentPublication": {
"id": "trans/ta",
"title": "IEEE Transactions on Affective Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/11/08007246",
"title": "AR Feels “Softer” than VR: Haptic Perception of Stiffness in Augmented versus Virtual Reality",
"doi": null,
"abstractUrl": "/journal/tg/2017/11/08007246/13rRUwh80Hj",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ta/2017/01/07360126",
"title": "Physiological Responses to Affective Tele-Touch during Induced Emotional Stimuli",
"doi": null,
"abstractUrl": "/journal/ta/2017/01/07360126/13rRUxYINdD",
"parentPublication": {
"id": "trans/ta",
"title": "IEEE Transactions on Affective Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2018/7592/0/08699311",
"title": "Extended Workspace Using a Smartphone with a Depth Camera",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699311/19F1NSFj2sU",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2018/7592/0",
"title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2020/5608/0/09089573",
"title": "Effects of Interacting with a Crowd of Emotional Virtual Humans on Users’ Affective and Non-Verbal Behaviors",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2020/09089573/1jIxfPwklig",
"parentPublication": {
"id": "proceedings/vr/2020/5608/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09224141",
"articleId": "1nV4IxlEiEo",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09373917",
"articleId": "1rPsZrOrPgI",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1L8lJtxKRa0",
"name": "tta202301-09258960s1-taffc-3038137-mm.zip",
"location": "https://www.computer.org/csdl/api/v1/extra/tta202301-09258960s1-taffc-3038137-mm.zip",
"extension": "zip",
"size": "395 kB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
}
|
{
"issue": {
"id": "1HMOit1lSk8",
"title": "Dec.",
"year": "2022",
"issueNum": "12",
"idPrefix": "tg",
"pubType": "journal",
"volume": "28",
"label": "Dec.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1vyju4jl6AE",
"doi": "10.1109/TVCG.2021.3099290",
"abstract": "Virtual self-avatars have been increasingly used in Augmented Reality (AR) where one can see virtual content embedded into physical space. However, little is known about the perception of self-avatars in such a context. The possibility that their embodiment could be achieved in a similar way as in Virtual Reality opens the door to numerous applications in education, communication, entertainment, or the medical field. This article aims to review the literature covering the embodiment of virtual self-avatars in AR. Our goal is (i) to guide readers through the different options and challenges linked to the implementation of AR embodiment systems, (ii) to provide a better understanding of AR embodiment perception by classifying the existing knowledge, and (iii) to offer insight on future research topics and trends for AR and avatar research. To do so, we introduce a taxonomy of virtual embodiment experiences by defining a “body avatarization” continuum. The presented knowledge suggests that the sense of embodiment evolves in the same way in AR as in other settings, but this possibility has yet to be fully investigated. We suggest that, whilst it is yet to be well understood, the embodiment of avatars has a promising future in AR and conclude by discussing possible directions for research.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Virtual self-avatars have been increasingly used in Augmented Reality (AR) where one can see virtual content embedded into physical space. However, little is known about the perception of self-avatars in such a context. The possibility that their embodiment could be achieved in a similar way as in Virtual Reality opens the door to numerous applications in education, communication, entertainment, or the medical field. This article aims to review the literature covering the embodiment of virtual self-avatars in AR. Our goal is (i) to guide readers through the different options and challenges linked to the implementation of AR embodiment systems, (ii) to provide a better understanding of AR embodiment perception by classifying the existing knowledge, and (iii) to offer insight on future research topics and trends for AR and avatar research. To do so, we introduce a taxonomy of virtual embodiment experiences by defining a “body avatarization” continuum. The presented knowledge suggests that the sense of embodiment evolves in the same way in AR as in other settings, but this possibility has yet to be fully investigated. We suggest that, whilst it is yet to be well understood, the embodiment of avatars has a promising future in AR and conclude by discussing possible directions for research.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Virtual self-avatars have been increasingly used in Augmented Reality (AR) where one can see virtual content embedded into physical space. However, little is known about the perception of self-avatars in such a context. The possibility that their embodiment could be achieved in a similar way as in Virtual Reality opens the door to numerous applications in education, communication, entertainment, or the medical field. This article aims to review the literature covering the embodiment of virtual self-avatars in AR. Our goal is (i) to guide readers through the different options and challenges linked to the implementation of AR embodiment systems, (ii) to provide a better understanding of AR embodiment perception by classifying the existing knowledge, and (iii) to offer insight on future research topics and trends for AR and avatar research. To do so, we introduce a taxonomy of virtual embodiment experiences by defining a “body avatarization” continuum. The presented knowledge suggests that the sense of embodiment evolves in the same way in AR as in other settings, but this possibility has yet to be fully investigated. We suggest that, whilst it is yet to be well understood, the embodiment of avatars has a promising future in AR and conclude by discussing possible directions for research.",
"title": "Being an Avatar “for Real”: A Survey on Virtual Embodiment in Augmented Reality",
"normalizedTitle": "Being an Avatar “for Real”: A Survey on Virtual Embodiment in Augmented Reality",
"fno": "09495125",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Augmented Reality",
"Avatars",
"Human Factors",
"AR Embodiment Perception",
"AR Embodiment Systems",
"Augmented Reality",
"Body Avatarization Continuum",
"Entertainment",
"Medical Field",
"Physical Space",
"Virtual Content",
"Virtual Embodiment Experiences",
"Virtual Reality",
"Virtual Self Avatars",
"Avatars",
"Psychology",
"Visualization",
"Behavioral Sciences",
"Taxonomy",
"Augmented Reality",
"Virtual Environments",
"Augmented Reality",
"Avatar",
"Sense Of Embodiment",
"Psychology",
"Social And Behavioral Sciences"
],
"authors": [
{
"givenName": "Adélaïde",
"surname": "Genay",
"fullName": "Adélaïde Genay",
"affiliation": "Inria, Bordeaux, France",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Anatole",
"surname": "Lécuyer",
"fullName": "Anatole Lécuyer",
"affiliation": "Inria, Rennes, France",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Martin",
"surname": "Hachet",
"fullName": "Martin Hachet",
"affiliation": "Inria, Bordeaux, France",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "12",
"pubDate": "2022-12-01 00:00:00",
"pubType": "trans",
"pages": "5071-5090",
"year": "2022",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "trans/tg/2013/04/ttg2013040591",
"title": "An Evaluation of Self-Avatar Eye Movement for Virtual Embodiment",
"doi": null,
"abstractUrl": "/journal/tg/2013/04/ttg2013040591/13rRUyYBlgz",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/05/09714123",
"title": "The Impact of Embodiment and Avatar Sizing on Personal Space in Immersive Virtual Environments",
"doi": null,
"abstractUrl": "/journal/tg/2022/05/09714123/1B0Y0yXxNbG",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2022/9617/0/961700a350",
"title": "Exploring Presence, Avatar Embodiment, and Body Perception with a Holographic Augmented Reality Mirror",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2022/961700a350/1CJcn3q3J5K",
"parentPublication": {
"id": "proceedings/vr/2022/9617/0",
"title": "2022 IEEE on Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a730",
"title": "Third-Person Perspective Avatar Embodiment in Augmented Reality: Examining the Proteus Effect on Physical Performance",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a730/1CJffY1QgeI",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a772",
"title": "Embodiment of an Avatar with Unnatural Arm Movements",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a772/1J7W9fEjd6g",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a503",
"title": "Studying “Avatar Transitions” in Augmented Reality: Influence on Sense of Embodiment and Physiological Activity",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a503/1J7W9twFolO",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/05/10049676",
"title": "The Impact of Avatar and Environment Congruence on Plausibility, Embodiment, Presence, and the Proteus Effect in Virtual Reality",
"doi": null,
"abstractUrl": "/journal/tg/2023/05/10049676/1KYosbnM8q4",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/10109126",
"title": "Measuring Embodiment: Movement Complexity and the Impact of Personal Characteristics",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/10109126/1METe7DRIic",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/05/08998371",
"title": "The Impact of a Self-Avatar, Hand Collocation, and Hand Proximity on Embodiment and Stroop Interference",
"doi": null,
"abstractUrl": "/journal/tg/2020/05/08998371/1hrXiia6v9C",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090457",
"title": "Affective Embodiment: The effect of avatar appearance and posture representation on emotions in VR",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090457/1jIxjXwO4HS",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09495259",
"articleId": "1vyjtdJRfXO",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09497654",
"articleId": "1vzYfkJCG64",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNyr8Ysb",
"title": "July",
"year": "2012",
"issueNum": "07",
"idPrefix": "co",
"pubType": "magazine",
"volume": "45",
"label": "July",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUxYrbPM",
"doi": "10.1109/MC.2012.72",
"abstract": "An investigation of the technology and human factors that drive augmented reality research describes recent developments in handheld AR, concentrating on localization, tracking, interaction, and visualization, and offers several examples illustrating the vast potential and important applications of AR. A related video can be seen here: http://youtu.be/ol371rIyUFY. It shows several real-world examples illustrating the vast potential and important applications of augmented reality.",
"abstracts": [
{
"abstractType": "Regular",
"content": "An investigation of the technology and human factors that drive augmented reality research describes recent developments in handheld AR, concentrating on localization, tracking, interaction, and visualization, and offers several examples illustrating the vast potential and important applications of AR. A related video can be seen here: http://youtu.be/ol371rIyUFY. It shows several real-world examples illustrating the vast potential and important applications of augmented reality.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "An investigation of the technology and human factors that drive augmented reality research describes recent developments in handheld AR, concentrating on localization, tracking, interaction, and visualization, and offers several examples illustrating the vast potential and important applications of AR. A related video can be seen here: http://youtu.be/ol371rIyUFY. It shows several real-world examples illustrating the vast potential and important applications of augmented reality.",
"title": "Anywhere Interfaces Using Handheld Augmented Reality",
"normalizedTitle": "Anywhere Interfaces Using Handheld Augmented Reality",
"fno": "mco2012070026",
"hasPdf": true,
"idPrefix": "co",
"keywords": [
"Augmented Reality",
"Embodied Interaction",
"Localization",
"Tangible Interaction Tracking"
],
"authors": [
{
"givenName": "Michael",
"surname": "Gervautz",
"fullName": "Michael Gervautz",
"affiliation": "Qualcomm Research, Vienna",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Dieter",
"surname": "Schmalstieg",
"fullName": "Dieter Schmalstieg",
"affiliation": "Graz University of Technology, Austria",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "07",
"pubDate": "2012-07-01 00:00:00",
"pubType": "mags",
"pages": "26-31",
"year": "2012",
"issn": "0018-9162",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/ismar/2013/2869/0/06671841",
"title": "Markerless 3D gesture-based interaction for handheld Augmented Reality interfaces",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2013/06671841/12OmNAIvcZU",
"parentPublication": {
"id": "proceedings/ismar/2013/2869/0",
"title": "2013 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iswc/2003/2034/0/20340127",
"title": "First Steps Towards Handheld Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/iswc/2003/20340127/12OmNAlvHNr",
"parentPublication": {
"id": "proceedings/iswc/2003/2034/0",
"title": "Seventh IEEE International Symposium on Wearable Computers, 2003. Proceedings.",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isuvr/2010/4124/0/4124a005",
"title": "Simultaneous Localization and Mapping for Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/isuvr/2010/4124a005/12OmNvA1hcT",
"parentPublication": {
"id": "proceedings/isuvr/2010/4124/0",
"title": "International Symposium on Ubiquitous Virtual Reality",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/csse/2008/3336/2/3336d170",
"title": "Virtual Tactical Map with Tangible Augmented Reality Interface",
"doi": null,
"abstractUrl": "/proceedings-article/csse/2008/3336d170/12OmNx38vWd",
"parentPublication": {
"id": "proceedings/csse/2008/3336/6",
"title": "Computer Science and Software Engineering, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ism/2009/3890/0/3890a187",
"title": "Dual Face Interaction in Handheld Augmented Reality Environments",
"doi": null,
"abstractUrl": "/proceedings-article/ism/2009/3890a187/12OmNxGj9VX",
"parentPublication": {
"id": "proceedings/ism/2009/3890/0",
"title": "2009 11th IEEE International Symposium on Multimedia",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icalt/2010/4055/0/4055a599",
"title": "Tangible Cubes Used as the User Interface in an Augmented Reality Game for Edutainment",
"doi": null,
"abstractUrl": "/proceedings-article/icalt/2010/4055a599/12OmNy50gg0",
"parentPublication": {
"id": "proceedings/icalt/2010/4055/0",
"title": "Advanced Learning Technologies, IEEE International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2013/4795/0/06549411",
"title": "Early steps towards understanding text legibility in handheld augmented reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2013/06549411/12OmNy6HQV1",
"parentPublication": {
"id": "proceedings/vr/2013/4795/0",
"title": "2013 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv-vis/2008/3271/0/3271a093",
"title": "A Geographic Surface Browsing Tool Using Map-Based Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/iv-vis/2008/3271a093/12OmNzayNjF",
"parentPublication": {
"id": "proceedings/iv-vis/2008/3271/0",
"title": "Visualisation, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a931",
"title": "From Lab to Reality: Optimization of Industrial Augmented Reality Interfaces",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a931/1J7Wo6Bhxa8",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090418",
"title": "Spatial Referencing for Anywhere, Anytime Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090418/1jIxkjif74A",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "mco2012070024",
"articleId": "13rRUyZaxtP",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "mco2012070032",
"articleId": "13rRUyoyhJq",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNAmmuQm",
"title": "July",
"year": "2016",
"issueNum": "07",
"idPrefix": "tp",
"pubType": "journal",
"volume": "38",
"label": "July",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUNvPLaW",
"doi": "10.1109/TPAMI.2015.2491940",
"abstract": "Pipelines to recognize 3D objects despite clutter and occlusions usually end up with a final verification stage whereby recognition hypotheses are validated or dismissed based on how well they explain sensor measurements. Unlike previous work, we propose a Global Hypothesis Verification (GHV) approach which regards all hypotheses jointly so as to account for mutual interactions. GHV provides a principled framework to tackle the complexity of our visual world by leveraging on a plurality of recognition paradigms and cues. Accordingly, we present a 3D object recognition pipeline deploying both global and local 3D features as well as shape and color. Thereby, and facilitated by the robustness of the verification process, diverse object hypotheses can be gathered and weak hypotheses need not be suppressed too early to trade sensitivity for specificity. Experiments demonstrate the effectiveness of our proposal, which significantly improves over the state-of-art and attains ideal performance (no false negatives, no false positives) on three out of the six most relevant and challenging benchmark datasets.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Pipelines to recognize 3D objects despite clutter and occlusions usually end up with a final verification stage whereby recognition hypotheses are validated or dismissed based on how well they explain sensor measurements. Unlike previous work, we propose a Global Hypothesis Verification (GHV) approach which regards all hypotheses jointly so as to account for mutual interactions. GHV provides a principled framework to tackle the complexity of our visual world by leveraging on a plurality of recognition paradigms and cues. Accordingly, we present a 3D object recognition pipeline deploying both global and local 3D features as well as shape and color. Thereby, and facilitated by the robustness of the verification process, diverse object hypotheses can be gathered and weak hypotheses need not be suppressed too early to trade sensitivity for specificity. Experiments demonstrate the effectiveness of our proposal, which significantly improves over the state-of-art and attains ideal performance (no false negatives, no false positives) on three out of the six most relevant and challenging benchmark datasets.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Pipelines to recognize 3D objects despite clutter and occlusions usually end up with a final verification stage whereby recognition hypotheses are validated or dismissed based on how well they explain sensor measurements. Unlike previous work, we propose a Global Hypothesis Verification (GHV) approach which regards all hypotheses jointly so as to account for mutual interactions. GHV provides a principled framework to tackle the complexity of our visual world by leveraging on a plurality of recognition paradigms and cues. Accordingly, we present a 3D object recognition pipeline deploying both global and local 3D features as well as shape and color. Thereby, and facilitated by the robustness of the verification process, diverse object hypotheses can be gathered and weak hypotheses need not be suppressed too early to trade sensitivity for specificity. Experiments demonstrate the effectiveness of our proposal, which significantly improves over the state-of-art and attains ideal performance (no false negatives, no false positives) on three out of the six most relevant and challenging benchmark datasets.",
"title": "A Global Hypothesis Verification Framework for 3D Object Recognition in Clutter",
"normalizedTitle": "A Global Hypothesis Verification Framework for 3D Object Recognition in Clutter",
"fno": "07299676",
"hasPdf": true,
"idPrefix": "tp",
"keywords": [
"Three Dimensional Displays",
"Pipelines",
"Computational Modeling",
"Solid Modeling",
"Object Recognition",
"Iterative Closest Point Algorithm",
"Clutter",
"3 D Object Recognition",
"Hypothesis Verification",
"Scene Understanding",
"3 D Object Recognition",
"Hypothesis Verification",
"Correspondence Grouping"
],
"authors": [
{
"givenName": "Aitor",
"surname": "Aldoma",
"fullName": "Aitor Aldoma",
"affiliation": ", Vision4Robotics group (ACIN—Technical University of Vienna), Wien, Austria",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Federico",
"surname": "Tombari",
"fullName": "Federico Tombari",
"affiliation": ", CVLAB Group (DISI—University of Bologna), Bologna, Italy",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Luigi Di",
"surname": "Stefano",
"fullName": "Luigi Di Stefano",
"affiliation": ", CVLAB Group (DISI—University of Bologna), Bologna, Italy",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Markus",
"surname": "Vincze",
"fullName": "Markus Vincze",
"affiliation": ", Vision4Robotics group (ACIN—Technical University of Vienna), Wien, Austria",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "07",
"pubDate": "2016-07-01 00:00:00",
"pubType": "trans",
"pages": "1383-1396",
"year": "2016",
"issn": "0162-8828",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/icfhr/2014/4335/0/06981069",
"title": "Mathematical Symbol Hypothesis Recognition with Rejection Option",
"doi": null,
"abstractUrl": "/proceedings-article/icfhr/2014/06981069/12OmNAolGZt",
"parentPublication": {
"id": "proceedings/icfhr/2014/4335/0",
"title": "2014 14th International Conference on Frontiers in Handwriting Recognition (ICFHR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2008/2174/0/04761696",
"title": "3D face recognition using the Surface Interpenetration Measure: A comparative evaluation on the FRGC database",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2008/04761696/12OmNC4eSnY",
"parentPublication": {
"id": "proceedings/icpr/2008/2174/0",
"title": "ICPR 2008 19th International Conference on Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2011/0063/0/06130507",
"title": "3D Twins and Expression Challenge",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2011/06130507/12OmNqBtiI7",
"parentPublication": {
"id": "proceedings/iccvw/2011/0063/0",
"title": "2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2012/4711/0/4711a350",
"title": "Recognizing Occluded 3D Faces Using an Efficient ICP Variant",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2012/4711a350/12OmNqHqSAK",
"parentPublication": {
"id": "proceedings/icme/2012/4711/0",
"title": "2012 IEEE International Conference on Multimedia and Expo",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/1992/2925/0/00202141",
"title": "Parallel hypothesis verification",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/1992/00202141/12OmNro0IgJ",
"parentPublication": {
"id": "proceedings/icpr/1992/2925/0",
"title": "11th IAPR International Conference on Pattern Recognition. Vol. IV. Conference D: Architectures for Vision and Pattern Recognition,",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2008/2570/0/04607381",
"title": "Toward a region-based 3D face recognition approach",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2008/04607381/12OmNxtOO0B",
"parentPublication": {
"id": "proceedings/icme/2008/2570/0",
"title": "2008 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2013/5053/0/06474992",
"title": "3D free form object recognition using rotational projection statistics",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2013/06474992/12OmNxwncFZ",
"parentPublication": {
"id": "proceedings/wacv/2013/5053/0",
"title": "Applications of Computer Vision, IEEE Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2017/0457/0/0457a115",
"title": "Global Hypothesis Generation for 6D Object Pose Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2017/0457a115/12OmNyOq4RS",
"parentPublication": {
"id": "proceedings/cvpr/2017/0457/0",
"title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/11/08007238",
"title": "Looking Beyond the Simple Scenarios: Combining Learners and Optimizers in 3D Temporal Tracking",
"doi": null,
"abstractUrl": "/journal/tg/2017/11/08007238/13rRUwInvl5",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2007/04/i0718",
"title": "Human Ear Recognition in 3D",
"doi": null,
"abstractUrl": "/journal/tp/2007/04/i0718/13rRUxZRbpa",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "07297868",
"articleId": "13rRUwInvgr",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07286852",
"articleId": "13rRUxjQyqj",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNyeWdCU",
"title": "May",
"year": "1999",
"issueNum": "05",
"idPrefix": "tp",
"pubType": "journal",
"volume": "21",
"label": "May",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUx0gevS",
"doi": "10.1109/34.765655",
"abstract": "Abstract—We present a 3D shape-based object recognition system for simultaneous recognition of multiple objects in scenes containing clutter and occlusion. Recognition is based on matching surfaces by matching points using the spin image representation. The spin image is a data level shape descriptor that is used to match surfaces represented as surface meshes. We present a compression scheme for spin images that results in efficient multiple object recognition which we verify with results showing the simultaneous recognition of multiple objects from a library of 20 models. Furthermore, we demonstrate the robust performance of recognition in the presence of clutter and occlusion through analysis of recognition trials on 100 scenes.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Abstract—We present a 3D shape-based object recognition system for simultaneous recognition of multiple objects in scenes containing clutter and occlusion. Recognition is based on matching surfaces by matching points using the spin image representation. The spin image is a data level shape descriptor that is used to match surfaces represented as surface meshes. We present a compression scheme for spin images that results in efficient multiple object recognition which we verify with results showing the simultaneous recognition of multiple objects from a library of 20 models. Furthermore, we demonstrate the robust performance of recognition in the presence of clutter and occlusion through analysis of recognition trials on 100 scenes.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Abstract—We present a 3D shape-based object recognition system for simultaneous recognition of multiple objects in scenes containing clutter and occlusion. Recognition is based on matching surfaces by matching points using the spin image representation. The spin image is a data level shape descriptor that is used to match surfaces represented as surface meshes. We present a compression scheme for spin images that results in efficient multiple object recognition which we verify with results showing the simultaneous recognition of multiple objects from a library of 20 models. Furthermore, we demonstrate the robust performance of recognition in the presence of clutter and occlusion through analysis of recognition trials on 100 scenes.",
"title": "Using Spin Images for Efficient Object Recognition in Cluttered 3D Scenes",
"normalizedTitle": "Using Spin Images for Efficient Object Recognition in Cluttered 3D Scenes",
"fno": "i0433",
"hasPdf": true,
"idPrefix": "tp",
"keywords": [
"3 D Object Recognition",
"Surface Matching",
"Spin Image",
"Clutter",
"Occlusion",
"Oriented Point",
"Surface Mesh",
"Point Correspondence"
],
"authors": [
{
"givenName": "Andrew E.",
"surname": "Johnson",
"fullName": "Andrew E. Johnson",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "Martial",
"surname": "Hebert",
"fullName": "Martial Hebert",
"affiliation": null,
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": false,
"isOpenAccess": false,
"issueNum": "05",
"pubDate": "1999-05-01 00:00:00",
"pubType": "trans",
"pages": "433-449",
"year": "1999",
"issn": "0162-8828",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [],
"adjacentArticles": {
"previous": {
"fno": "i0422",
"articleId": "13rRUwhHcKb",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "i0450",
"articleId": "13rRUxNmPES",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNBKEyoy",
"title": "March",
"year": "2016",
"issueNum": "03",
"idPrefix": "tg",
"pubType": "journal",
"volume": "22",
"label": "March",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUxly8XI",
"doi": "10.1109/TVCG.2015.2450934",
"abstract": "Shader lamps can augment physical objects with projected virtual replications using a camera-projector system, provided that the physical and virtual object are well registered to each other. Precise registration and tracking has been a cumbersome and intrusive process in the past. In this paper, we present a new method for tracking complex-shaped physical objects interactively. In contrast to previous approaches our system is mobile and makes solely use of the projection of the virtual replication to track the physical object and “stick” the projection to it. Our method consists of two stages, a fast pose initialization based on structured light patterns and a non-intrusive frame-by-frame tracking based on features detected in the projection. During the tracking phase, a radiometrically corrected virtual camera view based on the current pose prediction is rendered and compared to the captured image. Matched features are triangulated providing a sparse set of surface points that is robustly aligned to the virtual model. The alignment transformation serves as an input for the new pose prediction. Detailed experiments including the evaluation of the overlay accuracy show that our approach can accurately and robustly track complex objects at interactive rates.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Shader lamps can augment physical objects with projected virtual replications using a camera-projector system, provided that the physical and virtual object are well registered to each other. Precise registration and tracking has been a cumbersome and intrusive process in the past. In this paper, we present a new method for tracking complex-shaped physical objects interactively. In contrast to previous approaches our system is mobile and makes solely use of the projection of the virtual replication to track the physical object and “stick” the projection to it. Our method consists of two stages, a fast pose initialization based on structured light patterns and a non-intrusive frame-by-frame tracking based on features detected in the projection. During the tracking phase, a radiometrically corrected virtual camera view based on the current pose prediction is rendered and compared to the captured image. Matched features are triangulated providing a sparse set of surface points that is robustly aligned to the virtual model. The alignment transformation serves as an input for the new pose prediction. Detailed experiments including the evaluation of the overlay accuracy show that our approach can accurately and robustly track complex objects at interactive rates.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Shader lamps can augment physical objects with projected virtual replications using a camera-projector system, provided that the physical and virtual object are well registered to each other. Precise registration and tracking has been a cumbersome and intrusive process in the past. In this paper, we present a new method for tracking complex-shaped physical objects interactively. In contrast to previous approaches our system is mobile and makes solely use of the projection of the virtual replication to track the physical object and “stick” the projection to it. Our method consists of two stages, a fast pose initialization based on structured light patterns and a non-intrusive frame-by-frame tracking based on features detected in the projection. During the tracking phase, a radiometrically corrected virtual camera view based on the current pose prediction is rendered and compared to the captured image. Matched features are triangulated providing a sparse set of surface points that is robustly aligned to the virtual model. The alignment transformation serves as an input for the new pose prediction. Detailed experiments including the evaluation of the overlay accuracy show that our approach can accurately and robustly track complex objects at interactive rates.",
"title": "Sticky Projections-A Model-Based Approach to Interactive Shader Lamps Tracking",
"normalizedTitle": "Sticky Projections-A Model-Based Approach to Interactive Shader Lamps Tracking",
"fno": "07138633",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Cameras",
"Three Dimensional Displays",
"Tracking",
"Iterative Closest Point Algorithm",
"Feature Extraction",
"Radiometry",
"Calibration",
"Computer Vision",
"Augmented Reality",
"Computer Vision"
],
"authors": [
{
"givenName": "Christoph",
"surname": "Resch",
"fullName": "Christoph Resch",
"affiliation": "EXTEND3D GmbH, Munich, Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Peter",
"surname": "Keitler",
"fullName": "Peter Keitler",
"affiliation": "EXTEND3D GmbH, Munich, Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Gudrun",
"surname": "Klinker",
"fullName": "Gudrun Klinker",
"affiliation": ", Technische Universität München, Munich, Germany",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "03",
"pubDate": "2016-03-01 00:00:00",
"pubType": "trans",
"pages": "1291-1301",
"year": "2016",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/icpr/2014/5209/0/5209c263",
"title": "Robust Real-Time Extreme Head Pose Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2014/5209c263/12OmNBBQZpN",
"parentPublication": {
"id": "proceedings/icpr/2014/5209/0",
"title": "2014 22nd International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2014/5209/0/06977431",
"title": "Hybrid On-Line 3D Face and Facial Actions Tracking in RGBD Video Sequences",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2014/06977431/12OmNCwlafG",
"parentPublication": {
"id": "proceedings/icpr/2014/5209/0",
"title": "2014 22nd International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2013/2869/0/06671777",
"title": "Real-time RGB-D camera relocalization",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2013/06671777/12OmNqEAT3B",
"parentPublication": {
"id": "proceedings/ismar/2013/2869/0",
"title": "2013 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2009/5390/0/05336503",
"title": "Animatronic Shader Lamps Avatars",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2009/05336503/12OmNqHItD7",
"parentPublication": {
"id": "proceedings/ismar/2009/5390/0",
"title": "2009 8th IEEE International Symposium on Mixed and Augmented Reality",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2014/6184/0/06948421",
"title": "Sticky projections — A new approach to interactive shader lamp tracking",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2014/06948421/12OmNwkzupV",
"parentPublication": {
"id": "proceedings/ismar/2014/6184/0",
"title": "2014 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/svr/2013/5001/0/06655785",
"title": "A Robust Real-Time Face Tracking Using Head Pose Estimation for a Markerless AR System",
"doi": null,
"abstractUrl": "/proceedings-article/svr/2013/06655785/12OmNxvwoYX",
"parentPublication": {
"id": "proceedings/svr/2013/5001/0",
"title": "2013 XV Symposium on Virtual and Augmented Reality (SVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2015/7660/0/7660a148",
"title": "[POSTER] Rubix: Dynamic Spatial Augmented Reality by Extraction of Plane Regions with a RGB-D Camera",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2015/7660a148/12OmNyKJicb",
"parentPublication": {
"id": "proceedings/ismar/2015/7660/0",
"title": "2015 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2012/2216/0/06460968",
"title": "Plane based multi camera calibration under unknown correspondence using ICP-like approach",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2012/06460968/12OmNz5JBUr",
"parentPublication": {
"id": "proceedings/icpr/2012/2216/0",
"title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isar/2001/1375/0/13750207",
"title": "Dynamic Shader Lamps: Painting on Movable Objects",
"doi": null,
"abstractUrl": "/proceedings-article/isar/2001/13750207/12OmNzwpU8A",
"parentPublication": {
"id": "proceedings/isar/2001/1375/0",
"title": "Proceedings IEEE and ACM International Symposium on Augmented Reality",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/11/08794641",
"title": "Projection Distortion-based Object Tracking in Shader Lamp Scenarios",
"doi": null,
"abstractUrl": "/journal/tg/2019/11/08794641/1cPXBdjp9yo",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "07138635",
"articleId": "13rRUB7a1fU",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07321831",
"articleId": "13rRUyYBlgC",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "17ShDTXWRTm",
"name": "ttg201603-07138633s1.zip",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg201603-07138633s1.zip",
"extension": "zip",
"size": "79 MB",
"__typename": "WebExtraType"
},
{
"id": "17ShDTXWRTl",
"name": "ttg201603-07138633s2.txt",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg201603-07138633s2.txt",
"extension": "txt",
"size": "36 B",
"__typename": "WebExtraType"
},
{
"id": "17ShDTXWRTk",
"name": "ttg201603-07138633s1.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg201603-07138633s1.mp4",
"extension": "mp4",
"size": "82.5 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNzV70s0",
"title": "May",
"year": "2015",
"issueNum": "05",
"idPrefix": "tg",
"pubType": "journal",
"volume": "21",
"label": "May",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUwInvyC",
"doi": "10.1109/TVCG.2014.2355207",
"abstract": "We present a novel framework for jointly tracking a camera in 3D and reconstructing the 3D model of an observed object. Due to the region based approach, our formulation can handle untextured objects, partial occlusions, motion blur, dynamic backgrounds and imperfect lighting. Our formulation also allows for a very efficient implementation which achieves real-time performance on a mobile phone, by running the pose estimation and the shape optimisation in parallel. We use a level set based pose estimation but completely avoid the, typically required, explicit computation of a global distance. This leads to tracking rates of more than 100 Hz on a desktop PC and 30 Hz on a mobile phone. Further, we incorporate additional orientation information from the phone’s inertial sensor which helps us resolve the tracking ambiguities inherent to region based formulations. The reconstruction step first probabilistically integrates 2D image statistics from selected keyframes into a 3D volume, and then imposes coherency and compactness using a total variational regularisation term. The global optimum of the overall energy function is found using a continuous max-flow algorithm and we show that, similar to tracking, the integration of per voxel posteriors instead of likelihoods improves the precision and accuracy of the reconstruction.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We present a novel framework for jointly tracking a camera in 3D and reconstructing the 3D model of an observed object. Due to the region based approach, our formulation can handle untextured objects, partial occlusions, motion blur, dynamic backgrounds and imperfect lighting. Our formulation also allows for a very efficient implementation which achieves real-time performance on a mobile phone, by running the pose estimation and the shape optimisation in parallel. We use a level set based pose estimation but completely avoid the, typically required, explicit computation of a global distance. This leads to tracking rates of more than 100 Hz on a desktop PC and 30 Hz on a mobile phone. Further, we incorporate additional orientation information from the phone’s inertial sensor which helps us resolve the tracking ambiguities inherent to region based formulations. The reconstruction step first probabilistically integrates 2D image statistics from selected keyframes into a 3D volume, and then imposes coherency and compactness using a total variational regularisation term. The global optimum of the overall energy function is found using a continuous max-flow algorithm and we show that, similar to tracking, the integration of per voxel posteriors instead of likelihoods improves the precision and accuracy of the reconstruction.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We present a novel framework for jointly tracking a camera in 3D and reconstructing the 3D model of an observed object. Due to the region based approach, our formulation can handle untextured objects, partial occlusions, motion blur, dynamic backgrounds and imperfect lighting. Our formulation also allows for a very efficient implementation which achieves real-time performance on a mobile phone, by running the pose estimation and the shape optimisation in parallel. We use a level set based pose estimation but completely avoid the, typically required, explicit computation of a global distance. This leads to tracking rates of more than 100 Hz on a desktop PC and 30 Hz on a mobile phone. Further, we incorporate additional orientation information from the phone’s inertial sensor which helps us resolve the tracking ambiguities inherent to region based formulations. The reconstruction step first probabilistically integrates 2D image statistics from selected keyframes into a 3D volume, and then imposes coherency and compactness using a total variational regularisation term. The global optimum of the overall energy function is found using a continuous max-flow algorithm and we show that, similar to tracking, the integration of per voxel posteriors instead of likelihoods improves the precision and accuracy of the reconstruction.",
"title": "Real-Time 3D Tracking and Reconstruction on Mobile Phones",
"normalizedTitle": "Real-Time 3D Tracking and Reconstruction on Mobile Phones",
"fno": "06892950",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Three Dimensional Displays",
"Shape",
"Image Reconstruction",
"Cameras",
"Optimization",
"Mobile Handsets",
"Rendering Computer Graphics",
"Mobile Phone",
"3 D Tracking",
"3 D Reconstruction",
"Augmented Reality"
],
"authors": [
{
"givenName": "Victor Adrian",
"surname": "Prisacariu",
"fullName": "Victor Adrian Prisacariu",
"affiliation": "Department of Engineering Science, University of Oxford, Oxford OX1 3PJ, Oxfordshire, United Kingdom",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Olaf",
"surname": "Kahler",
"fullName": "Olaf Kahler",
"affiliation": "Department of Engineering Science, University of Oxford, Oxford OX1 3PJ, Oxfordshire, United Kingdom",
"__typename": "ArticleAuthorType"
},
{
"givenName": "David W.",
"surname": "Murray",
"fullName": "David W. Murray",
"affiliation": "Department of Engineering Science, University of Oxford, Oxford OX1 3PJ, Oxfordshire, United Kingdom",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Ian D.",
"surname": "Reid",
"fullName": "Ian D. Reid",
"affiliation": "Department of Computer Science, University of Adelaide, Adelaide, South Australia, Australia",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "05",
"pubDate": "2015-05-01 00:00:00",
"pubType": "trans",
"pages": "557-570",
"year": "2015",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/ismar/2011/2183/0/06162909",
"title": "Interactive annotation on mobile phones for real and virtual space registration",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2011/06162909/12OmNAIdBPt",
"parentPublication": {
"id": "proceedings/ismar/2011/2183/0",
"title": "2011 10th IEEE International Symposium on Mixed and Augmented Reality",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ecbs/2010/4005/0/4005a403",
"title": "Visual Tracking Based on 3D Probabilistic Reconstruction",
"doi": null,
"abstractUrl": "/proceedings-article/ecbs/2010/4005a403/12OmNCbCrT7",
"parentPublication": {
"id": "proceedings/ecbs/2010/4005/0",
"title": "Engineering of Computer-Based Systems, IEEE International Conference on the",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2014/4761/0/06890240",
"title": "Efficient pose tracking on mobile phones with 3D points grouping",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2014/06890240/12OmNs59JGX",
"parentPublication": {
"id": "proceedings/icme/2014/4761/0",
"title": "2014 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2013/2869/0/06671768",
"title": "Simultaneous 3D tracking and reconstruction on a mobile phone",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2013/06671768/12OmNxdVgZ2",
"parentPublication": {
"id": "proceedings/ismar/2013/2869/0",
"title": "2013 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2012/4711/0/4711a711",
"title": "On-line Object Reconstruction and Tracking for 3D Interaction",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2012/4711a711/12OmNyGbIiN",
"parentPublication": {
"id": "proceedings/icme/2012/4711/0",
"title": "2012 IEEE International Conference on Multimedia and Expo",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2015/11/07165662",
"title": "MobileFusion: Real-Time Volumetric Surface Reconstruction and Dense Tracking on Mobile Phones",
"doi": null,
"abstractUrl": "/journal/tg/2015/11/07165662/13rRUwInv4q",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200k0881",
"title": "Common Objects in 3D: Large-Scale Learning and Evaluation of Real-life 3D Category Reconstruction",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200k0881/1BmKIhRemJ2",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800b259",
"title": "Through the Looking Glass: Neural 3D Reconstruction of Transparent Shapes",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800b259/1m3nkX25xyE",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/12/09201064",
"title": "Mobile3DRecon: Real-time Monocular 3D Reconstruction on a Mobile Phone",
"doi": null,
"abstractUrl": "/journal/tg/2020/12/09201064/1niUpdweh2g",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/08/09314030",
"title": "3D Curve Creation on and Around Physical Objects With Mobile AR",
"doi": null,
"abstractUrl": "/journal/tg/2022/08/09314030/1q8Ufya8xj2",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "07067526",
"articleId": "13rRUwwaKta",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "06912003",
"articleId": "13rRUwInvsU",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
|
{
"issue": {
"id": "1AlHzMLlSUg",
"title": null,
"year": "2022",
"issueNum": "01",
"idPrefix": "qe",
"pubType": "journal",
"volume": "3",
"label": null,
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1IAFLDGVVVm",
"doi": "10.1109/TQE.2022.3224686",
"abstract": "The level of quantum noise in measurements is bounded from below by the Heisenberg uncertainty principle, but it can be unequally distributed between two noncommuting observables: it can be “squeezed.” Since 2019, all gravitational-wave observatories have been using squeezed light for increasing the astronomical reach. Squeezed laser light is efficiently produced by degenerate parametric down-conversion in a nonlinear crystal located inside an optical resonator. A spontaneously generated initial pair of indistinguishable photons is amplified to a squeezed vacuum state. Overlapped with bright coherent light, the photo-electric measurement shows a sub-Poissonian photon statistics. Squeezed states have ample applications in nonlocal quantum sensing, device-independent quantum key distribution, and quantum computing. Here, we present our continuous-wave 1550-nm “squeeze laser” with a footprint of 80 × 80 cm. The well-defined output beam has an interference contrast of <inline-formula><tex-math notation=\"LaTeX\">Z_$\\gtrsim 99\\%$_Z</tex-math></inline-formula> with an overlapped 10-mW beam being in an almost perfect TEM00 mode. The interference result shows 13-dB squeezing of the photon shot noise in balanced detection.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The level of quantum noise in measurements is bounded from below by the Heisenberg uncertainty principle, but it can be unequally distributed between two noncommuting observables: it can be “squeezed.” Since 2019, all gravitational-wave observatories have been using squeezed light for increasing the astronomical reach. Squeezed laser light is efficiently produced by degenerate parametric down-conversion in a nonlinear crystal located inside an optical resonator. A spontaneously generated initial pair of indistinguishable photons is amplified to a squeezed vacuum state. Overlapped with bright coherent light, the photo-electric measurement shows a sub-Poissonian photon statistics. Squeezed states have ample applications in nonlocal quantum sensing, device-independent quantum key distribution, and quantum computing. Here, we present our continuous-wave 1550-nm “squeeze laser” with a footprint of 80 × 80 cm. The well-defined output beam has an interference contrast of <inline-formula><tex-math notation=\"LaTeX\">$\\gtrsim 99\\%$</tex-math></inline-formula> with an overlapped 10-mW beam being in an almost perfect TEM00 mode. The interference result shows 13-dB squeezing of the photon shot noise in balanced detection.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The level of quantum noise in measurements is bounded from below by the Heisenberg uncertainty principle, but it can be unequally distributed between two noncommuting observables: it can be “squeezed.” Since 2019, all gravitational-wave observatories have been using squeezed light for increasing the astronomical reach. Squeezed laser light is efficiently produced by degenerate parametric down-conversion in a nonlinear crystal located inside an optical resonator. A spontaneously generated initial pair of indistinguishable photons is amplified to a squeezed vacuum state. Overlapped with bright coherent light, the photo-electric measurement shows a sub-Poissonian photon statistics. Squeezed states have ample applications in nonlocal quantum sensing, device-independent quantum key distribution, and quantum computing. Here, we present our continuous-wave 1550-nm “squeeze laser” with a footprint of 80 × 80 cm. The well-defined output beam has an interference contrast of - with an overlapped 10-mW beam being in an almost perfect TEM00 mode. The interference result shows 13-dB squeezing of the photon shot noise in balanced detection.",
"title": "The “Squeeze Laser”",
"normalizedTitle": "The “Squeeze Laser”",
"fno": "09964063",
"hasPdf": true,
"idPrefix": "qe",
"keywords": [
"Indeterminancy",
"Laser Beams",
"Laser Cavity Resonators",
"Laser Modes",
"Light Coherence",
"Optical Resonators",
"Optical Squeezing",
"Quantum Cryptography",
"Quantum Noise",
"Shot Noise",
"13 D B Squeezing",
"Astronomical Reach",
"Bright Coherent Light",
"Continuous Wave 1550 Nm Squeeze Laser",
"Device Independent Quantum Key Distribution",
"Gravitational Wave Observatories",
"Heisenberg Uncertainty Principle",
"Indistinguishable Photons",
"Initial Pair",
"Noncommuting Observables",
"Nonlinear Crystal",
"Nonlocal Quantum Sensing",
"Optical Resonator",
"Photo Electric Measurement",
"Photon Shot Noise",
"Quantum Computing",
"Quantum Noise",
"Squeezed Laser Light",
"Squeezed Light",
"Squeezed States",
"Squeezed Vacuum State",
"Sub Poissonian Photon Statistics",
"Wavelength 80 0 Cm",
"Photonics",
"Uncertainty",
"Pump Lasers",
"Measurement By Laser Beam",
"Laser Theory",
"Laser Modes",
"Optical Sensors",
"Quantum Computing",
"Quantum Sensing",
"Squeezed States",
"Nonlocality"
],
"authors": [
{
"givenName": "Roman",
"surname": "Schnabel",
"fullName": "Roman Schnabel",
"affiliation": "Institut für Laserphysik & Zentrum für Optische Quantentechnologien, Universität Hamburg, Hamburg, Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Axel",
"surname": "Schönbeck",
"fullName": "Axel Schönbeck",
"affiliation": "Institut für Laserphysik & Zentrum für Optische Quantentechnologien, Universität Hamburg, Hamburg, Germany",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": true,
"isOpenAccess": true,
"issueNum": "01",
"pubDate": "2022-01-01 00:00:00",
"pubType": "trans",
"pages": "1-9",
"year": "2022",
"issn": "2689-1808",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/eqec/2005/8973/0/01567428",
"title": "Atomic self-trapping and cooling in a single-atom laser",
"doi": null,
"abstractUrl": "/proceedings-article/eqec/2005/01567428/12OmNqIQS4Y",
"parentPublication": {
"id": "proceedings/eqec/2005/8973/0",
"title": "2005 European Quantum Electronics Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/Ismar-mashd/2014/6887/0/06935435",
"title": "VAL: Visually Augmented Laser cutting to enhance and support creativity",
"doi": null,
"abstractUrl": "/proceedings-article/Ismar-mashd/2014/06935435/12OmNrJAdXk",
"parentPublication": {
"id": "proceedings/Ismar-mashd/2014/6887/0",
"title": "2014 IEEE International Symposium on Mixed and Augmented Reality - Media, Art, Social Science, Humanities and Design (ISMAR-MASH'D)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/itc/2014/4722/0/07035329",
"title": "IC laser trimming speed-up through wafer-level spatial correlation modeling",
"doi": null,
"abstractUrl": "/proceedings-article/itc/2014/07035329/12OmNwAt1Fq",
"parentPublication": {
"id": "proceedings/itc/2014/4722/0",
"title": "2014 IEEE International Test Conference (ITC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icnc/2018/3652/0/08390392",
"title": "Effects of the atmospheric turbulence on the single photon transmission in quantum channel",
"doi": null,
"abstractUrl": "/proceedings-article/icnc/2018/08390392/12OmNwpoFJF",
"parentPublication": {
"id": "proceedings/icnc/2018/3652/0",
"title": "2018 International Conference on Computing, Networking and Communications (ICNC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3pgcic/2014/4171/0/4171a324",
"title": "Photon Pulses based Heterodyne Detection Precision Ranging System",
"doi": null,
"abstractUrl": "/proceedings-article/3pgcic/2014/4171a324/12OmNyQ7FKZ",
"parentPublication": {
"id": "proceedings/3pgcic/2014/4171/0",
"title": "2014 Ninth International Conference on P2P, Parallel, Grid, Cloud and Internet Computing (3PGCIC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/eqec/2005/8973/0/01567525",
"title": "Experimental evidence for correlated polaritonic emission from a semiconductor microcavity",
"doi": null,
"abstractUrl": "/proceedings-article/eqec/2005/01567525/12OmNyuPL2E",
"parentPublication": {
"id": "proceedings/eqec/2005/8973/0",
"title": "2005 European Quantum Electronics Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/eqec/2005/8973/0/01567450",
"title": "Photon number squeezing using an all-fibre asymmetric Sagnac loop",
"doi": null,
"abstractUrl": "/proceedings-article/eqec/2005/01567450/12OmNyuyae5",
"parentPublication": {
"id": "proceedings/eqec/2005/8973/0",
"title": "2005 European Quantum Electronics Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icnisc/2016/8838/0/07945964",
"title": "The Performance Analysis of Quantum Illumination Radar",
"doi": null,
"abstractUrl": "/proceedings-article/icnisc/2016/07945964/12OmNzkMlPR",
"parentPublication": {
"id": "proceedings/icnisc/2016/8838/0",
"title": "2016 International Conference on Network and Information Systems for Computers (ICNISC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/qe/2023/01/09964108",
"title": "Fundamentals of Quantum Fourier Optics",
"doi": null,
"abstractUrl": "/journal/qe/2023/01/09964108/1IAFM4ExGVi",
"parentPublication": {
"id": "trans/qe",
"title": "IEEE Transactions on Quantum Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2019/4803/0/480300h908",
"title": "Asynchronous Single-Photon 3D Imaging",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2019/480300h908/1hVlaL5wDoA",
"parentPublication": {
"id": "proceedings/iccv/2019/4803/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09661380",
"articleId": "1B4fgiYmT3q",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09650707",
"articleId": "1B4gjPUonLy",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNzZ5oah",
"title": "March/April",
"year": "2004",
"issueNum": "02",
"idPrefix": "tg",
"pubType": "journal",
"volume": "10",
"label": "March/April",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUwfI0PW",
"doi": "10.1109/TVCG.2004.1260769",
"abstract": "Abstract—Large-area displays made up of several projectors show significant variation in color. In this paper, we identify different projector parameters that cause the color variation and study their effects on the luminance and chrominance characteristics of the display. This work leads to the realization that luminance varies significantly within and across projectors, while chrominance variation is relatively small, especially across projectors of same model. To address this situation, we present a method to achieve luminance matching across all pixels of a multiprojector display that results in photometrically uniform displays. We use a camera as a measurement device for this purpose. Our method comprises a one-time calibration step that generates a per channel per projector luminance attenuation map (LAM), which is then used to correct any image projected on the display at interactive rates on commodity graphics hardware. To the best of our knowledge, this is the first effort to match luminance across all the pixels of a multiprojector display.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Abstract—Large-area displays made up of several projectors show significant variation in color. In this paper, we identify different projector parameters that cause the color variation and study their effects on the luminance and chrominance characteristics of the display. This work leads to the realization that luminance varies significantly within and across projectors, while chrominance variation is relatively small, especially across projectors of same model. To address this situation, we present a method to achieve luminance matching across all pixels of a multiprojector display that results in photometrically uniform displays. We use a camera as a measurement device for this purpose. Our method comprises a one-time calibration step that generates a per channel per projector luminance attenuation map (LAM), which is then used to correct any image projected on the display at interactive rates on commodity graphics hardware. To the best of our knowledge, this is the first effort to match luminance across all the pixels of a multiprojector display.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Abstract—Large-area displays made up of several projectors show significant variation in color. In this paper, we identify different projector parameters that cause the color variation and study their effects on the luminance and chrominance characteristics of the display. This work leads to the realization that luminance varies significantly within and across projectors, while chrominance variation is relatively small, especially across projectors of same model. To address this situation, we present a method to achieve luminance matching across all pixels of a multiprojector display that results in photometrically uniform displays. We use a camera as a measurement device for this purpose. Our method comprises a one-time calibration step that generates a per channel per projector luminance attenuation map (LAM), which is then used to correct any image projected on the display at interactive rates on commodity graphics hardware. To the best of our knowledge, this is the first effort to match luminance across all the pixels of a multiprojector display.",
"title": "Color Nonuniformity in Projection-Based Displays: Analysis and Solutions",
"normalizedTitle": "Color Nonuniformity in Projection-Based Displays: Analysis and Solutions",
"fno": "v0177",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Projection Based Displays",
"Tiled Displays",
"Color Calibration"
],
"authors": [
{
"givenName": "Aditi",
"surname": "Majumder",
"fullName": "Aditi Majumder",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "Rick",
"surname": "Stevens",
"fullName": "Rick Stevens",
"affiliation": null,
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": false,
"isOpenAccess": false,
"issueNum": "02",
"pubDate": "2004-03-01 00:00:00",
"pubType": "trans",
"pages": "177-188",
"year": "2004",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [],
"adjacentArticles": {
"previous": {
"fno": "v0164",
"articleId": "13rRUwh80H2",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "v0189",
"articleId": "13rRUygT7mK",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNz5JC2z",
"title": "Nov.",
"year": "2017",
"issueNum": "11",
"idPrefix": "tg",
"pubType": "journal",
"volume": "23",
"label": "Nov.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUxASupD",
"doi": "10.1109/TVCG.2017.2734478",
"abstract": "This paper presents a novel projected pixel localization principle for online geometric registration in dynamic projection mapping applications. We propose applying a time measurement of a laser projector raster-scanning beam using a photosensor to estimate its position while the projector displays meaningful visual information to human observers. Based on this principle, we develop two types of position estimation techniques. One estimates the position of a projected beam when it directly illuminates a photosensor. The other localizes a beam by measuring the reflection from a retro-reflective marker with the photosensor placed in the optical path of the projector. We conduct system evaluations using prototypes to validate this method as well as to confirm the applicability of our principle. In addition, we discuss the technical limitations of the prototypes based on the evaluation results. Finally, we build several dynamic projection mapping applications to demonstrate the feasibility of our principle.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper presents a novel projected pixel localization principle for online geometric registration in dynamic projection mapping applications. We propose applying a time measurement of a laser projector raster-scanning beam using a photosensor to estimate its position while the projector displays meaningful visual information to human observers. Based on this principle, we develop two types of position estimation techniques. One estimates the position of a projected beam when it directly illuminates a photosensor. The other localizes a beam by measuring the reflection from a retro-reflective marker with the photosensor placed in the optical path of the projector. We conduct system evaluations using prototypes to validate this method as well as to confirm the applicability of our principle. In addition, we discuss the technical limitations of the prototypes based on the evaluation results. Finally, we build several dynamic projection mapping applications to demonstrate the feasibility of our principle.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper presents a novel projected pixel localization principle for online geometric registration in dynamic projection mapping applications. We propose applying a time measurement of a laser projector raster-scanning beam using a photosensor to estimate its position while the projector displays meaningful visual information to human observers. Based on this principle, we develop two types of position estimation techniques. One estimates the position of a projected beam when it directly illuminates a photosensor. The other localizes a beam by measuring the reflection from a retro-reflective marker with the photosensor placed in the optical path of the projector. We conduct system evaluations using prototypes to validate this method as well as to confirm the applicability of our principle. In addition, we discuss the technical limitations of the prototypes based on the evaluation results. Finally, we build several dynamic projection mapping applications to demonstrate the feasibility of our principle.",
"title": "Simultaneous Projection and Positioning of Laser Projector Pixels",
"normalizedTitle": "Simultaneous Projection and Positioning of Laser Projector Pixels",
"fno": "08007248",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Measurement By Laser Beam",
"Laser Beams",
"Position Measurement",
"Surface Texture",
"Cameras",
"Calibration",
"Lasers",
"Dynamic Projection Mapping",
"Spatial Augmented Reality",
"Laser Projector",
"Light Pen",
"Geometric Registration"
],
"authors": [
{
"givenName": "Yuki",
"surname": "Kitajima",
"fullName": "Yuki Kitajima",
"affiliation": "Graduate School of Engineering ScienceOsaka University",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Daisuke",
"surname": "Iwai",
"fullName": "Daisuke Iwai",
"affiliation": "Graduate School of Engineering ScienceOsaka University",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Kosuke",
"surname": "Sato",
"fullName": "Kosuke Sato",
"affiliation": "Graduate School of Engineering ScienceOsaka University",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "11",
"pubDate": "2017-11-01 00:00:00",
"pubType": "trans",
"pages": "2419-2429",
"year": "2017",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/icpr/2014/5209/0/5209c095",
"title": "3D Acquisition of Occluded Surfaces from Scattering in Participating Media",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2014/5209c095/12OmNAlvHRN",
"parentPublication": {
"id": "proceedings/icpr/2014/5209/0",
"title": "2014 22nd International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/imccc/2016/1195/0/07774873",
"title": "A New Positioning Method for Indoor Laser Navigation on Under-Determined Condition",
"doi": null,
"abstractUrl": "/proceedings-article/imccc/2016/07774873/12OmNwoxSaK",
"parentPublication": {
"id": "proceedings/imccc/2016/1195/0",
"title": "2016 Sixth International Conference on Instrumentation & Measurement, Computer, Communication and Control (IMCCC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isot/2014/6752/0/07119393",
"title": "Recent Development of Using Optical Methods to Measure the Mechanical Properties of Thin Films",
"doi": null,
"abstractUrl": "/proceedings-article/isot/2014/07119393/12OmNx9nGGj",
"parentPublication": {
"id": "proceedings/isot/2014/6752/0",
"title": "2014 International Symposium on Optomechatronic Technologies (ISOT 2014)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ectc/2017/6315/0/07999928",
"title": "Multi Beam Full Cut Dicing of Thin Si IC Wafers",
"doi": null,
"abstractUrl": "/proceedings-article/ectc/2017/07999928/12OmNxFsmCC",
"parentPublication": {
"id": "proceedings/ectc/2017/6315/0",
"title": "2017 IEEE 67th Electronic Components and Technology Conference (ECTC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2014/4308/0/4308a449",
"title": "Projection Center Calibration for a Co-located Projector Camera System",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2014/4308a449/12OmNypIYA4",
"parentPublication": {
"id": "proceedings/cvprw/2014/4308/0",
"title": "2014 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isot/2014/6752/0/07119447",
"title": "Acoustic Research and Control of Piezoelectric Speakers Using a Spatially Modulated TiOPc/Piezo Buzzer Actuator",
"doi": null,
"abstractUrl": "/proceedings-article/isot/2014/07119447/12OmNzCF4UY",
"parentPublication": {
"id": "proceedings/isot/2014/6752/0",
"title": "2014 International Symposium on Optomechatronic Technologies (ISOT 2014)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/qe/2022/01/09964063",
"title": "The “Squeeze Laser”",
"doi": null,
"abstractUrl": "/journal/qe/2022/01/09964063/1IAFLDGVVVm",
"parentPublication": {
"id": "trans/qe",
"title": "IEEE Transactions on Quantum Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmcce/2019/4689/0/468900a470",
"title": "Design of a Stroboscopic Laser Grating Stripe Projection Device",
"doi": null,
"abstractUrl": "/proceedings-article/icmcce/2019/468900a470/1h0FgohMNG0",
"parentPublication": {
"id": "proceedings/icmcce/2019/4689/0",
"title": "2019 4th International Conference on Mechanical, Control and Computer Engineering (ICMCCE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmtma/2021/3892/0/389200a628",
"title": "Study on spherical aberration in the laser optical system",
"doi": null,
"abstractUrl": "/proceedings-article/icmtma/2021/389200a628/1t2n9aXMNPO",
"parentPublication": {
"id": "proceedings/icmtma/2021/3892/0",
"title": "2021 13th International Conference on Measuring Technology and Mechatronics Automation (ICMTMA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccsmt/2020/8668/0/866800a009",
"title": "SLAM Global Positioning Algorithm Based on Laser and Vision Fusion",
"doi": null,
"abstractUrl": "/proceedings-article/iccsmt/2020/866800a009/1u8pDO7YPzG",
"parentPublication": {
"id": "proceedings/iccsmt/2020/8668/0",
"title": "2020 International Conference on Computer Science and Management Technology (ICCSMT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "08007334",
"articleId": "13rRUxYIMV7",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08007213",
"articleId": "13rRUxcsYLX",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "17ShDTXFgAl",
"name": "ttg201711-08007248s1.zip",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg201711-08007248s1.zip",
"extension": "zip",
"size": "162 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNwudQUo",
"title": "March",
"year": "2017",
"issueNum": "03",
"idPrefix": "tg",
"pubType": "journal",
"volume": "23",
"label": "March",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUwdIOUR",
"doi": "10.1109/TVCG.2016.2592910",
"abstract": "Dynamic projection mapping for moving objects has attracted much attention in recent years. However, conventional approaches have faced some issues, such as the target objects being limited to rigid objects, and the limited moving speed of the targets. In this paper, we focus on dynamic projection mapping onto rapidly deforming non-rigid surfaces with a speed sufficiently high that a human does not perceive any misalignment between the target object and the projected images. In order to achieve such projection mapping, we need a high-speed technique for tracking non-rigid surfaces, which is still a challenging problem in the field of computer vision. We propose the Deformable Dot Cluster Marker (DDCM), a novel fiducial marker for high-speed tracking of non-rigid surfaces using a high-frame-rate camera. The DDCM has three performance advantages. First, it can be detected even when it is strongly deformed. Second, it realizes robust tracking even in the presence of external and self occlusions. Third, it allows millisecond-order computational speed. Using DDCM and a high-speed projector, we realized dynamic projection mapping onto a deformed sheet of paper and a T-shirt with a speed sufficiently high that the projected images appeared to be printed on the objects.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Dynamic projection mapping for moving objects has attracted much attention in recent years. However, conventional approaches have faced some issues, such as the target objects being limited to rigid objects, and the limited moving speed of the targets. In this paper, we focus on dynamic projection mapping onto rapidly deforming non-rigid surfaces with a speed sufficiently high that a human does not perceive any misalignment between the target object and the projected images. In order to achieve such projection mapping, we need a high-speed technique for tracking non-rigid surfaces, which is still a challenging problem in the field of computer vision. We propose the Deformable Dot Cluster Marker (DDCM), a novel fiducial marker for high-speed tracking of non-rigid surfaces using a high-frame-rate camera. The DDCM has three performance advantages. First, it can be detected even when it is strongly deformed. Second, it realizes robust tracking even in the presence of external and self occlusions. Third, it allows millisecond-order computational speed. Using DDCM and a high-speed projector, we realized dynamic projection mapping onto a deformed sheet of paper and a T-shirt with a speed sufficiently high that the projected images appeared to be printed on the objects.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Dynamic projection mapping for moving objects has attracted much attention in recent years. However, conventional approaches have faced some issues, such as the target objects being limited to rigid objects, and the limited moving speed of the targets. In this paper, we focus on dynamic projection mapping onto rapidly deforming non-rigid surfaces with a speed sufficiently high that a human does not perceive any misalignment between the target object and the projected images. In order to achieve such projection mapping, we need a high-speed technique for tracking non-rigid surfaces, which is still a challenging problem in the field of computer vision. We propose the Deformable Dot Cluster Marker (DDCM), a novel fiducial marker for high-speed tracking of non-rigid surfaces using a high-frame-rate camera. The DDCM has three performance advantages. First, it can be detected even when it is strongly deformed. Second, it realizes robust tracking even in the presence of external and self occlusions. Third, it allows millisecond-order computational speed. Using DDCM and a high-speed projector, we realized dynamic projection mapping onto a deformed sheet of paper and a T-shirt with a speed sufficiently high that the projected images appeared to be printed on the objects.",
"title": "Dynamic Projection Mapping onto Deforming Non-Rigid Surface Using Deformable Dot Cluster Marker",
"normalizedTitle": "Dynamic Projection Mapping onto Deforming Non-Rigid Surface Using Deformable Dot Cluster Marker",
"fno": "07516689",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Target Tracking",
"Robustness",
"Shape",
"Mirrors",
"Cameras",
"Visualization",
"High Speed Vision",
"Non Rigid Surface Tracking",
"Fiducial Marker",
"Projection Mapping",
"Spatial Augmented Reality"
],
"authors": [
{
"givenName": "Gaku",
"surname": "Narita",
"fullName": "Gaku Narita",
"affiliation": "Graduate School of Information Science and Technology, University of Tokyo, Tokyo, Japan",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Yoshihiro",
"surname": "Watanabe",
"fullName": "Yoshihiro Watanabe",
"affiliation": "Graduate School of Information Science and Technology, University of Tokyo, Tokyo, Japan",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Masatoshi",
"surname": "Ishikawa",
"fullName": "Masatoshi Ishikawa",
"affiliation": "Graduate School of Information Science and Technology, University of Tokyo, Tokyo, Japan",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "03",
"pubDate": "2017-03-01 00:00:00",
"pubType": "trans",
"pages": "1235-1248",
"year": "2017",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2015/1727/0/07223330",
"title": "Robust high-speed tracking against illumination changes for dynamic projection mapping",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2015/07223330/12OmNCdk2JE",
"parentPublication": {
"id": "proceedings/vr/2015/1727/0",
"title": "2015 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2017/2943/0/2943a052",
"title": "Extended Dot Cluster Marker for High-speed 3D Tracking in Dynamic Projection Mapping",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2017/2943a052/12OmNvoWV1k",
"parentPublication": {
"id": "proceedings/ismar/2017/2943/0",
"title": "2017 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/intetain/2015/0061/0/07325479",
"title": "Digital facial augmentation for interactive entertainment",
"doi": null,
"abstractUrl": "/proceedings-article/intetain/2015/07325479/12OmNy5R3ES",
"parentPublication": {
"id": "proceedings/intetain/2015/0061/0",
"title": "2015 7th International Conference on Intelligent Technologies for Interactive Entertainment (INTETAIN)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2012/2216/0/06460415",
"title": "Coupled 3D tracking and pose optimization of rigid objects using particle filter",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2012/06460415/12OmNzYNNaZ",
"parentPublication": {
"id": "proceedings/icpr/2012/2216/0",
"title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/11/08007312",
"title": "FaceForge: Markerless Non-Rigid Face Multi-Projection Mapping",
"doi": null,
"abstractUrl": "/journal/tg/2017/11/08007312/13rRUwInvyG",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/04/ttg201404540",
"title": "Geometrically-Correct Projection-Based Texture Mapping onto a Deformable Object",
"doi": null,
"abstractUrl": "/journal/tg/2014/04/ttg201404540/13rRUxcsYLO",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/11/08821571",
"title": "Animated Stickies: Fast Video Projection Mapping onto a Markerless Plane through a Direct Closed-Loop Alignment",
"doi": null,
"abstractUrl": "/journal/tg/2019/11/08821571/1d6xCnoQsU0",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2020/1331/0/09102813",
"title": "Projection Mapping System To A Widely Dynamic Sphere With Circumferential Markers",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2020/09102813/1kwqWza3GI8",
"parentPublication": {
"id": "proceedings/icme/2020/1331/0",
"title": "2020 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/12/09536434",
"title": "Dynamic Projection Mapping for Robust Sphere Posture Tracking Using Uniform/Biased Circumferential Markers",
"doi": null,
"abstractUrl": "/journal/tg/2022/12/09536434/1wREa2FncUE",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/nicoint/2021/3954/0/395400a001",
"title": "Interactive Dynamic Projection Mapping onto Thin Plants with Bioluminescent Effect Animations",
"doi": null,
"abstractUrl": "/proceedings-article/nicoint/2021/395400a001/1wnPrwHNFwQ",
"parentPublication": {
"id": "proceedings/nicoint/2021/3954/0",
"title": "2021 Nicograph International (NicoInt)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "07792698",
"articleId": "13rRUx0geq0",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07784854",
"articleId": "13rRUwjGoG8",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNyQphh4",
"title": "Aug.",
"year": "2018",
"issueNum": "08",
"idPrefix": "tg",
"pubType": "journal",
"volume": "24",
"label": "Aug.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUxYrbUO",
"doi": "10.1109/TVCG.2017.2728660",
"abstract": "Aiming at automatic, convenient and non-instrusive motion capture, this paper presents a new generation markerless motion capture technique, the FlyCap system, to capture surface motions of moving characters using multiple autonomous flying cameras (autonomous unmanned aerial vehicles(UAVs) each integrated with an RGBD video camera). During data capture, three cooperative flying cameras automatically track and follow the moving target who performs large-scale motions in a wide space. We propose a novel non-rigid surface registration method to track and fuse the depth of the three flying cameras for surface motion tracking of the moving target, and simultaneously calculate the pose of each flying camera. We leverage the using of visual-odometry information provided by the UAV platform, and formulate the surface tracking problem in a non-linear objective function that can be linearized and effectively minimized through a Gaussian-Newton method. Quantitative and qualitative experimental results demonstrate the plausible surface and motion reconstruction results.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Aiming at automatic, convenient and non-instrusive motion capture, this paper presents a new generation markerless motion capture technique, the FlyCap system, to capture surface motions of moving characters using multiple autonomous flying cameras (autonomous unmanned aerial vehicles(UAVs) each integrated with an RGBD video camera). During data capture, three cooperative flying cameras automatically track and follow the moving target who performs large-scale motions in a wide space. We propose a novel non-rigid surface registration method to track and fuse the depth of the three flying cameras for surface motion tracking of the moving target, and simultaneously calculate the pose of each flying camera. We leverage the using of visual-odometry information provided by the UAV platform, and formulate the surface tracking problem in a non-linear objective function that can be linearized and effectively minimized through a Gaussian-Newton method. Quantitative and qualitative experimental results demonstrate the plausible surface and motion reconstruction results.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Aiming at automatic, convenient and non-instrusive motion capture, this paper presents a new generation markerless motion capture technique, the FlyCap system, to capture surface motions of moving characters using multiple autonomous flying cameras (autonomous unmanned aerial vehicles(UAVs) each integrated with an RGBD video camera). During data capture, three cooperative flying cameras automatically track and follow the moving target who performs large-scale motions in a wide space. We propose a novel non-rigid surface registration method to track and fuse the depth of the three flying cameras for surface motion tracking of the moving target, and simultaneously calculate the pose of each flying camera. We leverage the using of visual-odometry information provided by the UAV platform, and formulate the surface tracking problem in a non-linear objective function that can be linearized and effectively minimized through a Gaussian-Newton method. Quantitative and qualitative experimental results demonstrate the plausible surface and motion reconstruction results.",
"title": "FlyCap: Markerless Motion Capture Using Multiple Autonomous Flying Cameras",
"normalizedTitle": "FlyCap: Markerless Motion Capture Using Multiple Autonomous Flying Cameras",
"fno": "07983006",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Cameras",
"Target Tracking",
"Surface Reconstruction",
"Geometry",
"Image Reconstruction",
"Unmanned Aerial Vehicles",
"Markerless Motion Capture",
"Flying Camera",
"Non Rigid Surface Reconstruction"
],
"authors": [
{
"givenName": "Lan",
"surname": "Xu",
"fullName": "Lan Xu",
"affiliation": "Tsinghua University",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Yebin",
"surname": "Liu",
"fullName": "Yebin Liu",
"affiliation": "Department of AutomationTsinghua University",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Wei",
"surname": "Cheng",
"fullName": "Wei Cheng",
"affiliation": "Tsinghua University",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Kaiwen",
"surname": "Guo",
"fullName": "Kaiwen Guo",
"affiliation": "Department of AutomationTsinghua University",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Guyue",
"surname": "Zhou",
"fullName": "Guyue Zhou",
"affiliation": "DJI Technology, Shenzhen",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Qionghai",
"surname": "Dai",
"fullName": "Qionghai Dai",
"affiliation": "Department of AutomationTsinghua University",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Lu",
"surname": "Fang",
"fullName": "Lu Fang",
"affiliation": "Tsinghua University",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "08",
"pubDate": "2018-08-01 00:00:00",
"pubType": "trans",
"pages": "2284-2297",
"year": "2018",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/avss/2017/2939/0/08078557",
"title": "Flying object detection for automatic UAV recognition",
"doi": null,
"abstractUrl": "/proceedings-article/avss/2017/08078557/12OmNBVrjpF",
"parentPublication": {
"id": "proceedings/avss/2017/2939/0",
"title": "2017 14th IEEE International Conference on Advanced Video and Signal Based Surveillance (AVSS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2009/3992/0/05206859",
"title": "Markerless Motion Capture with unsynchronized moving cameras",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2009/05206859/12OmNqI04JK",
"parentPublication": {
"id": "proceedings/cvpr/2009/3992/0",
"title": "2009 IEEE Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2011/0394/0/05995424",
"title": "Markerless motion capture of interacting characters using multi-view image segmentation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2011/05995424/12OmNrHB1Qj",
"parentPublication": {
"id": "proceedings/cvpr/2011/0394/0",
"title": "CVPR 2011",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2007/1016/0/04284846",
"title": "Model-Based Markerless Human Body Motion Capture using Multiple Cameras",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2007/04284846/12OmNvmXJ37",
"parentPublication": {
"id": "proceedings/icme/2007/1016/0",
"title": "2007 International Conference on Multimedia & Expo",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cviie/2005/2524/0/25240015",
"title": "Markerless Motion Capture using Multiple Cameras",
"doi": null,
"abstractUrl": "/proceedings-article/cviie/2005/25240015/12OmNxymo7V",
"parentPublication": {
"id": "proceedings/cviie/2005/2524/0",
"title": "Computer Vision for Interactive and Intelligent Environment",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2011/0063/0/06130445",
"title": "Motion capture from dynamic orthographic cameras",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2011/06130445/12OmNyQ7FSA",
"parentPublication": {
"id": "proceedings/iccvw/2011/0063/0",
"title": "2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/05/07896626",
"title": "Outdoor Markerless Motion Capture with Sparse Handheld Video Cameras",
"doi": null,
"abstractUrl": "/journal/tg/2018/05/07896626/13rRUB7a1g0",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200g219",
"title": "DeepMultiCap: Performance Capture of Multiple Characters Using Sparse Multiview Cameras",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200g219/1BmEybxUSnC",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2019/3131/0/313100a308",
"title": "Motion Capture from Pan-Tilt Cameras with Unknown Orientation",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2019/313100a308/1ezRBTghOZq",
"parentPublication": {
"id": "proceedings/3dv/2019/3131/0",
"title": "2019 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2019/4803/0/480300a823",
"title": "Markerless Outdoor Human Motion Capture Using Multiple Autonomous Micro Aerial Vehicles",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2019/480300a823/1hQqk33280w",
"parentPublication": {
"id": "proceedings/iccv/2019/4803/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "07999244",
"articleId": "13rRUNvgz9Z",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08007307",
"articleId": "13rRUyeTVi9",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNvAiSp1",
"title": "Nov.",
"year": "2018",
"issueNum": "11",
"idPrefix": "tg",
"pubType": "journal",
"volume": "24",
"label": "Nov.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "14M3DYlzziw",
"doi": "10.1109/TVCG.2018.2868530",
"abstract": "The quality of every dynamic multi-projection mapping system is limited by the quality of the projector to tracking device calibration. Common problems with poor calibration result in noticeable artifacts for the user, such as ghosting and seams. In this work we introduce a new, fully automated calibration algorithm that is tailored to reduce these artifacts, based on consumer-grade hardware. We achieve this goal by repurposing a structured-light scanning setup. A structured-light scanner can generate 3D geometry based on a known intrinsic and extrinsic calibration of its components (projector and RGB camera). We revert this process by providing the resulting 3D model to determine the intrinsic and extrinsic parameters of our setup (including those of a variety of tracking systems). Our system matches features and solves for all parameters in a single pass while respecting the lower quality of our sensory input.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The quality of every dynamic multi-projection mapping system is limited by the quality of the projector to tracking device calibration. Common problems with poor calibration result in noticeable artifacts for the user, such as ghosting and seams. In this work we introduce a new, fully automated calibration algorithm that is tailored to reduce these artifacts, based on consumer-grade hardware. We achieve this goal by repurposing a structured-light scanning setup. A structured-light scanner can generate 3D geometry based on a known intrinsic and extrinsic calibration of its components (projector and RGB camera). We revert this process by providing the resulting 3D model to determine the intrinsic and extrinsic parameters of our setup (including those of a variety of tracking systems). Our system matches features and solves for all parameters in a single pass while respecting the lower quality of our sensory input.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The quality of every dynamic multi-projection mapping system is limited by the quality of the projector to tracking device calibration. Common problems with poor calibration result in noticeable artifacts for the user, such as ghosting and seams. In this work we introduce a new, fully automated calibration algorithm that is tailored to reduce these artifacts, based on consumer-grade hardware. We achieve this goal by repurposing a structured-light scanning setup. A structured-light scanner can generate 3D geometry based on a known intrinsic and extrinsic calibration of its components (projector and RGB camera). We revert this process by providing the resulting 3D model to determine the intrinsic and extrinsic parameters of our setup (including those of a variety of tracking systems). Our system matches features and solves for all parameters in a single pass while respecting the lower quality of our sensory input.",
"title": "Auto-Calibration for Dynamic Multi-Projection Mapping on Arbitrary Surfaces",
"normalizedTitle": "Auto-Calibration for Dynamic Multi-Projection Mapping on Arbitrary Surfaces",
"fno": "08466021",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Calibration",
"Optical Projectors",
"Optical Scanners",
"Tracking Systems",
"Dynamic Multiprojection Mapping System",
"Consumer Grade Hardware",
"Structured Light Scanning Setup",
"Automated Calibration Algorithm",
"3 D Geometry Generation Model",
"RGB Camera",
"Cameras",
"Calibration",
"Target Tracking",
"Three Dimensional Displays",
"Distortion",
"Heuristic Algorithms",
"Geometry",
"Calibration",
"SAR",
"Multi Projection Mapping",
"Mixed Reality"
],
"authors": [
{
"givenName": "Philipp",
"surname": "Kurth",
"fullName": "Philipp Kurth",
"affiliation": "Computer Graphics GroupFriedrich-Alexander-University Erlangen-Nuremberg",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Vanessa",
"surname": "Lange",
"fullName": "Vanessa Lange",
"affiliation": "Computer Graphics GroupFriedrich-Alexander-University Erlangen-Nuremberg",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Christian",
"surname": "Siegl",
"fullName": "Christian Siegl",
"affiliation": "Computer Graphics GroupFriedrich-Alexander-University Erlangen-Nuremberg",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Marc",
"surname": "Stamminger",
"fullName": "Marc Stamminger",
"affiliation": "Computer Graphics GroupFriedrich-Alexander-University Erlangen-Nuremberg",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Frank",
"surname": "Bauer",
"fullName": "Frank Bauer",
"affiliation": "Computer Graphics GroupFriedrich-Alexander-University Erlangen-Nuremberg",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "11",
"pubDate": "2018-11-01 00:00:00",
"pubType": "trans",
"pages": "2886-2894",
"year": "2018",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/wacv/2017/4822/0/07926707",
"title": "Automatic Calibration of a Multiple-Projector Spherical Fish Tank VR Display",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2017/07926707/12OmNAoDhTe",
"parentPublication": {
"id": "proceedings/wacv/2017/4822/0",
"title": "2017 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2015/1727/0/07223400",
"title": "Semi-automatic calibration of a projector-camera system using arbitrary objects with known geometry",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2015/07223400/12OmNBJw9RK",
"parentPublication": {
"id": "proceedings/vr/2015/1727/0",
"title": "2015 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2008/2174/0/04761601",
"title": "Calibration of projector-camera systems from virtual mutual projection",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2008/04761601/12OmNBp52Hx",
"parentPublication": {
"id": "proceedings/icpr/2008/2174/0",
"title": "ICPR 2008 19th International Conference on Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2011/0529/0/05981781",
"title": "Simultaneous self-calibration of a projector and a camera using structured light",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2011/05981781/12OmNBzRNuv",
"parentPublication": {
"id": "proceedings/cvprw/2011/0529/0",
"title": "CVPR 2011 WORKSHOPS",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2017/2943/0/2943a042",
"title": "Robust Geometric Self-Calibration of Generic Multi-Projector Camera Systems",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2017/2943a042/12OmNCbCrRh",
"parentPublication": {
"id": "proceedings/ismar/2017/2943/0",
"title": "2017 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aipr/2016/3284/0/08010586",
"title": "Auto-calibration of multi-projector systems on arbitrary shapes",
"doi": null,
"abstractUrl": "/proceedings-article/aipr/2016/08010586/12OmNrJiCPx",
"parentPublication": {
"id": "proceedings/aipr/2016/3284/0",
"title": "2016 IEEE Applied Imagery Pattern Recognition Workshop (AIPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2010/6237/0/05444797",
"title": "Auto-calibration of cylindrical multi-projector systems",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2010/05444797/12OmNviHKkd",
"parentPublication": {
"id": "proceedings/vr/2010/6237/0",
"title": "2010 IEEE Virtual Reality Conference (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2012/2216/0/06460078",
"title": "Calibration-free projector-camera system for spatial augmented reality on planar surfaces",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2012/06460078/12OmNzUxO4G",
"parentPublication": {
"id": "proceedings/icpr/2012/2216/0",
"title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2011/0063/0/06130369",
"title": "More accurate pinhole camera calibration with imperfect planar target",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2011/06130369/12OmNzd7bLF",
"parentPublication": {
"id": "proceedings/iccvw/2011/0063/0",
"title": "2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2015/11/07164353",
"title": "On-Site Semi-Automatic Calibration and Registration of a Projector-Camera System Using Arbitrary Objects with Known Geometry",
"doi": null,
"abstractUrl": "/journal/tg/2015/11/07164353/13rRUEgs2M6",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "08466859",
"articleId": "14M3E5b55mM",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08492363",
"articleId": "14M3E1hwrFS",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1Glc3bQnGuI",
"name": "ttg201811-08466021s1-supp1-2868530.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg201811-08466021s1-supp1-2868530.mp4",
"extension": "mp4",
"size": "48.3 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
}
|
{
"issue": {
"id": "1HGJ6XQen96",
"title": "Nov.",
"year": "2022",
"issueNum": "11",
"idPrefix": "tg",
"pubType": "journal",
"volume": "28",
"label": "Nov.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1GjwJ0X1ks0",
"doi": "10.1109/TVCG.2022.3203085",
"abstract": "Projection mapping with inexpensive hardware often suffers from calibration errors that lead to visually compromised results. In this paper, we classify common errors that lead to typical visual artifacts. Based on this classification, we present the first content-aware brightness solver. It is tailored for high GPU performance, yet efficiently hides the most common calibration artifacts. Moreover, it is specifically designed to handle both single and larger networked projection mapping setups with minimal latency.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Projection mapping with inexpensive hardware often suffers from calibration errors that lead to visually compromised results. In this paper, we classify common errors that lead to typical visual artifacts. Based on this classification, we present the first content-aware brightness solver. It is tailored for high GPU performance, yet efficiently hides the most common calibration artifacts. Moreover, it is specifically designed to handle both single and larger networked projection mapping setups with minimal latency.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Projection mapping with inexpensive hardware often suffers from calibration errors that lead to visually compromised results. In this paper, we classify common errors that lead to typical visual artifacts. Based on this classification, we present the first content-aware brightness solver. It is tailored for high GPU performance, yet efficiently hides the most common calibration artifacts. Moreover, it is specifically designed to handle both single and larger networked projection mapping setups with minimal latency.",
"title": "Content-Aware Brightness Solving and Error Mitigation in Large-Scale Multi-Projection Mapping",
"normalizedTitle": "Content-Aware Brightness Solving and Error Mitigation in Large-Scale Multi-Projection Mapping",
"fno": "09873957",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Brightness",
"Calibration",
"Computer Vision",
"Data Visualisation",
"Calibration Artifacts",
"Calibration Errors",
"Content Aware Brightness Solver",
"Content Aware Brightness Solving",
"Error Mitigation",
"GPU Performance",
"Large Scale Multiprojection Mapping",
"Networked Projection Mapping",
"Visual Artifacts",
"Calibration",
"Brightness",
"Sensitivity",
"Image Edge Detection",
"Image Color Analysis",
"Target Tracking",
"Nose",
"Projection Mapping",
"Content Aware",
"Robust",
"Distributed",
"Scalable"
],
"authors": [
{
"givenName": "Philipp",
"surname": "Kurth",
"fullName": "Philipp Kurth",
"affiliation": "Visual Computing Group, Friedrich-Alexander-Universität Erlangen-Nürnberg, Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Markus",
"surname": "Leuschner",
"fullName": "Markus Leuschner",
"affiliation": "Visual Computing Group, Friedrich-Alexander-Universität Erlangen-Nürnberg, Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Marc",
"surname": "Stamminger",
"fullName": "Marc Stamminger",
"affiliation": "Visual Computing Group, Friedrich-Alexander-Universität Erlangen-Nürnberg, Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Frank",
"surname": "Bauer",
"fullName": "Frank Bauer",
"affiliation": "Visual Computing Group, Friedrich-Alexander-Universität Erlangen-Nürnberg, Germany",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "11",
"pubDate": "2022-11-01 00:00:00",
"pubType": "trans",
"pages": "3607-3617",
"year": "2022",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/iiaiaai/2014/4174/0/06913421",
"title": "Development of Projection Mapping with Utility of Digital Signage",
"doi": null,
"abstractUrl": "/proceedings-article/iiaiaai/2014/06913421/12OmNAnMuFg",
"parentPublication": {
"id": "proceedings/iiaiaai/2014/4174/0",
"title": "2014 IIAI 3rd International Conference on Advanced Applied Informatics (IIAIAAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446433",
"title": "A Calibration Method for Large-Scale Projection Based Floor Display System",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446433/13bd1gJ1v0M",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/11/08007312",
"title": "FaceForge: Markerless Non-Rigid Face Multi-Projection Mapping",
"doi": null,
"abstractUrl": "/journal/tg/2017/11/08007312/13rRUwInvyG",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2019/11/08430574",
"title": "A Generic Multi-Projection-Center Model and Calibration Method for Light Field Cameras",
"doi": null,
"abstractUrl": "/journal/tp/2019/11/08430574/13rRUxASuwA",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/11/08007213",
"title": "Geometric and Photometric Consistency in a Mixed Video and Galvanoscopic Scanning Laser Projection Mapping System",
"doi": null,
"abstractUrl": "/journal/tg/2017/11/08007213/13rRUxcsYLX",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/11/08466021",
"title": "Auto-Calibration for Dynamic Multi-Projection Mapping on Arbitrary Surfaces",
"doi": null,
"abstractUrl": "/journal/tg/2018/11/08466021/14M3DYlzziw",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2019/4765/0/476500a261",
"title": "A Projector Calibration Method Using a Mobile Camera for Projection Mapping System",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2019/476500a261/1gysikN6QOQ",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2019/4765/0",
"title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2020/1331/0/09102813",
"title": "Projection Mapping System To A Widely Dynamic Sphere With Circumferential Markers",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2020/09102813/1kwqWza3GI8",
"parentPublication": {
"id": "proceedings/icme/2020/1331/0",
"title": "2020 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2020/8508/0/850800a174",
"title": "Real-Time Adaptive Color Correction in Dynamic Projection Mapping",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2020/850800a174/1pysyl9FDhu",
"parentPublication": {
"id": "proceedings/ismar/2020/8508/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a597",
"title": "Dynamic Projection Mapping with 3D Images Using Volumetric Display",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a597/1tnX0LxdiuI",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09874383",
"articleId": "1GjwO1LML60",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09873969",
"articleId": "1GjwKZEQiFa",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNy5hRch",
"title": "Nov.",
"year": "2019",
"issueNum": "11",
"idPrefix": "tg",
"pubType": "journal",
"volume": "25",
"label": "Nov.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1d6xCnoQsU0",
"doi": "10.1109/TVCG.2019.2932248",
"abstract": "This paper presents a fast projection mapping method for moving image content projected onto a markerless planar surface using a low-latency Digital Micromirror Device (DMD) projector. By adopting a closed-loop alignment approach, in which not only the surface texture but also the projected image is tracked by a camera, the proposed method is free from a calibration or position adjustment between the camera and projector. We designed fiducial patterns to be inserted into a fast flapping sequence of binary frames of the DMD projector, which allows the simultaneous tracking of the surface texture and a fiducial geometry separate from a single image captured by the camera. The proposed method implemented on a CPU runs at 400 fps and enables arbitrary video contents to be “stuck” onto a variety of textured surfaces.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper presents a fast projection mapping method for moving image content projected onto a markerless planar surface using a low-latency Digital Micromirror Device (DMD) projector. By adopting a closed-loop alignment approach, in which not only the surface texture but also the projected image is tracked by a camera, the proposed method is free from a calibration or position adjustment between the camera and projector. We designed fiducial patterns to be inserted into a fast flapping sequence of binary frames of the DMD projector, which allows the simultaneous tracking of the surface texture and a fiducial geometry separate from a single image captured by the camera. The proposed method implemented on a CPU runs at 400 fps and enables arbitrary video contents to be “stuck” onto a variety of textured surfaces.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper presents a fast projection mapping method for moving image content projected onto a markerless planar surface using a low-latency Digital Micromirror Device (DMD) projector. By adopting a closed-loop alignment approach, in which not only the surface texture but also the projected image is tracked by a camera, the proposed method is free from a calibration or position adjustment between the camera and projector. We designed fiducial patterns to be inserted into a fast flapping sequence of binary frames of the DMD projector, which allows the simultaneous tracking of the surface texture and a fiducial geometry separate from a single image captured by the camera. The proposed method implemented on a CPU runs at 400 fps and enables arbitrary video contents to be “stuck” onto a variety of textured surfaces.",
"title": "Animated Stickies: Fast Video Projection Mapping onto a Markerless Plane through a Direct Closed-Loop Alignment",
"normalizedTitle": "Animated Stickies: Fast Video Projection Mapping onto a Markerless Plane through a Direct Closed-Loop Alignment",
"fno": "08821571",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Calibration",
"Cameras",
"Closed Loop Systems",
"Image Capture",
"Image Sensors",
"Micromirrors",
"Optical Design Techniques",
"Optical Images",
"Optical Projectors",
"Photodetectors",
"Surface Texture",
"Video Signal Processing",
"Single Image Capture",
"Fiducial Geometry",
"Surface Texture Tracking",
"Direct Closed Loop Alignment Approach",
"Low Latency Digital Micromirror Device Projector",
"Arbitrary Video Contents",
"DMD Projector",
"Fast Flapping Sequence",
"Fiducial Patterns",
"Calibration",
"Markerless Planar Surface",
"Fast Video Projection Mapping",
"Animated Stickies",
"Cameras",
"Target Tracking",
"Surface Texture",
"Calibration",
"Sensors",
"Optimization",
"Visualization",
"Spatial Augmented Reality",
"High Speed Vision",
"Projector Camera System",
"Visual Tracking"
],
"authors": [
{
"givenName": "Shingo",
"surname": "Kagami",
"fullName": "Shingo Kagami",
"affiliation": "Graduate School of Information SciencesTohoku University",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Koichi",
"surname": "Hashimoto",
"fullName": "Koichi Hashimoto",
"affiliation": "Graduate School of Information SciencesTohoku University",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "11",
"pubDate": "2019-11-01 00:00:00",
"pubType": "trans",
"pages": "3094-3104",
"year": "2019",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/ism/2017/2937/0/2937a114",
"title": "Detecting Good Surface for Improvisatory Visual Projection",
"doi": null,
"abstractUrl": "/proceedings-article/ism/2017/2937a114/12OmNCd2roE",
"parentPublication": {
"id": "proceedings/ism/2017/2937/0",
"title": "2017 IEEE International Symposium on Multimedia (ISM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pbg/2005/20/0/01500323",
"title": "A practical structured light acquisition system for point-based geometry and texture",
"doi": null,
"abstractUrl": "/proceedings-article/pbg/2005/01500323/12OmNCdTeQ0",
"parentPublication": {
"id": "proceedings/pbg/2005/20/0",
"title": "Point-Based Graphics 2005",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aipr/2016/3284/0/08010586",
"title": "Auto-calibration of multi-projector systems on arbitrary shapes",
"doi": null,
"abstractUrl": "/proceedings-article/aipr/2016/08010586/12OmNrJiCPx",
"parentPublication": {
"id": "proceedings/aipr/2016/3284/0",
"title": "2016 IEEE Applied Imagery Pattern Recognition Workshop (AIPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2010/4109/0/4109a307",
"title": "Adaptive Image Projection onto Non-planar Screen Using Projector-Camera Systems",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2010/4109a307/12OmNs0C9zQ",
"parentPublication": {
"id": "proceedings/icpr/2010/4109/0",
"title": "Pattern Recognition, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2010/6237/0/05444797",
"title": "Auto-calibration of cylindrical multi-projector systems",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2010/05444797/12OmNviHKkd",
"parentPublication": {
"id": "proceedings/vr/2010/6237/0",
"title": "2010 IEEE Virtual Reality Conference (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2012/2216/0/06460078",
"title": "Calibration-free projector-camera system for spatial augmented reality on planar surfaces",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2012/06460078/12OmNzUxO4G",
"parentPublication": {
"id": "proceedings/icpr/2012/2216/0",
"title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2010/9343/0/05643566",
"title": "Build your world and play in it: Interacting with surface particles on complex objects",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2010/05643566/12OmNzd7byj",
"parentPublication": {
"id": "proceedings/ismar/2010/9343/0",
"title": "2010 IEEE International Symposium on Mixed and Augmented Reality",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/03/07516689",
"title": "Dynamic Projection Mapping onto Deforming Non-Rigid Surface Using Deformable Dot Cluster Marker",
"doi": null,
"abstractUrl": "/journal/tg/2017/03/07516689/13rRUwdIOUR",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/08/07983006",
"title": "FlyCap: Markerless Motion Capture Using Multiple Autonomous Flying Cameras",
"doi": null,
"abstractUrl": "/journal/tg/2018/08/07983006/13rRUxYrbUO",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2009/06/ttg2009061307",
"title": "Markerless View-Independent Registration of Multiple Distorted Projectors on Extruded Surfaces Using an Uncalibrated Camera",
"doi": null,
"abstractUrl": "/journal/tg/2009/06/ttg2009061307/13rRUy0HYRj",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "08794580",
"articleId": "1dNHpE3ccCc",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08794641",
"articleId": "1cPXBdjp9yo",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNy7Qfqa",
"title": "Feb.",
"year": "2013",
"issueNum": "02",
"idPrefix": "tg",
"pubType": "journal",
"volume": "19",
"label": "Feb.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUILtJm9",
"doi": "10.1109/TVCG.2012.112",
"abstract": "Intrinsic images aim at separating an image into its reflectance and illumination components to facilitate further analysis or manipulation. This separation is severely ill posed and the most successful methods rely on user indications or precise geometry to resolve the ambiguities inherent to this problem. In this paper, we propose a method to estimate intrinsic images from multiple views of an outdoor scene without the need for precise geometry and with a few manual steps to calibrate the input. We use multiview stereo to automatically reconstruct a 3D point cloud of the scene. Although this point cloud is sparse and incomplete, we show that it provides the necessary information to compute plausible sky and indirect illumination at each 3D point. We then introduce an optimization method to estimate sun visibility over the point cloud. This algorithm compensates for the lack of accurate geometry and allows the extraction of precise shadows in the final image. We finally propagate the information computed over the sparse point cloud to every pixel in the photograph using image-guided propagation. Our propagation not only separates reflectance from illumination, but also decomposes the illumination into a sun, sky, and indirect layer. This rich decomposition allows novel image manipulations as demonstrated by our results.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Intrinsic images aim at separating an image into its reflectance and illumination components to facilitate further analysis or manipulation. This separation is severely ill posed and the most successful methods rely on user indications or precise geometry to resolve the ambiguities inherent to this problem. In this paper, we propose a method to estimate intrinsic images from multiple views of an outdoor scene without the need for precise geometry and with a few manual steps to calibrate the input. We use multiview stereo to automatically reconstruct a 3D point cloud of the scene. Although this point cloud is sparse and incomplete, we show that it provides the necessary information to compute plausible sky and indirect illumination at each 3D point. We then introduce an optimization method to estimate sun visibility over the point cloud. This algorithm compensates for the lack of accurate geometry and allows the extraction of precise shadows in the final image. We finally propagate the information computed over the sparse point cloud to every pixel in the photograph using image-guided propagation. Our propagation not only separates reflectance from illumination, but also decomposes the illumination into a sun, sky, and indirect layer. This rich decomposition allows novel image manipulations as demonstrated by our results.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Intrinsic images aim at separating an image into its reflectance and illumination components to facilitate further analysis or manipulation. This separation is severely ill posed and the most successful methods rely on user indications or precise geometry to resolve the ambiguities inherent to this problem. In this paper, we propose a method to estimate intrinsic images from multiple views of an outdoor scene without the need for precise geometry and with a few manual steps to calibrate the input. We use multiview stereo to automatically reconstruct a 3D point cloud of the scene. Although this point cloud is sparse and incomplete, we show that it provides the necessary information to compute plausible sky and indirect illumination at each 3D point. We then introduce an optimization method to estimate sun visibility over the point cloud. This algorithm compensates for the lack of accurate geometry and allows the extraction of precise shadows in the final image. We finally propagate the information computed over the sparse point cloud to every pixel in the photograph using image-guided propagation. Our propagation not only separates reflectance from illumination, but also decomposes the illumination into a sun, sky, and indirect layer. This rich decomposition allows novel image manipulations as demonstrated by our results.",
"title": "Rich Intrinsic Image Decomposition of Outdoor Scenes from Multiple Views",
"normalizedTitle": "Rich Intrinsic Image Decomposition of Outdoor Scenes from Multiple Views",
"fno": "ttg2013020210",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Optimisation",
"Geometry",
"Image Processing",
"Image Manipulations",
"Rich Intrinsic Image Decomposition",
"Outdoor Scenes",
"Image Separation",
"Illumination Components",
"Geometry",
"Optimization",
"Point Cloud",
"Image Guided Propagation",
"Lighting",
"Sun",
"Image Reconstruction",
"Three Dimensional Displays",
"Geometry",
"Materials",
"Image Color Analysis",
"Mean Shift Algorithm",
"Intrinsic Images",
"Image Guided Propagation",
"Multiview Stereo"
],
"authors": [
{
"givenName": "P.",
"surname": "Laffont",
"fullName": "P. Laffont",
"affiliation": "REVES, INRIA Sophia Antipolis, Sophia-Antipolis, France",
"__typename": "ArticleAuthorType"
},
{
"givenName": "A.",
"surname": "Bousseau",
"fullName": "A. Bousseau",
"affiliation": "REVES, INRIA Sophia Antipolis, Sophia-Antipolis, France",
"__typename": "ArticleAuthorType"
},
{
"givenName": "G.",
"surname": "Drettakis",
"fullName": "G. Drettakis",
"affiliation": "REVES, INRIA Sophia Antipolis, Sophia-Antipolis, France",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "02",
"pubDate": "2013-02-01 00:00:00",
"pubType": "trans",
"pages": "210-224",
"year": "2013",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/cvpr/1992/2855/0/00223129",
"title": "Anatomy of a color histogram",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/1992/00223129/12OmNANkojf",
"parentPublication": {
"id": "proceedings/cvpr/1992/2855/0",
"title": "Proceedings 1992 IEEE Computer Society Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2015/8332/0/8332a389",
"title": "Cultural Heritage Acquisition: Geometry-Based Radiometry in the Wild",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2015/8332a389/12OmNApLGrq",
"parentPublication": {
"id": "proceedings/3dv/2015/8332/0",
"title": "2015 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2011/0394/0/05995507",
"title": "Intrinsic images using optimization",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2011/05995507/12OmNCbU3cE",
"parentPublication": {
"id": "proceedings/cvpr/2011/0394/0",
"title": "CVPR 2011",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cadgraphics/2011/4497/0/4497a450",
"title": "Single Image Based Illumination Estimation for Lighting Virtual Object in Real Scene",
"doi": null,
"abstractUrl": "/proceedings-article/cadgraphics/2011/4497a450/12OmNx0RIM6",
"parentPublication": {
"id": "proceedings/cadgraphics/2011/4497/0",
"title": "Computer-Aided Design and Computer Graphics, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2015/8391/0/8391a172",
"title": "A Comprehensive Multi-Illuminant Dataset for Benchmarking of the Intrinsic Image Algorithms",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2015/8391a172/12OmNyQpgMj",
"parentPublication": {
"id": "proceedings/iccv/2015/8391/0",
"title": "2015 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2014/4985/0/06836000",
"title": "Estimating cloudmaps from outdoor image sequences",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2014/06836000/12OmNyY4rvG",
"parentPublication": {
"id": "proceedings/wacv/2014/4985/0",
"title": "2014 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2015/8391/0/8391a433",
"title": "Intrinsic Decomposition of Image Sequences from Local Temporal Variations",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2015/8391a433/12OmNzC5Tdg",
"parentPublication": {
"id": "proceedings/iccv/2015/8391/0",
"title": "2015 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/1995/7042/0/70420920",
"title": "Trilinearity of three perspective views and its associated tensor",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/1995/70420920/12OmNzXnNrx",
"parentPublication": {
"id": "proceedings/iccv/1995/7042/0",
"title": "Computer Vision, IEEE International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2016/04/07152924",
"title": "Photometric Ambient Occlusion for Intrinsic Image Decomposition",
"doi": null,
"abstractUrl": "/journal/tp/2016/04/07152924/13rRUIIVllH",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/11/08008808",
"title": "Live User-Guided Intrinsic Video for Static Scenes",
"doi": null,
"abstractUrl": "/journal/tg/2017/11/08008808/13rRUxOdD8m",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2013020201",
"articleId": "13rRUxNEqPQ",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2013020225",
"articleId": "13rRUEgs2M0",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "17ShDTYet0i",
"name": "ttg2013020210s.zip",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg2013020210s.zip",
"extension": "zip",
"size": "80 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNC1oT5u",
"title": "Dec.",
"year": "2013",
"issueNum": "12",
"idPrefix": "tp",
"pubType": "journal",
"volume": "35",
"label": "Dec.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUxOdD3R",
"doi": "10.1109/TPAMI.2013.136",
"abstract": "Intrinsic image decomposition is an important problem that targets the recovery of shading and reflectance components from a single image. While this is an ill-posed problem on its own, we propose a novel approach for intrinsic image decomposition using reflectance sparsity priors that we have developed. Our sparse representation of reflectance is based on a simple observation: Neighboring pixels with similar chromaticities usually have the same reflectance. We formalize and apply this sparsity constraint on local reflectance to construct a data-driven second-generation wavelet representation. We show that the reflectance component of natural images is sparse in this representation. We further propose and formulate a global sparse constraint on reflectance colors using the assumption that each natural image uses a small set of material colors. Using this sparse reflectance representation and the global constraint on a sparse set of reflectance colors, we formulate a constrained Z_$(l_1)$_Z-norm minimization problem for intrinsic image decomposition that can be solved efficiently. Our algorithm can successfully extract intrinsic images from a single image without using color models or any user interaction. Experimental results on a variety of images demonstrate the effectiveness of the proposed technique.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Intrinsic image decomposition is an important problem that targets the recovery of shading and reflectance components from a single image. While this is an ill-posed problem on its own, we propose a novel approach for intrinsic image decomposition using reflectance sparsity priors that we have developed. Our sparse representation of reflectance is based on a simple observation: Neighboring pixels with similar chromaticities usually have the same reflectance. We formalize and apply this sparsity constraint on local reflectance to construct a data-driven second-generation wavelet representation. We show that the reflectance component of natural images is sparse in this representation. We further propose and formulate a global sparse constraint on reflectance colors using the assumption that each natural image uses a small set of material colors. Using this sparse reflectance representation and the global constraint on a sparse set of reflectance colors, we formulate a constrained $(l_1)$-norm minimization problem for intrinsic image decomposition that can be solved efficiently. Our algorithm can successfully extract intrinsic images from a single image without using color models or any user interaction. Experimental results on a variety of images demonstrate the effectiveness of the proposed technique.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Intrinsic image decomposition is an important problem that targets the recovery of shading and reflectance components from a single image. While this is an ill-posed problem on its own, we propose a novel approach for intrinsic image decomposition using reflectance sparsity priors that we have developed. Our sparse representation of reflectance is based on a simple observation: Neighboring pixels with similar chromaticities usually have the same reflectance. We formalize and apply this sparsity constraint on local reflectance to construct a data-driven second-generation wavelet representation. We show that the reflectance component of natural images is sparse in this representation. We further propose and formulate a global sparse constraint on reflectance colors using the assumption that each natural image uses a small set of material colors. Using this sparse reflectance representation and the global constraint on a sparse set of reflectance colors, we formulate a constrained --norm minimization problem for intrinsic image decomposition that can be solved efficiently. Our algorithm can successfully extract intrinsic images from a single image without using color models or any user interaction. Experimental results on a variety of images demonstrate the effectiveness of the proposed technique.",
"title": "Intrinsic Image Decomposition Using a Sparse Representation of Reflectance",
"normalizedTitle": "Intrinsic Image Decomposition Using a Sparse Representation of Reflectance",
"fno": "ttp2013122904",
"hasPdf": true,
"idPrefix": "tp",
"keywords": [
"Image Color Analysis",
"Wavelet Transforms",
"Image Decomposition",
"Multiresolution Analysis",
"Reflectance",
"Image Edge Detection",
"Multiresolution Analysis",
"Intrinsic Image Decomposition",
"Sparse Reconstruction"
],
"authors": [
{
"givenName": null,
"surname": "Li Shen",
"fullName": "Li Shen",
"affiliation": "Inst. for Infocomm Res., Singapore, Singapore",
"__typename": "ArticleAuthorType"
},
{
"givenName": null,
"surname": "Chuohao Yeo",
"fullName": "Chuohao Yeo",
"affiliation": "Inst. for Infocomm Res., Singapore, Singapore",
"__typename": "ArticleAuthorType"
},
{
"givenName": null,
"surname": "Binh-Son Hua",
"fullName": "Binh-Son Hua",
"affiliation": "Dept. of Comput. Sci., Nat. Univ. of Singapore, Singapore, Singapore",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "12",
"pubDate": "2013-12-01 00:00:00",
"pubType": "trans",
"pages": "2904-2915",
"year": "2013",
"issn": "0162-8828",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/icme/2014/4761/0/06890318",
"title": "L0 co-intrinsic images decomposition",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2014/06890318/12OmNAoUTnl",
"parentPublication": {
"id": "proceedings/icme/2014/4761/0",
"title": "2014 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2015/8391/0/8391d469",
"title": "Learning Data-Driven Reflectance Priors for Intrinsic Image Decomposition",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2015/8391d469/12OmNBoNrqU",
"parentPublication": {
"id": "proceedings/iccv/2015/8391/0",
"title": "2015 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2011/0394/0/05995507",
"title": "Intrinsic images using optimization",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2011/05995507/12OmNCbU3cE",
"parentPublication": {
"id": "proceedings/cvpr/2011/0394/0",
"title": "CVPR 2011",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2011/0394/0/05995738",
"title": "Intrinsic images decomposition using a local and global sparse representation of reflectance",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2011/05995738/12OmNCd2ryX",
"parentPublication": {
"id": "proceedings/cvpr/2011/0394/0",
"title": "CVPR 2011",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2017/0457/0/0457b771",
"title": "Reflectance Adaptive Filtering Improves Intrinsic Image Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2017/0457b771/12OmNqHqSvf",
"parentPublication": {
"id": "proceedings/cvpr/2017/0457/0",
"title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2014/4761/0/06890313",
"title": "Intrinsic image decomposition by hierarchical L0 sparsity",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2014/06890313/12OmNwnYFW1",
"parentPublication": {
"id": "proceedings/icme/2014/4761/0",
"title": "2014 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/1997/04/v0329",
"title": "A Wavelet Representation of Reflectance Functions",
"doi": null,
"abstractUrl": "/journal/tg/1997/04/v0329/13rRUxBJhmG",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2018/6420/0/642000g430",
"title": "Multispectral Image Intrinsic Decomposition via Subspace Constraint",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2018/642000g430/17D45XeKgro",
"parentPublication": {
"id": "proceedings/cvpr/2018/6420/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2019/9552/0/955200a175",
"title": "Towards High-Quality Intrinsic Images in the Wild",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2019/955200a175/1cdOQdLofSw",
"parentPublication": {
"id": "proceedings/icme/2019/9552/0",
"title": "2019 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2022/12/09625763",
"title": "Unsupervised Intrinsic Image Decomposition Using Internal Self-Similarity Cues",
"doi": null,
"abstractUrl": "/journal/tp/2022/12/09625763/1yLTnG9Uisw",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttp2013122891",
"articleId": "13rRUwInuXu",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttp2013122916",
"articleId": "13rRUwInvmf",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNBBhN8N",
"title": "Dec.",
"year": "2020",
"issueNum": "12",
"idPrefix": "tg",
"pubType": "journal",
"volume": "26",
"label": "Dec.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1ncgrpZIBi0",
"doi": "10.1109/TVCG.2020.3023565",
"abstract": "Intrinsic image decomposition, i.e., decomposing a natural image into a reflectance image and a shading image, is used in many augmented reality applications for achieving better visual coherence between virtual contents and real scenes. The main challenge is that the decomposition is ill-posed, especially in indoor scenes where lighting conditions are complicated, while real training data is inadequate. To solve this challenge, we propose NIID-Net, a novel learning-based framework that adapts surface normal knowledge for improving the decomposition. The knowledge learned from relatively more abundant data for surface normal estimation is integrated into intrinsic image decomposition in two novel ways. First, normal feature adapters are proposed to incorporate scene geometry features when decomposing the image. Secondly, a map of integrated lighting is proposed for propagating object contour and planarity information during shading rendering. Furthermore, this map is capable of representing spatially-varying lighting conditions indoors. Experiments show that NIID-Net achieves competitive performance in reflectance estimation and outperforms all previous methods in shading estimation quantitatively and qualitatively. The source code of our implementation is released at https://github.com/zju3dv/NIID-Net.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Intrinsic image decomposition, i.e., decomposing a natural image into a reflectance image and a shading image, is used in many augmented reality applications for achieving better visual coherence between virtual contents and real scenes. The main challenge is that the decomposition is ill-posed, especially in indoor scenes where lighting conditions are complicated, while real training data is inadequate. To solve this challenge, we propose NIID-Net, a novel learning-based framework that adapts surface normal knowledge for improving the decomposition. The knowledge learned from relatively more abundant data for surface normal estimation is integrated into intrinsic image decomposition in two novel ways. First, normal feature adapters are proposed to incorporate scene geometry features when decomposing the image. Secondly, a map of integrated lighting is proposed for propagating object contour and planarity information during shading rendering. Furthermore, this map is capable of representing spatially-varying lighting conditions indoors. Experiments show that NIID-Net achieves competitive performance in reflectance estimation and outperforms all previous methods in shading estimation quantitatively and qualitatively. The source code of our implementation is released at https://github.com/zju3dv/NIID-Net.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Intrinsic image decomposition, i.e., decomposing a natural image into a reflectance image and a shading image, is used in many augmented reality applications for achieving better visual coherence between virtual contents and real scenes. The main challenge is that the decomposition is ill-posed, especially in indoor scenes where lighting conditions are complicated, while real training data is inadequate. To solve this challenge, we propose NIID-Net, a novel learning-based framework that adapts surface normal knowledge for improving the decomposition. The knowledge learned from relatively more abundant data for surface normal estimation is integrated into intrinsic image decomposition in two novel ways. First, normal feature adapters are proposed to incorporate scene geometry features when decomposing the image. Secondly, a map of integrated lighting is proposed for propagating object contour and planarity information during shading rendering. Furthermore, this map is capable of representing spatially-varying lighting conditions indoors. Experiments show that NIID-Net achieves competitive performance in reflectance estimation and outperforms all previous methods in shading estimation quantitatively and qualitatively. The source code of our implementation is released at https://github.com/zju3dv/NIID-Net.",
"title": "NIID-Net: Adapting Surface Normal Knowledge for Intrinsic Image Decomposition in Indoor Scenes",
"normalizedTitle": "NIID-Net: Adapting Surface Normal Knowledge for Intrinsic Image Decomposition in Indoor Scenes",
"fno": "09199573",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Augmented Reality",
"Feature Extraction",
"Image Representation",
"Image Resolution",
"Image Segmentation",
"Learning Artificial Intelligence",
"Lighting",
"Object Recognition",
"Rendering Computer Graphics",
"Video Signal Processing",
"Natural Image",
"Reflectance Image",
"Shading Image",
"Indoor Scenes",
"Surface Normal Knowledge",
"Surface Normal Estimation",
"Intrinsic Image Decomposition",
"Lighting Conditions",
"NIID Net",
"Scene Geometry Features",
"Feature Adapters",
"Image Decomposition",
"Estimation",
"Image Reconstruction",
"Augmented Reality",
"Training Data",
"Intrinsic Image Decomposition",
"Image Processing",
"Augmented Reality"
],
"authors": [
{
"givenName": "Jundan",
"surname": "Luo",
"fullName": "Jundan Luo",
"affiliation": "State Key Lab of CAD&CGZhejiang University",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Zhaoyang",
"surname": "Huang",
"fullName": "Zhaoyang Huang",
"affiliation": "State Key Lab of CAD&CGZhejiang University",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Yijin",
"surname": "Li",
"fullName": "Yijin Li",
"affiliation": "State Key Lab of CAD&CGZhejiang University",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Xiaowei",
"surname": "Zhou",
"fullName": "Xiaowei Zhou",
"affiliation": "State Key Lab of CAD&CGZhejiang University",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Guofeng",
"surname": "Zhang",
"fullName": "Guofeng Zhang",
"affiliation": "State Key Lab of CAD&CGZhejiang University",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Hujun",
"surname": "Bao",
"fullName": "Hujun Bao",
"affiliation": "State Key Lab of CAD&CGZhejiang University",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "12",
"pubDate": "2020-12-01 00:00:00",
"pubType": "trans",
"pages": "3434-3445",
"year": "2020",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/iccv/2015/8391/0/8391d469",
"title": "Learning Data-Driven Reflectance Priors for Intrinsic Image Decomposition",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2015/8391d469/12OmNBoNrqU",
"parentPublication": {
"id": "proceedings/iccv/2015/8391/0",
"title": "2015 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2015/8391/0/8391a810",
"title": "Intrinsic Scene Decomposition from RGB-D Images",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2015/8391a810/12OmNy4IF17",
"parentPublication": {
"id": "proceedings/iccv/2015/8391/0",
"title": "2015 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2015/8391/0/8391a433",
"title": "Intrinsic Decomposition of Image Sequences from Local Temporal Variations",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2015/8391a433/12OmNzC5Tdg",
"parentPublication": {
"id": "proceedings/iccv/2015/8391/0",
"title": "2015 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/11/08008808",
"title": "Live User-Guided Intrinsic Video for Static Scenes",
"doi": null,
"abstractUrl": "/journal/tg/2017/11/08008808/13rRUxOdD8m",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bigmm/2018/5321/0/08499478",
"title": "Unrolled Optimization with Deep Priors for Intrinsic Image Decomposition",
"doi": null,
"abstractUrl": "/proceedings-article/bigmm/2018/08499478/17D45VTRoDD",
"parentPublication": {
"id": "proceedings/bigmm/2018/5321/0",
"title": "2018 IEEE Fourth International Conference on Multimedia Big Data (BigMM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2022/8739/0/873900a312",
"title": "HSI-Guided Intrinsic Image Decomposition for Outdoor Scenes",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2022/873900a312/1G56nWipNPa",
"parentPublication": {
"id": "proceedings/cvprw/2022/8739/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2019/4803/0/480300h819",
"title": "GLoSH: Global-Local Spherical Harmonics for Intrinsic Image Decomposition",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2019/480300h819/1hQqy771H9u",
"parentPublication": {
"id": "proceedings/iccv/2019/4803/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2020/1331/0/09102901",
"title": "Learning Intrinsic Decomposition of Complex-Textured Fashion Images",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2020/09102901/1kwr0dRSDEQ",
"parentPublication": {
"id": "proceedings/icme/2020/1331/0",
"title": "2020 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2022/11/09573351",
"title": "Intrinsic Image Decomposition Using Paradigms",
"doi": null,
"abstractUrl": "/journal/tp/2022/11/09573351/1xH5D2WNbEc",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2022/12/09625763",
"title": "Unsupervised Intrinsic Image Decomposition Using Internal Self-Similarity Cues",
"doi": null,
"abstractUrl": "/journal/tp/2022/12/09625763/1yLTnG9Uisw",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09199575",
"articleId": "1ncgpmtzdn2",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09201064",
"articleId": "1niUpdweh2g",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNz5JC2z",
"title": "Nov.",
"year": "2017",
"issueNum": "11",
"idPrefix": "tg",
"pubType": "journal",
"volume": "23",
"label": "Nov.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUILc8fg",
"doi": "10.1109/TVCG.2017.2734426",
"abstract": "Augmented Reality offers many applications today, especially on mobile devices. Due to the lack of mobile hardware for illumination measurements, photorealistic rendering with consistent appearance of virtual objects is still an area of active research. In this paper, we present a full two-stage pipeline for environment acquisition and augmentation of live camera images using a mobile device with a depth sensor. We show how to directly work on a recorded 3D point cloud of the real environment containing high dynamic range color values. For unknown and automatically changing camera settings, a color compensation method is introduced. Based on this, we show photorealistic augmentations using variants of differential light simulation techniques. The presented methods are tailored for mobile devices and run at interactive frame rates. However, our methods are scalable to trade performance for quality and can produce quality renderings on desktop hardware.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Augmented Reality offers many applications today, especially on mobile devices. Due to the lack of mobile hardware for illumination measurements, photorealistic rendering with consistent appearance of virtual objects is still an area of active research. In this paper, we present a full two-stage pipeline for environment acquisition and augmentation of live camera images using a mobile device with a depth sensor. We show how to directly work on a recorded 3D point cloud of the real environment containing high dynamic range color values. For unknown and automatically changing camera settings, a color compensation method is introduced. Based on this, we show photorealistic augmentations using variants of differential light simulation techniques. The presented methods are tailored for mobile devices and run at interactive frame rates. However, our methods are scalable to trade performance for quality and can produce quality renderings on desktop hardware.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Augmented Reality offers many applications today, especially on mobile devices. Due to the lack of mobile hardware for illumination measurements, photorealistic rendering with consistent appearance of virtual objects is still an area of active research. In this paper, we present a full two-stage pipeline for environment acquisition and augmentation of live camera images using a mobile device with a depth sensor. We show how to directly work on a recorded 3D point cloud of the real environment containing high dynamic range color values. For unknown and automatically changing camera settings, a color compensation method is introduced. Based on this, we show photorealistic augmentations using variants of differential light simulation techniques. The presented methods are tailored for mobile devices and run at interactive frame rates. However, our methods are scalable to trade performance for quality and can produce quality renderings on desktop hardware.",
"title": "Natural Environment Illumination: Coherent Interactive Augmented Reality for Mobile and Non-Mobile Devices",
"normalizedTitle": "Natural Environment Illumination: Coherent Interactive Augmented Reality for Mobile and Non-Mobile Devices",
"fno": "08007317",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Cameras",
"Image Color Analysis",
"Three Dimensional Displays",
"Rendering Computer Graphics",
"Lighting",
"Image Reconstruction",
"Estimation",
"Augmented Reality",
"Mixed Reality",
"Differential Rendering",
"Color Compensation",
"Impostors Tracing",
"GPU Importance Sampling",
"Mobile AR",
"Scene Reconstruction",
"Light Estimation",
"Material Estimation",
"Depth Sensing",
"Point Clouds",
"Global Illumination"
],
"authors": [
{
"givenName": "Kai",
"surname": "Rohmer",
"fullName": "Kai Rohmer",
"affiliation": "Graphical Data Processing and Multimedia Group, TU Clausthal, Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Johannes",
"surname": "Jendersie",
"fullName": "Johannes Jendersie",
"affiliation": "Graphical Data Processing and Multimedia Group, TU Clausthal, Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Thorsten",
"surname": "Grosch",
"fullName": "Thorsten Grosch",
"affiliation": "Graphical Data Processing and Multimedia Group, TU Clausthal, Germany",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "11",
"pubDate": "2017-11-01 00:00:00",
"pubType": "trans",
"pages": "2474-2484",
"year": "2017",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/ismar/2014/6184/0/06948406",
"title": "Interactive near-field illumination for photorealistic augmented reality on mobile devices",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2014/06948406/12OmNAGNCfe",
"parentPublication": {
"id": "proceedings/ismar/2014/6184/0",
"title": "2014 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/1994/6265/1/00576263",
"title": "Online color camera calibration",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/1994/00576263/12OmNBLdKIw",
"parentPublication": {
"id": "proceedings/icpr/1994/6265/1",
"title": "Proceedings of 12th International Conference on Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icis/2016/0806/0/07550918",
"title": "A mosaic style rendering method based on fuzzy color modeling",
"doi": null,
"abstractUrl": "/proceedings-article/icis/2016/07550918/12OmNrAMF1Y",
"parentPublication": {
"id": "proceedings/icis/2016/0806/0",
"title": "2016 IEEE/ACIS 15th International Conference on Computer and Information Science (ICIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2010/7846/0/05571370",
"title": "Preserving Coherent Illumination in Style Transfer Functions for Volume Rendering",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2010/05571370/12OmNwF0BUx",
"parentPublication": {
"id": "proceedings/iv/2010/7846/0",
"title": "2010 14th International Conference Information Visualisation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2015/1727/0/07223334",
"title": "Image-space illumination for augmented reality in dynamic environments",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2015/07223334/12OmNyFU73E",
"parentPublication": {
"id": "proceedings/vr/2015/1727/0",
"title": "2015 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2014/2871/0/06802055",
"title": "Global illumination for Augmented Reality on mobile phones",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2014/06802055/12OmNyRg4FC",
"parentPublication": {
"id": "proceedings/vr/2014/2871/0",
"title": "2014 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2010/6237/0/05444836",
"title": "Photorealistic rendering for Augmented Reality: A global illumination and BRDF solution",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2010/05444836/12OmNz6iOaA",
"parentPublication": {
"id": "proceedings/vr/2010/6237/0",
"title": "2010 IEEE Virtual Reality Conference (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2015/12/07138641",
"title": "Interactive Near-Field Illumination for Photorealistic Augmented Reality with Varying Materials on Mobile Devices",
"doi": null,
"abstractUrl": "/journal/tg/2015/12/07138641/13rRUNvgz4i",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2018/7592/0/08699239",
"title": "Reproducing Material Appearance of Real Objects Using Mobile Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699239/19F1QemV928",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2018/7592/0",
"title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2018/7592/0/08699176",
"title": "Reproducing Material Appearance of Real Objects Using Mobile Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699176/19F1ToU9wNG",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2018/7592/0",
"title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "08007218",
"articleId": "13rRUxcbnHi",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08007318",
"articleId": "13rRUxOve9O",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "17ShDTYet1R",
"name": "ttg201711-08007317s1.zip",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg201711-08007317s1.zip",
"extension": "zip",
"size": "74.6 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNvqEvRo",
"title": "PrePrints",
"year": "5555",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": null,
"label": "PrePrints",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1HJuJYF342Y",
"doi": "10.1109/TVCG.2022.3216712",
"abstract": "<italic>Image-warping</italic>, a per-pixel deformation of one image into another, is an essential component in immersive visual experiences such as virtual reality or augmented reality. The primary issue with image warping is disocclusions, where occluded (and hence unknown) parts of the input image would be required to compose the output image. We introduce a new image warping method, <italic>Metameric image inpainting</italic> - an approach for hole-filling in real-time with foundations in human visual perception. Our method estimates image feature statistics of disoccluded regions from their neighbours. These statistics are inpainted and used to synthesise visuals in real-time that are less noticeable to study participants, particularly in peripheral vision. Our method offers speed improvements over the standard structured image inpainting methods while improving realism over colour-based inpainting such as push-pull. Hence, our work paves the way towards future applications such as depth image-based rendering, 6-DoF 360 rendering, and remote render-streaming.",
"abstracts": [
{
"abstractType": "Regular",
"content": "<italic>Image-warping</italic>, a per-pixel deformation of one image into another, is an essential component in immersive visual experiences such as virtual reality or augmented reality. The primary issue with image warping is disocclusions, where occluded (and hence unknown) parts of the input image would be required to compose the output image. We introduce a new image warping method, <italic>Metameric image inpainting</italic> - an approach for hole-filling in real-time with foundations in human visual perception. Our method estimates image feature statistics of disoccluded regions from their neighbours. These statistics are inpainted and used to synthesise visuals in real-time that are less noticeable to study participants, particularly in peripheral vision. Our method offers speed improvements over the standard structured image inpainting methods while improving realism over colour-based inpainting such as push-pull. Hence, our work paves the way towards future applications such as depth image-based rendering, 6-DoF 360 rendering, and remote render-streaming.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Image-warping, a per-pixel deformation of one image into another, is an essential component in immersive visual experiences such as virtual reality or augmented reality. The primary issue with image warping is disocclusions, where occluded (and hence unknown) parts of the input image would be required to compose the output image. We introduce a new image warping method, Metameric image inpainting - an approach for hole-filling in real-time with foundations in human visual perception. Our method estimates image feature statistics of disoccluded regions from their neighbours. These statistics are inpainted and used to synthesise visuals in real-time that are less noticeable to study participants, particularly in peripheral vision. Our method offers speed improvements over the standard structured image inpainting methods while improving realism over colour-based inpainting such as push-pull. Hence, our work paves the way towards future applications such as depth image-based rendering, 6-DoF 360 rendering, and remote render-streaming.",
"title": "Metameric Inpainting for Image Warping",
"normalizedTitle": "Metameric Inpainting for Image Warping",
"fno": "09928218",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Real Time Systems",
"Visualization",
"Rendering Computer Graphics",
"Neural Networks",
"Image Color Analysis",
"Task Analysis",
"Visual Perception",
"Inpainting",
"Warping",
"Perception",
"Real Time Rendering"
],
"authors": [
{
"givenName": "Rafael",
"surname": "Kuffner dos Anjos",
"fullName": "Rafael Kuffner dos Anjos",
"affiliation": "University of Leeds, U.K.",
"__typename": "ArticleAuthorType"
},
{
"givenName": "David R.",
"surname": "Walton",
"fullName": "David R. Walton",
"affiliation": "University College London, U.K.",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Kaan",
"surname": "Aksit",
"fullName": "Kaan Aksit",
"affiliation": "University College London, U.K.",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Sebastian",
"surname": "Friston",
"fullName": "Sebastian Friston",
"affiliation": "University College London, U.K.",
"__typename": "ArticleAuthorType"
},
{
"givenName": "David",
"surname": "Swapp",
"fullName": "David Swapp",
"affiliation": "University College London, U.K.",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Anthony",
"surname": "Steed",
"fullName": "Anthony Steed",
"affiliation": "University College London, U.K.",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Tobias",
"surname": "Ritschel",
"fullName": "Tobias Ritschel",
"affiliation": "University College London, U.K.",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2022-10-01 00:00:00",
"pubType": "trans",
"pages": "1-12",
"year": "5555",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/3dv/2016/5407/0/5407a351",
"title": "Multi-View Inpainting for Image-Based Scene Editing and Rendering",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2016/5407a351/12OmNxEjXRB",
"parentPublication": {
"id": "proceedings/3dv/2016/5407/0",
"title": "2016 Fourth International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icndc/2012/4832/0/06386573",
"title": "A Virtual View Synthesis Algorithm Based on Image Inpainting",
"doi": null,
"abstractUrl": "/proceedings-article/icndc/2012/06386573/12OmNynJMTf",
"parentPublication": {
"id": "proceedings/icndc/2012/4832/0",
"title": "2012 Third International Conference on Networking and Distributed Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2011/348/0/06011901",
"title": "Stereoscopic image inpainting using scene geometry",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2011/06011901/12OmNyr8YlH",
"parentPublication": {
"id": "proceedings/icme/2011/348/0",
"title": "2011 IEEE International Conference on Multimedia and Expo",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccis/2013/5004/0/5004a221",
"title": "A Survey on Tangka Image Inpainting",
"doi": null,
"abstractUrl": "/proceedings-article/iccis/2013/5004a221/12OmNzcxZv1",
"parentPublication": {
"id": "proceedings/iccis/2013/5004/0",
"title": "2013 International Conference on Computational and Information Sciences",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2018/8425/0/842500a464",
"title": "Multi-view Inpainting for RGB-D Sequence",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2018/842500a464/17D45WgziNa",
"parentPublication": {
"id": "proceedings/3dv/2018/8425/0",
"title": "2018 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2018/7592/0/08699212",
"title": "3D PixMix: Image Inpainting in 3D Environments",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699212/19F1PUM1Yk0",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2018/7592/0",
"title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iwecai/2022/7997/0/799700a145",
"title": "GAN Based Image Inpainting Methods: A Taxonomy",
"doi": null,
"abstractUrl": "/proceedings-article/iwecai/2022/799700a145/1CugnMVrJbG",
"parentPublication": {
"id": "proceedings/iwecai/2022/7997/0",
"title": "2022 3rd International Conference on Electronic Communication and Artificial Intelligence (IWECAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2022/9062/0/09956171",
"title": "Interactive Image Inpainting Using Semantic Guidance",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2022/09956171/1IHqiFLbCPm",
"parentPublication": {
"id": "proceedings/icpr/2022/9062/0",
"title": "2022 26th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/5555/01/10038566",
"title": "Content-aware Warping for View Synthesis",
"doi": null,
"abstractUrl": "/journal/tp/5555/01/10038566/1KxPVE9pkxG",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900c266",
"title": "TransFill: Reference-guided Image Inpainting by Merging Multiple Color and Spatial Transformations",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900c266/1yeM3LNZkru",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09925645",
"articleId": "1HCQTWI9XgY",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09930144",
"articleId": "1HMOX2J2VMY",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1HMOWsNEA6I",
"name": "ttg555501-09928218s1-tvcg-3216712-mm.zip",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg555501-09928218s1-tvcg-3216712-mm.zip",
"extension": "zip",
"size": "235 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNzvQI13",
"title": "Oct.",
"year": "2020",
"issueNum": "10",
"idPrefix": "tg",
"pubType": "journal",
"volume": "26",
"label": "Oct.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1mLIesC5z0Y",
"doi": "10.1109/TVCG.2020.3003768",
"abstract": "State-of-the-art methods for diminished reality propagate pixel information from a keyframe to subsequent frames for real-time inpainting. However, these approaches produce artifacts, if the scene geometry is not sufficiently planar. In this article, we present InpaintFusion, a new real-time method that extends inpainting to non-planar scenes by considering both color and depth information in the inpainting process. We use an RGB-D sensor for simultaneous localization and mapping, in order to both track the camera and obtain a surfel map in addition to RGB images. We use the RGB-D information in a cost function for both the color and the geometric appearance to derive a global optimization for simultaneous inpainting of color and depth. The inpainted depth is merged in a global map by depth fusion. For the final rendering, we project the map model into image space, where we can use it for effects such as relighting and stereo rendering of otherwise hidden structures. We demonstrate the capabilities of our method by comparing it to inpainting results with methods using planar geometric proxies.",
"abstracts": [
{
"abstractType": "Regular",
"content": "State-of-the-art methods for diminished reality propagate pixel information from a keyframe to subsequent frames for real-time inpainting. However, these approaches produce artifacts, if the scene geometry is not sufficiently planar. In this article, we present InpaintFusion, a new real-time method that extends inpainting to non-planar scenes by considering both color and depth information in the inpainting process. We use an RGB-D sensor for simultaneous localization and mapping, in order to both track the camera and obtain a surfel map in addition to RGB images. We use the RGB-D information in a cost function for both the color and the geometric appearance to derive a global optimization for simultaneous inpainting of color and depth. The inpainted depth is merged in a global map by depth fusion. For the final rendering, we project the map model into image space, where we can use it for effects such as relighting and stereo rendering of otherwise hidden structures. We demonstrate the capabilities of our method by comparing it to inpainting results with methods using planar geometric proxies.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "State-of-the-art methods for diminished reality propagate pixel information from a keyframe to subsequent frames for real-time inpainting. However, these approaches produce artifacts, if the scene geometry is not sufficiently planar. In this article, we present InpaintFusion, a new real-time method that extends inpainting to non-planar scenes by considering both color and depth information in the inpainting process. We use an RGB-D sensor for simultaneous localization and mapping, in order to both track the camera and obtain a surfel map in addition to RGB images. We use the RGB-D information in a cost function for both the color and the geometric appearance to derive a global optimization for simultaneous inpainting of color and depth. The inpainted depth is merged in a global map by depth fusion. For the final rendering, we project the map model into image space, where we can use it for effects such as relighting and stereo rendering of otherwise hidden structures. We demonstrate the capabilities of our method by comparing it to inpainting results with methods using planar geometric proxies.",
"title": "InpaintFusion: Incremental RGB-D Inpainting for 3D Scenes",
"normalizedTitle": "InpaintFusion: Incremental RGB-D Inpainting for 3D Scenes",
"fno": "09184389",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Cameras",
"Image Colour Analysis",
"Image Fusion",
"Image Reconstruction",
"Image Resolution",
"Image Restoration",
"Image Sensors",
"Image Sequences",
"Image Texture",
"Object Detection",
"Rendering Computer Graphics",
"Stereo Image Processing",
"Inpaint Fusion",
"Incremental RGB D Inpainting",
"Pixel Information",
"Keyframe",
"Subsequent Frames",
"Real Time Inpainting",
"Scene Geometry",
"Nonplanar Scenes",
"Depth Information",
"Inpainting Process",
"RGB D Sensor",
"Surfel Map",
"RGB Images",
"RGB D Information",
"Cost Function",
"Geometric Appearance",
"Global Optimization",
"Simultaneous Inpainting",
"Inpainted Depth",
"Depth Fusion",
"Map Model",
"Image Space",
"Planar Geometric Proxies",
"Three Dimensional Displays",
"Image Color Analysis",
"Rendering Computer Graphics",
"Cameras",
"Simultaneous Localization And Mapping",
"Real Time Systems",
"Image Reconstruction",
"Diminished Reality",
"Inpainting",
"Fusion",
"SLAM"
],
"authors": [
{
"givenName": "Shohei",
"surname": "Mori",
"fullName": "Shohei Mori",
"affiliation": "Institute of Computer Graphics and Vision, Graz University of Technology, Graz, Austria",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Okan",
"surname": "Erat",
"fullName": "Okan Erat",
"affiliation": "Institute of Computer Graphics and Vision, Graz University of Technology, Graz, Austria",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Wolfgang",
"surname": "Broll",
"fullName": "Wolfgang Broll",
"affiliation": "Virtual Worlds and Digital Games Group, Ilmenau University of Technology, Ilmenau, Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Hideo",
"surname": "Saito",
"fullName": "Hideo Saito",
"affiliation": "Department of Information and Computer Science, Keio University, Minato City, Tokyo, Japan",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Dieter",
"surname": "Schmalstieg",
"fullName": "Dieter Schmalstieg",
"affiliation": "Institute of Computer Graphics and Vision, Graz University of Technology, Graz, Austria",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Denis",
"surname": "Kalkofen",
"fullName": "Denis Kalkofen",
"affiliation": "Institute of Computer Graphics and Vision, Graz University of Technology, Graz, Austria",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "10",
"pubDate": "2020-10-01 00:00:00",
"pubType": "trans",
"pages": "2994-3007",
"year": "2020",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/3dv/2015/8332/0/8332a452",
"title": "Planes Detection for Robust Localization and Mapping in RGB-D SLAM Systems",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2015/8332a452/12OmNqH9hdY",
"parentPublication": {
"id": "proceedings/3dv/2015/8332/0",
"title": "2015 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sbr-lars-r/2016/3656/0/07783496",
"title": "Object Subtraction Planar RGB-D SLAM",
"doi": null,
"abstractUrl": "/proceedings-article/sbr-lars-r/2016/07783496/12OmNxwENic",
"parentPublication": {
"id": "proceedings/sbr-lars-r/2016/3656/0",
"title": "2016 XIII Latin-American Robotics Symposium and IV Brazilian Robotics Symposium (LARS/SBR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sbrlarsrobocontrol/2014/6711/0/07024256",
"title": "A Fast Visual Odometry and Mapping System for RGB-D Cameras",
"doi": null,
"abstractUrl": "/proceedings-article/sbrlarsrobocontrol/2014/07024256/12OmNylboJA",
"parentPublication": {
"id": "proceedings/sbrlarsrobocontrol/2014/6711/0",
"title": "2014 Joint Conference on Robotics: SBR-LARS Robotics Symposium and Robocontrol (SBR LARS Robocontrol)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2016/0641/0/07477636",
"title": "CoRBS: Comprehensive RGB-D benchmark for SLAM using Kinect v2",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2016/07477636/12OmNzsJ7Hx",
"parentPublication": {
"id": "proceedings/wacv/2016/0641/0",
"title": "2016 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2018/8425/0/842500a464",
"title": "Multi-view Inpainting for RGB-D Sequence",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2018/842500a464/17D45WgziNa",
"parentPublication": {
"id": "proceedings/3dv/2018/8425/0",
"title": "2018 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2018/7592/0/08699212",
"title": "3D PixMix: Image Inpainting in 3D Environments",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699212/19F1PUM1Yk0",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2018/7592/0",
"title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a308",
"title": "Online Adaptive Integration of Observation and Inpainting for Diminished Reality with Online Surface Reconstruction",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a308/1J7Wkijm8Yo",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hdis/2022/9144/0/09991394",
"title": "Pseudo Depth Maps for RGB-D SLAM",
"doi": null,
"abstractUrl": "/proceedings-article/hdis/2022/09991394/1JwQ1uhFF4s",
"parentPublication": {
"id": "proceedings/hdis/2022/9144/0",
"title": "2022 International Conference on High Performance Big Data and Intelligent Systems (HDIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2019/2506/0/250600b248",
"title": "RGB-D Indoor Mapping Using Deep Features",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2019/250600b248/1iTvoaCYwrS",
"parentPublication": {
"id": "proceedings/cvprw/2019/2506/0",
"title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2022/11/09521742",
"title": "Linear RGB-D SLAM for Structured Environments",
"doi": null,
"abstractUrl": "/journal/tp/2022/11/09521742/1wkrmZrcdcQ",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09123549",
"articleId": "1kTxv3ChLeE",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08695851",
"articleId": "19sOOqzUp7W",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1mNn3PlPhO8",
"name": "ttg202010-09184389s1-supp2-3003768.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg202010-09184389s1-supp2-3003768.mp4",
"extension": "mp4",
"size": "54 MB",
"__typename": "WebExtraType"
},
{
"id": "1mNn6F8lYXe",
"name": "ttg202010-09184389s1-supp1-3003768.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg202010-09184389s1-supp1-3003768.mp4",
"extension": "mp4",
"size": "69.2 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNBNM93d",
"title": "Nov.",
"year": "2015",
"issueNum": "11",
"idPrefix": "tg",
"pubType": "journal",
"volume": "21",
"label": "Nov.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUILtJzB",
"doi": "10.1109/TVCG.2015.2459892",
"abstract": "The fundamental issues in Augmented Reality (AR) are on how to naturally mediate the reality with virtual content as seen by users. In AR applications with Optical See-Through Head-Mounted Displays (OST-HMD), the issues often raise the problem of rendering color on the OST-HMD consistently to input colors. However, due to various display constraints and eye properties, it is still a challenging task to indistinguishably reproduce the colors on OST-HMDs. An approach to solve this problem is to pre-process the input color so that a user perceives the output color on the display to be the same as the input. We propose a color calibration method for OST-HMDs. We start from modeling the physical optics in the rendering and perception process between the HMD and the eye. We treat the color distortion as a semi-parametric model which separates the non-linear color distortion and the linear color shift. We demonstrate that calibrated images regain their original appearance on two OST-HMD setups with both synthetic and real datasets. Furthermore, we analyze the limitations of the proposed method and remaining problems of the color reproduction in OST-HMDs. We then discuss how to realize more practical color reproduction methods for future HMD-eye system.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The fundamental issues in Augmented Reality (AR) are on how to naturally mediate the reality with virtual content as seen by users. In AR applications with Optical See-Through Head-Mounted Displays (OST-HMD), the issues often raise the problem of rendering color on the OST-HMD consistently to input colors. However, due to various display constraints and eye properties, it is still a challenging task to indistinguishably reproduce the colors on OST-HMDs. An approach to solve this problem is to pre-process the input color so that a user perceives the output color on the display to be the same as the input. We propose a color calibration method for OST-HMDs. We start from modeling the physical optics in the rendering and perception process between the HMD and the eye. We treat the color distortion as a semi-parametric model which separates the non-linear color distortion and the linear color shift. We demonstrate that calibrated images regain their original appearance on two OST-HMD setups with both synthetic and real datasets. Furthermore, we analyze the limitations of the proposed method and remaining problems of the color reproduction in OST-HMDs. We then discuss how to realize more practical color reproduction methods for future HMD-eye system.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The fundamental issues in Augmented Reality (AR) are on how to naturally mediate the reality with virtual content as seen by users. In AR applications with Optical See-Through Head-Mounted Displays (OST-HMD), the issues often raise the problem of rendering color on the OST-HMD consistently to input colors. However, due to various display constraints and eye properties, it is still a challenging task to indistinguishably reproduce the colors on OST-HMDs. An approach to solve this problem is to pre-process the input color so that a user perceives the output color on the display to be the same as the input. We propose a color calibration method for OST-HMDs. We start from modeling the physical optics in the rendering and perception process between the HMD and the eye. We treat the color distortion as a semi-parametric model which separates the non-linear color distortion and the linear color shift. We demonstrate that calibrated images regain their original appearance on two OST-HMD setups with both synthetic and real datasets. Furthermore, we analyze the limitations of the proposed method and remaining problems of the color reproduction in OST-HMDs. We then discuss how to realize more practical color reproduction methods for future HMD-eye system.",
"title": "Semi-Parametric Color Reproduction Method for Optical See-Through Head-Mounted Displays",
"normalizedTitle": "Semi-Parametric Color Reproduction Method for Optical See-Through Head-Mounted Displays",
"fno": "07165643",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Augmented Reality",
"Calibration",
"Colour Displays",
"Helmet Mounted Displays",
"Optical Distortion",
"Physical Optics",
"Semiparametric Color Reproduction Method",
"Optical See Through Head Mounted Display",
"Augmented Reality",
"AR",
"OST HMD Eye System",
"Color Rendering",
"Color Calibration Method",
"Physical Optics",
"Nonlinear Color Distortion",
"Image Color Analysis",
"Cameras",
"Calibration",
"Rendering Computer Graphics",
"Optical Distortion",
"Nonlinear Distortion",
"OST HMD",
"Color Replication",
"Color Calibration",
"Optical See Through Display",
"OST HMD",
"Color Replication",
"Color Calibration",
"Optical See Through Display"
],
"authors": [
{
"givenName": "Yuta",
"surname": "Itoh",
"fullName": "Yuta Itoh",
"affiliation": "Department of Informatics at Technical University of Munich",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Maksym",
"surname": "Dzitsiuk",
"fullName": "Maksym Dzitsiuk",
"affiliation": "Department of Informatics at Technical University of Munich",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Toshiyuki",
"surname": "Amano",
"fullName": "Toshiyuki Amano",
"affiliation": "Faculty of Systems Engineering at Wakayama University",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Gudrun",
"surname": "Klinker",
"fullName": "Gudrun Klinker",
"affiliation": "Department of Informatics at Technical University of Munich",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "11",
"pubDate": "2015-11-01 00:00:00",
"pubType": "trans",
"pages": "1269-1278",
"year": "2015",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/ismar/2015/7660/0/7660a043",
"title": "Simultaneous Direct and Augmented View Distortion Calibration of Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2015/7660a043/12OmNC1oT64",
"parentPublication": {
"id": "proceedings/ismar/2015/7660/0",
"title": "2015 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2014/6184/0/06948481",
"title": "[DEMO] INDICA : Interaction-free display calibration for optical see-through head-mounted displays based on 3D eye localization",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2014/06948481/12OmNy1SFEx",
"parentPublication": {
"id": "proceedings/ismar/2014/6184/0",
"title": "2014 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/09/08052554",
"title": "A Survey of Calibration Methods for Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/journal/tg/2018/09/08052554/13rRUILtJqY",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2015/04/07064856",
"title": "Light-Field Correction for Spatial Calibration of Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/journal/tg/2015/04/07064856/13rRUwjGoG5",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/11/07523375",
"title": "Gaussian Light Field: Estimation of Viewpoint-Dependent Blur for Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/journal/tg/2016/11/07523375/13rRUxYINfi",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/11/08456571",
"title": "Restoring the Awareness in the Occluded Visual Field for Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/journal/tg/2018/11/08456571/14M3DYLGFgs",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/05/08676155",
"title": "Varifocal Occlusion for Optical See-Through Head-Mounted Displays using a Slide Occlusion Mask",
"doi": null,
"abstractUrl": "/journal/tg/2019/05/08676155/18LFfGhc49i",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a389",
"title": "Objective Measurements of Background Color Shifts Caused by Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a389/1J7WuL68jAY",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2019/4765/0/476500a259",
"title": "OSTNet: Calibration Method for Optical See-Through Head-Mounted Displays via Non-Parametric Distortion Map Generation",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2019/476500a259/1gysj1o4L16",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2019/4765/0",
"title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/12/09463728",
"title": "Color Contrast Enhanced Rendering for Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/journal/tg/2022/12/09463728/1uFxo1ImlpK",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "07164337",
"articleId": "13rRUytWF9n",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07165660",
"articleId": "13rRUIIVlcN",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
|
{
"issue": {
"id": "1LUpyYLBfeo",
"title": "May",
"year": "2023",
"issueNum": "05",
"idPrefix": "tg",
"pubType": "journal",
"volume": "29",
"label": "May",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1L039oS5wDm",
"doi": "10.1109/TVCG.2023.3247064",
"abstract": "The occlusion-capable optical see-through head-mounted display (OC-OSTHMD) is actively developed in recent years since it allows mutual occlusion between virtual objects and the physical world to be correctly presented in augmented reality (AR). However, implementing occlusion with the special type of OSTHMDs prevents the appealing feature from the wide application. In this paper, a novel approach for realizing mutual occlusion for common OSTHMDs is proposed. A wearable device with per-pixel occlusion capability is designed. OSTHMD devices are upgraded to be occlusion-capable by attaching the device before optical combiners. A prototype with HoloLens 1 is built. The virtual display with mutual occlusion is demonstrated in real-time. A color correction algorithm is proposed to mitigate the color aberration caused by the occlusion device. Potential applications, including the texture replacement of real objects and the more realistic semi-transparent objects display, are demonstrated. The proposed system is expected to realize a universal implementation of mutual occlusion in AR.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The occlusion-capable optical see-through head-mounted display (OC-OSTHMD) is actively developed in recent years since it allows mutual occlusion between virtual objects and the physical world to be correctly presented in augmented reality (AR). However, implementing occlusion with the special type of OSTHMDs prevents the appealing feature from the wide application. In this paper, a novel approach for realizing mutual occlusion for common OSTHMDs is proposed. A wearable device with per-pixel occlusion capability is designed. OSTHMD devices are upgraded to be occlusion-capable by attaching the device before optical combiners. A prototype with HoloLens 1 is built. The virtual display with mutual occlusion is demonstrated in real-time. A color correction algorithm is proposed to mitigate the color aberration caused by the occlusion device. Potential applications, including the texture replacement of real objects and the more realistic semi-transparent objects display, are demonstrated. The proposed system is expected to realize a universal implementation of mutual occlusion in AR.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The occlusion-capable optical see-through head-mounted display (OC-OSTHMD) is actively developed in recent years since it allows mutual occlusion between virtual objects and the physical world to be correctly presented in augmented reality (AR). However, implementing occlusion with the special type of OSTHMDs prevents the appealing feature from the wide application. In this paper, a novel approach for realizing mutual occlusion for common OSTHMDs is proposed. A wearable device with per-pixel occlusion capability is designed. OSTHMD devices are upgraded to be occlusion-capable by attaching the device before optical combiners. A prototype with HoloLens 1 is built. The virtual display with mutual occlusion is demonstrated in real-time. A color correction algorithm is proposed to mitigate the color aberration caused by the occlusion device. Potential applications, including the texture replacement of real objects and the more realistic semi-transparent objects display, are demonstrated. The proposed system is expected to realize a universal implementation of mutual occlusion in AR.",
"title": "Add-on Occlusion: Turning Off-the-Shelf Optical See-through Head-mounted Displays Occlusion-capable",
"normalizedTitle": "Add-on Occlusion: Turning Off-the-Shelf Optical See-through Head-mounted Displays Occlusion-capable",
"fno": "10050791",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Aberrations",
"Augmented Reality",
"Helmet Mounted Displays",
"Image Colour Analysis",
"Image Resolution",
"AR",
"Color Aberration",
"Color Correction Algorithm",
"Mutual Occlusion",
"OC OSTHMD",
"Occlusion Capable Optical See Through Head Mounted Display",
"Optical Combiners",
"Per Pixel Occlusion Capability",
"Realistic Semitransparent Objects Display",
"Virtual Display",
"Optical Imaging",
"Lenses",
"Adaptive Optics",
"Mirrors",
"Image Color Analysis",
"Optical Polarization",
"Holography",
"Augmented Reality",
"Near To Eye Displays",
"Occlusion Displays",
"Head Mounted Displays",
"Diminished Reality",
"Color Blending"
],
"authors": [
{
"givenName": "Yan",
"surname": "Zhang",
"fullName": "Yan Zhang",
"affiliation": "Shanghai Jiao Tong University, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Xiaodan",
"surname": "Hu",
"fullName": "Xiaodan Hu",
"affiliation": "Nara Institute of Science and Technology, Japan",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Kiyoshi",
"surname": "Kiyokawa",
"fullName": "Kiyoshi Kiyokawa",
"affiliation": "Nara Institute of Science and Technology, Japan",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Xubo",
"surname": "Yang",
"fullName": "Xubo Yang",
"affiliation": "Nara Institute of Science and Technology, Japan",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "05",
"pubDate": "2023-05-01 00:00:00",
"pubType": "trans",
"pages": "2700-2709",
"year": "2023",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/ismar/2012/4660/0/06402574",
"title": "Occlusion capable optical see-through head-mounted display using freeform optics",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2012/06402574/12OmNBEpnEt",
"parentPublication": {
"id": "proceedings/ismar/2012/4660/0",
"title": "2012 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2003/2006/0/20060133",
"title": "An Occlusion-Capable Optical See-through Head Mount Display for Supporting Co-located Collaboration",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2003/20060133/12OmNyfdONw",
"parentPublication": {
"id": "proceedings/ismar/2003/2006/0",
"title": "The Second IEEE and ACM International Symposium on Mixed and Augmented Reality, 2003. Proceedings.",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/05/08676155",
"title": "Varifocal Occlusion for Optical See-Through Head-Mounted Displays using a Slide Occlusion Mask",
"doi": null,
"abstractUrl": "/journal/tg/2019/05/08676155/18LFfGhc49i",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a800",
"title": "Add-on Occlusion: An External Module for Optical See-through Augmented Reality Displays to Support Mutual Occlusion",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a800/1CJeADcapNK",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2023/4815/0/481500a237",
"title": "A Compact Photochromic Occlusion Capable See-through Display with Holographic Lenses",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2023/481500a237/1MNgTZ7ZNLO",
"parentPublication": {
"id": "proceedings/vr/2023/4815/0",
"title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/11/08827571",
"title": "Varifocal Occlusion-Capable Optical See-through Augmented Reality Display based on Focus-tunable Optics",
"doi": null,
"abstractUrl": "/journal/tg/2019/11/08827571/1dgvaPxmhbi",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/05/08998139",
"title": "Factored Occlusion: Single Spatial Light Modulator Occlusion-capable Optical See-through Augmented Reality Display",
"doi": null,
"abstractUrl": "/journal/tg/2020/05/08998139/1hrXe0Hbv0I",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2020/8508/0/850800a301",
"title": "Super Wide-view Optical See-through Head Mounted Displays with Per-pixel Occlusion Capability",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2020/850800a301/1pysxIK95Yc",
"parentPublication": {
"id": "proceedings/ismar/2020/8508/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/12/09416829",
"title": "Design of a Pupil-Matched Occlusion-Capable Optical See-Through Wearable Display",
"doi": null,
"abstractUrl": "/journal/tg/2022/12/09416829/1t8VUXSYL2E",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2021/0158/0/015800a422",
"title": "Blending Shadows: Casting Shadows in Virtual and Real using Occlusion-Capable Augmented Reality Near-Eye Displays",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2021/015800a422/1yeD2Kh0vxS",
"parentPublication": {
"id": "proceedings/ismar/2021/0158/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "10049727",
"articleId": "1KYotDyxwkM",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "10049687",
"articleId": "1KYovfHOQG4",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
|
{
"issue": {
"id": "1HMOit1lSk8",
"title": "Dec.",
"year": "2022",
"issueNum": "12",
"idPrefix": "tg",
"pubType": "journal",
"volume": "28",
"label": "Dec.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1txPs5wi56E",
"doi": "10.1109/TVCG.2021.3079849",
"abstract": "Estimating the depth of virtual content has proven to be a challenging task in Augmented Reality (AR) applications. Existing studies have shown that the visual system makes use of multiple depth cues to infer the distance of objects, occlusion being one of the most important ones. The ability to generate appropriate occlusions becomes particularly important for AR applications that require the visualization of augmented objects placed below a real surface. Examples of these applications are medical scenarios in which the visualization of anatomical information needs to be observed within the patient's body. In this regard, existing works have proposed several focus and context (<italic>F+C</italic>) approaches to aid users in visualizing this content using Video See-Through (VST) Head-Mounted Displays (HMDs). However, the implementation of these approaches in Optical See-Through (OST) HMDs remains an open question due to the additive characteristics of the display technology. In this article, we, for the first time, design and conduct a user study that compares depth estimation between VST and OST HMDs using existing in-situ visualization methods. Our results show that these visualizations cannot be directly transferred to OST displays without increasing error in depth perception tasks. To tackle this gap, we perform a structured decomposition of the visual properties of AR F+C methods to find best-performing combinations. We propose the use of <italic>chromatic shadows</italic> and <italic>hatching</italic> approaches transferred from computer graphics. In a second study, we perform a factorized analysis of these combinations, showing that varying the shading type and using colored shadows can lead to better depth estimation when using OST HMDs.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Estimating the depth of virtual content has proven to be a challenging task in Augmented Reality (AR) applications. Existing studies have shown that the visual system makes use of multiple depth cues to infer the distance of objects, occlusion being one of the most important ones. The ability to generate appropriate occlusions becomes particularly important for AR applications that require the visualization of augmented objects placed below a real surface. Examples of these applications are medical scenarios in which the visualization of anatomical information needs to be observed within the patient's body. In this regard, existing works have proposed several focus and context (<italic>F+C</italic>) approaches to aid users in visualizing this content using Video See-Through (VST) Head-Mounted Displays (HMDs). However, the implementation of these approaches in Optical See-Through (OST) HMDs remains an open question due to the additive characteristics of the display technology. In this article, we, for the first time, design and conduct a user study that compares depth estimation between VST and OST HMDs using existing in-situ visualization methods. Our results show that these visualizations cannot be directly transferred to OST displays without increasing error in depth perception tasks. To tackle this gap, we perform a structured decomposition of the visual properties of AR F+C methods to find best-performing combinations. We propose the use of <italic>chromatic shadows</italic> and <italic>hatching</italic> approaches transferred from computer graphics. In a second study, we perform a factorized analysis of these combinations, showing that varying the shading type and using colored shadows can lead to better depth estimation when using OST HMDs.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Estimating the depth of virtual content has proven to be a challenging task in Augmented Reality (AR) applications. Existing studies have shown that the visual system makes use of multiple depth cues to infer the distance of objects, occlusion being one of the most important ones. The ability to generate appropriate occlusions becomes particularly important for AR applications that require the visualization of augmented objects placed below a real surface. Examples of these applications are medical scenarios in which the visualization of anatomical information needs to be observed within the patient's body. In this regard, existing works have proposed several focus and context (F+C) approaches to aid users in visualizing this content using Video See-Through (VST) Head-Mounted Displays (HMDs). However, the implementation of these approaches in Optical See-Through (OST) HMDs remains an open question due to the additive characteristics of the display technology. In this article, we, for the first time, design and conduct a user study that compares depth estimation between VST and OST HMDs using existing in-situ visualization methods. Our results show that these visualizations cannot be directly transferred to OST displays without increasing error in depth perception tasks. To tackle this gap, we perform a structured decomposition of the visual properties of AR F+C methods to find best-performing combinations. We propose the use of chromatic shadows and hatching approaches transferred from computer graphics. In a second study, we perform a factorized analysis of these combinations, showing that varying the shading type and using colored shadows can lead to better depth estimation when using OST HMDs.",
"title": "The Impact of Focus and Context Visualization Techniques on Depth Perception in Optical See-Through Head-Mounted Displays",
"normalizedTitle": "The Impact of Focus and Context Visualization Techniques on Depth Perception in Optical See-Through Head-Mounted Displays",
"fno": "09429918",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Augmented Reality",
"Data Visualisation",
"Helmet Mounted Displays",
"Medical Image Processing",
"Object Tracking",
"Three Dimensional Displays",
"Visual Perception",
"Anatomical Information",
"Appropriate Occlusions",
"AR Applications",
"Augmented Objects",
"Augmented Reality Applications",
"Chromatic Shadows",
"Context Visualization Techniques",
"Depth Estimation",
"Depth Perception Tasks",
"Display Technology",
"Existing In Situ Visualization Methods",
"Hatching Approaches",
"Important Ones",
"Medical Scenarios",
"Multiple Depth",
"Occlusion",
"Optical See Through HM Ds",
"OST Displays",
"OST HM Ds",
"Patient",
"Video See Through Head Mounted Displays",
"Virtual Content",
"Visual Properties",
"Visual System",
"VST",
"Visualization",
"Estimation",
"User Interfaces",
"Image Color Analysis",
"Augmented Reality",
"Rendering Computer Graphics",
"Head Mounted Displays",
"Human Computer Interaction",
"Augmented Reality",
"Perception",
"Depth Estimation",
"Visualization Techniques",
"Human Computer Interaction",
"Design And Evaluation Methods",
"User Studies"
],
"authors": [
{
"givenName": "Alejandro",
"surname": "Martin-Gomez",
"fullName": "Alejandro Martin-Gomez",
"affiliation": "Department of Informatics, Chair for Computer Aided Medical Procedures and Augmented Reality, Technical University of Munich, Munich, Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jakob",
"surname": "Weiss",
"fullName": "Jakob Weiss",
"affiliation": "Department of Informatics, Chair for Computer Aided Medical Procedures and Augmented Reality, Technical University of Munich, Munich, Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Andreas",
"surname": "Keller",
"fullName": "Andreas Keller",
"affiliation": "Department of Informatics, Chair for Computer Aided Medical Procedures and Augmented Reality, Technical University of Munich, Munich, Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Ulrich",
"surname": "Eck",
"fullName": "Ulrich Eck",
"affiliation": "Department of Informatics, Chair for Computer Aided Medical Procedures and Augmented Reality, Technical University of Munich, Munich, Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Daniel",
"surname": "Roth",
"fullName": "Daniel Roth",
"affiliation": "FAU Erlangen-Nürnberg, Erlangen, Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Nassir",
"surname": "Navab",
"fullName": "Nassir Navab",
"affiliation": "Department of Informatics, Chair for Computer Aided Medical Procedures and Augmented Reality, Technical University of Munich, Munich, Germany",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": true,
"isOpenAccess": true,
"issueNum": "12",
"pubDate": "2022-12-01 00:00:00",
"pubType": "trans",
"pages": "4156-4171",
"year": "2022",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/ismar-adjunct/2017/6327/0/6327a202",
"title": "[POSTER] BrightView: Increasing Perceived Brightness in Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2017/6327a202/12OmNqI04YU",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2017/6327/0",
"title": "2017 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446429",
"title": "Impact of Alignment Point Distance Distribution on SPAAM Calibration of Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446429/13bd1gCd7Sz",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2015/11/07165643",
"title": "Semi-Parametric Color Reproduction Method for Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/journal/tg/2015/11/07165643/13rRUILtJzB",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/11/08456571",
"title": "Restoring the Awareness in the Occluded Visual Field for Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/journal/tg/2018/11/08456571/14M3DYLGFgs",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/05/08676155",
"title": "Varifocal Occlusion for Optical See-Through Head-Mounted Displays using a Slide Occlusion Mask",
"doi": null,
"abstractUrl": "/journal/tg/2019/05/08676155/18LFfGhc49i",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a640",
"title": "Towards Eye-Perspective Rendering for Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a640/1CJewzlI3CM",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a389",
"title": "Objective Measurements of Background Color Shifts Caused by Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a389/1J7WuL68jAY",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2022/5325/0/532500a112",
"title": "Stereoscopic Video See-Through Head-Mounted Displays for Laser Safety: An Empirical Evaluation at Advanced Optics Laboratories",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2022/532500a112/1JrRo67MnwQ",
"parentPublication": {
"id": "proceedings/ismar/2022/5325/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090625",
"title": "Automatic Calibration of Commercial Optical See-Through Head-Mounted Displays for Medical Applications",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090625/1jIxwp2g0VO",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/12/09463728",
"title": "Color Contrast Enhanced Rendering for Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/journal/tg/2022/12/09463728/1uFxo1ImlpK",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09420254",
"articleId": "1tdUMGe1DAk",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09439062",
"articleId": "1tMLvTrTWQE",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
|
{
"issue": {
"id": "1HMOit1lSk8",
"title": "Dec.",
"year": "2022",
"issueNum": "12",
"idPrefix": "tg",
"pubType": "journal",
"volume": "28",
"label": "Dec.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1uFxo1ImlpK",
"doi": "10.1109/TVCG.2021.3091686",
"abstract": "Most commercially available optical see-through head-mounted displays (OST-HMDs) utilize optical combiners to simultaneously visualize the physical background and virtual objects. The displayed images perceived by users are a blend of rendered pixels and background colors. Enabling high fidelity color perception in mixed reality (MR) scenarios using OST-HMDs is an important but challenging task. We propose a real-time rendering scheme to enhance the color contrast between virtual objects and the surrounding background for OST-HMDs. Inspired by the discovery of color perception in psychophysics, we first formulate the color contrast enhancement as a constrained optimization problem. We then design an end-to-end algorithm to search the optimal complementary shift in both chromaticity and luminance of the displayed color. This aims at enhancing the contrast between virtual objects and the real background as well as keeping the consistency with the original displayed color. We assess the performance of our approach using a simulated OST-HMD environment and an off-the-shelf OST-HMD. Experimental results from objective evaluations and subjective user studies demonstrate that the proposed approach makes rendered virtual objects more distinguishable from the surrounding background, thereby bringing a better visual experience.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Most commercially available optical see-through head-mounted displays (OST-HMDs) utilize optical combiners to simultaneously visualize the physical background and virtual objects. The displayed images perceived by users are a blend of rendered pixels and background colors. Enabling high fidelity color perception in mixed reality (MR) scenarios using OST-HMDs is an important but challenging task. We propose a real-time rendering scheme to enhance the color contrast between virtual objects and the surrounding background for OST-HMDs. Inspired by the discovery of color perception in psychophysics, we first formulate the color contrast enhancement as a constrained optimization problem. We then design an end-to-end algorithm to search the optimal complementary shift in both chromaticity and luminance of the displayed color. This aims at enhancing the contrast between virtual objects and the real background as well as keeping the consistency with the original displayed color. We assess the performance of our approach using a simulated OST-HMD environment and an off-the-shelf OST-HMD. Experimental results from objective evaluations and subjective user studies demonstrate that the proposed approach makes rendered virtual objects more distinguishable from the surrounding background, thereby bringing a better visual experience.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Most commercially available optical see-through head-mounted displays (OST-HMDs) utilize optical combiners to simultaneously visualize the physical background and virtual objects. The displayed images perceived by users are a blend of rendered pixels and background colors. Enabling high fidelity color perception in mixed reality (MR) scenarios using OST-HMDs is an important but challenging task. We propose a real-time rendering scheme to enhance the color contrast between virtual objects and the surrounding background for OST-HMDs. Inspired by the discovery of color perception in psychophysics, we first formulate the color contrast enhancement as a constrained optimization problem. We then design an end-to-end algorithm to search the optimal complementary shift in both chromaticity and luminance of the displayed color. This aims at enhancing the contrast between virtual objects and the real background as well as keeping the consistency with the original displayed color. We assess the performance of our approach using a simulated OST-HMD environment and an off-the-shelf OST-HMD. Experimental results from objective evaluations and subjective user studies demonstrate that the proposed approach makes rendered virtual objects more distinguishable from the surrounding background, thereby bringing a better visual experience.",
"title": "Color Contrast Enhanced Rendering for Optical See-Through Head-Mounted Displays",
"normalizedTitle": "Color Contrast Enhanced Rendering for Optical See-Through Head-Mounted Displays",
"fno": "09463728",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Augmented Reality",
"Helmet Mounted Displays",
"Image Colour Analysis",
"Image Enhancement",
"Rendering Computer Graphics",
"Virtual Reality",
"Background Colors",
"Color Contrast Enhanced Rendering",
"Color Contrast Enhancement",
"Constrained Optimization Problem",
"Displayed Images",
"End To End Algorithm",
"Head Mounted Displays",
"High Fidelity Color Perception",
"Important But Challenging Task",
"Mixed Reality Scenarios",
"Objective Evaluations",
"Off The Shelf OST HMD",
"Optical Combiners",
"Optimal Complementary Shift",
"Original Displayed Color",
"OST HM Ds",
"Physical Background",
"Real Time Rendering Scheme",
"Rendered Pixels",
"Simulated OST HMD Environment",
"Surrounding Background",
"Virtual Objects",
"Image Color Analysis",
"Augmented Reality",
"Optical Imaging",
"Rendering Computer Graphics",
"Visualization",
"Brightness",
"Color Blending",
"Color Perception",
"Human Visual System",
"Mixed Reality",
"Real Time Rendering",
"Post Processing Effect"
],
"authors": [
{
"givenName": "Yunjin",
"surname": "Zhang",
"fullName": "Yunjin Zhang",
"affiliation": "State Key Laboratory of CAD&CG, Zhejiang University, Hangzhou, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Rui",
"surname": "Wang",
"fullName": "Rui Wang",
"affiliation": "State Key Laboratory of CAD&CG, Zhejiang University, Hangzhou, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Yifan",
"surname": "Peng",
"fullName": "Yifan Peng",
"affiliation": "Electrical Engineering, Stanford University, Stanford, CA, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Wei",
"surname": "Hua",
"fullName": "Wei Hua",
"affiliation": "State Key Laboratory of CAD&CG, Zhejiang University, Hangzhou, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Hujun",
"surname": "Bao",
"fullName": "Hujun Bao",
"affiliation": "State Key Laboratory of CAD&CG, Zhejiang University, Hangzhou, China",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "12",
"pubDate": "2022-12-01 00:00:00",
"pubType": "trans",
"pages": "4490-4502",
"year": "2022",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/ismar-adjunct/2017/6327/0/6327a202",
"title": "[POSTER] BrightView: Increasing Perceived Brightness in Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2017/6327a202/12OmNqI04YU",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2017/6327/0",
"title": "2017 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2017/6327/0/6327a130",
"title": "[POSTER] Two-Step Gamut Mapping for Optical See-Through Displays",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2017/6327a130/12OmNvA1hoC",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2017/6327/0",
"title": "2017 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2014/6184/0/06948426",
"title": "SmartColor: Real-time color correction and contrast for optical see-through head-mounted displays",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2014/06948426/12OmNzaQoFo",
"parentPublication": {
"id": "proceedings/ismar/2014/6184/0",
"title": "2014 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446441",
"title": "BrightView: Increasing Perceived Brightness of Optical See-Through Head-Mounted Displays Through Unnoticeable Incident Light Reduction",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446441/13bd1sv5NxY",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/09/08052554",
"title": "A Survey of Calibration Methods for Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/journal/tg/2018/09/08052554/13rRUILtJqY",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2015/11/07165643",
"title": "Semi-Parametric Color Reproduction Method for Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/journal/tg/2015/11/07165643/13rRUILtJzB",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a640",
"title": "Towards Eye-Perspective Rendering for Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a640/1CJewzlI3CM",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a389",
"title": "Objective Measurements of Background Color Shifts Caused by Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a389/1J7WuL68jAY",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2020/5608/0/09089640",
"title": "Effects of Dark Mode Graphics on Visual Acuity and Fatigue with Virtual Reality Head-Mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2020/09089640/1jIxgdFEoqA",
"parentPublication": {
"id": "proceedings/vr/2020/5608/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/12/09429918",
"title": "The Impact of Focus and Context Visualization Techniques on Depth Perception in Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/journal/tg/2022/12/09429918/1txPs5wi56E",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09462502",
"articleId": "1uDSzIxXFwA",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09465688",
"articleId": "1uIReC9hVQY",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1HMOLInbXjy",
"name": "ttg202212-09463728s1-supp1-3091686.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg202212-09463728s1-supp1-3091686.mp4",
"extension": "mp4",
"size": "50.4 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNqGA5hn",
"title": "Dec.",
"year": "2015",
"issueNum": "12",
"idPrefix": "tg",
"pubType": "journal",
"volume": "21",
"label": "Dec.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUNvgz4i",
"doi": "10.1109/TVCG.2015.2450717",
"abstract": "At present, photorealistic augmentation is not yet possible since the computational power of mobile devices is insufficient. Even streaming solutions from stationary PCs cause a latency that affects user interactions considerably. Therefore, we introduce a differential rendering method that allows for a consistent illumination of the inserted virtual objects on mobile devices, avoiding delays. The computation effort is shared between a stationary PC and the mobile devices to make use of the capacities available on both sides. The method is designed such that only a minimum amount of data has to be transferred asynchronously between the participants. This allows for an interactive illumination of virtual objects with a consistent appearance under both temporally and spatially varying real illumination conditions. To describe the complex near-field illumination in an indoor scenario, HDR video cameras are used to capture the illumination from multiple directions. In this way, sources of illumination can be considered that are not directly visible to the mobile device because of occlusions and the limited field of view. While our method focuses on Lambertian materials, we also provide some initial approaches to approximate non-diffuse virtual objects and thereby allow for a wider field of application at nearly the same cost.",
"abstracts": [
{
"abstractType": "Regular",
"content": "At present, photorealistic augmentation is not yet possible since the computational power of mobile devices is insufficient. Even streaming solutions from stationary PCs cause a latency that affects user interactions considerably. Therefore, we introduce a differential rendering method that allows for a consistent illumination of the inserted virtual objects on mobile devices, avoiding delays. The computation effort is shared between a stationary PC and the mobile devices to make use of the capacities available on both sides. The method is designed such that only a minimum amount of data has to be transferred asynchronously between the participants. This allows for an interactive illumination of virtual objects with a consistent appearance under both temporally and spatially varying real illumination conditions. To describe the complex near-field illumination in an indoor scenario, HDR video cameras are used to capture the illumination from multiple directions. In this way, sources of illumination can be considered that are not directly visible to the mobile device because of occlusions and the limited field of view. While our method focuses on Lambertian materials, we also provide some initial approaches to approximate non-diffuse virtual objects and thereby allow for a wider field of application at nearly the same cost.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "At present, photorealistic augmentation is not yet possible since the computational power of mobile devices is insufficient. Even streaming solutions from stationary PCs cause a latency that affects user interactions considerably. Therefore, we introduce a differential rendering method that allows for a consistent illumination of the inserted virtual objects on mobile devices, avoiding delays. The computation effort is shared between a stationary PC and the mobile devices to make use of the capacities available on both sides. The method is designed such that only a minimum amount of data has to be transferred asynchronously between the participants. This allows for an interactive illumination of virtual objects with a consistent appearance under both temporally and spatially varying real illumination conditions. To describe the complex near-field illumination in an indoor scenario, HDR video cameras are used to capture the illumination from multiple directions. In this way, sources of illumination can be considered that are not directly visible to the mobile device because of occlusions and the limited field of view. While our method focuses on Lambertian materials, we also provide some initial approaches to approximate non-diffuse virtual objects and thereby allow for a wider field of application at nearly the same cost.",
"title": "Interactive Near-Field Illumination for Photorealistic Augmented Reality with Varying Materials on Mobile Devices",
"normalizedTitle": "Interactive Near-Field Illumination for Photorealistic Augmented Reality with Varying Materials on Mobile Devices",
"fno": "07138641",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Lighting",
"Cameras",
"Mobile Handsets",
"Rendering Computer Graphics",
"Image Reconstruction",
"Light Sources",
"Geometry",
"Augmented And Virtual Realities",
"Computer Graphics",
"Three Dimensional Graphics And Realism",
"Augmented And Virtual Realities",
"Computer Graphics",
"Three Dimensional Graphics And Realism"
],
"authors": [
{
"givenName": "Kai",
"surname": "Rohmer",
"fullName": "Kai Rohmer",
"affiliation": "Computational Visualistics Group, University of Magdeburg, Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Wolfgang",
"surname": "Büschel",
"fullName": "Wolfgang Büschel",
"affiliation": "Interactive Media Lab, Technische Universität Dresden, Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Raimund",
"surname": "Dachselt",
"fullName": "Raimund Dachselt",
"affiliation": "Interactive Media Lab, Technische Universität Dresden, Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Thorsten",
"surname": "Grosch",
"fullName": "Thorsten Grosch",
"affiliation": "Computational Visualistics Group, University of Magdeburg, Germany",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "12",
"pubDate": "2015-12-01 00:00:00",
"pubType": "trans",
"pages": "1349-1362",
"year": "2015",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/ismar/2014/6184/0/06948406",
"title": "Interactive near-field illumination for photorealistic augmented reality on mobile devices",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2014/06948406/12OmNAGNCfe",
"parentPublication": {
"id": "proceedings/ismar/2014/6184/0",
"title": "2014 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/worv/2013/5646/0/06521924",
"title": "Color-based detection robust to varying illumination spectrum",
"doi": null,
"abstractUrl": "/proceedings-article/worv/2013/06521924/12OmNqJq4EZ",
"parentPublication": {
"id": "proceedings/worv/2013/5646/0",
"title": "2013 IEEE Workshop on Robot Vision (WORV 2013)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cw/2017/2089/0/2089a056",
"title": "Voxel-Based Interactive Rendering of Translucent Materials under Area Lights Using Sparse Samples",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2017/2089a056/12OmNvDqsQf",
"parentPublication": {
"id": "proceedings/cw/2017/2089/0",
"title": "2017 International Conference on Cyberworlds (CW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icccnt/2013/3926/0/06726809",
"title": "A review on illumination techniques in augmented reality",
"doi": null,
"abstractUrl": "/proceedings-article/icccnt/2013/06726809/12OmNwMFMfk",
"parentPublication": {
"id": "proceedings/icccnt/2013/3926/0",
"title": "2013 Fourth International Conference on Computing, Communications and Networking Technologies (ICCCNT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icig/2013/5050/0/5050a913",
"title": "Cartoon Rendering Illumination Model Based on Phong",
"doi": null,
"abstractUrl": "/proceedings-article/icig/2013/5050a913/12OmNwoPtun",
"parentPublication": {
"id": "proceedings/icig/2013/5050/0",
"title": "2013 Seventh International Conference on Image and Graphics (ICIG)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/05/ttg2013050749",
"title": "Interactive Rendering of Acquired Materials on Dynamic Geometry Using Frequency Analysis",
"doi": null,
"abstractUrl": "/journal/tg/2013/05/ttg2013050749/13rRUyp7tWW",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ithings-greencom-cpscom-smartdata/2017/3066/0/08276761",
"title": "Global Illumination of Dynamic 3D Scene Based on Light Transport Path Reusing",
"doi": null,
"abstractUrl": "/proceedings-article/ithings-greencom-cpscom-smartdata/2017/08276761/17D45WYQJam",
"parentPublication": {
"id": "proceedings/ithings-greencom-cpscom-smartdata/2017/3066/0",
"title": "2017 IEEE International Conference on Internet of Things (iThings) and IEEE Green Computing and Communications (GreenCom) and IEEE Cyber, Physical and Social Computing (CPSCom) and IEEE Smart Data (SmartData)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09904431",
"title": "Neural Global Illumination: Interactive Indirect Illumination Prediction under Dynamic Area Lights",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09904431/1H0GdxnVnws",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2019/4765/0/476500a189",
"title": "Deep Consistent Illumination in Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2019/476500a189/1gyslmCJMjK",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2019/4765/0",
"title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800i077",
"title": "Lighthouse: Predicting Lighting Volumes for Spatially-Coherent Illumination",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800i077/1m3omNjwpW0",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "07138644",
"articleId": "13rRUwfZC0k",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07150416",
"articleId": "13rRUILLkDS",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "17ShDTXFgOK",
"name": "ttg201512-07138641s1.zip",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg201512-07138641s1.zip",
"extension": "zip",
"size": "15.7 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNAWYKCi",
"title": "May",
"year": "2018",
"issueNum": "05",
"idPrefix": "tg",
"pubType": "journal",
"volume": "24",
"label": "May",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUwdIOUT",
"doi": "10.1109/TVCG.2017.2677445",
"abstract": "Specularities are often problematic in computer vision since they impact the dynamic range of the image intensity. A natural approach would be to predict and discard them using computer graphics models. However, these models depend on parameters which are difficult to estimate (light sources, objects’ material properties and camera). We present a geometric model called JOLIMAS: JOint LIght-MAterial Specularity, which predicts the shape of specularities. JOLIMAS is reconstructed from images of specularities observed on a planar surface. It implicitly includes light and material properties, which are intrinsic to specularities. This model was motivated by the observation that specularities have a conic shape on planar surfaces. The conic shape is obtained by projecting a fixed quadric on the planar surface. JOLIMAS thus predicts the specularity using a simple geometric approach with static parameters (object material and light source shape). It is adapted to indoor light sources such as light bulbs and fluorescent lamps. The prediction has been tested on synthetic and real sequences. It works in a multi-light context by reconstructing a quadric for each light source with special cases such as lights being switched on or off. We also used specularity prediction for dynamic retexturing and obtained convincing rendering results. Further results are presented as supplementary video material, which can be found on the Computer Society Digital Library at http://doi.ieeecomputersociety.org/10.1109/TVCG.2017.2677445.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Specularities are often problematic in computer vision since they impact the dynamic range of the image intensity. A natural approach would be to predict and discard them using computer graphics models. However, these models depend on parameters which are difficult to estimate (light sources, objects’ material properties and camera). We present a geometric model called JOLIMAS: JOint LIght-MAterial Specularity, which predicts the shape of specularities. JOLIMAS is reconstructed from images of specularities observed on a planar surface. It implicitly includes light and material properties, which are intrinsic to specularities. This model was motivated by the observation that specularities have a conic shape on planar surfaces. The conic shape is obtained by projecting a fixed quadric on the planar surface. JOLIMAS thus predicts the specularity using a simple geometric approach with static parameters (object material and light source shape). It is adapted to indoor light sources such as light bulbs and fluorescent lamps. The prediction has been tested on synthetic and real sequences. It works in a multi-light context by reconstructing a quadric for each light source with special cases such as lights being switched on or off. We also used specularity prediction for dynamic retexturing and obtained convincing rendering results. Further results are presented as supplementary video material, which can be found on the Computer Society Digital Library at http://doi.ieeecomputersociety.org/10.1109/TVCG.2017.2677445.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Specularities are often problematic in computer vision since they impact the dynamic range of the image intensity. A natural approach would be to predict and discard them using computer graphics models. However, these models depend on parameters which are difficult to estimate (light sources, objects’ material properties and camera). We present a geometric model called JOLIMAS: JOint LIght-MAterial Specularity, which predicts the shape of specularities. JOLIMAS is reconstructed from images of specularities observed on a planar surface. It implicitly includes light and material properties, which are intrinsic to specularities. This model was motivated by the observation that specularities have a conic shape on planar surfaces. The conic shape is obtained by projecting a fixed quadric on the planar surface. JOLIMAS thus predicts the specularity using a simple geometric approach with static parameters (object material and light source shape). It is adapted to indoor light sources such as light bulbs and fluorescent lamps. The prediction has been tested on synthetic and real sequences. It works in a multi-light context by reconstructing a quadric for each light source with special cases such as lights being switched on or off. We also used specularity prediction for dynamic retexturing and obtained convincing rendering results. Further results are presented as supplementary video material, which can be found on the Computer Society Digital Library at http://doi.ieeecomputersociety.org/10.1109/TVCG.2017.2677445.",
"title": "A Geometric Model for Specularity Prediction on Planar Surfaces with Multiple Light Sources",
"normalizedTitle": "A Geometric Model for Specularity Prediction on Planar Surfaces with Multiple Light Sources",
"fno": "07869421",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Light Sources",
"Image Reconstruction",
"Shape",
"Computational Modeling",
"Surface Reconstruction",
"Predictive Models",
"Cameras",
"JOLIMAS",
"Specular Reflection",
"Multiple Light Sources",
"Phong",
"Blinn Phong",
"Specularity",
"Prediction",
"Retexturing",
"Quadric",
"Dual Space",
"Conic",
"Real Time"
],
"authors": [
{
"givenName": "Alexandre",
"surname": "Morgand",
"fullName": "Alexandre Morgand",
"affiliation": "CEA, LIST, Gif-sur-Yvette, France",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Mohamed",
"surname": "Tamaazousti",
"fullName": "Mohamed Tamaazousti",
"affiliation": "CEA, LIST, Gif-sur-Yvette, France",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Adrien",
"surname": "Bartoli",
"fullName": "Adrien Bartoli",
"affiliation": "IP, Clermont-Ferrand, France",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "05",
"pubDate": "2018-05-01 00:00:00",
"pubType": "trans",
"pages": "1691-1704",
"year": "2018",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/cvprw/2017/0733/0/0733b735",
"title": "Surface Normal Reconstruction from Specular Information in Light Field Data",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2017/0733b735/12OmNAP1YZr",
"parentPublication": {
"id": "proceedings/cvprw/2017/0733/0",
"title": "2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2012/4660/0/06402544",
"title": "Real-time surface light-field capture for augmentation of planar specular surfaces",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2012/06402544/12OmNASILPn",
"parentPublication": {
"id": "proceedings/ismar/2012/4660/0",
"title": "2012 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2016/3641/0/3641a044",
"title": "An Empirical Model for Specularity Prediction with Application to Dynamic Retexturing",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2016/3641a044/12OmNCd2rxc",
"parentPublication": {
"id": "proceedings/ismar/2016/3641/0",
"title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccp/2018/2526/0/08368465",
"title": "Near-light photometric stereo using circularly placed point light sources",
"doi": null,
"abstractUrl": "/proceedings-article/iccp/2018/08368465/12OmNqBbHSi",
"parentPublication": {
"id": "proceedings/iccp/2018/2526/0",
"title": "2018 IEEE International Conference on Computational Photography (ICCP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/crv/2013/4983/0/4983a356",
"title": "Real-Time Specularity Detection Using Unnormalized Wiener Entropy",
"doi": null,
"abstractUrl": "/proceedings-article/crv/2013/4983a356/12OmNvDqsPj",
"parentPublication": {
"id": "proceedings/crv/2013/4983/0",
"title": "2013 International Conference on Computer and Robot Vision",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2014/7000/1/7000a115",
"title": "Close-Range Photometric Stereo with Point Light Sources",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2014/7000a115/12OmNx3ZjoX",
"parentPublication": {
"id": "proceedings/3dv/2014/7000/2",
"title": "2014 2nd International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/robot/1992/2720/0/00220132",
"title": "Inspecting specular lobe objects using four light sources",
"doi": null,
"abstractUrl": "/proceedings-article/robot/1992/00220132/12OmNzd7bWl",
"parentPublication": {
"id": "proceedings/robot/1992/2720/0",
"title": "Proceedings 1992 IEEE International Conference on Robotics and Automation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2016/07/07274730",
"title": "The Information Available to a Moving Observer on Shape with Unknown, Isotropic BRDFs",
"doi": null,
"abstractUrl": "/journal/tp/2016/07/07274730/13rRUB6Sq1G",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/11/08007318",
"title": "A Multiple-View Geometric Model of Specularities on Non-Planar Shapes with Application to Dynamic Retexturing",
"doi": null,
"abstractUrl": "/journal/tg/2017/11/08007318/13rRUxOve9O",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2020/07/09064908",
"title": "Shape and Reflectance Reconstruction Using Concentric Multi-Spectral Light Field",
"doi": null,
"abstractUrl": "/journal/tp/2020/07/09064908/1iZGtGUiMhO",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": null,
"next": {
"fno": "07903744",
"articleId": "13rRUwbJD4R",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "17ShDTXFgzk",
"name": "ttg201805-07869421s1.zip",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg201805-07869421s1.zip",
"extension": "zip",
"size": "56.3 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNvGPE8n",
"title": "Jan.",
"year": "2016",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": "22",
"label": "Jan.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUIIVlkl",
"doi": "10.1109/TVCG.2015.2469111",
"abstract": null,
"abstracts": [],
"normalizedAbstract": null,
"title": "Message from the Editor-in-Chief",
"normalizedTitle": "Message from the Editor-in-Chief",
"fno": "07307929",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [],
"authors": [],
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": true,
"isOpenAccess": true,
"issueNum": "01",
"pubDate": "2016-01-01 00:00:00",
"pubType": "trans",
"pages": "x-x",
"year": "2016",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "mags/co/1986/01/01663026",
"title": "Message: From the Editor-in-Chief",
"doi": null,
"abstractUrl": "/magazine/co/1986/01/01663026/13rRUILLkGt",
"parentPublication": {
"id": "mags/co",
"title": "Computer",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/12/ttg20121200ix",
"title": "Message from the Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2012/12/ttg20121200ix/13rRUwIF69i",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2011/12/ttg20111200ix",
"title": "Message from the Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2011/12/ttg20111200ix/13rRUwjoNx0",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ec/2017/01/07870827",
"title": "Message from the Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/ec/2017/01/07870827/13rRUxE04mk",
"parentPublication": {
"id": "trans/ec",
"title": "IEEE Transactions on Emerging Topics in Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/01/08165928",
"title": "Message from the Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2018/01/08165928/13rRUxly8T3",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/11/07572705",
"title": "Message from the Editor-in-Chief and from the Associate Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2016/11/07572705/13rRUyp7tX0",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2010/06/ttg201006000x",
"title": "Message from the Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2010/06/ttg201006000x/13rRUytF41v",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/11/08855103",
"title": "Message from the Editor-in-Chief and from the Associate Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2019/11/08855103/1dNHm0Dq8lG",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/12/09254193",
"title": "Message from the Editor-in-Chief and from the Associate Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2020/12/09254193/1oDXLUaRaDK",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ec/2020/04/09280500",
"title": "Message from the Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/ec/2020/04/09280500/1pg8LOZjN28",
"parentPublication": {
"id": "trans/ec",
"title": "IEEE Transactions on Emerging Topics in Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "07308133",
"articleId": "13rRUxE04tE",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07307931",
"articleId": "13rRUIJcWlp",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNwJPMX5",
"title": "Dec.",
"year": "2011",
"issueNum": "12",
"idPrefix": "tg",
"pubType": "journal",
"volume": "17",
"label": "Dec.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUwjoNx0",
"doi": "10.1109/TVCG.2011.221",
"abstract": null,
"abstracts": [
{
"abstractType": "Regular",
"content": "",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": null,
"title": "Message from the Editor-in-Chief",
"normalizedTitle": "Message from the Editor-in-Chief",
"fno": "ttg20111200ix",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [],
"authors": [
{
"givenName": "Ming Lin",
"surname": "Lin",
"fullName": "Ming Lin Lin",
"affiliation": null,
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": true,
"isOpenAccess": true,
"issueNum": "12",
"pubDate": "2011-12-01 00:00:00",
"pubType": "trans",
"pages": "ix-ix",
"year": "2011",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "trans/tg/2016/01/07307929",
"title": "Message from the Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2016/01/07307929/13rRUIIVlkl",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/co/1986/01/01663026",
"title": "Message: From the Editor-in-Chief",
"doi": null,
"abstractUrl": "/magazine/co/1986/01/01663026/13rRUILLkGt",
"parentPublication": {
"id": "mags/co",
"title": "Computer",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/12/ttg20121200ix",
"title": "Message from the Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2012/12/ttg20121200ix/13rRUwIF69i",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ec/2017/01/07870827",
"title": "Message from the Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/ec/2017/01/07870827/13rRUxE04mk",
"parentPublication": {
"id": "trans/ec",
"title": "IEEE Transactions on Emerging Topics in Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/01/08165928",
"title": "Message from the Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2018/01/08165928/13rRUxly8T3",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/11/07572705",
"title": "Message from the Editor-in-Chief and from the Associate Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2016/11/07572705/13rRUyp7tX0",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2010/06/ttg201006000x",
"title": "Message from the Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2010/06/ttg201006000x/13rRUytF41v",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/11/08855103",
"title": "Message from the Editor-in-Chief and from the Associate Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2019/11/08855103/1dNHm0Dq8lG",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/12/09254193",
"title": "Message from the Editor-in-Chief and from the Associate Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2020/12/09254193/1oDXLUaRaDK",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ec/2020/04/09280500",
"title": "Message from the Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/ec/2020/04/09280500/1pg8LOZjN28",
"parentPublication": {
"id": "trans/ec",
"title": "IEEE Transactions on Emerging Topics in Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg201112iii",
"articleId": "13rRUwdIOUG",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg201112000x",
"articleId": "13rRUxlgxTh",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNz5apx8",
"title": "April",
"year": "2015",
"issueNum": "04",
"idPrefix": "tg",
"pubType": "journal",
"volume": "21",
"label": "April",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUxBa5no",
"doi": "10.1109/TVCG.2015.2399593",
"abstract": "Presents the message from the Editor-in-Chief for this issue of the publication.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Presents the message from the Editor-in-Chief for this issue of the publication.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Presents the message from the Editor-in-Chief for this issue of the publication.",
"title": "Message from the Editor-in-Chief",
"normalizedTitle": "Message from the Editor-in-Chief",
"fno": "07064831",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [],
"authors": [],
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": true,
"isOpenAccess": true,
"issueNum": "04",
"pubDate": "2015-04-01 00:00:00",
"pubType": "trans",
"pages": "v-v",
"year": "2015",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "trans/tg/2012/12/ttg20121200ix",
"title": "Message from the Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2012/12/ttg20121200ix/13rRUwIF69i",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ec/2018/01/08306211",
"title": "Message from the Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/ec/2018/01/08306211/13rRUxYrbQj",
"parentPublication": {
"id": "trans/ec",
"title": "IEEE Transactions on Emerging Topics in Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/ic/2003/01/w1004",
"title": "From the Editor in Chief: Weather Forecast",
"doi": null,
"abstractUrl": "/magazine/ic/2003/01/w1004/13rRUxly91S",
"parentPublication": {
"id": "mags/ic",
"title": "IEEE Internet Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/11/07572705",
"title": "Message from the Editor-in-Chief and from the Associate Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2016/11/07572705/13rRUyp7tX0",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/01/09991004",
"title": "Message from the Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2023/01/09991004/1J9y5CM0PzW",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/04/09016331",
"title": "Message from the Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2020/04/09016331/1hQmFe10XqU",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/sp/2021/01/09336071",
"title": "Message from IEEE S&P's Outgoing Editor in Chief",
"doi": null,
"abstractUrl": "/magazine/sp/2021/01/09336071/1qHMUGaLGCY",
"parentPublication": {
"id": "mags/sp",
"title": "IEEE Security & Privacy",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/02/09340110",
"title": "Message from the Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2021/02/09340110/1qMJUCAU2YM",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ec/2021/01/09372372",
"title": "Editorial from the New Editor in Chief",
"doi": null,
"abstractUrl": "/journal/ec/2021/01/09372372/1rNPKvWrRu0",
"parentPublication": {
"id": "trans/ec",
"title": "IEEE Transactions on Emerging Topics in Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/sp/2021/05/09529259",
"title": "Updates From IEEE Security & Privacy's Editor in Chief Sean Peisert",
"doi": null,
"abstractUrl": "/magazine/sp/2021/05/09529259/1wB2BeLxUbe",
"parentPublication": {
"id": "mags/sp",
"title": "IEEE Security & Privacy",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "07064827",
"articleId": "13rRUNvgyWq",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07064835",
"articleId": "13rRUwh80uB",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNvDI3IP",
"title": "Jan.-March",
"year": "2017",
"issueNum": "01",
"idPrefix": "ec",
"pubType": "journal",
"volume": "5",
"label": "Jan.-March",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUxE04mk",
"doi": "10.1109/TETC.2017.2649406",
"abstract": null,
"abstracts": [],
"normalizedAbstract": null,
"title": "Message from the Editor-in-Chief",
"normalizedTitle": "Message from the Editor-in-Chief",
"fno": "07870827",
"hasPdf": true,
"idPrefix": "ec",
"keywords": [],
"authors": [
{
"givenName": "Fabrizio",
"surname": "Lombardi",
"fullName": "Fabrizio Lombardi",
"affiliation": "Boston, MA, USA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": true,
"isOpenAccess": true,
"issueNum": "01",
"pubDate": "2017-01-01 00:00:00",
"pubType": "trans",
"pages": "3-4",
"year": "2017",
"issn": "2168-6750",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "trans/tg/2017/01/07747755",
"title": "Message from the Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2017/01/07747755/13rRUwI5Ugh",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/12/06935055",
"title": "Message from the Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2014/12/06935055/13rRUwh80He",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/11/08053887",
"title": "Message from the Editor-in-Chief and from the Associate Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2017/11/08053887/13rRUxBa56a",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ec/2018/01/08306211",
"title": "Message from the Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/ec/2018/01/08306211/13rRUxYrbQj",
"parentPublication": {
"id": "trans/ec",
"title": "IEEE Transactions on Emerging Topics in Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/11/07572705",
"title": "Message from the Editor-in-Chief and from the Associate Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2016/11/07572705/13rRUyp7tX0",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/11/09927195",
"title": "Message from the Editor-in-Chief and from the Associate Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2022/11/09927195/1HGJm87UJvq",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/11/08855103",
"title": "Message from the Editor-in-Chief and from the Associate Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2019/11/08855103/1dNHm0Dq8lG",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/04/09016331",
"title": "Message from the Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2020/04/09016331/1hQmFe10XqU",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/12/09254193",
"title": "Message from the Editor-in-Chief and from the Associate Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2020/12/09254193/1oDXLUaRaDK",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/02/09340110",
"title": "Message from the Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2021/02/09340110/1qMJUCAU2YM",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "07869454",
"articleId": "13rRUxYINcl",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07869427",
"articleId": "13rRUxN5exl",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNCaLEju",
"title": "Jan.",
"year": "2018",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": "24",
"label": "Jan.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUxly8T3",
"doi": "10.1109/TVCG.2017.2752499",
"abstract": null,
"abstracts": [],
"normalizedAbstract": null,
"title": "Message from the Editor-in-Chief",
"normalizedTitle": "Message from the Editor-in-Chief",
"fno": "08165928",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [],
"authors": [
{
"givenName": "Leila",
"surname": "De Floriani",
"fullName": "Leila De Floriani",
"affiliation": "University of Maryland at College Park",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": true,
"isOpenAccess": true,
"issueNum": "01",
"pubDate": "2018-01-01 00:00:00",
"pubType": "trans",
"pages": "x-x",
"year": "2018",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "trans/tg/2017/01/07747755",
"title": "Message from the Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2017/01/07747755/13rRUwI5Ugh",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/12/06935055",
"title": "Message from the Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2014/12/06935055/13rRUwh80He",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/11/08053887",
"title": "Message from the Editor-in-Chief and from the Associate Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2017/11/08053887/13rRUxBa56a",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ec/2018/01/08306211",
"title": "Message from the Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/ec/2018/01/08306211/13rRUxYrbQj",
"parentPublication": {
"id": "trans/ec",
"title": "IEEE Transactions on Emerging Topics in Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/11/07572705",
"title": "Message from the Editor-in-Chief and from the Associate Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2016/11/07572705/13rRUyp7tX0",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/11/09927195",
"title": "Message from the Editor-in-Chief and from the Associate Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2022/11/09927195/1HGJm87UJvq",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/11/08855103",
"title": "Message from the Editor-in-Chief and from the Associate Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2019/11/08855103/1dNHm0Dq8lG",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/04/09016331",
"title": "Message from the Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2020/04/09016331/1hQmFe10XqU",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/12/09254193",
"title": "Message from the Editor-in-Chief and from the Associate Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2020/12/09254193/1oDXLUaRaDK",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/02/09340110",
"title": "Message from the Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2021/02/09340110/1qMJUCAU2YM",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "08165931",
"articleId": "13rRUyeTVi8",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08167064",
"articleId": "13rRUwgQpqO",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNAnuTvo",
"title": "Nov.",
"year": "2016",
"issueNum": "11",
"idPrefix": "tg",
"pubType": "journal",
"volume": "22",
"label": "Nov.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUyp7tX0",
"doi": "10.1109/TVCG.2016.2603558",
"abstract": "Presents the introductory editorial for this issue of the publication.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Presents the introductory editorial for this issue of the publication.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Presents the introductory editorial for this issue of the publication.",
"title": "Message from the Editor-in-Chief and from the Associate Editor-in-Chief",
"normalizedTitle": "Message from the Editor-in-Chief and from the Associate Editor-in-Chief",
"fno": "07572705",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [],
"authors": [
{
"givenName": "Leila",
"surname": "De Floriani",
"fullName": "Leila De Floriani",
"affiliation": "EIC IEEE TVCG, University of Genova",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Dieter",
"surname": "Schmalstieg",
"fullName": "Dieter Schmalstieg",
"affiliation": "AEIC IEEE TVCG, University of Graz",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": true,
"isOpenAccess": true,
"issueNum": "11",
"pubDate": "2016-11-01 00:00:00",
"pubType": "trans",
"pages": "i",
"year": "2016",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "trans/ec/2018/01/08306211",
"title": "Message from the Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/ec/2018/01/08306211/13rRUxYrbQj",
"parentPublication": {
"id": "trans/ec",
"title": "IEEE Transactions on Emerging Topics in Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ts/2018/01/08249710",
"title": "Editorial from the New Editor in Chief",
"doi": null,
"abstractUrl": "/journal/ts/2018/01/08249710/13rRUygT7uv",
"parentPublication": {
"id": "trans/ts",
"title": "IEEE Transactions on Software Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/01/08570933",
"title": "Message from the Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2019/01/08570933/17D45Xq6dDd",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ec/2019/01/08657703",
"title": "Message from the Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/ec/2019/01/08657703/187Y4PTVwtO",
"parentPublication": {
"id": "trans/ec",
"title": "IEEE Transactions on Emerging Topics in Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/01/08925350",
"title": "Message from the Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2020/01/08925350/1fvZm5VNd1C",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/bd/2020/01/09016414",
"title": "Message from the Incoming Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/bd/2020/01/09016414/1hN4dOpVujC",
"parentPublication": {
"id": "trans/bd",
"title": "IEEE Transactions on Big Data",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/04/09016331",
"title": "Message from the Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2020/04/09016331/1hQmFe10XqU",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ec/2020/04/09280500",
"title": "Message from the Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/ec/2020/04/09280500/1pg8LOZjN28",
"parentPublication": {
"id": "trans/ec",
"title": "IEEE Transactions on Emerging Topics in Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tm/2021/01/09280502",
"title": "A Message From the Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tm/2021/01/09280502/1phNuKZblgk",
"parentPublication": {
"id": "trans/tm",
"title": "IEEE Transactions on Mobile Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/02/09340110",
"title": "Message from the Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2021/02/09340110/1qMJUCAU2YM",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": null,
"next": {
"fno": "07523411",
"articleId": "13rRUwjXZSi",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNzFdtc6",
"title": "November/December",
"year": "2010",
"issueNum": "06",
"idPrefix": "tg",
"pubType": "journal",
"volume": "16",
"label": "November/December",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUytF41v",
"doi": "10.1109/TVCG.2010.178",
"abstract": null,
"abstracts": [
{
"abstractType": "Regular",
"content": "",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": null,
"title": "Message from the Editor-in-Chief",
"normalizedTitle": "Message from the Editor-in-Chief",
"fno": "ttg201006000x",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [],
"authors": [
{
"givenName": "Thomas",
"surname": "Ertl",
"fullName": "Thomas Ertl",
"affiliation": "INRIA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": true,
"isOpenAccess": true,
"issueNum": "06",
"pubDate": "2010-11-01 00:00:00",
"pubType": "trans",
"pages": "x-x",
"year": "2010",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "trans/tg/2016/01/07307929",
"title": "Message from the Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2016/01/07307929/13rRUIIVlkl",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/co/1986/01/01663026",
"title": "Message: From the Editor-in-Chief",
"doi": null,
"abstractUrl": "/magazine/co/1986/01/01663026/13rRUILLkGt",
"parentPublication": {
"id": "mags/co",
"title": "Computer",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/12/ttg20121200ix",
"title": "Message from the Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2012/12/ttg20121200ix/13rRUwIF69i",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2011/12/ttg20111200ix",
"title": "Message from the Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2011/12/ttg20111200ix/13rRUwjoNx0",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ec/2017/01/07870827",
"title": "Message from the Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/ec/2017/01/07870827/13rRUxE04mk",
"parentPublication": {
"id": "trans/ec",
"title": "IEEE Transactions on Emerging Topics in Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/01/08165928",
"title": "Message from the Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2018/01/08165928/13rRUxly8T3",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/11/07572705",
"title": "Message from the Editor-in-Chief and from the Associate Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2016/11/07572705/13rRUyp7tX0",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/11/08855103",
"title": "Message from the Editor-in-Chief and from the Associate Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2019/11/08855103/1dNHm0Dq8lG",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/12/09254193",
"title": "Message from the Editor-in-Chief and from the Associate Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2020/12/09254193/1oDXLUaRaDK",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ec/2020/04/09280500",
"title": "Message from the Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/ec/2020/04/09280500/1pg8LOZjN28",
"parentPublication": {
"id": "trans/ec",
"title": "IEEE Transactions on Emerging Topics in Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg201006000i",
"articleId": "13rRUwwaKt2",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg20100600xi",
"articleId": "13rRUxASuAs",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNvvc5OL",
"title": "April",
"year": "2013",
"issueNum": "04",
"idPrefix": "tg",
"pubType": "journal",
"volume": "19",
"label": "April",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUILtJma",
"doi": "10.1109/TVCG.2013.54",
"abstract": "The apers in this special issue were presented at the 2013 IEEE Virtual Reality Conference held March 16-20, 2013, in Orlando, Florida.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The apers in this special issue were presented at the 2013 IEEE Virtual Reality Conference held March 16-20, 2013, in Orlando, Florida.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The apers in this special issue were presented at the 2013 IEEE Virtual Reality Conference held March 16-20, 2013, in Orlando, Florida.",
"title": "Message from the Paper Chairs and Guest Editors",
"normalizedTitle": "Message from the Paper Chairs and Guest Editors",
"fno": "ttg2013040000vi",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Special Issues And Sections",
"Meetings",
"Computer Graphics",
"Visualization"
],
"authors": [
{
"givenName": "Sabine",
"surname": "Coquillart",
"fullName": "Sabine Coquillart",
"affiliation": "INRIA, France",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Joseph J.",
"surname": "LaViola",
"fullName": "Joseph J. LaViola",
"affiliation": "University of Central Florida, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Dieter",
"surname": "Schmalstieg",
"fullName": "Dieter Schmalstieg",
"affiliation": "Graz University of Technology, Austria",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": true,
"isOpenAccess": true,
"issueNum": "04",
"pubDate": "2013-04-01 00:00:00",
"pubType": "trans",
"pages": "vi",
"year": "2013",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "trans/tg/2013/12/ttg20131200xi",
"title": "Message from the Paper Chairs and Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2013/12/ttg20131200xi/13rRUNvyaf0",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/04/ttg2014040vi",
"title": "Message from the Paper Chairs and Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2014/04/ttg2014040vi/13rRUwI5Ug9",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/07/06748102",
"title": "Guest Editor' Introduction: Special Issue on the ACM SIGGRAPH Symposium on Interactive 3D Graphics and Games 2013",
"doi": null,
"abstractUrl": "/journal/tg/2014/07/06748102/13rRUwInvyy",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2015/04/07064835",
"title": "Message from the VR Program Chairs and Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2015/04/07064835/13rRUwh80uB",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/12/06935059",
"title": "Message from the VIS Paper Chairs and Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2014/12/06935059/13rRUxBa564",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/08/06847259",
"title": "Guest Editors' Introduction: Special Section on the IEEE Pacific Visualization Symposium",
"doi": null,
"abstractUrl": "/journal/tg/2014/08/06847259/13rRUxD9gXJ",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/12/ttg201212000x",
"title": "Message from the Paper Chairs and Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2012/12/ttg201212000x/13rRUxYIN49",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/04/ttg20120400vi",
"title": "Message from the Paper Chairs and Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2012/04/ttg20120400vi/13rRUxly9dS",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/05/09754286",
"title": "IEEE VR 2022 Message from the Journal Paper Chairs and Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2022/05/09754286/1Cpd7Bwusk8",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/05/09405530",
"title": "Message from the Program Chairs and Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2021/05/09405530/1sP1eDRuGMU",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2013040000v",
"articleId": "13rRUwbs20U",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2013040000vii",
"articleId": "13rRUx0xPIG",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNBhpS2B",
"title": "April",
"year": "2014",
"issueNum": "04",
"idPrefix": "tg",
"pubType": "journal",
"volume": "20",
"label": "April",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUwI5Ug9",
"doi": "10.1109/TVCG.2014.32",
"abstract": "In this special issue of IEEE Transactions on Visualization and Computer Graphics (TVCG), we are pleased to present the long papers from the IEEE Virtual Reality Conference 2014 (IEEE VR 2014), held March 29–April 2, 2014 in Minneapolis, Minnesota, USA.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this special issue of IEEE Transactions on Visualization and Computer Graphics (TVCG), we are pleased to present the long papers from the IEEE Virtual Reality Conference 2014 (IEEE VR 2014), held March 29–April 2, 2014 in Minneapolis, Minnesota, USA.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this special issue of IEEE Transactions on Visualization and Computer Graphics (TVCG), we are pleased to present the long papers from the IEEE Virtual Reality Conference 2014 (IEEE VR 2014), held March 29–April 2, 2014 in Minneapolis, Minnesota, USA.",
"title": "Message from the Paper Chairs and Guest Editors",
"normalizedTitle": "Message from the Paper Chairs and Guest Editors",
"fno": "ttg2014040vi",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Special Issues And Sections",
"Meetings",
"Computer Graphics",
"V Isualization",
"Virtual Reality"
],
"authors": [
{
"givenName": "Sabine",
"surname": "Coquillart",
"fullName": "Sabine Coquillart",
"affiliation": "INRIA, France",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Kiyoshi",
"surname": "Kiyokawa",
"fullName": "Kiyoshi Kiyokawa",
"affiliation": "Osaka University, Japan",
"__typename": "ArticleAuthorType"
},
{
"givenName": "J. Edward",
"surname": "Swan",
"fullName": "J. Edward Swan",
"affiliation": "Mississippi State University, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Doug",
"surname": "Bowman",
"fullName": "Doug Bowman",
"affiliation": "Virginia Tech, USA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": true,
"isOpenAccess": true,
"issueNum": "04",
"pubDate": "2014-04-01 00:00:00",
"pubType": "trans",
"pages": "vi-vi",
"year": "2014",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "trans/tg/2013/04/ttg2013040000vi",
"title": "Message from the Paper Chairs and Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2013/04/ttg2013040000vi/13rRUILtJma",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2015/04/07064835",
"title": "Message from the VR Program Chairs and Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2015/04/07064835/13rRUwh80uB",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/12/06935059",
"title": "Message from the VIS Paper Chairs and Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2014/12/06935059/13rRUxBa564",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/12/ttg201212000x",
"title": "Message from the Paper Chairs and Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2012/12/ttg201212000x/13rRUxYIN49",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/04/ttg20120400vi",
"title": "Message from the Paper Chairs and Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2012/04/ttg20120400vi/13rRUxly9dS",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/05/09754286",
"title": "IEEE VR 2022 Message from the Journal Paper Chairs and Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2022/05/09754286/1Cpd7Bwusk8",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/11/09927176",
"title": "Message from the ISMAR 2022 Science and Technology Journal Program Chairs and TVCG Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2022/11/09927176/1HGJ8mlD3S8",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/11/08855105",
"title": "Message from the ISMAR 2019 Science and Technology Program Chairs and <italic>TVCG</italic> Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2019/11/08855105/1dNHma690d2",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/12/09254194",
"title": "Message from the ISMAR 2020 Science and Technology Program Chairs and <italic>TVCG</italic> Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2020/12/09254194/1oDXMHvn1aU",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/11/09591492",
"title": "Message from the ISMAR 2021 Science and Technology Journal Program Chairs and TVCG Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2021/11/09591492/1y2FvGMxBuM",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg20140400v",
"articleId": "13rRUxjQybT",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg201404vii",
"articleId": "13rRUwInvf6",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNz5apx8",
"title": "April",
"year": "2015",
"issueNum": "04",
"idPrefix": "tg",
"pubType": "journal",
"volume": "21",
"label": "April",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUwh80uB",
"doi": "10.1109/TVCG.2015.2399595",
"abstract": "The 13 papers in this special issue were presented at the IEEE Virtual Reality Conference 2015 (IEEE VR 2015) that was held March 23–27, 2015 in Arles, France.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The 13 papers in this special issue were presented at the IEEE Virtual Reality Conference 2015 (IEEE VR 2015) that was held March 23–27, 2015 in Arles, France.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The 13 papers in this special issue were presented at the IEEE Virtual Reality Conference 2015 (IEEE VR 2015) that was held March 23–27, 2015 in Arles, France.",
"title": "Message from the VR Program Chairs and Guest Editors",
"normalizedTitle": "Message from the VR Program Chairs and Guest Editors",
"fno": "07064835",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Special Issues And Sections",
"Meetings",
"Virtual Reality"
],
"authors": [],
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": true,
"isOpenAccess": true,
"issueNum": "04",
"pubDate": "2015-04-01 00:00:00",
"pubType": "trans",
"pages": "vi-vi",
"year": "2015",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "trans/tg/2013/04/ttg2013040000vi",
"title": "Message from the Paper Chairs and Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2013/04/ttg2013040000vi/13rRUILtJma",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/06/ttg2013060898",
"title": "Guest Editors' Introduction: Special Section on the IEEE Pacific Visualization Symposium 2012",
"doi": null,
"abstractUrl": "/journal/tg/2013/06/ttg2013060898/13rRUNvgziD",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/04/ttg2014040vi",
"title": "Message from the Paper Chairs and Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2014/04/ttg2014040vi/13rRUwI5Ug9",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/12/06935059",
"title": "Message from the VIS Paper Chairs and Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2014/12/06935059/13rRUxBa564",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/12/ttg201212000x",
"title": "Message from the Paper Chairs and Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2012/12/ttg201212000x/13rRUxYIN49",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/04/ttg20120400vi",
"title": "Message from the Paper Chairs and Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2012/04/ttg20120400vi/13rRUxly9dS",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/05/09754286",
"title": "IEEE VR 2022 Message from the Journal Paper Chairs and Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2022/05/09754286/1Cpd7Bwusk8",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/11/09927176",
"title": "Message from the ISMAR 2022 Science and Technology Journal Program Chairs and TVCG Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2022/11/09927176/1HGJ8mlD3S8",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/05/09405530",
"title": "Message from the Program Chairs and Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2021/05/09405530/1sP1eDRuGMU",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/11/09591492",
"title": "Message from the ISMAR 2021 Science and Technology Journal Program Chairs and TVCG Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2021/11/09591492/1y2FvGMxBuM",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "07064831",
"articleId": "13rRUxBa5no",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07064818",
"articleId": "13rRUxYrbMk",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNwpGgK8",
"title": "Dec.",
"year": "2014",
"issueNum": "12",
"idPrefix": "tg",
"pubType": "journal",
"volume": "20",
"label": "Dec.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUxBa564",
"doi": "10.1109/TVCG.2014.2346661",
"abstract": "The papers in this special issue were presented at the Proceedings of IEEE VIS 2014, held during November 9-14, 2014, in Paris, France. VIS consists of three conferences, held concurrently: the IEEE Visual Analytics Science and Technology Conference (VAST 2014), the IEEE Information Visualization Conference (InfoVis 2014), and the IEEE Scientific Visualization Conference (SciVis 2014). Visualization continues to develop rapidly as a research discipline and the three conferences are maintaining their positions as the leading annual events for researchers and practitioners to share the most innovative and impactful results of an increasingly diverse and influential community.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The papers in this special issue were presented at the Proceedings of IEEE VIS 2014, held during November 9-14, 2014, in Paris, France. VIS consists of three conferences, held concurrently: the IEEE Visual Analytics Science and Technology Conference (VAST 2014), the IEEE Information Visualization Conference (InfoVis 2014), and the IEEE Scientific Visualization Conference (SciVis 2014). Visualization continues to develop rapidly as a research discipline and the three conferences are maintaining their positions as the leading annual events for researchers and practitioners to share the most innovative and impactful results of an increasingly diverse and influential community.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The papers in this special issue were presented at the Proceedings of IEEE VIS 2014, held during November 9-14, 2014, in Paris, France. VIS consists of three conferences, held concurrently: the IEEE Visual Analytics Science and Technology Conference (VAST 2014), the IEEE Information Visualization Conference (InfoVis 2014), and the IEEE Scientific Visualization Conference (SciVis 2014). Visualization continues to develop rapidly as a research discipline and the three conferences are maintaining their positions as the leading annual events for researchers and practitioners to share the most innovative and impactful results of an increasingly diverse and influential community.",
"title": "Message from the VIS Paper Chairs and Guest Editors",
"normalizedTitle": "Message from the VIS Paper Chairs and Guest Editors",
"fno": "06935059",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Special Issues And Sections",
"Meetings",
"Visualization"
],
"authors": [],
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": true,
"isOpenAccess": true,
"issueNum": "12",
"pubDate": "2014-12-01 00:00:00",
"pubType": "trans",
"pages": "xi-xiv",
"year": "2014",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "trans/tg/2013/04/ttg2013040000vi",
"title": "Message from the Paper Chairs and Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2013/04/ttg2013040000vi/13rRUILtJma",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/04/ttg2014040vi",
"title": "Message from the Paper Chairs and Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2014/04/ttg2014040vi/13rRUwI5Ug9",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2015/08/07138667",
"title": "Guest Editors’ Introduction: Special Section on the IEEE Pacific Visualization Symposium 2014",
"doi": null,
"abstractUrl": "/journal/tg/2015/08/07138667/13rRUwI5Ugf",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2015/04/07064835",
"title": "Message from the VR Program Chairs and Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2015/04/07064835/13rRUwh80uB",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/12/ttg201212000x",
"title": "Message from the Paper Chairs and Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2012/12/ttg201212000x/13rRUxYIN49",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/04/ttg20120400vi",
"title": "Message from the Paper Chairs and Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2012/04/ttg20120400vi/13rRUxly9dS",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/06/08703194",
"title": "Guest Editors' Introduction: Special Section on IEEE PacificVis 2019",
"doi": null,
"abstractUrl": "/journal/tg/2019/06/08703194/19Er7j5Ad7a",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/05/09754286",
"title": "IEEE VR 2022 Message from the Journal Paper Chairs and Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2022/05/09754286/1Cpd7Bwusk8",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/06/09766260",
"title": "Guest Editors' Introduction: Special Section on IEEE PacificVis 2022",
"doi": null,
"abstractUrl": "/journal/tg/2022/06/09766260/1D34QjpFGyQ",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/06/09430173",
"title": "Guest Editors' Introduction: Special Section on IEEE PacificVis 2021",
"doi": null,
"abstractUrl": "/journal/tg/2021/06/09430173/1tzuiF6azcs",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "06935055",
"articleId": "13rRUwh80He",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "06935062",
"articleId": "13rRUwj7cpd",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNyPQ4Dx",
"title": "Dec.",
"year": "2012",
"issueNum": "12",
"idPrefix": "tg",
"pubType": "journal",
"volume": "18",
"label": "Dec.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUxYIN49",
"doi": "10.1109/TVCG.2012.247",
"abstract": "This special issue includes papers that were presented at the IEEE Scientific Visualization Conference 2012 (SciVis 2012) and the IEEE Information Visualization Conference 2012 (InfoVis 2012), held together at IEEE VisWeek from 14-19 October 2012 in Seattle, WA.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This special issue includes papers that were presented at the IEEE Scientific Visualization Conference 2012 (SciVis 2012) and the IEEE Information Visualization Conference 2012 (InfoVis 2012), held together at IEEE VisWeek from 14-19 October 2012 in Seattle, WA.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This special issue includes papers that were presented at the IEEE Scientific Visualization Conference 2012 (SciVis 2012) and the IEEE Information Visualization Conference 2012 (InfoVis 2012), held together at IEEE VisWeek from 14-19 October 2012 in Seattle, WA.",
"title": "Message from the Paper Chairs and Guest Editors",
"normalizedTitle": "Message from the Paper Chairs and Guest Editors",
"fno": "ttg201212000x",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Special Issues And Sections",
"Meetings",
"Visualization"
],
"authors": [
{
"givenName": "Jason",
"surname": "Dykes",
"fullName": "Jason Dykes",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "David",
"surname": "Laidlaw",
"fullName": "David Laidlaw",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "Klaus",
"surname": "Mueller",
"fullName": "Klaus Mueller",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "Giuseppe",
"surname": "Santucci",
"fullName": "Giuseppe Santucci",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "Gerik",
"surname": "Scheuermann",
"fullName": "Gerik Scheuermann",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "Matthew",
"surname": "Ward",
"fullName": "Matthew Ward",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "Chris",
"surname": "Weaver",
"fullName": "Chris Weaver",
"affiliation": null,
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": true,
"isOpenAccess": true,
"issueNum": "12",
"pubDate": "2012-12-01 00:00:00",
"pubType": "trans",
"pages": "x-xii",
"year": "2012",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "trans/tg/2013/04/ttg2013040000vi",
"title": "Message from the Paper Chairs and Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2013/04/ttg2013040000vi/13rRUILtJma",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/06/ttg2013060898",
"title": "Guest Editors' Introduction: Special Section on the IEEE Pacific Visualization Symposium 2012",
"doi": null,
"abstractUrl": "/journal/tg/2013/06/ttg2013060898/13rRUNvgziD",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/12/ttg20131200xi",
"title": "Message from the Paper Chairs and Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2013/12/ttg20131200xi/13rRUNvyaf0",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/04/ttg2014040vi",
"title": "Message from the Paper Chairs and Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2014/04/ttg2014040vi/13rRUwI5Ug9",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2015/04/07064835",
"title": "Message from the VR Program Chairs and Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2015/04/07064835/13rRUwh80uB",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/12/06935059",
"title": "Message from the VIS Paper Chairs and Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2014/12/06935059/13rRUxBa564",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/04/ttg20120400vi",
"title": "Message from the Paper Chairs and Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2012/04/ttg20120400vi/13rRUxly9dS",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/05/09754286",
"title": "IEEE VR 2022 Message from the Journal Paper Chairs and Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2022/05/09754286/1Cpd7Bwusk8",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/11/09927176",
"title": "Message from the ISMAR 2022 Science and Technology Journal Program Chairs and TVCG Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2022/11/09927176/1HGJ8mlD3S8",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/05/09405530",
"title": "Message from the Program Chairs and Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2021/05/09405530/1sP1eDRuGMU",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg20121200ix",
"articleId": "13rRUwIF69i",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg201212xiii",
"articleId": "13rRUygBwhI",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
|
{
"issue": {
"id": "1sP18ke9Y64",
"title": "May",
"year": "2021",
"issueNum": "05",
"idPrefix": "tg",
"pubType": "journal",
"volume": "27",
"label": "May",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1sP1eDRuGMU",
"doi": "10.1109/TVCG.2021.3067835",
"abstract": "The papers in this special section were presented at the 28th IEEE Conference on Virtual Reality and 3D User Interfaces (IEEE VR 2021), that was held virtually March 27–April 3, 2020, in Lisbon, Portugal. ",
"abstracts": [
{
"abstractType": "Regular",
"content": "The papers in this special section were presented at the 28th IEEE Conference on Virtual Reality and 3D User Interfaces (IEEE VR 2021), that was held virtually March 27–April 3, 2020, in Lisbon, Portugal. ",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The papers in this special section were presented at the 28th IEEE Conference on Virtual Reality and 3D User Interfaces (IEEE VR 2021), that was held virtually March 27–April 3, 2020, in Lisbon, Portugal. ",
"title": "Message from the Program Chairs and Guest Editors",
"normalizedTitle": "Message from the Program Chairs and Guest Editors",
"fno": "09405530",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Special Issues And Sections",
"Meetings",
"Virtual Reality",
"User Interfaces"
],
"authors": [],
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": true,
"isOpenAccess": true,
"issueNum": "05",
"pubDate": "2021-05-01 00:00:00",
"pubType": "trans",
"pages": "v-v",
"year": "2021",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "trans/tg/2014/04/ttg2014040vi",
"title": "Message from the Paper Chairs and Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2014/04/ttg2014040vi/13rRUwI5Ug9",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2015/04/07064835",
"title": "Message from the VR Program Chairs and Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2015/04/07064835/13rRUwh80uB",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/12/06935059",
"title": "Message from the VIS Paper Chairs and Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2014/12/06935059/13rRUxBa564",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/12/ttg201212000x",
"title": "Message from the Paper Chairs and Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2012/12/ttg201212000x/13rRUxYIN49",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/04/ttg20120400vi",
"title": "Message from the Paper Chairs and Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2012/04/ttg20120400vi/13rRUxly9dS",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/06/08703194",
"title": "Guest Editors' Introduction: Special Section on IEEE PacificVis 2019",
"doi": null,
"abstractUrl": "/journal/tg/2019/06/08703194/19Er7j5Ad7a",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/11/09927176",
"title": "Message from the ISMAR 2022 Science and Technology Journal Program Chairs and TVCG Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2022/11/09927176/1HGJ8mlD3S8",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/06/09082802",
"title": "Guest Editors’ Introduction: Special Section on IEEE PacificVis 2020",
"doi": null,
"abstractUrl": "/journal/tg/2020/06/09082802/1jrTVLo1tpC",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/12/09254194",
"title": "Message from the ISMAR 2020 Science and Technology Program Chairs and <italic>TVCG</italic> Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2020/12/09254194/1oDXMHvn1aU",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/11/09591492",
"title": "Message from the ISMAR 2021 Science and Technology Journal Program Chairs and TVCG Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2021/11/09591492/1y2FvGMxBuM",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09405571",
"articleId": "1sP18PmVuQU",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09405518",
"articleId": "1sP18FjRYli",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNzFdtc6",
"title": "November/December",
"year": "2010",
"issueNum": "06",
"idPrefix": "tg",
"pubType": "journal",
"volume": "16",
"label": "November/December",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUx0gefi",
"doi": "10.1109/TVCG.2010.219",
"abstract": null,
"abstracts": [
{
"abstractType": "Regular",
"content": "",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": null,
"title": "VisWeek Capstone Address",
"normalizedTitle": "VisWeek Capstone Address",
"fno": "ttg20100600xxv",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [],
"authors": [
{
"givenName": "Alexander S.",
"surname": "Szalay",
"fullName": "Alexander S. Szalay",
"affiliation": "The Johns Hopkins University",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": false,
"isOpenAccess": true,
"issueNum": "06",
"pubDate": "2010-11-01 00:00:00",
"pubType": "trans",
"pages": "xxv-xxvi",
"year": "2010",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [],
"adjacentArticles": {
"previous": {
"fno": "ttg20100600xxiv",
"articleId": "13rRUILtJzs",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2010060881",
"articleId": "13rRUxjyX3S",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNyPQ4Dx",
"title": "Dec.",
"year": "2012",
"issueNum": "12",
"idPrefix": "tg",
"pubType": "journal",
"volume": "18",
"label": "Dec.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUyY294C",
"doi": "10.1109/TVCG.2012.289",
"abstract": "Summary form only given, as follows. \"Help Me See! Some Thoughts From a Potential User.\" What you are doing as visualization researchers and developers is critical and, in fact, your role is more important than ever in this age of massive data. I and many others desperately want to use your work, but sometimes I just cannot seem to wrap my head around what you are showing - even if it really looks cool. Cool doesn't cut it for me. This talk will give examples from my own successes and failures in photography and graphics and suggest, with a little imagination and open minds, there might be some lessons learned from my own commitment to delving into and communicating information. A brief professional biography the presenter is included.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Summary form only given, as follows. \"Help Me See! Some Thoughts From a Potential User.\" What you are doing as visualization researchers and developers is critical and, in fact, your role is more important than ever in this age of massive data. I and many others desperately want to use your work, but sometimes I just cannot seem to wrap my head around what you are showing - even if it really looks cool. Cool doesn't cut it for me. This talk will give examples from my own successes and failures in photography and graphics and suggest, with a little imagination and open minds, there might be some lessons learned from my own commitment to delving into and communicating information. A brief professional biography the presenter is included.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Summary form only given, as follows. \"Help Me See! Some Thoughts From a Potential User.\" What you are doing as visualization researchers and developers is critical and, in fact, your role is more important than ever in this age of massive data. I and many others desperately want to use your work, but sometimes I just cannot seem to wrap my head around what you are showing - even if it really looks cool. Cool doesn't cut it for me. This talk will give examples from my own successes and failures in photography and graphics and suggest, with a little imagination and open minds, there might be some lessons learned from my own commitment to delving into and communicating information. A brief professional biography the presenter is included.",
"title": "VisWeek 2012 Capstone Speaker",
"normalizedTitle": "VisWeek 2012 Capstone Speaker",
"fno": "ttg201212xxii",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [],
"authors": [
{
"givenName": "Felice",
"surname": "Frankel",
"fullName": "Felice Frankel",
"affiliation": null,
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": true,
"isOpenAccess": true,
"issueNum": "12",
"pubDate": "2012-12-01 00:00:00",
"pubType": "trans",
"pages": "xxii",
"year": "2012",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/cool-chips/2016/1386/0/07503663",
"title": "Message from the organizing committee chair",
"doi": null,
"abstractUrl": "/proceedings-article/cool-chips/2016/07503663/12OmNCesr1k",
"parentPublication": {
"id": "proceedings/cool-chips/2016/1386/0",
"title": "2016 IEEE Symposium in Low-Power and High-Speed Chips (COOL CHIPS XIX)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cool-chips/2012/1202/0/06216571",
"title": "Message from the organizing committee chair",
"doi": null,
"abstractUrl": "/proceedings-article/cool-chips/2012/06216571/12OmNrkBwwy",
"parentPublication": {
"id": "proceedings/cool-chips/2012/1202/0",
"title": "2012 IEEE COOL Chips XV",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cool-chips/2014/3810/0/06842939",
"title": "Message from the organizing committee chair",
"doi": null,
"abstractUrl": "/proceedings-article/cool-chips/2014/06842939/12OmNvAiSak",
"parentPublication": {
"id": "proceedings/cool-chips/2014/3810/0",
"title": "2014 IEEE COOL Chips XVII (COOL Chips)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cool-chips/2017/3828/0/07946368",
"title": "Message from the organizing committee chair",
"doi": null,
"abstractUrl": "/proceedings-article/cool-chips/2017/07946368/12OmNvk7JSu",
"parentPublication": {
"id": "proceedings/cool-chips/2017/3828/0",
"title": "2017 IEEE Symposium in Low-Power and High-Speed Chips (COOL CHIPS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cool-chips/2015/7325/0/07158524",
"title": "Message from the organizing committee chair",
"doi": null,
"abstractUrl": "/proceedings-article/cool-chips/2015/07158524/12OmNzZEADw",
"parentPublication": {
"id": "proceedings/cool-chips/2015/7325/0",
"title": "2015 IEEE Symposium in Low-Power and High-Speed Chips (COOL CHIPS XVIII)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/mu/2011/02/mmu2011020012",
"title": "Web-Scale Multimedia Analysis: Does Content Matter?",
"doi": null,
"abstractUrl": "/magazine/mu/2011/02/mmu2011020012/13rRUEgs2yK",
"parentPublication": {
"id": "mags/mu",
"title": "IEEE MultiMedia",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/12/ttg2012120xxi",
"title": "VisWeek 2012 Keynote Speaker",
"doi": null,
"abstractUrl": "/journal/tg/2012/12/ttg2012120xxi/13rRUILc8fb",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/so/2014/02/mso2014020014",
"title": "A Final Word about Stories",
"doi": null,
"abstractUrl": "/magazine/so/2014/02/mso2014020014/13rRUwh80B2",
"parentPublication": {
"id": "mags/so",
"title": "IEEE Software",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cool-chips/2022/1989/0/09772698",
"title": "Message from the Organizing Committee Chair",
"doi": null,
"abstractUrl": "/proceedings-article/cool-chips/2022/09772698/1Dqll7k6Lm0",
"parentPublication": {
"id": "proceedings/cool-chips/2022/1989/0",
"title": "2022 IEEE Symposium in Low-Power and High-Speed Chips (COOL CHIPS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2012120xxi",
"articleId": "13rRUILc8fb",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2012122005",
"articleId": "13rRUx0xPmY",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNCbCrUN",
"title": "Dec.",
"year": "2013",
"issueNum": "12",
"idPrefix": "tg",
"pubType": "journal",
"volume": "19",
"label": "Dec.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUygBwhJ",
"doi": "10.1109/TVCG.2013.216",
"abstract": "Summary form only given. In the past decades many new techniques have been developed to visualize and interact with abstract data, but also, many challenges remain. In my talk I will reflect on how to make progress in our field: how to identify interesting problems and next how to find effective solutions. I will begin with an attempt to identify characteristics of interesting problems, and discuss windows of opportunity for data, tasks, and users. Some problems have been solved, some are too hard to deal with, what is the range we should aim at? And what impact can be obtained? Next, I discuss strategies and approaches for finding novel solutions, such as combining existing approaches and finding inspiration in other disciplines, including art and design. This talk is based on lessons we learned while developing new techniques, and will be illustrated with a variety of cases and demos from our group at TU/e, showing successes and failures.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Summary form only given. In the past decades many new techniques have been developed to visualize and interact with abstract data, but also, many challenges remain. In my talk I will reflect on how to make progress in our field: how to identify interesting problems and next how to find effective solutions. I will begin with an attempt to identify characteristics of interesting problems, and discuss windows of opportunity for data, tasks, and users. Some problems have been solved, some are too hard to deal with, what is the range we should aim at? And what impact can be obtained? Next, I discuss strategies and approaches for finding novel solutions, such as combining existing approaches and finding inspiration in other disciplines, including art and design. This talk is based on lessons we learned while developing new techniques, and will be illustrated with a variety of cases and demos from our group at TU/e, showing successes and failures.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Summary form only given. In the past decades many new techniques have been developed to visualize and interact with abstract data, but also, many challenges remain. In my talk I will reflect on how to make progress in our field: how to identify interesting problems and next how to find effective solutions. I will begin with an attempt to identify characteristics of interesting problems, and discuss windows of opportunity for data, tasks, and users. Some problems have been solved, some are too hard to deal with, what is the range we should aim at? And what impact can be obtained? Next, I discuss strategies and approaches for finding novel solutions, such as combining existing approaches and finding inspiration in other disciplines, including art and design. This talk is based on lessons we learned while developing new techniques, and will be illustrated with a variety of cases and demos from our group at TU/e, showing successes and failures.",
"title": "VIS 2013 Capstone Speaker: Information Visualization: Challenges and Opportunities",
"normalizedTitle": "VIS 2013 Capstone Speaker: Information Visualization: Challenges and Opportunities",
"fno": "ttg201312xxviii",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [],
"authors": [
{
"givenName": "Jarke",
"surname": "van Wijk",
"fullName": "Jarke van Wijk",
"affiliation": null,
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": true,
"isOpenAccess": true,
"issueNum": "12",
"pubDate": "2013-12-01 00:00:00",
"pubType": "trans",
"pages": "xxviii-xxviii",
"year": "2013",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/vast/2015/9783/0/07347623",
"title": "VIS capstone address",
"doi": null,
"abstractUrl": "/proceedings-article/vast/2015/07347623/12OmNBSSVi0",
"parentPublication": {
"id": "proceedings/vast/2015/9783/0",
"title": "2015 IEEE Conference on Visual Analytics Science and Technology (VAST)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/scivis/2015/9785/0/07429483",
"title": "VIS capstone address: Architectures physical and digital",
"doi": null,
"abstractUrl": "/proceedings-article/scivis/2015/07429483/12OmNx6Piuq",
"parentPublication": {
"id": "proceedings/scivis/2015/9785/0",
"title": "2015 IEEE Scientific Visualization Conference (SciVis)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vast/2016/5661/0/07883505",
"title": "VIS capstone address",
"doi": null,
"abstractUrl": "/proceedings-article/vast/2016/07883505/12OmNxFJXuy",
"parentPublication": {
"id": "proceedings/vast/2016/5661/0",
"title": "2016 IEEE Conference on Visual Analytics Science and Technology (VAST)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2016/0836/0/07504680",
"title": "Capstone speaker: Agents? Seriously",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2016/07504680/12OmNzV70mm",
"parentPublication": {
"id": "proceedings/vr/2016/0836/0",
"title": "2016 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2016/0842/0/07460022",
"title": "Keynote speaker: Getting real",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2016/07460022/12OmNzt0IxZ",
"parentPublication": {
"id": "proceedings/3dui/2016/0842/0",
"title": "2016 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/12/ttg201312xxvii",
"title": "VIS 2013 Keynote Speaker: Erez Lieberman Aiden [biography]",
"doi": null,
"abstractUrl": "/journal/tg/2013/12/ttg201312xxvii/13rRUILLkDQ",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/12/ttg201212xxii",
"title": "VisWeek 2012 Capstone Speaker",
"doi": null,
"abstractUrl": "/journal/tg/2012/12/ttg201212xxii/13rRUyY294C",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2023/4815/0/481500z025",
"title": "Keynote Speaker: Digital Humans in Virtual Environment",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2023/481500z025/1MNgtJP55y8",
"parentPublication": {
"id": "proceedings/vr/2023/4815/0",
"title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vast/2018/6861/0/08802482",
"title": "VIS Capstone Address: Can I believe what I see?-Information theoretic algorithm validation",
"doi": null,
"abstractUrl": "/proceedings-article/vast/2018/08802482/1cJ6WL6h2iA",
"parentPublication": {
"id": "proceedings/vast/2018/6861/0",
"title": "2018 IEEE Conference on Visual Analytics Science and Technology (VAST)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/percom-workshops/2020/4716/0/09156201",
"title": "Invited Talk: Software Engineering, AI and autonomous vehicles: Security assurance",
"doi": null,
"abstractUrl": "/proceedings-article/percom-workshops/2020/09156201/1m1jBbaFOes",
"parentPublication": {
"id": "proceedings/percom-workshops/2020/4716/0",
"title": "2020 IEEE International Conference on Pervasive Computing and Communications Workshops (PerCom Workshops)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg201312xxvii",
"articleId": "13rRUILLkDQ",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2013121962",
"articleId": "13rRUwhHcJg",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNwJPMX5",
"title": "Dec.",
"year": "2011",
"issueNum": "12",
"idPrefix": "tg",
"pubType": "journal",
"volume": "17",
"label": "Dec.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUynHuj6",
"doi": "10.1109/TVCG.2011.256",
"abstract": null,
"abstracts": [
{
"abstractType": "Regular",
"content": "",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": null,
"title": "VisWeek Capstone Address",
"normalizedTitle": "VisWeek Capstone Address",
"fno": "ttg201112xxiv",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [],
"authors": [
{
"givenName": "Amanda",
"surname": "Cox",
"fullName": "Amanda Cox",
"affiliation": "New York Times",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": true,
"isOpenAccess": true,
"issueNum": "12",
"pubDate": "2011-12-01 00:00:00",
"pubType": "trans",
"pages": "xxiv-xxiv",
"year": "2011",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/vast/2015/9783/0/07347623",
"title": "VIS capstone address",
"doi": null,
"abstractUrl": "/proceedings-article/vast/2015/07347623/12OmNBSSVi0",
"parentPublication": {
"id": "proceedings/vast/2015/9783/0",
"title": "2015 IEEE Conference on Visual Analytics Science and Technology (VAST)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-infovis/1998/9093/0/9093xii",
"title": "Capstone Address",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-infovis/1998/9093xii/12OmNBhZ4rc",
"parentPublication": {
"id": "proceedings/ieee-infovis/1998/9093/0",
"title": "Information Visualization, IEEE Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/scivis/2015/9785/0/07429483",
"title": "VIS capstone address: Architectures physical and digital",
"doi": null,
"abstractUrl": "/proceedings-article/scivis/2015/07429483/12OmNx6Piuq",
"parentPublication": {
"id": "proceedings/scivis/2015/9785/0",
"title": "2015 IEEE Scientific Visualization Conference (SciVis)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vast/2016/5661/0/07883505",
"title": "VIS capstone address",
"doi": null,
"abstractUrl": "/proceedings-article/vast/2016/07883505/12OmNxFJXuy",
"parentPublication": {
"id": "proceedings/vast/2016/5661/0",
"title": "2016 IEEE Conference on Visual Analytics Science and Technology (VAST)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2010/06/ttg20100600xxiv",
"title": "VisWeek Keynote Address",
"doi": null,
"abstractUrl": "/journal/tg/2010/06/ttg20100600xxiv/13rRUILtJzs",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2010/06/ttg20100600xxv",
"title": "VisWeek Capstone Address",
"doi": null,
"abstractUrl": "/journal/tg/2010/06/ttg20100600xxv/13rRUx0gefi",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2011/12/ttg201112xxiii",
"title": "VisWeek Keynote Address",
"doi": null,
"abstractUrl": "/journal/tg/2011/12/ttg201112xxiii/13rRUy2YLYs",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2009/05/ttg2009050880",
"title": "VisWeek 09",
"doi": null,
"abstractUrl": "/journal/tg/2009/05/ttg2009050880/13rRUyYjK5g",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/scivis/2018/6882/0/08823613",
"title": "SciVis 2018 Capstone Address",
"doi": null,
"abstractUrl": "/proceedings-article/scivis/2018/08823613/1d5kwNLsKhW",
"parentPublication": {
"id": "proceedings/scivis/2018/6882/0",
"title": "2018 IEEE Scientific Visualization Conference (SciVis)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg201112xxiii",
"articleId": "13rRUy2YLYs",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2011121737",
"articleId": "13rRUwgyOjj",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNARAncZ",
"title": "March/April",
"year": "2005",
"issueNum": "02",
"idPrefix": "it",
"pubType": "magazine",
"volume": "7",
"label": "March/April",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUEgarxx",
"doi": "10.1109/MITP.2005.47",
"abstract": "The idea of abstracted, well-defined, and ubiquitously invokable services replacing proprietary interprocess communications has been a goal of system designers for a long time. The rise of Web services has led to a lot of misconceptions about how they can and cannot support the Holy Grail of a service-oriented architecture (SOA). This article seeks to put Web services in perspective, explaining their current capabilities and what industry can expect from them in the near term. It gives an overview of how technologies such as the Extensible Markup Language (XML), XML schemas, Extensible Stylesheet Language Transformations (XSLT), the Simple Object Access Protocol (SOAP), the Web Services Description Language (WSDL), and universal description, discovery, and integration (UDDI) fit into the equation for an SOA.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The idea of abstracted, well-defined, and ubiquitously invokable services replacing proprietary interprocess communications has been a goal of system designers for a long time. The rise of Web services has led to a lot of misconceptions about how they can and cannot support the Holy Grail of a service-oriented architecture (SOA). This article seeks to put Web services in perspective, explaining their current capabilities and what industry can expect from them in the near term. It gives an overview of how technologies such as the Extensible Markup Language (XML), XML schemas, Extensible Stylesheet Language Transformations (XSLT), the Simple Object Access Protocol (SOAP), the Web Services Description Language (WSDL), and universal description, discovery, and integration (UDDI) fit into the equation for an SOA.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The idea of abstracted, well-defined, and ubiquitously invokable services replacing proprietary interprocess communications has been a goal of system designers for a long time. The rise of Web services has led to a lot of misconceptions about how they can and cannot support the Holy Grail of a service-oriented architecture (SOA). This article seeks to put Web services in perspective, explaining their current capabilities and what industry can expect from them in the near term. It gives an overview of how technologies such as the Extensible Markup Language (XML), XML schemas, Extensible Stylesheet Language Transformations (XSLT), the Simple Object Access Protocol (SOAP), the Web Services Description Language (WSDL), and universal description, discovery, and integration (UDDI) fit into the equation for an SOA.",
"title": "Web Services: What's Real and What's Not?",
"normalizedTitle": "Web Services: What's Real and What's Not?",
"fno": "f2014",
"hasPdf": true,
"idPrefix": "it",
"keywords": [
"Extensible Markup Language",
"XML",
"XML Schemas",
"Extensible Stylesheet Language Transformations",
"XSLT",
"Simple Object Access Protocol",
"SOAP",
"Web Services Description Language",
"WSDL",
"Universal Description Discovery And Integration",
"UDDI",
"Service Oriented Architecture",
"SOA",
"Web Services",
"Business Process Execution Language",
"BPEL"
],
"authors": [
{
"givenName": "Kevin J.",
"surname": "Ma",
"fullName": "Kevin J. Ma",
"affiliation": "Cisco Systems",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "02",
"pubDate": "2005-03-01 00:00:00",
"pubType": "mags",
"pages": "14-21",
"year": "2005",
"issn": "1520-9202",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/issre/2005/2482/0/24820257",
"title": "Testing Web Services by XML Perturbation",
"doi": null,
"abstractUrl": "/proceedings-article/issre/2005/24820257/12OmNAIvd14",
"parentPublication": {
"id": "proceedings/issre/2005/2482/0",
"title": "16th IEEE International Symposium on Software Reliability Engineering (ISSRE'05)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icebe/2007/3003/0/30030701",
"title": "Parallel XML Transformations on Multi-Core Processors",
"doi": null,
"abstractUrl": "/proceedings-article/icebe/2007/30030701/12OmNBBQZtj",
"parentPublication": {
"id": "proceedings/icebe/2007/3003/0",
"title": "IEEE International Conference on e-Business Engineering (ICEBE'07)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iciw/2010/4022/0/4022a067",
"title": "Nontraditional Approach to XML Web Services Interactions",
"doi": null,
"abstractUrl": "/proceedings-article/iciw/2010/4022a067/12OmNCdBDWO",
"parentPublication": {
"id": "proceedings/iciw/2010/4022/0",
"title": "Internet and Web Applications and Services, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aina/2004/2051/1/205110556",
"title": "Using the Extension Function of XSLT and DSL to Secure XML Documents",
"doi": null,
"abstractUrl": "/proceedings-article/aina/2004/205110556/12OmNClQ0tI",
"parentPublication": {
"id": "proceedings/aina/2004/2051/1",
"title": "18th International Conference on Advanced Information Networking and Applications, 2004. AINA 2004.",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cec-eee/2007/2913/0/29130505",
"title": "WSC-07: Evolving the Web Services Challenge",
"doi": null,
"abstractUrl": "/proceedings-article/cec-eee/2007/29130505/12OmNrFTray",
"parentPublication": {
"id": "proceedings/cec-eee/2007/2913/0",
"title": "2007 9th IEEE International Conference on e-Commerce Technology and the 4th IEEE International Conference on Enterprise Computing, e-Commerce, and e-Services",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wisa/2009/3874/0/3874a122",
"title": "Modeling and Analysis for Web Services Composition Based on Dynamic Software Architecture",
"doi": null,
"abstractUrl": "/proceedings-article/wisa/2009/3874a122/12OmNrkBwtC",
"parentPublication": {
"id": "proceedings/wisa/2009/3874/0",
"title": "Web Information Systems and Applications Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icws/2012/4752/0/4752a138",
"title": "Enabling the Delivery of Customizable Web Services",
"doi": null,
"abstractUrl": "/proceedings-article/icws/2012/4752a138/12OmNxX3uBz",
"parentPublication": {
"id": "proceedings/icws/2012/4752/0",
"title": "2012 IEEE 19th International Conference on Web Services",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hicss/2003/1874/4/187440122b",
"title": "XML Security Using XSLT",
"doi": null,
"abstractUrl": "/proceedings-article/hicss/2003/187440122b/12OmNxiKs66",
"parentPublication": {
"id": "proceedings/hicss/2003/1874/4",
"title": "36th Annual Hawaii International Conference on System Sciences, 2003. Proceedings of the",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icsoft/2015/8139/1/07521140",
"title": "Automatic generation of test data for XML schema-based testing of web services",
"doi": null,
"abstractUrl": "/proceedings-article/icsoft/2015/07521140/12OmNywxlUI",
"parentPublication": {
"id": "proceedings/icsoft/2015/8139/1",
"title": "2015 10th International Joint Conference on Software Technologies (ICSOFT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/nswctc/2010/4011/1/4011a554",
"title": "Design of XML Web Services Based E-government Heterogeneous Platform",
"doi": null,
"abstractUrl": "/proceedings-article/nswctc/2010/4011a554/12OmNzV70E7",
"parentPublication": {
"id": "proceedings/nswctc/2010/4011/1",
"title": "Networks Security, Wireless Communications and Trusted Computing, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": null,
"next": {
"fno": "f2023",
"articleId": "13rRUypp548",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNyGtjeI",
"title": "January/February",
"year": "2003",
"issueNum": "01",
"idPrefix": "ic",
"pubType": "magazine",
"volume": "7",
"label": "January/February",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUEgs2HF",
"doi": "10.1109/MIC.2003.10001",
"abstract": null,
"abstracts": [
{
"abstractType": "Regular",
"content": "",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": null,
"title": "Getting on the Third Wave",
"normalizedTitle": "Getting on the Third Wave",
"fno": "w1013",
"hasPdf": true,
"idPrefix": "ic",
"keywords": [],
"authors": [
{
"givenName": "Adam",
"surname": "Stone",
"fullName": "Adam Stone",
"affiliation": null,
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": false,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2003-01-01 00:00:00",
"pubType": "mags",
"pages": "13-15",
"year": "2003",
"issn": "1089-7801",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [],
"adjacentArticles": {
"previous": {
"fno": "w1008",
"articleId": "13rRUwI5U4e",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "w1016",
"articleId": "13rRUytnsSN",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNzw8iSG",
"title": "September",
"year": "1996",
"issueNum": "05",
"idPrefix": "so",
"pubType": "magazine",
"volume": "13",
"label": "September",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUwjGoE5",
"doi": "10.1109/MS.1996.10019",
"abstract": null,
"abstracts": [
{
"abstractType": "Regular",
"content": "",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": null,
"title": "What's Practical",
"normalizedTitle": "What's Practical",
"fno": "s5004",
"hasPdf": true,
"idPrefix": "so",
"keywords": [],
"authors": [
{
"givenName": "Alan M.",
"surname": "Davis",
"fullName": "Alan M. Davis",
"affiliation": null,
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": false,
"isOpenAccess": false,
"issueNum": "05",
"pubDate": "1996-09-01 00:00:00",
"pubType": "mags",
"pages": "4-5",
"year": "1996",
"issn": "0740-7459",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [],
"adjacentArticles": {
"previous": null,
"next": {
"fno": "s5006",
"articleId": "13rRUEgarlN",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNvmXJ42",
"title": "July/August",
"year": "2011",
"issueNum": "04",
"idPrefix": "cg",
"pubType": "magazine",
"volume": "31",
"label": "July/August",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUxZRbrc",
"doi": "10.1109/MCG.2011.64",
"abstract": "Computer graphics advances driven by product development became mature in the late 1980s, and advances driven by arts and entertainment matured in the early 2000s. The graphics industry is at an innovation plateau and is ready for the next wave of innovation. This third wave won't be driven in response to a single industry. Rather, innovative researchers will respond to three drivers: the visual representations necessary to handle emerging application disciplines, display and interaction device advances, and graphics systems design and implementation.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Computer graphics advances driven by product development became mature in the late 1980s, and advances driven by arts and entertainment matured in the early 2000s. The graphics industry is at an innovation plateau and is ready for the next wave of innovation. This third wave won't be driven in response to a single industry. Rather, innovative researchers will respond to three drivers: the visual representations necessary to handle emerging application disciplines, display and interaction device advances, and graphics systems design and implementation.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Computer graphics advances driven by product development became mature in the late 1980s, and advances driven by arts and entertainment matured in the early 2000s. The graphics industry is at an innovation plateau and is ready for the next wave of innovation. This third wave won't be driven in response to a single industry. Rather, innovative researchers will respond to three drivers: the visual representations necessary to handle emerging application disciplines, display and interaction device advances, and graphics systems design and implementation.",
"title": "The Third Wave in Computer Graphics and Interactive Techniques",
"normalizedTitle": "The Third Wave in Computer Graphics and Interactive Techniques",
"fno": "mcg2011040089",
"hasPdf": true,
"idPrefix": "cg",
"keywords": [
"Data Visualization",
"Computer Graphics",
"Rendering Computer Graphics",
"Interactive Systems",
"Product Development",
"Graphics And Multimedia",
"Computer Graphics",
"Visualization",
"Interactive Techniques",
"Display Technology"
],
"authors": [
{
"givenName": "David J.",
"surname": "Kasik",
"fullName": "David J. Kasik",
"affiliation": "Boeing",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "04",
"pubDate": "2011-07-01 00:00:00",
"pubType": "mags",
"pages": "89-93",
"year": "2011",
"issn": "0272-1716",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2012/1247/0/06180864",
"title": "Banquet presentation: What's next?: The third wave in computer graphics and interactive techniques",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2012/06180864/12OmNCf1DlJ",
"parentPublication": {
"id": "proceedings/vr/2012/1247/0",
"title": "Virtual Reality Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrais/1993/1363/0/00380766",
"title": "An interactive graphics display architecture",
"doi": null,
"abstractUrl": "/proceedings-article/vrais/1993/00380766/12OmNvTjZUe",
"parentPublication": {
"id": "proceedings/vrais/1993/1363/0",
"title": "Virtual Reality Annual International Symposium",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2011/348/0/06012087",
"title": "Wave seeds",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2011/06012087/12OmNwM6zZ3",
"parentPublication": {
"id": "proceedings/icme/2011/348/0",
"title": "2011 IEEE International Conference on Multimedia and Expo",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/visual/1993/3940/0/00398881",
"title": "Interactive shading for surface and volume visualization on graphics workstations",
"doi": null,
"abstractUrl": "/proceedings-article/visual/1993/00398881/12OmNxH9XgR",
"parentPublication": {
"id": "proceedings/visual/1993/3940/0",
"title": "Proceedings Visualization '93",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cgiv/2008/3359/0/04626981",
"title": "Evolution of Computer Graphics and Its impact on Engineering Product Development",
"doi": null,
"abstractUrl": "/proceedings-article/cgiv/2008/04626981/12OmNyen1vs",
"parentPublication": {
"id": "proceedings/cgiv/2008/3359/0",
"title": "2008 Fifth International Conference on Computer Graphics, Imaging and Visualisation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-vis/1995/7187/0/71870011",
"title": "Interactive Maximum Projection Volume Rendering",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/1995/71870011/12OmNzZmZv2",
"parentPublication": {
"id": "proceedings/ieee-vis/1995/7187/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/1993/06/i0602",
"title": "Visually Controlled Graphics",
"doi": null,
"abstractUrl": "/journal/tp/1993/06/i0602/13rRUwI5U8N",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/10/ttg2012101589",
"title": "Guest Editors' Introduction: Special Section on the Symposium on Interactive 3D Graphics and Games (I3D)",
"doi": null,
"abstractUrl": "/journal/tg/2012/10/ttg2012101589/13rRUwdIOUI",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2006/03/mcg2006030053",
"title": "Generating Comics from 3D Interactive Computer Graphics",
"doi": null,
"abstractUrl": "/magazine/cg/2006/03/mcg2006030053/13rRUxEhFuW",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/1998/03/mcg1998030042",
"title": "Computer Vision for Interactive Computer Graphics",
"doi": null,
"abstractUrl": "/magazine/cg/1998/03/mcg1998030042/13rRUyhaIiZ",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "mcg2011040078",
"articleId": "13rRUynZ5qi",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "mcg2011040094",
"articleId": "13rRUxDItl5",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNxRWI3Y",
"title": "July-September",
"year": "2010",
"issueNum": "03",
"idPrefix": "mu",
"pubType": "magazine",
"volume": "17",
"label": "July-September",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUxjQyrS",
"doi": "10.1109/MMUL.2010.53",
"abstract": "In this article, Chris Harrison, Jason Wiese, and Anind K. Dey discuss the predictions of Mark Weiser, the father of ubiquitous computing, who envisioned that we would have smart personal environments, with numerous computational devices embedded within each environment. The authors point out that, rather than this happening, what we have currently are personalized computational devices, for example, smart phones, tied to users rather than embedded in the environment. The interesting development of this observation is the crux of their article. Even though multimedia, per se, is not specifically addressed in the article, what the authors have to say is certainly relevant to our community, as smart computational devices and sensors of various sorts are certainly siblings under the skin.-William I. Grosky",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this article, Chris Harrison, Jason Wiese, and Anind K. Dey discuss the predictions of Mark Weiser, the father of ubiquitous computing, who envisioned that we would have smart personal environments, with numerous computational devices embedded within each environment. The authors point out that, rather than this happening, what we have currently are personalized computational devices, for example, smart phones, tied to users rather than embedded in the environment. The interesting development of this observation is the crux of their article. Even though multimedia, per se, is not specifically addressed in the article, what the authors have to say is certainly relevant to our community, as smart computational devices and sensors of various sorts are certainly siblings under the skin.-William I. Grosky",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this article, Chris Harrison, Jason Wiese, and Anind K. Dey discuss the predictions of Mark Weiser, the father of ubiquitous computing, who envisioned that we would have smart personal environments, with numerous computational devices embedded within each environment. The authors point out that, rather than this happening, what we have currently are personalized computational devices, for example, smart phones, tied to users rather than embedded in the environment. The interesting development of this observation is the crux of their article. Even though multimedia, per se, is not specifically addressed in the article, what the authors have to say is certainly relevant to our community, as smart computational devices and sensors of various sorts are certainly siblings under the skin.-William I. Grosky",
"title": "Achieving Ubiquity: The New Third Wave",
"normalizedTitle": "Achieving Ubiquity: The New Third Wave",
"fno": "mmu2010030008",
"hasPdf": true,
"idPrefix": "mu",
"keywords": [
"Pervasive Computing",
"Ubiquitous Computing",
"Embedded Computing",
"Microprocessors",
"DVD",
"Costs",
"Microcomputers",
"TV",
"Media Impact",
"Ubiquitous",
"Pervasive",
"Quality",
"Quantity",
"Computing",
"Ubicomp",
"Tab",
"Pad",
"Mobile Devices",
"Smart Environments",
"Multimedia And Graphics"
],
"authors": [
{
"givenName": "Chris",
"surname": "Harrison",
"fullName": "Chris Harrison",
"affiliation": "Carnegie Mellon University",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jason",
"surname": "Wiese",
"fullName": "Jason Wiese",
"affiliation": "Carnegie Mellon University",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Anind K.",
"surname": "Dey",
"fullName": "Anind K. Dey",
"affiliation": "Carnegie Mellon University",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "03",
"pubDate": "2010-07-01 00:00:00",
"pubType": "mags",
"pages": "8-12",
"year": "2010",
"issn": "1070-986X",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/ficloudw/2017/3281/0/3281a240",
"title": "A Mobile-Programmable Smart Mirror for Ambient IoT Environments",
"doi": null,
"abstractUrl": "/proceedings-article/ficloudw/2017/3281a240/12OmNwO5LTS",
"parentPublication": {
"id": "proceedings/ficloudw/2017/3281/0",
"title": "2017 IEEE 5th International Conference on Future Internet of Things and Cloud: Workshops (W-FiCloud)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isuvr/2010/4124/0/4124a028",
"title": "Ubiquity: Micro to Macro Ecosystems?",
"doi": null,
"abstractUrl": "/proceedings-article/isuvr/2010/4124a028/12OmNypIYyq",
"parentPublication": {
"id": "proceedings/isuvr/2010/4124/0",
"title": "International Symposium on Ubiquitous Virtual Reality",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/compsacw/2010/4105/0/4105a299",
"title": "A Middleware for Personal Smart Spaces",
"doi": null,
"abstractUrl": "/proceedings-article/compsacw/2010/4105a299/12OmNyugyY3",
"parentPublication": {
"id": "proceedings/compsacw/2010/4105/0",
"title": "2010 IEEE 34th Annual Computer Software and Applications Conference Workshops",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cit/2010/4108/0/4108b340",
"title": "Reasoning for Smart Space Application: Comparing Three Reasoning Engines CLIPS, Jess and Win-prolog",
"doi": null,
"abstractUrl": "/proceedings-article/cit/2010/4108b340/12OmNzC5SQm",
"parentPublication": {
"id": "proceedings/cit/2010/4108/0",
"title": "Computer and Information Technology, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cse/2011/4477/0/4477z029",
"title": "Keynote 1",
"doi": null,
"abstractUrl": "/proceedings-article/cse/2011/4477z029/12OmNzSyCfh",
"parentPublication": {
"id": "proceedings/cse/2011/4477/0",
"title": "2011 14th IEEE International Conference on Computational Science and Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mue/2011/4470/0/4470a018",
"title": "An Object-based Virtual Network in Unbiquitous Computing Environment",
"doi": null,
"abstractUrl": "/proceedings-article/mue/2011/4470a018/12OmNzd7bYP",
"parentPublication": {
"id": "proceedings/mue/2011/4470/0",
"title": "Multimedia and Ubiquitous Engineering, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/pc/2005/03/b3055",
"title": "Audio Networking: The Forgotten Wireless Technology",
"doi": null,
"abstractUrl": "/magazine/pc/2005/03/b3055/13rRUILtJwt",
"parentPublication": {
"id": "mags/pc",
"title": "IEEE Pervasive Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/pc/2007/02/b2041",
"title": "The Urbanet Revolution: Sensor Power to the People!",
"doi": null,
"abstractUrl": "/magazine/pc/2007/02/b2041/13rRUwI5TO6",
"parentPublication": {
"id": "mags/pc",
"title": "IEEE Pervasive Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/pc/2013/03/mpc2013030005",
"title": "Ingredients for a New Wave of Ubicomp Products",
"doi": null,
"abstractUrl": "/magazine/pc/2013/03/mpc2013030005/13rRUwInvvn",
"parentPublication": {
"id": "mags/pc",
"title": "IEEE Pervasive Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/pc/2002/01/00993141",
"title": "The computer for the 21st Century",
"doi": null,
"abstractUrl": "/magazine/pc/2002/01/00993141/13rRUxAASYb",
"parentPublication": {
"id": "mags/pc",
"title": "IEEE Pervasive Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "mmu2010030054",
"articleId": "13rRUzp02la",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "mmu2010030014",
"articleId": "13rRUxNEqLr",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNAolH0P",
"title": "July-September",
"year": "2000",
"issueNum": "03",
"idPrefix": "mu",
"pubType": "magazine",
"volume": "7",
"label": "July-September",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUy3xYcq",
"doi": "10.1109/MMUL.2000.10018",
"abstract": null,
"abstracts": [
{
"abstractType": "Regular",
"content": "",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": null,
"title": "What's New",
"normalizedTitle": "What's New",
"fno": "u3005",
"hasPdf": true,
"idPrefix": "mu",
"keywords": [],
"authors": [
{
"givenName": "William",
"surname": "Grosky",
"fullName": "William Grosky",
"affiliation": null,
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": false,
"isOpenAccess": false,
"issueNum": "03",
"pubDate": "2000-07-01 00:00:00",
"pubType": "mags",
"pages": "5",
"year": "2000",
"issn": "1070-986X",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [],
"adjacentArticles": {
"previous": {
"fno": "u3004",
"articleId": "13rRUxDIte0",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "u3006",
"articleId": "13rRUwwaKoU",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNCfAPy0",
"title": "April",
"year": "2005",
"issueNum": "04",
"idPrefix": "td",
"pubType": "journal",
"volume": "16",
"label": "April",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUwj7coN",
"doi": "10.1109/TPDS.2005.46",
"abstract": "Abstract—Many solutions have been proposed to tackle the load balancing issue in DHT-based P2P systems. However, all these solutions either ignore the heterogeneity nature of the system, or reassign loads among nodes without considering proximity relationships, or both. In this paper, we present an efficient, proximity-aware load balancing scheme by using the concept of virtual servers. To the best of our knowledge, this is the first work to use proximity information in load balancing. In particular, our main contributions are: 1) Relying on a self-organized, fully distributed k{\\hbox{-}}{\\rm{ary}} tree structure constructed on top of a DHT, load balance is achieved by aligning those two skews in load distribution and node capacity inherent in P2P systems—that is, have higher capacity nodes carry more loads; 2) proximity information is used to guide virtual server reassignments such that virtual servers are reassigned and transferred between physically close heavily loaded nodes and lightly loaded nodes, thereby minimizing the load movement cost and allowing load balancing to perform efficiently; and 3) our simulations show that our proximity-aware load balancing scheme reduces the load movement cost by 11-65 percent for all the combinations of two representative network topologies, two node capacity profiles, and two load distributions of virtual servers. Moreover, we achieve virtual server reassignments in O(\\log{N}) time.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Abstract—Many solutions have been proposed to tackle the load balancing issue in DHT-based P2P systems. However, all these solutions either ignore the heterogeneity nature of the system, or reassign loads among nodes without considering proximity relationships, or both. In this paper, we present an efficient, proximity-aware load balancing scheme by using the concept of virtual servers. To the best of our knowledge, this is the first work to use proximity information in load balancing. In particular, our main contributions are: 1) Relying on a self-organized, fully distributed k{\\hbox{-}}{\\rm{ary}} tree structure constructed on top of a DHT, load balance is achieved by aligning those two skews in load distribution and node capacity inherent in P2P systems—that is, have higher capacity nodes carry more loads; 2) proximity information is used to guide virtual server reassignments such that virtual servers are reassigned and transferred between physically close heavily loaded nodes and lightly loaded nodes, thereby minimizing the load movement cost and allowing load balancing to perform efficiently; and 3) our simulations show that our proximity-aware load balancing scheme reduces the load movement cost by 11-65 percent for all the combinations of two representative network topologies, two node capacity profiles, and two load distributions of virtual servers. Moreover, we achieve virtual server reassignments in O(\\log{N}) time.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Abstract—Many solutions have been proposed to tackle the load balancing issue in DHT-based P2P systems. However, all these solutions either ignore the heterogeneity nature of the system, or reassign loads among nodes without considering proximity relationships, or both. In this paper, we present an efficient, proximity-aware load balancing scheme by using the concept of virtual servers. To the best of our knowledge, this is the first work to use proximity information in load balancing. In particular, our main contributions are: 1) Relying on a self-organized, fully distributed k{\\hbox{-}}{\\rm{ary}} tree structure constructed on top of a DHT, load balance is achieved by aligning those two skews in load distribution and node capacity inherent in P2P systems—that is, have higher capacity nodes carry more loads; 2) proximity information is used to guide virtual server reassignments such that virtual servers are reassigned and transferred between physically close heavily loaded nodes and lightly loaded nodes, thereby minimizing the load movement cost and allowing load balancing to perform efficiently; and 3) our simulations show that our proximity-aware load balancing scheme reduces the load movement cost by 11-65 percent for all the combinations of two representative network topologies, two node capacity profiles, and two load distributions of virtual servers. Moreover, we achieve virtual server reassignments in O(\\log{N}) time.",
"title": "Efficient, Proximity-Aware Load Balancing for DHT-Based P2P Systems",
"normalizedTitle": "Efficient, Proximity-Aware Load Balancing for DHT-Based P2P Systems",
"fno": "l0349",
"hasPdf": true,
"idPrefix": "td",
"keywords": [
"Proximity Aware",
"Peer To Peer",
"Virtual Server",
"Load Balancing"
],
"authors": [
{
"givenName": "Yingwu",
"surname": "Zhu",
"fullName": "Yingwu Zhu",
"affiliation": "IEEE",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Yiming",
"surname": "Hu",
"fullName": "Yiming Hu",
"affiliation": "IEEE",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "04",
"pubDate": "2005-04-01 00:00:00",
"pubType": "trans",
"pages": "349-361",
"year": "2005",
"issn": "1045-9219",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/ipdps/2006/0054/0/01639279",
"title": "Hash-based proximity clustering for load balancing in heterogeneous DHT networks",
"doi": null,
"abstractUrl": "/proceedings-article/ipdps/2006/01639279/12OmNqGA56K",
"parentPublication": {
"id": "proceedings/ipdps/2006/0054/0",
"title": "Proceedings 20th IEEE International Parallel & Distributed Processing Symposium",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/lcn/2006/0418/0/04116535",
"title": "Achieving Resilient and Efficient Load Balancing in DHT-based P2P Systems",
"doi": null,
"abstractUrl": "/proceedings-article/lcn/2006/04116535/12OmNs5rl03",
"parentPublication": {
"id": "proceedings/lcn/2006/0418/0",
"title": "Proceedings. 2006 31st IEEE Conference on Local Computer Networks",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/gpc-workshops/2008/3177/0/3177a364",
"title": "A Proximity-Aware Load Balancing Algorithm in P2P Systems",
"doi": null,
"abstractUrl": "/proceedings-article/gpc-workshops/2008/3177a364/12OmNvy25d6",
"parentPublication": {
"id": "proceedings/gpc-workshops/2008/3177/0",
"title": "GPC Workshops - 2008 3rd International Conference on Grid and Pervasive Computing Workshops",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpads/2009/3900/0/3900a440",
"title": "Papnet: A Proximity-aware Alphanumeric Overlay Supporting Ganesan On-Line Load Balancing",
"doi": null,
"abstractUrl": "/proceedings-article/icpads/2009/3900a440/12OmNwtWfQ6",
"parentPublication": {
"id": "proceedings/icpads/2009/3900/0",
"title": "Parallel and Distributed Systems, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/p2p/2003/2023/0/20230220",
"title": "Efficient, Proximity-Aware Load Balancing for Structured P2P Systems",
"doi": null,
"abstractUrl": "/proceedings-article/p2p/2003/20230220/12OmNxE2mLv",
"parentPublication": {
"id": "proceedings/p2p/2003/2023/0",
"title": "Peer-to-Peer Computing, IEEE International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/kse/2009/3846/0/3846a195",
"title": "Building a Low-latency, Proximity-aware DHT-Based P2P Network",
"doi": null,
"abstractUrl": "/proceedings-article/kse/2009/3846a195/12OmNxEBziq",
"parentPublication": {
"id": "proceedings/kse/2009/3846/0",
"title": "Knowledge and Systems Engineering, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ispa/2008/3471/0/3471a035",
"title": "Towards Practical Virtual Server-Based Load Balancing for Distributed Hash Tables",
"doi": null,
"abstractUrl": "/proceedings-article/ispa/2008/3471a035/12OmNxHryiE",
"parentPublication": {
"id": "proceedings/ispa/2008/3471/0",
"title": "2008 IEEE International Symposium on Parallel and Distributed Processing with Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/paccs/2009/3614/0/3614a191",
"title": "A Load-balancing Approach for DHT-Based P2P Networks",
"doi": null,
"abstractUrl": "/proceedings-article/paccs/2009/3614a191/12OmNxR5UGH",
"parentPublication": {
"id": "proceedings/paccs/2009/3614/0",
"title": "2009 Pacific-Asia Conference on Circuits, Communications and Systems (PACCS 2009)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/euc/2008/3492/2/3492b513",
"title": "Adaptive Load Balancing for Lookups in Heterogeneous DHT",
"doi": null,
"abstractUrl": "/proceedings-article/euc/2008/3492b513/12OmNxeutcY",
"parentPublication": {
"id": "proceedings/euc/2008/3492/1",
"title": "Embedded and Ubiquitous Computing, IEEE/IFIP International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cse/2011/4477/0/4477a464",
"title": "Proximity-Aware DHT for Efficient Lookup Service in Peer-to-Peer Applications",
"doi": null,
"abstractUrl": "/proceedings-article/cse/2011/4477a464/12OmNylboIO",
"parentPublication": {
"id": "proceedings/cse/2011/4477/0",
"title": "2011 14th IEEE International Conference on Computational Science and Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "l0335",
"articleId": "13rRUxBJhuV",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "l0362",
"articleId": "13rRUxBa5rq",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNx8fieO",
"title": "January/February",
"year": "2010",
"issueNum": "01",
"idPrefix": "tg",
"pubType": "journal",
"volume": "16",
"label": "January/February",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUxZ0o1t",
"doi": "10.1109/TVCG.2009.62",
"abstract": "In immersive virtual environments (IVEs), users can control their virtual viewpoint by moving their tracked head and walking through the real world. Usually, movements in the real world are mapped one-to-one to virtual camera motions. With redirection techniques, the virtual camera is manipulated by applying gains to user motion so that the virtual world moves differently than the real world. Thus, users can walk through large-scale IVEs while physically remaining in a reasonably small workspace. In psychophysical experiments with a two-alternative forced-choice task, we have quantified how much humans can unknowingly be redirected on physical paths that are different from the visually perceived paths. We tested 12 subjects in three different experiments: (E1) discrimination between virtual and physical rotations, (E2) discrimination between virtual and physical straightforward movements, and (E3) discrimination of path curvature. In experiment E1, subjects performed rotations with different gains, and then had to choose whether the visually perceived rotation was smaller or greater than the physical rotation. In experiment E2, subjects chose whether the physical walk was shorter or longer than the visually perceived scaled travel distance. In experiment E3, subjects estimate the path curvature when walking a curved path in the real world while the visual display shows a straight path in the virtual world. Our results show that users can be turned physically about 49 percent more or 20 percent less than the perceived virtual rotation, distances can be downscaled by 14 percent and upscaled by 26 percent, and users can be redirected on a circular arc with a radius greater than 22 m while they believe that they are walking straight.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In immersive virtual environments (IVEs), users can control their virtual viewpoint by moving their tracked head and walking through the real world. Usually, movements in the real world are mapped one-to-one to virtual camera motions. With redirection techniques, the virtual camera is manipulated by applying gains to user motion so that the virtual world moves differently than the real world. Thus, users can walk through large-scale IVEs while physically remaining in a reasonably small workspace. In psychophysical experiments with a two-alternative forced-choice task, we have quantified how much humans can unknowingly be redirected on physical paths that are different from the visually perceived paths. We tested 12 subjects in three different experiments: (E1) discrimination between virtual and physical rotations, (E2) discrimination between virtual and physical straightforward movements, and (E3) discrimination of path curvature. In experiment E1, subjects performed rotations with different gains, and then had to choose whether the visually perceived rotation was smaller or greater than the physical rotation. In experiment E2, subjects chose whether the physical walk was shorter or longer than the visually perceived scaled travel distance. In experiment E3, subjects estimate the path curvature when walking a curved path in the real world while the visual display shows a straight path in the virtual world. Our results show that users can be turned physically about 49 percent more or 20 percent less than the perceived virtual rotation, distances can be downscaled by 14 percent and upscaled by 26 percent, and users can be redirected on a circular arc with a radius greater than 22 m while they believe that they are walking straight.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In immersive virtual environments (IVEs), users can control their virtual viewpoint by moving their tracked head and walking through the real world. Usually, movements in the real world are mapped one-to-one to virtual camera motions. With redirection techniques, the virtual camera is manipulated by applying gains to user motion so that the virtual world moves differently than the real world. Thus, users can walk through large-scale IVEs while physically remaining in a reasonably small workspace. In psychophysical experiments with a two-alternative forced-choice task, we have quantified how much humans can unknowingly be redirected on physical paths that are different from the visually perceived paths. We tested 12 subjects in three different experiments: (E1) discrimination between virtual and physical rotations, (E2) discrimination between virtual and physical straightforward movements, and (E3) discrimination of path curvature. In experiment E1, subjects performed rotations with different gains, and then had to choose whether the visually perceived rotation was smaller or greater than the physical rotation. In experiment E2, subjects chose whether the physical walk was shorter or longer than the visually perceived scaled travel distance. In experiment E3, subjects estimate the path curvature when walking a curved path in the real world while the visual display shows a straight path in the virtual world. Our results show that users can be turned physically about 49 percent more or 20 percent less than the perceived virtual rotation, distances can be downscaled by 14 percent and upscaled by 26 percent, and users can be redirected on a circular arc with a radius greater than 22 m while they believe that they are walking straight.",
"title": "Estimation of Detection Thresholds for Redirected Walking Techniques",
"normalizedTitle": "Estimation of Detection Thresholds for Redirected Walking Techniques",
"fno": "ttg2010010017",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Virtual Reality",
"Virtual Locomotion",
"Redirected Walking"
],
"authors": [
{
"givenName": "Frank",
"surname": "Steinicke",
"fullName": "Frank Steinicke",
"affiliation": "University of Münster, Münster",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Gerd",
"surname": "Bruder",
"fullName": "Gerd Bruder",
"affiliation": "University of Münster, Münster",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jason",
"surname": "Jerald",
"fullName": "Jason Jerald",
"affiliation": "University of North Carolina at Chapel Hill, Chapel Hill",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Harald",
"surname": "Frenz",
"fullName": "Harald Frenz",
"affiliation": "University of Münster, Münster",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Markus",
"surname": "Lappe",
"fullName": "Markus Lappe",
"affiliation": "University of Münster, Münster",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2010-01-01 00:00:00",
"pubType": "trans",
"pages": "17-27",
"year": "2010",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/cw/2008/3381/0/3381a217",
"title": "Taxonomy and Implementation of Redirection Techniques for Ubiquitous Passive Haptic Feedback",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2008/3381a217/12OmNARiM8i",
"parentPublication": {
"id": "proceedings/cw/2008/3381/0",
"title": "2008 International Conference on Cyberworlds",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2013/4795/0/06549412",
"title": "Estimation of detection thresholds for acoustic based redirected walking techniques",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2013/06549412/12OmNz2C1yn",
"parentPublication": {
"id": "proceedings/vr/2013/4795/0",
"title": "2013 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446225",
"title": "Effect of Environment Size on Curvature Redirected Walking Thresholds",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446225/13bd1sx4Zt8",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2013/01/mcg2013010006",
"title": "Using Perceptual Illusions for Redirected Walking",
"doi": null,
"abstractUrl": "/magazine/cg/2013/01/mcg2013010006/13rRUB6SpRZ",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/11/ttg2013111872",
"title": "Optimizing Constrained-Environment Redirected Walking Instructions Using Search Techniques",
"doi": null,
"abstractUrl": "/journal/tg/2013/11/ttg2013111872/13rRUIM2VBH",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/04/ttg2012040538",
"title": "Redirecting Walking and Driving for Natural Navigation in Immersive Virtual Environments",
"doi": null,
"abstractUrl": "/journal/tg/2012/04/ttg2012040538/13rRUwgQpDs",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2022/9617/0/961700a379",
"title": "Effects of Virtual Room Size and Objects on Relative Translation Gain Thresholds in Redirected Walking",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2022/961700a379/1CJcsYYBYJi",
"parentPublication": {
"id": "proceedings/vr/2022/9617/0",
"title": "2022 IEEE on Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798231",
"title": "The Effect of Hanger Reflex on Virtual Reality Redirected Walking",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798231/1cJ0KBrAUYE",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798117",
"title": "Estimation of Rotation Gain Thresholds for Redirected Walking Considering FOV and Gender",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798117/1cJ1fo5PwqY",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2021/1838/0/255600a448",
"title": "Walking Outside the Box: Estimation of Detection Thresholds for Non-Forward Steps",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2021/255600a448/1tuAlT6IUfu",
"parentPublication": {
"id": "proceedings/vr/2021/1838/0",
"title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2010010004",
"articleId": "13rRUwvT9gn",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2010010028",
"articleId": "13rRUwhHcJd",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNz2C1BC",
"title": "July",
"year": "2012",
"issueNum": "07",
"idPrefix": "tg",
"pubType": "journal",
"volume": "18",
"label": "July",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUyuNswW",
"doi": "10.1109/TVCG.2011.275",
"abstract": "Redirected walking techniques allow people to walk in a larger virtual space than the physical extents of the laboratory. We describe two experiments conducted to investigate human sensitivity to walking on a curved path and to validate a new redirected walking technique. In a psychophysical experiment, we found that sensitivity to walking on a curved path was significantly lower for slower walking speeds (radius of 10 m versus 22 m). In an applied study, we investigated the influence of a velocity-dependent dynamic gain controller and an avatar controller on the average distance that participants were able to freely walk before needing to be reoriented. The mean walked distance was significantly greater in the dynamic gain controller condition, as compared to the static controller (22 m versus 15 m). Our results demonstrate that perceptually motivated dynamic redirected walking techniques, in combination with reorientation techniques, allow for unaided exploration of a large virtual city model.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Redirected walking techniques allow people to walk in a larger virtual space than the physical extents of the laboratory. We describe two experiments conducted to investigate human sensitivity to walking on a curved path and to validate a new redirected walking technique. In a psychophysical experiment, we found that sensitivity to walking on a curved path was significantly lower for slower walking speeds (radius of 10 m versus 22 m). In an applied study, we investigated the influence of a velocity-dependent dynamic gain controller and an avatar controller on the average distance that participants were able to freely walk before needing to be reoriented. The mean walked distance was significantly greater in the dynamic gain controller condition, as compared to the static controller (22 m versus 15 m). Our results demonstrate that perceptually motivated dynamic redirected walking techniques, in combination with reorientation techniques, allow for unaided exploration of a large virtual city model.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Redirected walking techniques allow people to walk in a larger virtual space than the physical extents of the laboratory. We describe two experiments conducted to investigate human sensitivity to walking on a curved path and to validate a new redirected walking technique. In a psychophysical experiment, we found that sensitivity to walking on a curved path was significantly lower for slower walking speeds (radius of 10 m versus 22 m). In an applied study, we investigated the influence of a velocity-dependent dynamic gain controller and an avatar controller on the average distance that participants were able to freely walk before needing to be reoriented. The mean walked distance was significantly greater in the dynamic gain controller condition, as compared to the static controller (22 m versus 15 m). Our results demonstrate that perceptually motivated dynamic redirected walking techniques, in combination with reorientation techniques, allow for unaided exploration of a large virtual city model.",
"title": "Velocity-Dependent Dynamic Curvature Gain for Redirected Walking",
"normalizedTitle": "Velocity-Dependent Dynamic Curvature Gain for Redirected Walking",
"fno": "06200791",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Psychology",
"Avatars",
"Gain Control",
"Gait Analysis",
"Virtual City Model",
"Velocity Dependent Dynamic Curvature Gain",
"Redirected Walking Techniques",
"Virtual Space",
"Human Walking Sensitivity",
"Curved Path",
"Psychophysical Experiment",
"Velocity Dependent Dynamic Gain Controller",
"Avatar Controller",
"Dynamic Gain Controller Condition",
"Static Controller",
"Reorientation Techniques",
"Legged Locomotion",
"Sensitivity",
"Trajectory",
"Virtual Environments",
"Particle Measurements",
"Atmospheric Measurements",
"Games",
"Avatars",
"Virtual Reality",
"Redirected Walking",
"Virtual Locomotion",
"Curvature Sensitivity"
],
"authors": [
{
"givenName": "H. H.",
"surname": "Bulthoff",
"fullName": "H. H. Bulthoff",
"affiliation": "Max Planck Inst. for Biol. Cybern., Tubingen, Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "U.",
"surname": "Kloos",
"fullName": "U. Kloos",
"affiliation": "Reutlingen Univ., Reutlingen, Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "D.",
"surname": "Engel",
"fullName": "D. Engel",
"affiliation": "Max Planck Inst. for Biol. Cybern., Tubingen, Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "J. L.",
"surname": "Souman",
"fullName": "J. L. Souman",
"affiliation": "Max Planck Inst. for Biol. Cybern., Tubingen, Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "C. T.",
"surname": "Neth",
"fullName": "C. T. Neth",
"affiliation": "Max Planck Inst. for Biol. Cybern., Tubingen, Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "B. J.",
"surname": "Mohler",
"fullName": "B. J. Mohler",
"affiliation": "Max Planck Inst. for Biol. Cybern., Tubingen, Germany",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "07",
"pubDate": "2012-07-01 00:00:00",
"pubType": "trans",
"pages": "1041-1052",
"year": "2012",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2011/0039/0/05759454",
"title": "Velocity-dependent dynamic curvature gain for redirected walking",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2011/05759454/12OmNC8MsBR",
"parentPublication": {
"id": "proceedings/vr/2011/0039/0",
"title": "2011 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2011/0039/0/05759437",
"title": "An evaluation of navigational ability comparing Redirected Free Exploration with Distractors to Walking-in-Place and joystick locomotio interfaces",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2011/05759437/12OmNx8OuyK",
"parentPublication": {
"id": "proceedings/vr/2011/0039/0",
"title": "2011 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2017/6647/0/07892373",
"title": "Application of redirected walking in room-scale VR",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892373/12OmNxG1ySA",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2013/4795/0/06549412",
"title": "Estimation of detection thresholds for acoustic based redirected walking techniques",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2013/06549412/12OmNz2C1yn",
"parentPublication": {
"id": "proceedings/vr/2013/4795/0",
"title": "2013 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pive/2012/1218/0/06229795",
"title": "Adaptive redirected walking in a virtual world",
"doi": null,
"abstractUrl": "/proceedings-article/pive/2012/06229795/12OmNzUxOk4",
"parentPublication": {
"id": "proceedings/pive/2012/1218/0",
"title": "2012 IEEE VR Workshop on Perceptual Illusions in Virtual Environments",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2010/01/ttg2010010017",
"title": "Estimation of Detection Thresholds for Redirected Walking Techniques",
"doi": null,
"abstractUrl": "/journal/tg/2010/01/ttg2010010017/13rRUxZ0o1t",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2015/04/07036075",
"title": "Cognitive Resource Demands of Redirected Walking",
"doi": null,
"abstractUrl": "/journal/tg/2015/04/07036075/13rRUxcKzVm",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/05/08645699",
"title": "Shrinking Circles: Adaptation to Increased Curvature Gain in Redirected Walking",
"doi": null,
"abstractUrl": "/journal/tg/2019/05/08645699/17PYElBjW00",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/05/10049730",
"title": "Monte-Carlo Redirected Walking: Gain Selection Through Simulated Walks",
"doi": null,
"abstractUrl": "/journal/tg/2023/05/10049730/1KYowitu5OM",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/11/09523890",
"title": "Redirected Walking using Continuous Curvature Manipulation",
"doi": null,
"abstractUrl": "/journal/tg/2021/11/09523890/1wpqBpgOKUE",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "05999665",
"articleId": "13rRUILLkvo",
"__typename": "AdjacentArticleType"
},
"next": null,
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
|
{
"issue": {
"id": "1CpcG1DISYM",
"title": "May",
"year": "2022",
"issueNum": "05",
"idPrefix": "tg",
"pubType": "journal",
"volume": "28",
"label": "May",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "1B4hxt06P9m",
"doi": "10.1109/TVCG.2022.3150466",
"abstract": "Developing effective strategies for redirected walking requires extensive evaluations across a variety of factors that influence performance. Because these large-scale experiments are often not practical with user studies, researchers have instead utilized simulations to systematically test different algorithm parameters, physical space configurations, and virtual walking paths. Although simulation offers an efficient way to evaluate redirected walking algorithms, it remains an open question whether this evaluation methodology is ecologically valid. In this paper, we investigate the interaction between locomotion behavior and redirection gains at a micro-level (across small path segments) and macro-level (across an entire experience). This examination involves analyzing data from real users and comparing algorithm performance metrics with a simulated user model. The results identify specific properties of user locomotion behavior that influence the application of redirected walking gains and resets. Overall, we found that the simulation provided a conservative estimate of the average performance with real users and observed that performance trends when comparing two redirected walking algorithms were preserved. In general, these results indicate that simulation is an empirically valid evaluation methodology for redirected walking algorithms.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Developing effective strategies for redirected walking requires extensive evaluations across a variety of factors that influence performance. Because these large-scale experiments are often not practical with user studies, researchers have instead utilized simulations to systematically test different algorithm parameters, physical space configurations, and virtual walking paths. Although simulation offers an efficient way to evaluate redirected walking algorithms, it remains an open question whether this evaluation methodology is ecologically valid. In this paper, we investigate the interaction between locomotion behavior and redirection gains at a micro-level (across small path segments) and macro-level (across an entire experience). This examination involves analyzing data from real users and comparing algorithm performance metrics with a simulated user model. The results identify specific properties of user locomotion behavior that influence the application of redirected walking gains and resets. Overall, we found that the simulation provided a conservative estimate of the average performance with real users and observed that performance trends when comparing two redirected walking algorithms were preserved. In general, these results indicate that simulation is an empirically valid evaluation methodology for redirected walking algorithms.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Developing effective strategies for redirected walking requires extensive evaluations across a variety of factors that influence performance. Because these large-scale experiments are often not practical with user studies, researchers have instead utilized simulations to systematically test different algorithm parameters, physical space configurations, and virtual walking paths. Although simulation offers an efficient way to evaluate redirected walking algorithms, it remains an open question whether this evaluation methodology is ecologically valid. In this paper, we investigate the interaction between locomotion behavior and redirection gains at a micro-level (across small path segments) and macro-level (across an entire experience). This examination involves analyzing data from real users and comparing algorithm performance metrics with a simulated user model. The results identify specific properties of user locomotion behavior that influence the application of redirected walking gains and resets. Overall, we found that the simulation provided a conservative estimate of the average performance with real users and observed that performance trends when comparing two redirected walking algorithms were preserved. In general, these results indicate that simulation is an empirically valid evaluation methodology for redirected walking algorithms.",
"title": "Validating Simulation-Based Evaluation of Redirected Walking Systems",
"normalizedTitle": "Validating Simulation-Based Evaluation of Redirected Walking Systems",
"fno": "09715721",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Legged Locomotion",
"Solid Modeling",
"Prediction Algorithms",
"Biological System Modeling",
"Task Analysis",
"Virtual Environments",
"Heuristic Algorithms",
"Virtual Reality",
"Redirected Walking",
"Locomotion",
"Simulation"
],
"authors": [
{
"givenName": "Mahdi",
"surname": "Azmandian",
"fullName": "Mahdi Azmandian",
"affiliation": "Institute for Creative Technologies, University of Southern California, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Rhys",
"surname": "Yahata",
"fullName": "Rhys Yahata",
"affiliation": "Institute for Creative Technologies, University of Southern California, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Timofey",
"surname": "Grechkin",
"fullName": "Timofey Grechkin",
"affiliation": "Institute for Creative Technologies, University of Southern California, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jerald",
"surname": "Thomas",
"fullName": "Jerald Thomas",
"affiliation": "Department of Computer Science & Engineering, University of Minnesota, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Evan Suma",
"surname": "Rosenberg",
"fullName": "Evan Suma Rosenberg",
"affiliation": "Department of Computer Science & Engineering, University of Minnesota, USA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "05",
"pubDate": "2022-05-01 00:00:00",
"pubType": "trans",
"pages": "2288-2298",
"year": "2022",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/3dui/2016/0842/0/07460032",
"title": "Automated path prediction for redirected walking using navigation meshes",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2016/07460032/12OmNBKEymO",
"parentPublication": {
"id": "proceedings/3dui/2016/0842/0",
"title": "2016 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2013/4795/0/06549395",
"title": "Flexible and general redirected walking for head-mounted displays",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2013/06549395/12OmNxFJXN3",
"parentPublication": {
"id": "proceedings/vr/2013/4795/0",
"title": "2013 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2016/0842/0/07460030",
"title": "Eye tracking for locomotion prediction in redirected walking",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2016/07460030/12OmNz4SOsF",
"parentPublication": {
"id": "proceedings/3dui/2016/0842/0",
"title": "2016 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08448288",
"title": "Experiencing an Invisible World War I Battlefield Through Narrative-Driven Redirected Walking in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08448288/13bd1fZBGdu",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446579",
"title": "Leveraging Configuration Spaces and Navigation Functions for Redirected Walking",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446579/13bd1fdV4lq",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2015/04/07036075",
"title": "Cognitive Resource Demands of Redirected Walking",
"doi": null,
"abstractUrl": "/journal/tg/2015/04/07036075/13rRUxcKzVm",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a922",
"title": "Robust Redirected Walking in the Wild",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a922/1CJfaCP53nq",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798319",
"title": "Simulation and Evaluation of Three-User Redirected Walking Algorithm in Shared Physical Spaces",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798319/1cJ1aPwr8l2",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090595",
"title": "Reactive Alignment of Virtual and Physical Environments Using Redirected Walking",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090595/1jIxm1j8B2w",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ec/2022/02/09364750",
"title": "Multi-Technique Redirected Walking Method",
"doi": null,
"abstractUrl": "/journal/ec/2022/02/09364750/1rxdpzgvsxG",
"parentPublication": {
"id": "trans/ec",
"title": "IEEE Transactions on Emerging Topics in Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "09715723",
"articleId": "1B4hxCQXB4c",
"__typename": "AdjacentArticleType"
},
"next": null,
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNANBZko",
"title": "Jan.-Feb.",
"year": "2013",
"issueNum": "01",
"idPrefix": "cg",
"pubType": "magazine",
"volume": "33",
"label": "Jan.-Feb.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUB6SpRZ",
"doi": "10.1109/MCG.2013.13",
"abstract": "Redirected walking (RDW) gives users the ability to explore a virtual world by walking in a confined physical space. It inconspicuously guides them on a physical path that might differ from the path they perceive in the virtual world. Exploiting three motion illusions-the change-blindness illusion, the four-stroke motion illusion, and the motion-without-movement illusion-can increase RDW's effectiveness.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Redirected walking (RDW) gives users the ability to explore a virtual world by walking in a confined physical space. It inconspicuously guides them on a physical path that might differ from the path they perceive in the virtual world. Exploiting three motion illusions-the change-blindness illusion, the four-stroke motion illusion, and the motion-without-movement illusion-can increase RDW's effectiveness.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Redirected walking (RDW) gives users the ability to explore a virtual world by walking in a confined physical space. It inconspicuously guides them on a physical path that might differ from the path they perceive in the virtual world. Exploiting three motion illusions-the change-blindness illusion, the four-stroke motion illusion, and the motion-without-movement illusion-can increase RDW's effectiveness.",
"title": "Using Perceptual Illusions for Redirected Walking",
"normalizedTitle": "Using Perceptual Illusions for Redirected Walking",
"fno": "mcg2013010006",
"hasPdf": true,
"idPrefix": "cg",
"keywords": [
"Legged Locomotion",
"Visualization",
"Cameras",
"Tracking",
"Virtual Reality",
"Blindness",
"Image Edge Detection",
"Human Computer Interaction",
"Spatial Resolution",
"Spatial Interfaces",
"Legged Locomotion",
"Visualization",
"Cameras",
"Tracking",
"Virtual Reality",
"Blindness",
"Image Edge Detection",
"Human Computer Interaction",
"Spatial Resolution",
"Human Computer Interaction",
"Virtual Reality",
"Immersive Virtual Environments",
"Redirected Walking",
"Change Blindness Illusion",
"Four Stroke Motion Illusion",
"Motion Without Movement Illusion",
"Computer Graphics"
],
"authors": [
{
"givenName": "F.",
"surname": "Steinicke",
"fullName": "F. Steinicke",
"affiliation": "Univ. of Wurzburg, Wurzburg, Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "G.",
"surname": "Bruder",
"fullName": "G. Bruder",
"affiliation": "Univ. of Wurzburg, Wurzburg, Germany",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2013-01-01 00:00:00",
"pubType": "mags",
"pages": "6-11",
"year": "2013",
"issn": "0272-1716",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2017/6647/0/07892373",
"title": "Application of redirected walking in room-scale VR",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892373/12OmNxG1ySA",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2016/0836/0/07504742",
"title": "Simultaneous mapping and redirected walking for ad hoc free walking in virtual environments",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2016/07504742/12OmNyUFg0I",
"parentPublication": {
"id": "proceedings/vr/2016/0836/0",
"title": "2016 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dasc-picom-cbdcom-cyberscitech/2021/2174/0/217400a349",
"title": "A Redirected Walking Toolkit for Exploring Large-Scale Virtual Environments",
"doi": null,
"abstractUrl": "/proceedings-article/dasc-picom-cbdcom-cyberscitech/2021/217400a349/1BLnzoFxHHy",
"parentPublication": {
"id": "proceedings/dasc-picom-cbdcom-cyberscitech/2021/2174/0",
"title": "2021 IEEE Intl Conf on Dependable, Autonomic and Secure Computing, Intl Conf on Pervasive Intelligence and Computing, Intl Conf on Cloud and Big Data Computing, Intl Conf on Cyber Science and Technology Congress (DASC/PiCom/CBDCom/CyberSciTech)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2022/9617/0/961700a524",
"title": "The Chaotic Behavior of Redirection – Revisiting Simulations in Redirected Walking",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2022/961700a524/1CJc4FECUko",
"parentPublication": {
"id": "proceedings/vr/2022/9617/0",
"title": "2022 IEEE on Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a922",
"title": "Robust Redirected Walking in the Wild",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a922/1CJfaCP53nq",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/10049511",
"title": "Redirected Walking On Omnidirectional Treadmill",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/10049511/1KYoAYFd0m4",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2023/4815/0/481500a053",
"title": "Redirected Walking Based on Historical User Walking Data",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2023/481500a053/1MNgUnNG7Ju",
"parentPublication": {
"id": "proceedings/vr/2023/4815/0",
"title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2020/7675/0/767500a201",
"title": "Evaluate Optimal Redirected Walking Planning Using Reinforcement Learning",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2020/767500a201/1pBMkbxS3F6",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2020/7675/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2021/0158/0/015800a498",
"title": "Redirected Walking using Noisy Galvanic Vestibular Stimulation",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2021/015800a498/1yeCU92Xt5K",
"parentPublication": {
"id": "proceedings/ismar/2021/0158/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2021/0158/0/015800a184",
"title": "A Reinforcement Learning Approach to Redirected Walking with Passive Haptic Feedback",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2021/015800a184/1yeCXhKVTXy",
"parentPublication": {
"id": "proceedings/ismar/2021/0158/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "mcg2013010004",
"articleId": "13rRUB7a1ii",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "mcg2013010012",
"articleId": "13rRUxBa5hz",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNB9bvm9",
"title": "March-April",
"year": "1998",
"issueNum": "02",
"idPrefix": "cg",
"pubType": "magazine",
"volume": "18",
"label": "March-April",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUx0xPvt",
"doi": "10.1109/38.656787",
"abstract": "A whole variety of different techniques for simulating global illumination in virtual environments have been developed over recent years. Each technique, including radiosity, Monte-Carlo ray- or photon-tracing, and directional-dependent radiance computations, is best suited for simulating only some special case environments. None of these techniques is currently able to efficiently simulate all important lighting effects in nontrivial scenes. In this paper, we describe a new approach for efficiently combining different global illumination algorithms to yield a composite lighting simulation: Lighting Networks. Lighting Networks can exploit the advantages of each algorithm and combine them in such a way as to simulate lighting effects that could only be computed at great cost by any single algorithm. Furthermore, this approach allows a user to configure the Lighting Network to compute only specific lighting effects that are important for a given task, while avoiding a costly simulation of the full global illumination in a scene.",
"abstracts": [
{
"abstractType": "Regular",
"content": "A whole variety of different techniques for simulating global illumination in virtual environments have been developed over recent years. Each technique, including radiosity, Monte-Carlo ray- or photon-tracing, and directional-dependent radiance computations, is best suited for simulating only some special case environments. None of these techniques is currently able to efficiently simulate all important lighting effects in nontrivial scenes. In this paper, we describe a new approach for efficiently combining different global illumination algorithms to yield a composite lighting simulation: Lighting Networks. Lighting Networks can exploit the advantages of each algorithm and combine them in such a way as to simulate lighting effects that could only be computed at great cost by any single algorithm. Furthermore, this approach allows a user to configure the Lighting Network to compute only specific lighting effects that are important for a given task, while avoiding a costly simulation of the full global illumination in a scene.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "A whole variety of different techniques for simulating global illumination in virtual environments have been developed over recent years. Each technique, including radiosity, Monte-Carlo ray- or photon-tracing, and directional-dependent radiance computations, is best suited for simulating only some special case environments. None of these techniques is currently able to efficiently simulate all important lighting effects in nontrivial scenes. In this paper, we describe a new approach for efficiently combining different global illumination algorithms to yield a composite lighting simulation: Lighting Networks. Lighting Networks can exploit the advantages of each algorithm and combine them in such a way as to simulate lighting effects that could only be computed at great cost by any single algorithm. Furthermore, this approach allows a user to configure the Lighting Network to compute only specific lighting effects that are important for a given task, while avoiding a costly simulation of the full global illumination in a scene.",
"title": "Composite Lighting Simulations with Lighting Networks",
"normalizedTitle": "Composite Lighting Simulations with Lighting Networks",
"fno": "mcg1998020022",
"hasPdf": true,
"idPrefix": "cg",
"keywords": [
"Composite Lighting",
"Lighting Simulation",
"Global Illumination",
"Representation",
"Basis Functions",
"Domain Decomposition",
"Reflection Approximation",
"Monte Carlo",
"Finite Elements"
],
"authors": [
{
"givenName": "Philipp",
"surname": "Slusallek",
"fullName": "Philipp Slusallek",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "Marc",
"surname": "Stamminger",
"fullName": "Marc Stamminger",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "Wolfgang",
"surname": "Heidrich",
"fullName": "Wolfgang Heidrich",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jan-Christian",
"surname": "Popp",
"fullName": "Jan-Christian Popp",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "Hans-Peter",
"surname": "Seidel",
"fullName": "Hans-Peter Seidel",
"affiliation": null,
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": false,
"isOpenAccess": false,
"issueNum": "02",
"pubDate": "1998-03-01 00:00:00",
"pubType": "mags",
"pages": "22-31",
"year": "1998",
"issn": "0272-1716",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [],
"adjacentArticles": {
"previous": {
"fno": "mcg1998020020",
"articleId": "13rRUyZaxsS",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "mcg1998020032",
"articleId": "13rRUxAASMR",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNC36tSf",
"title": "Aug.",
"year": "2013",
"issueNum": "08",
"idPrefix": "tg",
"pubType": "journal",
"volume": "19",
"label": "Aug.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUxC0SWa",
"doi": "10.1109/TVCG.2012.319",
"abstract": "We propose \"StereoPasting,” an efficient method for depth-consistent stereoscopic composition, in which a source 2D image is interactively blended into a target stereoscopic image. As we paint \"disparity” on a 2D image, the disparity map of the selected region is gradually produced by edge-aware diffusion, and then blended with that of the target stereoscopic image. By considering constraints of the expected disparities and perspective scaling, the 2D object is warped to generate an image pair, which is then blended into the target image pair to get the composition result. The warping is formulated as an energy minimization, which could be solved in real time. We also present an interactive composition system, in which users can edit the disparity maps of 2D images by strokes, while viewing the composition results instantly. Experiments show that our method is intuitive and efficient for interactive stereoscopic composition. A lot of applications demonstrate the versatility of our method.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We propose \"StereoPasting,” an efficient method for depth-consistent stereoscopic composition, in which a source 2D image is interactively blended into a target stereoscopic image. As we paint \"disparity” on a 2D image, the disparity map of the selected region is gradually produced by edge-aware diffusion, and then blended with that of the target stereoscopic image. By considering constraints of the expected disparities and perspective scaling, the 2D object is warped to generate an image pair, which is then blended into the target image pair to get the composition result. The warping is formulated as an energy minimization, which could be solved in real time. We also present an interactive composition system, in which users can edit the disparity maps of 2D images by strokes, while viewing the composition results instantly. Experiments show that our method is intuitive and efficient for interactive stereoscopic composition. A lot of applications demonstrate the versatility of our method.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We propose \"StereoPasting,” an efficient method for depth-consistent stereoscopic composition, in which a source 2D image is interactively blended into a target stereoscopic image. As we paint \"disparity” on a 2D image, the disparity map of the selected region is gradually produced by edge-aware diffusion, and then blended with that of the target stereoscopic image. By considering constraints of the expected disparities and perspective scaling, the 2D object is warped to generate an image pair, which is then blended into the target image pair to get the composition result. The warping is formulated as an energy minimization, which could be solved in real time. We also present an interactive composition system, in which users can edit the disparity maps of 2D images by strokes, while viewing the composition results instantly. Experiments show that our method is intuitive and efficient for interactive stereoscopic composition. A lot of applications demonstrate the versatility of our method.",
"title": "StereoPasting: Interactive Composition in Stereoscopic Images",
"normalizedTitle": "StereoPasting: Interactive Composition in Stereoscopic Images",
"fno": "ttg2013081375",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Stereo Image Processing",
"Videos",
"Image Color Analysis",
"Image Edge Detection",
"Visualization",
"Equations",
"Paints",
"Disparity Map",
"Stereo Pasting",
"Depth Consistent",
"Stereoscopic Composition"
],
"authors": [
{
"givenName": null,
"surname": "Ruo-Feng Tong",
"fullName": "Ruo-Feng Tong",
"affiliation": "State Key Lab. of CAD&CG, Zhejiang Univ., Hangzhou, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": null,
"surname": "Yun Zhang",
"fullName": "Yun Zhang",
"affiliation": "State Key Lab. of CAD&CG, Zhejiang Univ., Hangzhou, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": null,
"surname": "Ke-Li Cheng",
"fullName": "Ke-Li Cheng",
"affiliation": "State Key Lab. of CAD&CG, Zhejiang Univ., Hangzhou, China",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "08",
"pubDate": "2013-08-01 00:00:00",
"pubType": "trans",
"pages": "1375-1385",
"year": "2013",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/icdh/2014/4284/0/4284a095",
"title": "A Content-Aware Scaling Method for Stereoscopic Images",
"doi": null,
"abstractUrl": "/proceedings-article/icdh/2014/4284a095/12OmNAQJzOw",
"parentPublication": {
"id": "proceedings/icdh/2014/4284/0",
"title": "2014 5th International Conference on Digital Home (ICDH)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmtma/2011/4296/3/4296e346",
"title": "Stereoscopic Video Object Segmentation Based on SVM and Mean-Shift",
"doi": null,
"abstractUrl": "/proceedings-article/icmtma/2011/4296e346/12OmNAnuTFr",
"parentPublication": {
"id": "proceedings/icmtma/2011/4296/3",
"title": "2011 Third International Conference on Measuring Technology and Mechatronics Automation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmew/2014/4717/0/06890709",
"title": "Learning visual saliency for stereoscopic images",
"doi": null,
"abstractUrl": "/proceedings-article/icmew/2014/06890709/12OmNqIhFMD",
"parentPublication": {
"id": "proceedings/icmew/2014/4717/0",
"title": "2014 IEEE International Conference on Multimedia and Expo Workshops (ICMEW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cgames/2011/1451/0/06000331",
"title": "Stereoscopic 3D in video games: A review of current design practices and challenges",
"doi": null,
"abstractUrl": "/proceedings-article/cgames/2011/06000331/12OmNqJZgHD",
"parentPublication": {
"id": "proceedings/cgames/2011/1451/0",
"title": "2011 16th International Conference on Computer Games (CGAMES)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvmp/2011/4621/0/4621a165",
"title": "Disparity-Aware Stereo 3D Production Tools",
"doi": null,
"abstractUrl": "/proceedings-article/cvmp/2011/4621a165/12OmNvoWUZK",
"parentPublication": {
"id": "proceedings/cvmp/2011/4621/0",
"title": "2011 Conference for Visual Media Production",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmtma/2010/3962/3/3962e493",
"title": "Stereoscopic Video Object Segmentation Based on Disparity Map",
"doi": null,
"abstractUrl": "/proceedings-article/icmtma/2010/3962e493/12OmNxGALg7",
"parentPublication": {
"id": "proceedings/icmtma/2010/3962/3",
"title": "2010 International Conference on Measuring Technology and Mechatronics Automation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmtma/2011/4296/2/4296c745",
"title": "Objective Quality Assessment of Noised Stereoscopic Images",
"doi": null,
"abstractUrl": "/proceedings-article/icmtma/2011/4296c745/12OmNyUFfUe",
"parentPublication": {
"id": "proceedings/icmtma/2011/4296/2",
"title": "2011 Third International Conference on Measuring Technology and Mechatronics Automation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ism/2012/4875/0/4875a169",
"title": "Visualizing the Perceived Discomfort of Stereoscopic Video",
"doi": null,
"abstractUrl": "/proceedings-article/ism/2012/4875a169/12OmNz4SOrX",
"parentPublication": {
"id": "proceedings/ism/2012/4875/0",
"title": "2012 IEEE International Symposium on Multimedia",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2010/4109/0/4109e101",
"title": "Stereoscopic Image Inpainting: Distinct Depth Maps and Images Inpainting",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2010/4109e101/12OmNzCWFZI",
"parentPublication": {
"id": "proceedings/icpr/2010/4109/0",
"title": "Pattern Recognition, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/08/ttg2013081288",
"title": "Changing Perspective in Stereoscopic Images",
"doi": null,
"abstractUrl": "/journal/tg/2013/08/ttg2013081288/13rRUwghd4Y",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2013081362",
"articleId": "13rRUx0xPi8",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2013081386",
"articleId": "13rRUwI5U7X",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "17ShDTXWRON",
"name": "ttg2013081375s1.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg2013081375s1.pdf",
"extension": "pdf",
"size": "7.18 MB",
"__typename": "WebExtraType"
},
{
"id": "17ShDTXWROO",
"name": "ttg2013081375s3.avi",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg2013081375s3.avi",
"extension": "avi",
"size": "11.2 MB",
"__typename": "WebExtraType"
},
{
"id": "17ShDTXWROP",
"name": "ttg2013081375s2.avi",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg2013081375s2.avi",
"extension": "avi",
"size": "23 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNz5apx8",
"title": "April",
"year": "2015",
"issueNum": "04",
"idPrefix": "tg",
"pubType": "journal",
"volume": "21",
"label": "April",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUxE04tC",
"doi": "10.1109/TVCG.2015.2391857",
"abstract": "In recent years optical see-through head-mounted displays (OST-HMDs) have moved from conceptual research to a market of mass-produced devices with new models and applications being released continuously. It remains challenging to deploy augmented reality (AR) applications that require consistent spatial visualization. Examples include maintenance, training and medical tasks, as the view of the attached scene camera is shifted from the user's view. A calibration step can compute the relationship between the HMD-screen and the user's eye to align the digital content. However, this alignment is only viable as long as the display does not move, an assumption that rarely holds for an extended period of time. As a consequence, continuous recalibration is necessary. Manual calibration methods are tedious and rarely support practical applications. Existing automated methods do not account for user-specific parameters and are error prone. We propose the combination of a pre-calibrated display with a per-frame estimation of the user's cornea position to estimate the individual eye center and continuously recalibrate the system. With this, we also obtain the gaze direction, which allows for instantaneous uncalibrated eye gaze tracking, without the need for additional hardware and complex illumination. Contrary to existing methods, we use simple image processing and do not rely on iris tracking, which is typically noisy and can be ambiguous. Evaluation with simulated and real data shows that our approach achieves a more accurate and stable eye pose estimation, which results in an improved and practical calibration with a largely improved distribution of projection error.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In recent years optical see-through head-mounted displays (OST-HMDs) have moved from conceptual research to a market of mass-produced devices with new models and applications being released continuously. It remains challenging to deploy augmented reality (AR) applications that require consistent spatial visualization. Examples include maintenance, training and medical tasks, as the view of the attached scene camera is shifted from the user's view. A calibration step can compute the relationship between the HMD-screen and the user's eye to align the digital content. However, this alignment is only viable as long as the display does not move, an assumption that rarely holds for an extended period of time. As a consequence, continuous recalibration is necessary. Manual calibration methods are tedious and rarely support practical applications. Existing automated methods do not account for user-specific parameters and are error prone. We propose the combination of a pre-calibrated display with a per-frame estimation of the user's cornea position to estimate the individual eye center and continuously recalibrate the system. With this, we also obtain the gaze direction, which allows for instantaneous uncalibrated eye gaze tracking, without the need for additional hardware and complex illumination. Contrary to existing methods, we use simple image processing and do not rely on iris tracking, which is typically noisy and can be ambiguous. Evaluation with simulated and real data shows that our approach achieves a more accurate and stable eye pose estimation, which results in an improved and practical calibration with a largely improved distribution of projection error.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In recent years optical see-through head-mounted displays (OST-HMDs) have moved from conceptual research to a market of mass-produced devices with new models and applications being released continuously. It remains challenging to deploy augmented reality (AR) applications that require consistent spatial visualization. Examples include maintenance, training and medical tasks, as the view of the attached scene camera is shifted from the user's view. A calibration step can compute the relationship between the HMD-screen and the user's eye to align the digital content. However, this alignment is only viable as long as the display does not move, an assumption that rarely holds for an extended period of time. As a consequence, continuous recalibration is necessary. Manual calibration methods are tedious and rarely support practical applications. Existing automated methods do not account for user-specific parameters and are error prone. We propose the combination of a pre-calibrated display with a per-frame estimation of the user's cornea position to estimate the individual eye center and continuously recalibrate the system. With this, we also obtain the gaze direction, which allows for instantaneous uncalibrated eye gaze tracking, without the need for additional hardware and complex illumination. Contrary to existing methods, we use simple image processing and do not rely on iris tracking, which is typically noisy and can be ambiguous. Evaluation with simulated and real data shows that our approach achieves a more accurate and stable eye pose estimation, which results in an improved and practical calibration with a largely improved distribution of projection error.",
"title": "Corneal-Imaging Calibration for Optical See-Through Head-Mounted Displays",
"normalizedTitle": "Corneal-Imaging Calibration for Optical See-Through Head-Mounted Displays",
"fno": "07012105",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Augmented Reality",
"Calibration",
"Data Visualisation",
"Eye",
"Gaze Tracking",
"Helmet Mounted Displays",
"Natural Scenes",
"Pose Estimation",
"Optical See Through Head Mounted Display",
"OST HMD Screen",
"Mass Produced Device",
"Augmented Reality",
"Consistent Spatial Visualization",
"Scene Camera",
"User View",
"Continuous Recalibration",
"Manual Calibration Method",
"User Specific Parameters",
"Precalibrated Display",
"Per Frame Estimation",
"User Cornea Position Estimation",
"Gaze Direction",
"Instantaneous Uncalibrated Eye Gaze Tracking",
"Image Processing",
"Eye Pose Estimation",
"Projection Error Distribution",
"Corneal Imaging Calibration",
"Calibration",
"Cornea",
"Cameras",
"Estimation",
"Iris",
"Three Dimensional Displays",
"OST HMD Calibration",
"Optical See Through",
"Eye Pose Estimation",
"OST HMD Calibration",
"Eye Pose Estimation",
"Corneal Imaging",
"Optical See Through"
],
"authors": [
{
"givenName": "Alexander",
"surname": "Plopski",
"fullName": "Alexander Plopski",
"affiliation": "Osaka University",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Yuta",
"surname": "Itoh",
"fullName": "Yuta Itoh",
"affiliation": "Technical University of Munich",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Christian",
"surname": "Nitschke",
"fullName": "Christian Nitschke",
"affiliation": "Kyoto University",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Kiyoshi",
"surname": "Kiyokawa",
"fullName": "Kiyoshi Kiyokawa",
"affiliation": "Osaka University",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Gudrun",
"surname": "Klinker",
"fullName": "Gudrun Klinker",
"affiliation": "Technical University of Munich",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Haruo",
"surname": "Takemura",
"fullName": "Haruo Takemura",
"affiliation": "Osaka University",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "04",
"pubDate": "2015-04-01 00:00:00",
"pubType": "trans",
"pages": "481-490",
"year": "2015",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2016/0836/0/07504693",
"title": "A calibration method for optical see-through head-mounted displays with a depth camera",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2016/07504693/12OmNAnMuMd",
"parentPublication": {
"id": "proceedings/vr/2016/0836/0",
"title": "2016 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2015/7660/0/7660a043",
"title": "Simultaneous Direct and Augmented View Distortion Calibration of Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2015/7660a043/12OmNC1oT64",
"parentPublication": {
"id": "proceedings/ismar/2015/7660/0",
"title": "2015 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2014/3624/0/06798846",
"title": "Interaction-free calibration for optical see-through head-mounted displays based on 3D Eye localization",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2014/06798846/12OmNCdBDWL",
"parentPublication": {
"id": "proceedings/3dui/2014/3624/0",
"title": "2014 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2015/1727/0/07223443",
"title": "Non-obscuring binocular eye tracking for wide field-of-view head-mounted-displays",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2015/07223443/12OmNqzu6MP",
"parentPublication": {
"id": "proceedings/vr/2015/1727/0",
"title": "2015 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2017/6327/0/6327a245",
"title": "[POSTER] An Accurate Calibration Method for Optical See-Through Head-Mounted Displays Based on Actual Eye-Observation Model",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2017/6327a245/12OmNwErpLb",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2017/6327/0",
"title": "2017 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2014/6184/0/06948424",
"title": "Performance and sensitivity analysis of INDICA: INteraction-Free DIsplay CAlibration for Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2014/06948424/12OmNyYm2oO",
"parentPublication": {
"id": "proceedings/ismar/2014/6184/0",
"title": "2014 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2017/6647/0/07892268",
"title": "RIDE: Region-induced data enhancement method for dynamic calibration of optical see-through head-mounted displays",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892268/12OmNyjtNG2",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/09/08052554",
"title": "A Survey of Calibration Methods for Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/journal/tg/2018/09/08052554/13rRUILtJqY",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2015/04/07064856",
"title": "Light-Field Correction for Spatial Calibration of Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/journal/tg/2015/04/07064856/13rRUwjGoG5",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090625",
"title": "Automatic Calibration of Commercial Optical See-Through Head-Mounted Displays for Medical Applications",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090625/1jIxwp2g0VO",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "07064856",
"articleId": "13rRUwjGoG5",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07021939",
"articleId": "13rRUwInvyB",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNvvc5OL",
"title": "April",
"year": "2013",
"issueNum": "04",
"idPrefix": "tg",
"pubType": "journal",
"volume": "19",
"label": "April",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUwInvyx",
"doi": "10.1109/TVCG.2013.43",
"abstract": "In our research agenda to study the effects of immersion (level of fidelity) on various tasks in virtual reality (VR) systems, we have found that the most generalizable findings come not from direct comparisons of different technologies, but from controlled simulations of those technologies. We call this the mixed reality (MR) simulation approach. However, the validity of MR simulation, especially when different simulator platforms are used, can be questioned. In this paper, we report the results of an experiment examining the effects of field of regard (FOR) and head tracking on the analysis of volume visualized micro-CT datasets, and compare them with those from a previous study. The original study used a CAVE-like display as the MR simulator platform, while the present study used a high-end head-mounted display (HMD). Out of the 24 combinations of system characteristics and tasks tested on the two platforms, we found that the results produced by the two different MR simulators were similar in 20 cases. However, only one of the significant effects found in the original experiment for quantitative tasks was reproduced in the present study. Our observations provide evidence both for and against the validity of MR simulation, and give insight into the differences caused by different MR simulator platforms. The present experiment also examined new conditions not present in the original study, and produced new significant results, which confirm and extend previous existing knowledge on the effects of FOR and head tracking. We provide design guidelines for choosing display systems that can improve the effectiveness of volume visualization applications.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In our research agenda to study the effects of immersion (level of fidelity) on various tasks in virtual reality (VR) systems, we have found that the most generalizable findings come not from direct comparisons of different technologies, but from controlled simulations of those technologies. We call this the mixed reality (MR) simulation approach. However, the validity of MR simulation, especially when different simulator platforms are used, can be questioned. In this paper, we report the results of an experiment examining the effects of field of regard (FOR) and head tracking on the analysis of volume visualized micro-CT datasets, and compare them with those from a previous study. The original study used a CAVE-like display as the MR simulator platform, while the present study used a high-end head-mounted display (HMD). Out of the 24 combinations of system characteristics and tasks tested on the two platforms, we found that the results produced by the two different MR simulators were similar in 20 cases. However, only one of the significant effects found in the original experiment for quantitative tasks was reproduced in the present study. Our observations provide evidence both for and against the validity of MR simulation, and give insight into the differences caused by different MR simulator platforms. The present experiment also examined new conditions not present in the original study, and produced new significant results, which confirm and extend previous existing knowledge on the effects of FOR and head tracking. We provide design guidelines for choosing display systems that can improve the effectiveness of volume visualization applications.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In our research agenda to study the effects of immersion (level of fidelity) on various tasks in virtual reality (VR) systems, we have found that the most generalizable findings come not from direct comparisons of different technologies, but from controlled simulations of those technologies. We call this the mixed reality (MR) simulation approach. However, the validity of MR simulation, especially when different simulator platforms are used, can be questioned. In this paper, we report the results of an experiment examining the effects of field of regard (FOR) and head tracking on the analysis of volume visualized micro-CT datasets, and compare them with those from a previous study. The original study used a CAVE-like display as the MR simulator platform, while the present study used a high-end head-mounted display (HMD). Out of the 24 combinations of system characteristics and tasks tested on the two platforms, we found that the results produced by the two different MR simulators were similar in 20 cases. However, only one of the significant effects found in the original experiment for quantitative tasks was reproduced in the present study. Our observations provide evidence both for and against the validity of MR simulation, and give insight into the differences caused by different MR simulator platforms. The present experiment also examined new conditions not present in the original study, and produced new significant results, which confirm and extend previous existing knowledge on the effects of FOR and head tracking. We provide design guidelines for choosing display systems that can improve the effectiveness of volume visualization applications.",
"title": "Validation of the MR Simulation Approach for Evaluating the Effects of Immersion on Visual Analysis of Volume Data",
"normalizedTitle": "Validation of the MR Simulation Approach for Evaluating the Effects of Immersion on Visual Analysis of Volume Data",
"fno": "ttg2013040529",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Virtual Reality",
"Visualization",
"Mice",
"Solid Modeling",
"Head",
"Training",
"Computational Modeling",
"Virtual Environments",
"MR Simulator",
"Immersion",
"Micro CT",
"Volume Visualization",
"Virtual Reality",
"3 D Visualization",
"HMD"
],
"authors": [
{
"givenName": "B.",
"surname": "Laha",
"fullName": "B. Laha",
"affiliation": "Dept. of Comput. Sci., Virginia Tech, Blacksburg, VA, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "D. A.",
"surname": "Bowman",
"fullName": "D. A. Bowman",
"affiliation": "Dept. of Comput. Sci., Center for Human-Comput. Interaction, Blacksburg, VA, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "J. D.",
"surname": "Schiffbauer",
"fullName": "J. D. Schiffbauer",
"affiliation": "Dept. of Geol. Sci., Univ. of Missouri, Columbia, MO, USA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "04",
"pubDate": "2013-04-01 00:00:00",
"pubType": "trans",
"pages": "529-538",
"year": "2013",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2016/0836/0/07504730",
"title": "Effects of field of regard and stereoscopy and the validity of MR simulation for visual analysis of scientific data",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2016/07504730/12OmNBbJTpU",
"parentPublication": {
"id": "proceedings/vr/2016/0836/0",
"title": "2016 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bmei/2008/3118/2/3118b003",
"title": "An Algorithm Based on Girth-location for MR Head Image Segmentation",
"doi": null,
"abstractUrl": "/proceedings-article/bmei/2008/3118b003/12OmNBc1uwK",
"parentPublication": {
"id": "proceedings/bmei/2008/3118/2",
"title": "BioMedical Engineering and Informatics, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aina/2012/4651/0/4651a663",
"title": "Instruction for Remote MR Cooperative Work with Captured Still Worker's View Video",
"doi": null,
"abstractUrl": "/proceedings-article/aina/2012/4651a663/12OmNxdDFSs",
"parentPublication": {
"id": "proceedings/aina/2012/4651/0",
"title": "2012 IEEE 26th International Conference on Advanced Information Networking and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismarw/2016/3740/0/07836524",
"title": "Perceptual Issues of a Passive Haptics Feedback Based MR System",
"doi": null,
"abstractUrl": "/proceedings-article/ismarw/2016/07836524/12OmNxecS4t",
"parentPublication": {
"id": "proceedings/ismarw/2016/3740/0",
"title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isar/2001/1375/0/13750169",
"title": "MR2 (MR Square): A Mixed-Reality Meeting Room",
"doi": null,
"abstractUrl": "/proceedings-article/isar/2001/13750169/12OmNzyYibC",
"parentPublication": {
"id": "proceedings/isar/2001/1375/0",
"title": "Proceedings IEEE and ACM International Symposium on Augmented Reality",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/04/ttg2012040597",
"title": "Effects of Immersion on Visual Analysis of Volume Data",
"doi": null,
"abstractUrl": "/journal/tg/2012/04/ttg2012040597/13rRUxASuve",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2022/5325/0/532500a414",
"title": "Evaluating the Object-Centered User Interface in Head-Worn Mixed Reality Environment",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2022/532500a414/1JrRiVjEd44",
"parentPublication": {
"id": "proceedings/ismar/2022/5325/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798024",
"title": "Head Pointer or Eye Gaze: Which Helps More in MR Remote Collaboration?",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798024/1cJ0MmguvG8",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797957",
"title": "A Simulation for Examining the Effects of Inaccurate Head Tracking on Drivers of Vehicles with Transparent Cockpit Projections",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797957/1cJ15UmejkI",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/12/09199574",
"title": "Gaze-Dependent Simulation of Light Perception in Virtual Reality",
"doi": null,
"abstractUrl": "/journal/tg/2020/12/09199574/1ncgnMqzLJm",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2013040000xviii",
"articleId": "13rRUy0qnGl",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2013040539",
"articleId": "13rRUwInvl1",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "17ShDTXFgCr",
"name": "ttg2013040529s1.zip",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg2013040529s1.zip",
"extension": "zip",
"size": "14.5 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNxb5hpv",
"title": "July",
"year": "2007",
"issueNum": "07",
"idPrefix": "co",
"pubType": "magazine",
"volume": "40",
"label": "July",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUwwJWIr",
"doi": "10.1109/MC.2007.257",
"abstract": "Solid evidence of virtual reality's benefits has graduated from impressive visual demonstrations to producing results in practical applications. Further, a realistic experience is no longer immersion's sole asset. Empirical studies show that various components of immersion provide other benefits--full immersion is not always necessary.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Solid evidence of virtual reality's benefits has graduated from impressive visual demonstrations to producing results in practical applications. Further, a realistic experience is no longer immersion's sole asset. Empirical studies show that various components of immersion provide other benefits--full immersion is not always necessary.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Solid evidence of virtual reality's benefits has graduated from impressive visual demonstrations to producing results in practical applications. Further, a realistic experience is no longer immersion's sole asset. Empirical studies show that various components of immersion provide other benefits--full immersion is not always necessary.",
"title": "Virtual Reality: How Much Immersion Is Enough?",
"normalizedTitle": "Virtual Reality: How Much Immersion Is Enough?",
"fno": "r7036",
"hasPdf": true,
"idPrefix": "co",
"keywords": [
"3 D Visualization",
"Virtual Reality",
"Immersion"
],
"authors": [
{
"givenName": "Doug A.",
"surname": "Bowman",
"fullName": "Doug A. Bowman",
"affiliation": "Virginia Tech",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Ryan P.",
"surname": "McMahan",
"fullName": "Ryan P. McMahan",
"affiliation": "Virginia Tech",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "07",
"pubDate": "2007-07-01 00:00:00",
"pubType": "mags",
"pages": "36-43",
"year": "2007",
"issn": "0018-9162",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/nbis/2009/3767/0/3767a608",
"title": "Tele-Immersion Environment for Video Avatar Based CVE",
"doi": null,
"abstractUrl": "/proceedings-article/nbis/2009/3767a608/12OmNB06l11",
"parentPublication": {
"id": "proceedings/nbis/2009/3767/0",
"title": "2009 International Conference on Network-Based Information Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2011/0039/0/05759470",
"title": "VEGI: Virtual Environment GUI Immersion system",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2011/05759470/12OmNBKEyn1",
"parentPublication": {
"id": "proceedings/vr/2011/0039/0",
"title": "2011 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cw/2012/4814/0/4814a107",
"title": "Immersion in Virtual Worlds - But not Second Life!",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2012/4814a107/12OmNrAv3P5",
"parentPublication": {
"id": "proceedings/cw/2012/4814/0",
"title": "2012 International Conference on Cyberworlds",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ainaw/2008/3096/0/3096b023",
"title": "Proposal of Multimodal Communications System Using Tele-Immersion Technology",
"doi": null,
"abstractUrl": "/proceedings-article/ainaw/2008/3096b023/12OmNvHGrAg",
"parentPublication": {
"id": "proceedings/ainaw/2008/3096/0",
"title": "2008 22nd International Conference on Advanced Information Networking and Applications (AINA 2008)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/itc/1998/5093/0/50931129",
"title": "How Much Testing Is Enough...?",
"doi": null,
"abstractUrl": "/proceedings-article/itc/1998/50931129/12OmNvnwVjd",
"parentPublication": {
"id": "proceedings/itc/1998/5093/0",
"title": "Proceedings International Test Conference 1998 (IEEE Cat. No.98CH36270)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icalt/2009/3711/0/3711a358",
"title": "How to Bring Immersion into Learning Games?",
"doi": null,
"abstractUrl": "/proceedings-article/icalt/2009/3711a358/12OmNwlHSSY",
"parentPublication": {
"id": "proceedings/icalt/2009/3711/0",
"title": "Advanced Learning Technologies, IEEE International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sibgrapi/1999/0481/0/04810083",
"title": "Multiple Display Viewing Architecture for Virtual Environments over Heterogeneous Networks",
"doi": null,
"abstractUrl": "/proceedings-article/sibgrapi/1999/04810083/12OmNzlD9pC",
"parentPublication": {
"id": "proceedings/sibgrapi/1999/0481/0",
"title": "XII Brazilian Symposium on Computer Graphics and Image Processing (Cat. No.PR00481)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmew/2012/2027/0/06266278",
"title": "Distributed Augmented Reality Systems: How Much Performance is Enough?",
"doi": null,
"abstractUrl": "/proceedings-article/icmew/2012/06266278/12OmNzwHv94",
"parentPublication": {
"id": "proceedings/icmew/2012/2027/0",
"title": "2012 IEEE International Conference on Multimedia & Expo Workshops (ICMEW 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/04/ttg2012040597",
"title": "Effects of Immersion on Visual Analysis of Volume Data",
"doi": null,
"abstractUrl": "/journal/tg/2012/04/ttg2012040597/13rRUxASuve",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isca/2021/3333/0/333300a623",
"title": "Cost-Efficient Overclocking in Immersion-Cooled Datacenters",
"doi": null,
"abstractUrl": "/proceedings-article/isca/2021/333300a623/1vNjMPVDKZa",
"parentPublication": {
"id": "proceedings/isca/2021/3333/0",
"title": "2021 ACM/IEEE 48th Annual International Symposium on Computer Architecture (ISCA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "r7028",
"articleId": "13rRUypGGeq",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "r7045",
"articleId": "13rRUxlgxYQ",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNBhpS2B",
"title": "April",
"year": "2014",
"issueNum": "04",
"idPrefix": "tg",
"pubType": "journal",
"volume": "20",
"label": "April",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUxly95A",
"doi": "10.1109/TVCG.2014.20",
"abstract": "Volume visualization is an important technique for analyzing datasets from a variety of different scientific domains. Volume data analysis is inherently difficult because volumes are three-dimensional, dense, and unfamiliar, requiring scientists to precisely control the viewpoint and to make precise spatial judgments. Researchers have proposed that more immersive (higher fidelity) VR systems might improve task performance with volume datasets, and significant results tied to different components of display fidelity have been reported. However, more information is needed to generalize these results to different task types, domains, and rendering styles. We visualized isosurfaces extracted from synchrotron microscopic computed tomography (SR-μCT) scans of beetles, in a CAVE-like display. We ran a controlled experiment evaluating the effects of three components of system fidelity (field of regard, stereoscopy, and head tracking) on a variety of abstract task categories that are applicable to various scientific domains, and also compared our results with those from our prior experiment using 3D texture-based rendering. We report many significant findings. For example, for search and spatial judgment tasks with isosurface visualization, a stereoscopic display provides better performance, but for tasks with 3D texture-based rendering, displays with higher field of regard were more effective, independent of the levels of the other display components. We also found that systems with high field of regard and head tracking improve performance in spatial judgment tasks. Our results extend existing knowledge and produce new guidelines for designing VR systems to improve the effectiveness of volume data analysis.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Volume visualization is an important technique for analyzing datasets from a variety of different scientific domains. Volume data analysis is inherently difficult because volumes are three-dimensional, dense, and unfamiliar, requiring scientists to precisely control the viewpoint and to make precise spatial judgments. Researchers have proposed that more immersive (higher fidelity) VR systems might improve task performance with volume datasets, and significant results tied to different components of display fidelity have been reported. However, more information is needed to generalize these results to different task types, domains, and rendering styles. We visualized isosurfaces extracted from synchrotron microscopic computed tomography (SR-μCT) scans of beetles, in a CAVE-like display. We ran a controlled experiment evaluating the effects of three components of system fidelity (field of regard, stereoscopy, and head tracking) on a variety of abstract task categories that are applicable to various scientific domains, and also compared our results with those from our prior experiment using 3D texture-based rendering. We report many significant findings. For example, for search and spatial judgment tasks with isosurface visualization, a stereoscopic display provides better performance, but for tasks with 3D texture-based rendering, displays with higher field of regard were more effective, independent of the levels of the other display components. We also found that systems with high field of regard and head tracking improve performance in spatial judgment tasks. Our results extend existing knowledge and produce new guidelines for designing VR systems to improve the effectiveness of volume data analysis.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Volume visualization is an important technique for analyzing datasets from a variety of different scientific domains. Volume data analysis is inherently difficult because volumes are three-dimensional, dense, and unfamiliar, requiring scientists to precisely control the viewpoint and to make precise spatial judgments. Researchers have proposed that more immersive (higher fidelity) VR systems might improve task performance with volume datasets, and significant results tied to different components of display fidelity have been reported. However, more information is needed to generalize these results to different task types, domains, and rendering styles. We visualized isosurfaces extracted from synchrotron microscopic computed tomography (SR-μCT) scans of beetles, in a CAVE-like display. We ran a controlled experiment evaluating the effects of three components of system fidelity (field of regard, stereoscopy, and head tracking) on a variety of abstract task categories that are applicable to various scientific domains, and also compared our results with those from our prior experiment using 3D texture-based rendering. We report many significant findings. For example, for search and spatial judgment tasks with isosurface visualization, a stereoscopic display provides better performance, but for tasks with 3D texture-based rendering, displays with higher field of regard were more effective, independent of the levels of the other display components. We also found that systems with high field of regard and head tracking improve performance in spatial judgment tasks. Our results extend existing knowledge and produce new guidelines for designing VR systems to improve the effectiveness of volume data analysis.",
"title": "Effects of VR System Fidelity on Analyzing Isosurface Visualization of Volume Datasets",
"normalizedTitle": "Effects of VR System Fidelity on Analyzing Isosurface Visualization of Volume Datasets",
"fno": "ttg201404513",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Rendering Computer Graphics",
"Isosurfaces",
"Three Dimensional Displays",
"Visualization",
"Abstracts",
"Measurement",
"Computed Tomography",
"Immersion Micro CT Data Analysis Volume Visualization 3 D Visualization CAVE Virtual Environments Virtual Reality"
],
"authors": [
{
"givenName": "Bireswar",
"surname": "Laha",
"fullName": "Bireswar Laha",
"affiliation": "Dept. of Comput. Sci., Virginia Tech, Blacksburg, VA, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Doug A.",
"surname": "Bowman",
"fullName": "Doug A. Bowman",
"affiliation": "Dept. of Comput. Sci., Virginia Tech, Blacksburg, VA, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "John J.",
"surname": "Socha",
"fullName": "John J. Socha",
"affiliation": "Dept. of Eng. Sci. & Mech., Virginia Tech, Blacksburg, VA, USA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "04",
"pubDate": "2014-04-01 00:00:00",
"pubType": "trans",
"pages": "513-522",
"year": "2014",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/ieee-vis/2000/6478/0/64780016",
"title": "Hardware-Accelerated Volume and Isosurface Rendering Based on Cell-Projection",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/2000/64780016/12OmNBuL1lz",
"parentPublication": {
"id": "proceedings/ieee-vis/2000/6478/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-vis/1995/7187/0/71870003",
"title": "Interval Set: A Volume Rendering Technique Generalizing Isosurface Extraction",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-vis/1995/71870003/12OmNs5rl20",
"parentPublication": {
"id": "proceedings/ieee-vis/1995/7187/0",
"title": "Visualization Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pvg/2001/7223/0/72230051",
"title": "Scalable Isosurface Visualization of Massive Datasets on COTS* Clusters",
"doi": null,
"abstractUrl": "/proceedings-article/pvg/2001/72230051/12OmNwwMf5w",
"parentPublication": {
"id": "proceedings/pvg/2001/7223/0",
"title": "Parallel and Large-Data Visualization and Graphics, IEEE Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/visual/1994/6627/0/00346306",
"title": "Nonpolygonal isosurface rendering for large volume datasets",
"doi": null,
"abstractUrl": "/proceedings-article/visual/1994/00346306/12OmNx8Ouv6",
"parentPublication": {
"id": "proceedings/visual/1994/6627/0",
"title": "Proceedings Visualization '94",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vg/2005/26/0/01500543",
"title": "iSBVR: isosurface-AIDED hardware acceleration techniques for slice-based volume rendering",
"doi": null,
"abstractUrl": "/proceedings-article/vg/2005/01500543/12OmNzSh1c6",
"parentPublication": {
"id": "proceedings/vg/2005/26/0",
"title": "Volume Graphics 2005",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2006/05/v1267",
"title": "Interactive Point-based Isosurface Exploration and High-quality Rendering",
"doi": null,
"abstractUrl": "/journal/tg/2006/05/v1267/13rRUxjQyhm",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/01/08493612",
"title": "CPU Isosurface Ray Tracing of Adaptive Mesh Refinement Data",
"doi": null,
"abstractUrl": "/journal/tg/2019/01/08493612/17D45Vw15vd",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icat/2006/2754/0/04089232",
"title": "A Proposal of a High Definition Haptic Rendering for Stability and Fidelity",
"doi": null,
"abstractUrl": "/proceedings-article/icat/2006/04089232/17D45Wc1II6",
"parentPublication": {
"id": "proceedings/icat/2006/2754/0",
"title": "16th International Conference on Artificial Reality and Telexistence--Workshops (ICAT'06)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/05/08642297",
"title": "Immersive Virtual Colonoscopy",
"doi": null,
"abstractUrl": "/journal/tg/2019/05/08642297/17PYElZaeVr",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/06/08918030",
"title": "Volumetric Isosurface Rendering with Deep Learning-Based Super-Resolution",
"doi": null,
"abstractUrl": "/journal/tg/2021/06/08918030/1fm1QUuzRAI",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg201404495",
"articleId": "13rRUyogGAb",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg201404523",
"articleId": "13rRUy3xY8a",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNzmclo6",
"title": "June",
"year": "2020",
"issueNum": "06",
"idPrefix": "tg",
"pubType": "journal",
"volume": "26",
"label": "June",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "17D45WB0qbp",
"doi": "10.1109/TVCG.2018.2884468",
"abstract": "Various viewing and travel techniques are used in immersive virtual reality to allow users to see different areas or perspectives of 3D environments. Our research evaluates techniques for visually showing transitions between two viewpoints in head-tracked virtual reality. We present four experiments that focus on automated viewpoint changes that are controlled by the system rather than by interactive user control. The experiments evaluate three different transition techniques (teleportation, animated interpolation, and pulsed interpolation), different types of visual adjustments for each technique, and different types of viewpoint changes. We evaluated how differences in transition can influence a viewer's comfort, sickness, and ability to maintain spatial awareness of dynamic objects in a virtual scene. For instant teleportations, the experiments found participants could most easily track scene changes with rotational transitions without translational movements. Among the tested techniques, animated interpolations allowed significantly better spatial awareness of moving objects, but the animated technique was also rated worst in terms of sickness, particularly for rotational viewpoint changes. Across techniques, viewpoint transitions involving both translational and rotational changes together were more difficult to track than either individual type of change.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Various viewing and travel techniques are used in immersive virtual reality to allow users to see different areas or perspectives of 3D environments. Our research evaluates techniques for visually showing transitions between two viewpoints in head-tracked virtual reality. We present four experiments that focus on automated viewpoint changes that are controlled by the system rather than by interactive user control. The experiments evaluate three different transition techniques (teleportation, animated interpolation, and pulsed interpolation), different types of visual adjustments for each technique, and different types of viewpoint changes. We evaluated how differences in transition can influence a viewer's comfort, sickness, and ability to maintain spatial awareness of dynamic objects in a virtual scene. For instant teleportations, the experiments found participants could most easily track scene changes with rotational transitions without translational movements. Among the tested techniques, animated interpolations allowed significantly better spatial awareness of moving objects, but the animated technique was also rated worst in terms of sickness, particularly for rotational viewpoint changes. Across techniques, viewpoint transitions involving both translational and rotational changes together were more difficult to track than either individual type of change.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Various viewing and travel techniques are used in immersive virtual reality to allow users to see different areas or perspectives of 3D environments. Our research evaluates techniques for visually showing transitions between two viewpoints in head-tracked virtual reality. We present four experiments that focus on automated viewpoint changes that are controlled by the system rather than by interactive user control. The experiments evaluate three different transition techniques (teleportation, animated interpolation, and pulsed interpolation), different types of visual adjustments for each technique, and different types of viewpoint changes. We evaluated how differences in transition can influence a viewer's comfort, sickness, and ability to maintain spatial awareness of dynamic objects in a virtual scene. For instant teleportations, the experiments found participants could most easily track scene changes with rotational transitions without translational movements. Among the tested techniques, animated interpolations allowed significantly better spatial awareness of moving objects, but the animated technique was also rated worst in terms of sickness, particularly for rotational viewpoint changes. Across techniques, viewpoint transitions involving both translational and rotational changes together were more difficult to track than either individual type of change.",
"title": "Scene Transitions and Teleportation in Virtual Reality and the Implications for Spatial Awareness and Sickness",
"normalizedTitle": "Scene Transitions and Teleportation in Virtual Reality and the Implications for Spatial Awareness and Sickness",
"fno": "08554159",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Computer Animation",
"Virtual Reality",
"Automated Viewpoint Changes",
"Head Tracked Virtual Reality",
"Immersive Virtual Reality",
"Scene Transitions",
"Rotational Changes",
"Translational Changes",
"Viewpoint Transitions",
"Rotational Viewpoint Changes",
"Animated Technique",
"Rotational Transitions",
"Scene Changes",
"Instant Teleportations",
"Virtual Scene",
"Spatial Awareness",
"Visual Adjustments",
"Pulsed Interpolation",
"Animated Interpolation",
"Teleportation",
"Transition Techniques",
"Interactive User Control",
"Teleportation",
"Three Dimensional Displays",
"Legged Locomotion",
"Tracking",
"Space Exploration",
"Motion Pictures",
"Virtual Reality",
"Animation",
"Virtual Reality",
"View Transitions",
"Scene Transitions",
"Travel",
"Immersive Cinema",
"3 D Movies",
"Teleportation",
"Navigation",
"Sickness",
"Spatial Orientation",
"Spatial Awareness"
],
"authors": [
{
"givenName": "Kasra",
"surname": "Moghadam",
"fullName": "Kasra Moghadam",
"affiliation": "Texas A&M University, College Station, TX, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Colin",
"surname": "Banigan",
"fullName": "Colin Banigan",
"affiliation": "Texas A&M University, College Station, TX, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Eric D.",
"surname": "Ragan",
"fullName": "Eric D. Ragan",
"affiliation": "University of Florida, Gainesville, FL, USA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "06",
"pubDate": "2020-06-01 00:00:00",
"pubType": "trans",
"pages": "2273-2287",
"year": "2020",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/robot/1991/2163/0/00131936",
"title": "Biped gait transitions",
"doi": null,
"abstractUrl": "/proceedings-article/robot/1991/00131936/12OmNAS9zt7",
"parentPublication": {
"id": "proceedings/robot/1991/2163/0",
"title": "Proceedings. 1991 IEEE International Conference on Robotics and Automation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2017/6647/0/07892316",
"title": "An exploration of input conditions for virtual teleportation",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892316/12OmNCzb9vr",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iciev/2016/1269/0/07760005",
"title": "Random forests based recognition of human activities and postural transitions on smartphone",
"doi": null,
"abstractUrl": "/proceedings-article/iciev/2016/07760005/12OmNwtEEP6",
"parentPublication": {
"id": "proceedings/iciev/2016/1269/0",
"title": "2016 International Conference on Informatics, Electronics and Vision (ICIEV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2017/6647/0/07892386",
"title": "Travel in large-scale head-worn VR: Pre-oriented teleportation with WIMs and previews",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892386/12OmNzhELm6",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/09/08031015",
"title": "A Vector Field Design Approach to Animated Transitions",
"doi": null,
"abstractUrl": "/journal/tg/2018/09/08031015/13rRUB7a117",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/05/08998297",
"title": "Teleporting through virtual environments: Effects of path scale and environment scale on spatial updating",
"doi": null,
"abstractUrl": "/journal/tg/2020/05/08998297/1hrXhk9mu9W",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090560",
"title": "Either Give Me a Reason to Stand or an Opportunity to Sit in VR",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090560/1jIxzjmEoeY",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2020/8508/0/850800a608",
"title": "Walking and Teleportation in Wide-area Virtual Reality Experiences",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2020/850800a608/1pysv8bIfrG",
"parentPublication": {
"id": "proceedings/ismar/2020/8508/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/09/09332290",
"title": "Quantifiable Fine-Grain Occlusion Removal Assistance for Efficient VR Exploration",
"doi": null,
"abstractUrl": "/journal/tg/2022/09/09332290/1qzsRxXpW4o",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a480",
"title": "Analysis of Positional Tracking Space Usage when using Teleportation",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a480/1tnXfrT4ere",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "08576679",
"articleId": "17D45XreC6e",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08554186",
"articleId": "17D45WIXbPb",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "1js2Mf0SAgw",
"name": "ttg202006-08554159s1-transitions-examples.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg202006-08554159s1-transitions-examples.mp4",
"extension": "mp4",
"size": "5.8 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNzgNXTl",
"title": "Jan.-March",
"year": "2012",
"issueNum": "01",
"idPrefix": "th",
"pubType": "journal",
"volume": "5",
"label": "Jan.-March",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUwInvt1",
"doi": "10.1109/TOH.2011.40",
"abstract": "Haptic augmented reality (AR) is an emerging research area, which targets the modulation of haptic properties of real objects by means of virtual feedback. In our research, we explore the feasibility of using this technology for medical training systems. As a possible demonstration example, we currently examine the use of augmentation in the context of breast tumor palpation. The key idea in our prototype system is to augment the real feedback of a silicone breast mock-up with simulated forces stemming from virtual tumors. In this paper, we introduce and evaluate the underlying algorithm to provide these force augmentations. This includes a method for the identification of the contact dynamics model via measurements on real sample objects. The performance of our augmentation is examined quantitatively as well as in a user study. Initial results show that the haptic feedback of indenting a real silicone tumor with a rod can be approximated reasonably well with our algorithm. The advantage of such an augmentation approach over physical training models is the ability to create a nearly infinite variety of palpable findings.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Haptic augmented reality (AR) is an emerging research area, which targets the modulation of haptic properties of real objects by means of virtual feedback. In our research, we explore the feasibility of using this technology for medical training systems. As a possible demonstration example, we currently examine the use of augmentation in the context of breast tumor palpation. The key idea in our prototype system is to augment the real feedback of a silicone breast mock-up with simulated forces stemming from virtual tumors. In this paper, we introduce and evaluate the underlying algorithm to provide these force augmentations. This includes a method for the identification of the contact dynamics model via measurements on real sample objects. The performance of our augmentation is examined quantitatively as well as in a user study. Initial results show that the haptic feedback of indenting a real silicone tumor with a rod can be approximated reasonably well with our algorithm. The advantage of such an augmentation approach over physical training models is the ability to create a nearly infinite variety of palpable findings.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Haptic augmented reality (AR) is an emerging research area, which targets the modulation of haptic properties of real objects by means of virtual feedback. In our research, we explore the feasibility of using this technology for medical training systems. As a possible demonstration example, we currently examine the use of augmentation in the context of breast tumor palpation. The key idea in our prototype system is to augment the real feedback of a silicone breast mock-up with simulated forces stemming from virtual tumors. In this paper, we introduce and evaluate the underlying algorithm to provide these force augmentations. This includes a method for the identification of the contact dynamics model via measurements on real sample objects. The performance of our augmentation is examined quantitatively as well as in a user study. Initial results show that the haptic feedback of indenting a real silicone tumor with a rod can be approximated reasonably well with our algorithm. The advantage of such an augmentation approach over physical training models is the ability to create a nearly infinite variety of palpable findings.",
"title": "Rendering Virtual Tumors in Real Tissue Mock-Ups Using Haptic Augmented Reality",
"normalizedTitle": "Rendering Virtual Tumors in Real Tissue Mock-Ups Using Haptic Augmented Reality",
"fno": "tth2012010077",
"hasPdf": true,
"idPrefix": "th",
"keywords": [
"Haptic Rendering",
"Augmented Reality",
"Mixed Reality",
"Medical Simulation",
"Palpation",
"Tumor"
],
"authors": [
{
"givenName": "Seokhee",
"surname": "Jeon",
"fullName": "Seokhee Jeon",
"affiliation": "Pohang University of Science and Technology (POSTECH), Pohang",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Seungmoon",
"surname": "Choi",
"fullName": "Seungmoon Choi",
"affiliation": "Pohang University of Science and Technology (POSTECH), Pohang",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Matthias",
"surname": "Harders",
"fullName": "Matthias Harders",
"affiliation": "ETH Zürich, Zürich",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "01",
"pubDate": "2012-01-01 00:00:00",
"pubType": "trans",
"pages": "77-84",
"year": "2012",
"issn": "1939-1412",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/haptics/2008/2005/0/04479948",
"title": "Perceptual Rendering for Learning Haptic Skills",
"doi": null,
"abstractUrl": "/proceedings-article/haptics/2008/04479948/12OmNqJq4vK",
"parentPublication": {
"id": "proceedings/haptics/2008/2005/0",
"title": "IEEE Haptics Symposium 2008",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2010/9343/0/05643617",
"title": "Breast cancer palpation system using haptic augmented reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2010/05643617/12OmNwF0C53",
"parentPublication": {
"id": "proceedings/ismar/2010/9343/0",
"title": "2010 IEEE International Symposium on Mixed and Augmented Reality",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2000/0478/0/04780233",
"title": "Visuo-Haptic Display Using Head-Mounted Projector",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2000/04780233/12OmNwHz00K",
"parentPublication": {
"id": "proceedings/vr/2000/0478/0",
"title": "Virtual Reality Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2010/9343/0/05643585",
"title": "Haptic simulation of breast cancer palpation: A case study of haptic augmented reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2010/05643585/12OmNwtn3ui",
"parentPublication": {
"id": "proceedings/ismar/2010/9343/0",
"title": "2010 IEEE International Symposium on Mixed and Augmented Reality",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cbms/2011/1189/0/05999154",
"title": "Ultrasound palpation by haptic elastography",
"doi": null,
"abstractUrl": "/proceedings-article/cbms/2011/05999154/12OmNy3Agyv",
"parentPublication": {
"id": "proceedings/cbms/2011/1189/0",
"title": "2011 24th International Symposium on Computer-Based Medical Systems (CBMS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmcs/1999/0253/1/02539195",
"title": "Haptics in Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/icmcs/1999/02539195/12OmNyQ7G3s",
"parentPublication": {
"id": "proceedings/icmcs/1999/0253/1",
"title": "Multimedia Computing and Systems, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/th/2012/02/tth2012020131",
"title": "Haptic Detection of Artificial Tumors by Hand and with a Tool in a MIS Environment",
"doi": null,
"abstractUrl": "/journal/th/2012/02/tth2012020131/13rRUwfZBVs",
"parentPublication": {
"id": "trans/th",
"title": "IEEE Transactions on Haptics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/04/07833030",
"title": "Shifty: A Weight-Shifting Dynamic Passive Haptic Proxy to Enhance Object Perception in Virtual Reality",
"doi": null,
"abstractUrl": "/journal/tg/2017/04/07833030/13rRUwgQpqL",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/01/ttg2013010159",
"title": "Visuo-Haptic Mixed Reality with Unobstructed Tool-Hand Integration",
"doi": null,
"abstractUrl": "/journal/tg/2013/01/ttg2013010159/13rRUyeTVi1",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/12/08103791",
"title": "Magnetic Levitation Haptic Augmentation for Virtual Tissue Stiffness Perception",
"doi": null,
"abstractUrl": "/journal/tg/2018/12/08103791/14H4WM6Ory8",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "tth2012010066",
"articleId": "13rRUygT7fm",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "tth2012010085",
"articleId": "13rRUILtJr4",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNwoxSiW",
"title": "Second",
"year": "2012",
"issueNum": "02",
"idPrefix": "th",
"pubType": "journal",
"volume": "5",
"label": "Second",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUwInvyJ",
"doi": "10.1109/TOH.2011.66",
"abstract": "In this paper, we describe the development of a haptic device to be used in a simulator aiming to train the skills of gastroenterology assistants in abdominal palpation during colonoscopy, as well as to train team interaction skills for the colonoscopy team. To understand the haptic feedback forces to be simulated by the haptic device, we conducted an experiment with five participants of varying BMI. The applied forces and displacements were measured and hysteresis modeling was used to characterize the experimental data. These models were used to determine the haptic feedback forces required to simulate a BMI case in response to the real-time user interactions. The pneumatic haptic device consisted of a sphygmomanometer bladder as the haptic interface and a fuzzy controller to regulate the bladder pressure. The haptic device showed good steady state and dynamic response was adequate for simulating haptic interactions. Tracking accuracy averaged 94.2 percent within 300 ms of the reference input while the user was actively applying abdominal palpation and minor repositioning.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this paper, we describe the development of a haptic device to be used in a simulator aiming to train the skills of gastroenterology assistants in abdominal palpation during colonoscopy, as well as to train team interaction skills for the colonoscopy team. To understand the haptic feedback forces to be simulated by the haptic device, we conducted an experiment with five participants of varying BMI. The applied forces and displacements were measured and hysteresis modeling was used to characterize the experimental data. These models were used to determine the haptic feedback forces required to simulate a BMI case in response to the real-time user interactions. The pneumatic haptic device consisted of a sphygmomanometer bladder as the haptic interface and a fuzzy controller to regulate the bladder pressure. The haptic device showed good steady state and dynamic response was adequate for simulating haptic interactions. Tracking accuracy averaged 94.2 percent within 300 ms of the reference input while the user was actively applying abdominal palpation and minor repositioning.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this paper, we describe the development of a haptic device to be used in a simulator aiming to train the skills of gastroenterology assistants in abdominal palpation during colonoscopy, as well as to train team interaction skills for the colonoscopy team. To understand the haptic feedback forces to be simulated by the haptic device, we conducted an experiment with five participants of varying BMI. The applied forces and displacements were measured and hysteresis modeling was used to characterize the experimental data. These models were used to determine the haptic feedback forces required to simulate a BMI case in response to the real-time user interactions. The pneumatic haptic device consisted of a sphygmomanometer bladder as the haptic interface and a fuzzy controller to regulate the bladder pressure. The haptic device showed good steady state and dynamic response was adequate for simulating haptic interactions. Tracking accuracy averaged 94.2 percent within 300 ms of the reference input while the user was actively applying abdominal palpation and minor repositioning.",
"title": "Abdominal Palpation Haptic Device for Colonoscopy Simulation Using Pneumatic Control",
"normalizedTitle": "Abdominal Palpation Haptic Device for Colonoscopy Simulation Using Pneumatic Control",
"fno": "tth2012020097",
"hasPdf": true,
"idPrefix": "th",
"keywords": [
"Medical Simulation",
"Force Feedback",
"System Design And Analysis",
"Haptic Rendering"
],
"authors": [
{
"givenName": "Mario",
"surname": "Cheng",
"fullName": "Mario Cheng",
"affiliation": "University of Queensland, Brisbane, ICTC The Australian e-Health Research Centre, Royal Brisbane, and Women's Hospital, Herston",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Welber",
"surname": "Marinovic",
"fullName": "Welber Marinovic",
"affiliation": "University of Queensland, Brisbane",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Marcus",
"surname": "Watson",
"fullName": "Marcus Watson",
"affiliation": "Queensland Health Skills Development Centre, Royal Brisbane and Women's Hospital, Herston",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Sébastien",
"surname": "Ourselin",
"fullName": "Sébastien Ourselin",
"affiliation": "University College London, London",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Josh",
"surname": "Passenger",
"fullName": "Josh Passenger",
"affiliation": "ICTC The Australian e-Health Research Centre, Royal Brisbane and Women's Hospital, Herston",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Hans De",
"surname": "Visser",
"fullName": "Hans De Visser",
"affiliation": "ICTC The Australian e-Health Research Centre, Royal Brisbane and Women's Hospital, Herston",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Olivier",
"surname": "Salvado",
"fullName": "Olivier Salvado",
"affiliation": "ICTC The Australian e-Health Research Centre, Royal Brisbane and Women's Hospital, Herston",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Stephan",
"surname": "Riek",
"fullName": "Stephan Riek",
"affiliation": "University of Queensland, Brisbane",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "02",
"pubDate": "2012-04-01 00:00:00",
"pubType": "trans",
"pages": "97-108",
"year": "2012",
"issn": "1939-1412",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/svr/2014/4261/0/4261a268",
"title": "Breast Palpation Simulation with Haptic Feedback: Prototype and Initial Results",
"doi": null,
"abstractUrl": "/proceedings-article/svr/2014/4261a268/12OmNBhHt7R",
"parentPublication": {
"id": "proceedings/svr/2014/4261/0",
"title": "2014 XVI Symposium on Virtual and Augmented Reality (SVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/haptics/2008/2005/0/04479998",
"title": "A Haptic Interface with Motor/Brake System for Colonoscopy Simulation",
"doi": null,
"abstractUrl": "/proceedings-article/haptics/2008/04479998/12OmNscxj1I",
"parentPublication": {
"id": "proceedings/haptics/2008/2005/0",
"title": "IEEE Haptics Symposium 2008",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2010/9343/0/05643617",
"title": "Breast cancer palpation system using haptic augmented reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2010/05643617/12OmNwF0C53",
"parentPublication": {
"id": "proceedings/ismar/2010/9343/0",
"title": "2010 IEEE International Symposium on Mixed and Augmented Reality",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2010/9343/0/05643585",
"title": "Haptic simulation of breast cancer palpation: A case study of haptic augmented reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2010/05643585/12OmNwtn3ui",
"parentPublication": {
"id": "proceedings/ismar/2010/9343/0",
"title": "2010 IEEE International Symposium on Mixed and Augmented Reality",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cbms/2011/1189/0/05999154",
"title": "Ultrasound palpation by haptic elastography",
"doi": null,
"abstractUrl": "/proceedings-article/cbms/2011/05999154/12OmNy3Agyv",
"parentPublication": {
"id": "proceedings/cbms/2011/1189/0",
"title": "2011 24th International Symposium on Computer-Based Medical Systems (CBMS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/whc/2009/3858/0/04810882",
"title": "Pneumatic haptic interface fuzzy controller for simulation of abdominal palpations during colonoscopy",
"doi": null,
"abstractUrl": "/proceedings-article/whc/2009/04810882/12OmNyFU6YB",
"parentPublication": {
"id": "proceedings/whc/2009/3858/0",
"title": "World Haptics Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cw/2012/4814/0/4814a207",
"title": "Virtual Palpation for Medical Training in Cyberworlds",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2012/4814a207/12OmNzFv4gM",
"parentPublication": {
"id": "proceedings/cw/2012/4814/0",
"title": "2012 International Conference on Cyberworlds",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/th/2014/04/06832631",
"title": "Haptic Tumor Augmentation: Exploring Multi-Point Interaction",
"doi": null,
"abstractUrl": "/journal/th/2014/04/06832631/13rRUx0xPn6",
"parentPublication": {
"id": "trans/th",
"title": "IEEE Transactions on Haptics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/th/2012/04/tth2012040301",
"title": "Design and Evaluation of a Novel Haptic Interface for Endoscopic Simulation",
"doi": null,
"abstractUrl": "/journal/th/2012/04/tth2012040301/13rRUxZzAhN",
"parentPublication": {
"id": "trans/th",
"title": "IEEE Transactions on Haptics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/04/ttg2012040617",
"title": "Haptic Palpation for Medical Simulation in Virtual Environments",
"doi": null,
"abstractUrl": "/journal/tg/2012/04/ttg2012040617/13rRUyfKIHI",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": null,
"next": {
"fno": "tth2012020109",
"articleId": "13rRUxd2aZc",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNBqdri4",
"title": "April-June",
"year": "2010",
"issueNum": "02",
"idPrefix": "pc",
"pubType": "magazine",
"volume": "9",
"label": "April-June",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUxBa53l",
"doi": "10.1109/MPRV.2010.30",
    "abstract": "Large display surfaces are more and more becoming a medium for supporting our daily activities: communication and collaboration, entertainment, daily life, working, and learning. A pervasive and human-centric approach to human-display interaction will be an important research trend for the years to come, as demonstrated by the efforts of both industry and academia in developing novel remote interfaces and interaction techniques.",
"abstracts": [
{
"abstractType": "Regular",
        "content": "Large display surfaces are more and more becoming a medium for supporting our daily activities: communication and collaboration, entertainment, daily life, working, and learning. A pervasive and human-centric approach to human-display interaction will be an important research trend for the years to come, as demonstrated by the efforts of both industry and academia in developing novel remote interfaces and interaction techniques.",
"__typename": "ArticleAbstractType"
}
],
    "normalizedAbstract": "Large display surfaces are more and more becoming a medium for supporting our daily activities: communication and collaboration, entertainment, daily life, working, and learning. A pervasive and human-centric approach to human-display interaction will be an important research trend for the years to come, as demonstrated by the efforts of both industry and academia in developing novel remote interfaces and interaction techniques.",
"title": "Human-Display Interaction Technology: Emerging Remote Interfaces for Pervasive Display Environments",
"normalizedTitle": "Human-Display Interaction Technology: Emerging Remote Interfaces for Pervasive Display Environments",
"fno": "mpc2010020072",
"hasPdf": true,
"idPrefix": "pc",
"keywords": [
"Human Display Interaction",
"Post WIMP Interaction",
"Remote Interfaces"
],
"authors": [
{
"givenName": "Andrea",
"surname": "Bellucci",
"fullName": "Andrea Bellucci",
"affiliation": "Universidad Carlos III de Madrid",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Alessio",
"surname": "Malizia",
"fullName": "Alessio Malizia",
"affiliation": "Universidad Carlos III de Madrid",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Paloma",
"surname": "Diaz",
"fullName": "Paloma Diaz",
"affiliation": "Universidad Carlos III de Madrid",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Ignacio",
"surname": "Aedo",
"fullName": "Ignacio Aedo",
"affiliation": "Universidad Carlos III de Madrid",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "02",
"pubDate": "2010-04-01 00:00:00",
"pubType": "mags",
"pages": "72-76",
"year": "2010",
"issn": "1536-1268",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/nbis/2011/4458/0/4458a456",
"title": "A New Interface for Large Scale Tiled Display System Considering Scalability",
"doi": null,
"abstractUrl": "/proceedings-article/nbis/2011/4458a456/12OmNBU1jQj",
"parentPublication": {
"id": "proceedings/nbis/2011/4458/0",
"title": "2011 14th International Conference on Network-Based Information Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2007/0907/0/04142840",
"title": "Exploring 3D Interaction in Alternate Control-Display Space Mappings",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2007/04142840/12OmNBubOUh",
"parentPublication": {
"id": "proceedings/3dui/2007/0907/0",
"title": "2007 IEEE Symposium on 3D User Interfaces",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvrv/2011/4602/0/4602a283",
"title": "Building Virtual Entertainment Environment with Tiled Display Wall and Motion Tracking",
"doi": null,
"abstractUrl": "/proceedings-article/icvrv/2011/4602a283/12OmNwEJ0PR",
"parentPublication": {
"id": "proceedings/icvrv/2011/4602/0",
"title": "2011 International Conference on Virtual Reality and Visualization",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/nbis/2012/4779/0/4779a528",
"title": "A Method for User-Location Estimation for Intuitive Input System on Very Large Display Space",
"doi": null,
"abstractUrl": "/proceedings-article/nbis/2012/4779a528/12OmNwswg2f",
"parentPublication": {
"id": "proceedings/nbis/2012/4779/0",
"title": "2012 15th International Conference on Network-Based Information Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icic/2009/3634/4/3634d399",
"title": "Stereoscopic Display on Post-Process of ANSYS",
"doi": null,
"abstractUrl": "/proceedings-article/icic/2009/3634d399/12OmNxFsmAR",
"parentPublication": {
"id": "proceedings/icic/2009/3634/4",
"title": "2009 Second International Conference on Information and Computing Science",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2007/0907/0/04142839",
"title": "An Exploration of Interaction-Display Offset in Surround Screen Virtual Environments",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2007/04142839/12OmNxeM49A",
"parentPublication": {
"id": "proceedings/3dui/2007/0907/0",
"title": "2007 IEEE Symposium on 3D User Interfaces",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cisis/2008/3109/0/3109a763",
"title": "PDA -- Remote Display Interaction Framework",
"doi": null,
"abstractUrl": "/proceedings-article/cisis/2008/3109a763/12OmNyL0TrV",
"parentPublication": {
"id": "proceedings/cisis/2008/3109/0",
"title": "2008 International Conference on Complex, Intelligent and Software Intensive Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/04/ttg2012040626",
"title": "Evaluating Display Fidelity and Interaction Fidelity in a Virtual Reality Game",
"doi": null,
"abstractUrl": "/journal/tg/2012/04/ttg2012040626/13rRUwIF6dN",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/co/2011/08/mco2011080046",
"title": "Remote Display Solutions for Mobile Cloud Computing",
"doi": null,
"abstractUrl": "/magazine/co/2011/08/mco2011080046/13rRUy0HYNe",
"parentPublication": {
"id": "mags/co",
"title": "Computer",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wslm/2022/0819/0/081900a038",
"title": "Design of Radar Display System Based on Human-computer Interaction Interface",
"doi": null,
"abstractUrl": "/proceedings-article/wslm/2022/081900a038/1KBdRVssvBe",
"parentPublication": {
"id": "proceedings/wslm/2022/0819/0",
"title": "2022 International Conference on Wearables, Sports and Lifestyle Management (WSLM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "mpc2010020064",
"articleId": "13rRUzpzey0",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "mpc2010020078",
"articleId": "13rRUxBJhDe",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNvDqsVI",
"title": "April-June",
"year": "2009",
"issueNum": "02",
"idPrefix": "pc",
"pubType": "magazine",
"volume": "8",
"label": "April-June",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUyhaIm1",
"doi": "10.1109/MPRV.2009.35",
"abstract": "Display registration allows the composition of a camera-equipped smart phone and a public display. By tracking dynamic markers on a public display, researchers can compute the mapping between the pixels on the smart phone's screen and those on the display. Display registration allows interaction through the smart phone image, facilitating both conventional direct-manipulation interaction techniques and the use of the smart phone as a tangible tool. This article describes the theory and practice of display registration with smart phones, findings in initial user studies, and the opportunities for developing markerless display registration schemes.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Display registration allows the composition of a camera-equipped smart phone and a public display. By tracking dynamic markers on a public display, researchers can compute the mapping between the pixels on the smart phone's screen and those on the display. Display registration allows interaction through the smart phone image, facilitating both conventional direct-manipulation interaction techniques and the use of the smart phone as a tangible tool. This article describes the theory and practice of display registration with smart phones, findings in initial user studies, and the opportunities for developing markerless display registration schemes.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Display registration allows the composition of a camera-equipped smart phone and a public display. By tracking dynamic markers on a public display, researchers can compute the mapping between the pixels on the smart phone's screen and those on the display. Display registration allows interaction through the smart phone image, facilitating both conventional direct-manipulation interaction techniques and the use of the smart phone as a tangible tool. This article describes the theory and practice of display registration with smart phones, findings in initial user studies, and the opportunities for developing markerless display registration schemes.",
"title": "Smart Phone Interaction with Registered Displays",
"normalizedTitle": "Smart Phone Interaction with Registered Displays",
"fno": "mpc2009020014",
"hasPdf": true,
"idPrefix": "pc",
"keywords": [
"Display Registration",
"Smart Phone Interaction"
],
"authors": [
{
"givenName": "Nick",
"surname": "Pears",
"fullName": "Nick Pears",
"affiliation": "University of York",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Daniel G.",
"surname": "Jackson",
"fullName": "Daniel G. Jackson",
"affiliation": "Newcastle University",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Patrick",
"surname": "Olivier",
"fullName": "Patrick Olivier",
"affiliation": "Newcastle University",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "02",
"pubDate": "2009-04-01 00:00:00",
"pubType": "mags",
"pages": "14-21",
"year": "2009",
"issn": "1536-1268",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/iccis/2011/4501/0/4501a971",
"title": "A Smart Phone Anti-theft Solution Based on Locking Card of Mobile Phone",
"doi": null,
"abstractUrl": "/proceedings-article/iccis/2011/4501a971/12OmNC3o50P",
"parentPublication": {
"id": "proceedings/iccis/2011/4501/0",
"title": "2011 International Conference on Computational and Information Sciences",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ifcsta/2009/3930/3/3930c131",
"title": "Smart Phone for the Perishable Goods Distribution Management",
"doi": null,
"abstractUrl": "/proceedings-article/ifcsta/2009/3930c131/12OmNwJybNb",
"parentPublication": {
"id": null,
"title": null,
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/uic-atc/2010/4272/0/4272a262",
"title": "TouchInteract: An Interaction Technique with Large Displays Using Touchscreen-Phone",
"doi": null,
"abstractUrl": "/proceedings-article/uic-atc/2010/4272a262/12OmNxGALdP",
"parentPublication": {
"id": "proceedings/uic-atc/2010/4272/0",
"title": "Ubiquitous, Autonomic and Trusted Computing, Symposia and Workshops on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/nfc/2009/3577/0/3577a036",
"title": "Exploring Expressive NFC-Based Mobile Phone Interaction with Large Dynamic Displays",
"doi": null,
"abstractUrl": "/proceedings-article/nfc/2009/3577a036/12OmNxXUhTH",
"parentPublication": {
"id": "proceedings/nfc/2009/3577/0",
"title": "Near Field Communication, International Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/uic-atc/2009/3737/0/3737a298",
"title": "Toward a NFC Phone-Driven Context Awareness Smart Environment",
"doi": null,
"abstractUrl": "/proceedings-article/uic-atc/2009/3737a298/12OmNynJMUM",
"parentPublication": {
"id": "proceedings/uic-atc/2009/3737/0",
"title": "Ubiquitous, Autonomic and Trusted Computing, Symposia and Workshops on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/uic-atc/2010/4272/0/4272a487",
"title": "HouseGenie: Universal Monitor and Controller of Networked Devices on Touchscreen Phone in Smart Home",
"doi": null,
"abstractUrl": "/proceedings-article/uic-atc/2010/4272a487/12OmNyqzM3m",
"parentPublication": {
"id": "proceedings/uic-atc/2010/4272/0",
"title": "Ubiquitous, Autonomic and Trusted Computing, Symposia and Workshops on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ecbi/2009/3661/0/3661a106",
"title": "An Empirical Research on Users' Acceptance of Smart Phone Online Application Software",
"doi": null,
"abstractUrl": "/proceedings-article/ecbi/2009/3661a106/12OmNzBOhXi",
"parentPublication": {
"id": "proceedings/ecbi/2009/3661/0",
"title": "Electronic Commerce and Business Intelligence, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmb-gmr/2010/4084/0/4084a264",
"title": "Task Modeling Infrastructure for Analyzing Smart Phone Usage",
"doi": null,
"abstractUrl": "/proceedings-article/icmb-gmr/2010/4084a264/12OmNzd7byz",
"parentPublication": {
"id": "proceedings/icmb-gmr/2010/4084/0",
"title": "Mobile Business / Global Mobility Roundtable, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/pc/2006/01/b1070",
"title": "The Smart Phone: A Ubiquitous Input Device",
"doi": null,
"abstractUrl": "/magazine/pc/2006/01/b1070/13rRUIJcWiR",
"parentPublication": {
"id": "mags/pc",
"title": "IEEE Pervasive Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/pc/2005/02/b2035",
"title": "Supporting Social Interaction with Smart Phones",
"doi": null,
"abstractUrl": "/magazine/pc/2005/02/b2035/13rRUxAATdS",
"parentPublication": {
"id": "mags/pc",
"title": "IEEE Pervasive Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "mpc2009020012",
"articleId": "13rRUx0gesu",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "mpc2009020022",
"articleId": "13rRUyft7An",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNyPQ4Dx",
"title": "Dec.",
"year": "2012",
"issueNum": "12",
"idPrefix": "tg",
"pubType": "journal",
"volume": "18",
"label": "Dec.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUxASupy",
"doi": "10.1109/TVCG.2012.217",
"abstract": "Data selection is a fundamental task in visualization because it serves as a pre-requisite to many follow-up interactions. Efficient spatial selection in 3D point cloud datasets consisting of thousands or millions of particles can be particularly challenging. We present two new techniques, TeddySelection and CloudLasso, that support the selection of subsets in large particle 3D datasets in an interactive and visually intuitive manner. Specifically, we describe how to spatially select a subset of a 3D particle cloud by simply encircling the target particles on screen using either the mouse or direct-touch input. Based on the drawn lasso, our techniques automatically determine a bounding selection surface around the encircled particles based on their density. This kind of selection technique can be applied to particle datasets in several application domains. TeddySelection and CloudLasso reduce, and in some cases even eliminate, the need for complex multi-step selection processes involving Boolean operations. This was confirmed in a formal, controlled user study in which we compared the more flexible CloudLasso technique to the standard cylinder-based selection technique. This study showed that the former is consistently more efficient than the latter - in several cases the CloudLasso selection time was half that of the corresponding cylinder-based selection.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Data selection is a fundamental task in visualization because it serves as a pre-requisite to many follow-up interactions. Efficient spatial selection in 3D point cloud datasets consisting of thousands or millions of particles can be particularly challenging. We present two new techniques, TeddySelection and CloudLasso, that support the selection of subsets in large particle 3D datasets in an interactive and visually intuitive manner. Specifically, we describe how to spatially select a subset of a 3D particle cloud by simply encircling the target particles on screen using either the mouse or direct-touch input. Based on the drawn lasso, our techniques automatically determine a bounding selection surface around the encircled particles based on their density. This kind of selection technique can be applied to particle datasets in several application domains. TeddySelection and CloudLasso reduce, and in some cases even eliminate, the need for complex multi-step selection processes involving Boolean operations. This was confirmed in a formal, controlled user study in which we compared the more flexible CloudLasso technique to the standard cylinder-based selection technique. This study showed that the former is consistently more efficient than the latter - in several cases the CloudLasso selection time was half that of the corresponding cylinder-based selection.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Data selection is a fundamental task in visualization because it serves as a pre-requisite to many follow-up interactions. Efficient spatial selection in 3D point cloud datasets consisting of thousands or millions of particles can be particularly challenging. We present two new techniques, TeddySelection and CloudLasso, that support the selection of subsets in large particle 3D datasets in an interactive and visually intuitive manner. Specifically, we describe how to spatially select a subset of a 3D particle cloud by simply encircling the target particles on screen using either the mouse or direct-touch input. Based on the drawn lasso, our techniques automatically determine a bounding selection surface around the encircled particles based on their density. This kind of selection technique can be applied to particle datasets in several application domains. TeddySelection and CloudLasso reduce, and in some cases even eliminate, the need for complex multi-step selection processes involving Boolean operations. This was confirmed in a formal, controlled user study in which we compared the more flexible CloudLasso technique to the standard cylinder-based selection technique. This study showed that the former is consistently more efficient than the latter - in several cases the CloudLasso selection time was half that of the corresponding cylinder-based selection.",
"title": "Efficient Structure-Aware Selection Techniques for 3D Point Cloud Visualizations with 2DOF Input",
"normalizedTitle": "Efficient Structure-Aware Selection Techniques for 3D Point Cloud Visualizations with 2DOF Input",
"fno": "ttg2012122245",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Data Visualisation",
"Boolean Algebra",
"Standard Cylinder Based Selection",
"Efficient Structure Aware Selection",
"3 D Point Cloud Visualizations",
"2 DOF Input",
"Data Selection",
"Spatial Selection",
"3 D Point Cloud Datasets",
"Teddy Selection",
"3 D Particle Cloud",
"Direct Touch Input",
"Bounding Selection Surface",
"Complex Multistep Selection",
"Boolean Operations",
"Flexible Cloud Lasso Technique",
"Shape Analysis",
"Three Dimensional Displays",
"Estimation",
"Data Visualization",
"Direct Touch Interaction",
"3 D Interaction",
"Spatial Selection"
],
"authors": [
{
"givenName": null,
"surname": "Lingyun Yu",
"fullName": "Lingyun Yu",
"affiliation": "Univ. of Groningen, Groningen, Netherlands",
"__typename": "ArticleAuthorType"
},
{
"givenName": "K.",
"surname": "Efstathiou",
"fullName": "K. Efstathiou",
"affiliation": "Univ. of Groningen, Groningen, Netherlands",
"__typename": "ArticleAuthorType"
},
{
"givenName": "P.",
"surname": "Isenberg",
"fullName": "P. Isenberg",
"affiliation": "INRIA, Sophia Antipolis, France",
"__typename": "ArticleAuthorType"
},
{
"givenName": "T.",
"surname": "Isenberg",
"fullName": "T. Isenberg",
"affiliation": "DIGITEO/INRIA, France",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "12",
"pubDate": "2012-12-01 00:00:00",
"pubType": "trans",
"pages": "2245-2254",
"year": "2012",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/ncm/2009/3769/0/3769b607",
"title": "Design of the 3D Input Method Based on Touch Device for Mobile",
"doi": null,
"abstractUrl": "/proceedings-article/ncm/2009/3769b607/12OmNAS9zR1",
"parentPublication": {
"id": "proceedings/ncm/2009/3769/0",
"title": "Networked Computing and Advanced Information Management, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2010/6237/0/05444783",
"title": "GPU implementation of 3D object selection by conic volume techniques in virtual environments",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2010/05444783/12OmNBp52w1",
"parentPublication": {
"id": "proceedings/vr/2010/6237/0",
"title": "2010 IEEE Virtual Reality Conference (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2009/3733/0/3733a607",
"title": "Interaction Techniques Using a Spherical Cursor for 3D Targets Acquisition and Indicating in Volumetric Displays",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2009/3733a607/12OmNCmpcLL",
"parentPublication": {
"id": "proceedings/iv/2009/3733/0",
"title": "2009 13th International Conference Information Visualisation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icnc/2009/3736/5/3736e063",
"title": "Peripheral Spatial Cues and Spatial Stroop Effect Can Modulate Each Other: Analyzing the Relationship between Input Selection and Dimensional Selection",
"doi": null,
"abstractUrl": "/proceedings-article/icnc/2009/3736e063/12OmNwswg39",
"parentPublication": {
"id": "proceedings/icnc/2009/3736/5",
"title": "2009 Fifth International Conference on Natural Computation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2010/6846/0/05444712",
"title": "Extending the virtual trackball metaphor to rear touch input",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2010/05444712/12OmNzFMFmw",
"parentPublication": {
"id": "proceedings/3dui/2010/6846/0",
"title": "2010 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icig/2009/3883/0/3883a390",
"title": "Tracking 3d Pose of Rigid Object by Sparse Template Matching",
"doi": null,
"abstractUrl": "/proceedings-article/icig/2009/3883a390/12OmNzahbV0",
"parentPublication": {
"id": "proceedings/icig/2009/3883/0",
"title": "Image and Graphics, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2006/02/mcg2006020015",
"title": "On 3D Input Devices",
"doi": null,
"abstractUrl": "/magazine/cg/2006/02/mcg2006020015/13rRUytF43D",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2010/06/ttg2010061613",
"title": "FI3D: Direct-Touch Interaction for the Exploration of 3D Scientific Visualization Spaces",
"doi": null,
"abstractUrl": "/journal/tg/2010/06/ttg2010061613/13rRUyv53Fk",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibm/2018/5488/0/08621224",
"title": "Automated Particle Picking in Cryo-Electron Micrographs using Deep Regression",
"doi": null,
"abstractUrl": "/proceedings-article/bibm/2018/08621224/17D45X0yjS5",
"parentPublication": {
"id": "proceedings/bibm/2018/5488/0",
"title": "2018 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2012122236",
"articleId": "13rRUEgarnI",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2012122255",
"articleId": "13rRUygT7fb",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "17ShDTXFgAh",
"name": "ttg2012122245s1.zip",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg2012122245s1.zip",
"extension": "zip",
"size": "22 MB",
"__typename": "WebExtraType"
},
{
"id": "17ShDTXFgAg",
"name": "ttg2012122245s1.avi",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg2012122245s1.avi",
"extension": "avi",
"size": "22.4 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
}
|
{
"issue": {
"id": "12OmNxb5hpv",
"title": "July",
"year": "2007",
"issueNum": "07",
"idPrefix": "co",
"pubType": "magazine",
"volume": "40",
"label": "July",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUypGGeq",
"doi": "10.1109/MC.2007.225",
"abstract": "Developed largely for the clothing industry, 3D body-surface scanners are transforming our ability to accurately measure and visualize a person's body size, shape, and skin-surface area. Advancements in 3D whole-body scanning seem to offer even greater potential for healthcare applications.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Developed largely for the clothing industry, 3D body-surface scanners are transforming our ability to accurately measure and visualize a person's body size, shape, and skin-surface area. Advancements in 3D whole-body scanning seem to offer even greater potential for healthcare applications.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Developed largely for the clothing industry, 3D body-surface scanners are transforming our ability to accurately measure and visualize a person's body size, shape, and skin-surface area. Advancements in 3D whole-body scanning seem to offer even greater potential for healthcare applications.",
"title": "3D Body Scanning and Healthcare Applications",
"normalizedTitle": "3D Body Scanning and Healthcare Applications",
"fno": "r7028",
"hasPdf": true,
"idPrefix": "co",
"keywords": [
"3 D Body Scanning",
"Healthcare Technology",
"Scanning Technologies",
"Size UK"
],
"authors": [
{
"givenName": "Philip",
"surname": "Treleaven",
"fullName": "Philip Treleaven",
"affiliation": "University College London",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jonathan",
"surname": "Wells",
"fullName": "Jonathan Wells",
"affiliation": "University College London",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "07",
"pubDate": "2007-07-01 00:00:00",
"pubType": "mags",
"pages": "28-34",
"year": "2007",
"issn": "0018-9162",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/icic/2009/3634/4/3634d038",
"title": "The Vacant Distance Ease Relation between Body and Garment",
"doi": null,
"abstractUrl": "/proceedings-article/icic/2009/3634d038/12OmNAYGlsY",
"parentPublication": {
"id": "proceedings/icic/2009/3634/4",
"title": "2009 Second International Conference on Information and Computing Science",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dim/1997/7943/0/79430262",
"title": "Reducing Movement Artifacts in Whole Body Scanning",
"doi": null,
"abstractUrl": "/proceedings-article/3dim/1997/79430262/12OmNAle6As",
"parentPublication": {
"id": "proceedings/3dim/1997/7943/0",
"title": "3D Digital Imaging and Modeling, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dimpvt/2012/4873/0/4873a432",
"title": "Accurate Full Body Scanning from a Single Fixed 3D Camera",
"doi": null,
"abstractUrl": "/proceedings-article/3dimpvt/2012/4873a432/12OmNBOCWs8",
"parentPublication": {
"id": "proceedings/3dimpvt/2012/4873/0",
"title": "2012 Second International Conference on 3D Imaging, Modeling, Processing, Visualization & Transmission",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dim/1997/7943/0/79430266",
"title": "Optimal Postures and Positioning for Human Body Scanning",
"doi": null,
"abstractUrl": "/proceedings-article/3dim/1997/79430266/12OmNBdJ5hx",
"parentPublication": {
"id": "proceedings/3dim/1997/7943/0",
"title": "3D Digital Imaging and Modeling, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iitaw/2009/3860/0/3860a356",
"title": "An Automatic System to Reconstruct and Repair Telmat Scanning Model",
"doi": null,
"abstractUrl": "/proceedings-article/iitaw/2009/3860a356/12OmNrIaeh1",
"parentPublication": {
"id": "proceedings/iitaw/2009/3860/0",
"title": "2009 Third International Symposium on Intelligent Information Technology Application Workshops",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cw/2008/3381/0/3381a335",
"title": "Automatic Surface Scanning of 3D Artifacts",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2008/3381a335/12OmNvRU0nM",
"parentPublication": {
"id": "proceedings/cw/2008/3381/0",
"title": "2008 International Conference on Cyberworlds",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2001/1272/1/127210447",
"title": "Covariance Scaled Sampling for Monocular 3D Body Tracking",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2001/127210447/12OmNxdVgTf",
"parentPublication": {
"id": "proceedings/cvpr/2001/1272/1",
"title": "Proceedings of the 2001 IEEE Computer Society Conference on Computer Vision and Pattern Recognition. CVPR 2001",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isdea/2012/4608/0/4608a657",
"title": "Study on Segmentation of 3D Human Body Based on Point Cloud Data",
"doi": null,
"abstractUrl": "/proceedings-article/isdea/2012/4608a657/12OmNzmclFP",
"parentPublication": {
"id": "proceedings/isdea/2012/4608/0",
"title": "2012 Second International Conference on Intelligent System Design and Engineering Application",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/04/ttg2012040643",
"title": "Scanning 3D Full Human Bodies Using Kinects",
"doi": null,
"abstractUrl": "/journal/tg/2012/04/ttg2012040643/13rRUwjGoFW",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccmso/2022/3288/0/328800a352",
"title": "Advanced 3D body scanning techniques and its clinical applications",
"doi": null,
"abstractUrl": "/proceedings-article/iccmso/2022/328800a352/1Mq12vt23S0",
"parentPublication": {
"id": "proceedings/iccmso/2022/3288/0",
"title": "2022 International Conference on Computational Modelling, Simulation and Optimization (ICCMSO)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "r7020",
"articleId": "13rRUy08Mzt",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "r7036",
"articleId": "13rRUwwJWIr",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.