data dict |
|---|
{
"proceeding": {
"id": "1wHKgXUUvh6",
"title": "2021 International Conference on Internet, Education and Information Technology (IEIT)",
"acronym": "ieit",
"groupId": "1842327",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1wHKoq6Hrwc",
"doi": "10.1109/IEIT53597.2021.00143",
"title": "Study of Intercultural Communication Training in Interpreting Teaching Based on Multimedia Technology",
"normalizedTitle": "Study of Intercultural Communication Training in Interpreting Teaching Based on Multimedia Technology",
"abstract": "With the vigorous development of social economy and the iterative update of electronic terminals, the multimedia technology is becoming more and more mature, which has played increasingly important role in human's life and learning. In modern interpreting teaching with intercultural communication training, trainers have expanded the teaching methods by organically combining multimedia technology with curriculum theory effectively. Based on analysis of intercultural characteristics of interpretation in the combination with multimedia technology, this study proposes three training patterns of intercultural communication in process of interpreting teaching, by means of discussing the influential factors of multimedia technology in intercultural communication training on current interpreting teaching.",
"abstracts": [
{
"abstractType": "Regular",
"content": "With the vigorous development of social economy and the iterative update of electronic terminals, the multimedia technology is becoming more and more mature, which has played increasingly important role in human's life and learning. In modern interpreting teaching with intercultural communication training, trainers have expanded the teaching methods by organically combining multimedia technology with curriculum theory effectively. Based on analysis of intercultural characteristics of interpretation in the combination with multimedia technology, this study proposes three training patterns of intercultural communication in process of interpreting teaching, by means of discussing the influential factors of multimedia technology in intercultural communication training on current interpreting teaching.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "With the vigorous development of social economy and the iterative update of electronic terminals, the multimedia technology is becoming more and more mature, which has played increasingly important role in human's life and learning. In modern interpreting teaching with intercultural communication training, trainers have expanded the teaching methods by organically combining multimedia technology with curriculum theory effectively. Based on analysis of intercultural characteristics of interpretation in the combination with multimedia technology, this study proposes three training patterns of intercultural communication in process of interpreting teaching, by means of discussing the influential factors of multimedia technology in intercultural communication training on current interpreting teaching.",
"fno": "256300a613",
"keywords": [
"Computer Based Training",
"Multimedia Computing",
"Teaching",
"Intercultural Communication Training",
"Teaching Methods",
"Multimedia Technology",
"Training Patterns",
"Interpreting Teaching",
"Training",
"Cross Cultural Communication",
"Internet",
"Multimedia Communication",
"Cultural Differences",
"Information Technology",
"Pragmatics",
"Interpreting Teaching",
"Intercultural Communication Training",
"Cultural Differences"
],
"authors": [
{
"affiliation": "School of Foreign Languages, Guangdong Pharmaceutical University,Guangzhou,510006",
"fullName": "Ning Li",
"givenName": "Ning",
"surname": "Li",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ieit",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-04-01T00:00:00",
"pubType": "proceedings",
"pages": "613-616",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-2563-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "256300a607",
"articleId": "1wHKoTTrHLW",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "256300a617",
"articleId": "1wHKwOTwlz2",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iciii/2011/4523/2/4523b121",
"title": "Intercultural Communication Competence Training through Network Platform and Field Training",
"doi": null,
"abstractUrl": "/proceedings-article/iciii/2011/4523b121/12OmNB9t6xs",
"parentPublication": {
"id": "proceedings/iciii/2011/4523/2",
"title": "International Conference on Information Management, Innovation Management and Industrial Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hicss/2016/5670/0/5670c085",
"title": "Behavioral Manifestations of Intercultural Competence in Computer-Mediated Intercultural Learning",
"doi": null,
"abstractUrl": "/proceedings-article/hicss/2016/5670c085/12OmNBh8gXp",
"parentPublication": {
"id": "proceedings/hicss/2016/5670/0",
"title": "2016 49th Hawaii International Conference on System Sciences (HICSS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/itme/2015/8302/0/8302a593",
"title": "Study of Application of Multimedia Technology in English Language Teaching",
"doi": null,
"abstractUrl": "/proceedings-article/itme/2015/8302a593/12OmNvF83px",
"parentPublication": {
"id": "proceedings/itme/2015/8302/0",
"title": "2015 7th International Conference on Information Technology in Medicine and Education (ITME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccis/2013/5004/0/5004b819",
"title": "Computer-Aided Multimedia Oral English Teaching",
"doi": null,
"abstractUrl": "/proceedings-article/iccis/2013/5004b819/12OmNwJgAIF",
"parentPublication": {
"id": "proceedings/iccis/2013/5004/0",
"title": "2013 International Conference on Computational and Information Sciences",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fie/2018/1174/0/08659209",
"title": "Facilitating Intercultural Development: Preparing Future Engineers for Multidisciplinary Teams and Multicultural Environments",
"doi": null,
"abstractUrl": "/proceedings-article/fie/2018/08659209/18j98GoGIog",
"parentPublication": {
"id": "proceedings/fie/2018/1174/0",
"title": "2018 IEEE Frontiers in Education Conference (FIE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icalt/2022/9519/0/951900a227",
"title": "Effects of Technology-Supported Cross-cultural Communications on Learners’ Culture and Communication Competences",
"doi": null,
"abstractUrl": "/proceedings-article/icalt/2022/951900a227/1FUUl1Sv99C",
"parentPublication": {
"id": "proceedings/icalt/2022/9519/0",
"title": "2022 International Conference on Advanced Learning Technologies (ICALT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isaiee/2020/5668/0/566800a173",
"title": "Intercultural Communication-oriented Teaching Mode of English Based on “Internet +”",
"doi": null,
"abstractUrl": "/proceedings-article/isaiee/2020/566800a173/1sQKjeZ8hBS",
"parentPublication": {
"id": "proceedings/isaiee/2020/5668/0",
"title": "2020 International Symposium on Advances in Informatics, Electronics and Education (ISAIEE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmeim/2020/9623/0/962300a150",
"title": "A Study on the Practical Teaching of Intercultural Communication Competence Based on the International Development of Higher Vocational College",
"doi": null,
"abstractUrl": "/proceedings-article/icmeim/2020/962300a150/1syveFmiYdW",
"parentPublication": {
"id": "proceedings/icmeim/2020/9623/0",
"title": "2020 International Conference on Modern Education and Information Management (ICMEIM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icise/2020/2261/0/226100a009",
"title": "A study on the cultivation model of intercultural communicative competence in foreign language teaching",
"doi": null,
"abstractUrl": "/proceedings-article/icise/2020/226100a009/1tnYl4u5nWw",
"parentPublication": {
"id": "proceedings/icise/2020/2261/0",
"title": "2020 International Conference on Information Science and Education (ICISE-IE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icicta/2020/8666/0/866600a602",
"title": "Relying on multi-modal contextual cross-cultural communication ability training big data analysis",
"doi": null,
"abstractUrl": "/proceedings-article/icicta/2020/866600a602/1wRIzxMnOX6",
"parentPublication": {
"id": "proceedings/icicta/2020/8666/0",
"title": "2020 13th International Conference on Intelligent Computation Technology and Automation (ICICTA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNyGbI52",
"title": "2018 IEEE International Conference on Computational Photography (ICCP)",
"acronym": "iccp",
"groupId": "1800125",
"volume": "0",
"displayVolume": "0",
"year": "2018",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNyo1o6e",
"doi": "10.1109/ICCPHOT.2018.8368472",
"title": "Rolling shutter imaging on the electric grid",
"normalizedTitle": "Rolling shutter imaging on the electric grid",
"abstract": "Flicker of AC-powered lights is useful for probing the electric grid and unmixing reflected contributions of different sources. Flicker has been sensed in great detail with a specially-designed camera tethered to an AC outlet. We argue that even an untethered smartphone can achieve the same task. We exploit the inter-row exposure delay of the ubiquitous rolling-shutter sensor. When pixel exposure time is kept short, this delay creates a spatiotemporal wave pattern that encodes (1) the precise capture time relative to the AC, (2) the response function of individual bulbs, and (3) the AC phase that powers them. To sense point sources, we induce the spatiotemporal wave pattern by placing a star filter or a paper diffuser in front of the camera's lens. We demonstrate several new capabilities, including: high-rate acquisition of bulb response functions from one smartphone photo; recognition of bulb type and phase from one or two images; and rendering of live flicker video, as if it came from a high speed global-shutter camera.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Flicker of AC-powered lights is useful for probing the electric grid and unmixing reflected contributions of different sources. Flicker has been sensed in great detail with a specially-designed camera tethered to an AC outlet. We argue that even an untethered smartphone can achieve the same task. We exploit the inter-row exposure delay of the ubiquitous rolling-shutter sensor. When pixel exposure time is kept short, this delay creates a spatiotemporal wave pattern that encodes (1) the precise capture time relative to the AC, (2) the response function of individual bulbs, and (3) the AC phase that powers them. To sense point sources, we induce the spatiotemporal wave pattern by placing a star filter or a paper diffuser in front of the camera's lens. We demonstrate several new capabilities, including: high-rate acquisition of bulb response functions from one smartphone photo; recognition of bulb type and phase from one or two images; and rendering of live flicker video, as if it came from a high speed global-shutter camera.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Flicker of AC-powered lights is useful for probing the electric grid and unmixing reflected contributions of different sources. Flicker has been sensed in great detail with a specially-designed camera tethered to an AC outlet. We argue that even an untethered smartphone can achieve the same task. We exploit the inter-row exposure delay of the ubiquitous rolling-shutter sensor. When pixel exposure time is kept short, this delay creates a spatiotemporal wave pattern that encodes (1) the precise capture time relative to the AC, (2) the response function of individual bulbs, and (3) the AC phase that powers them. To sense point sources, we induce the spatiotemporal wave pattern by placing a star filter or a paper diffuser in front of the camera's lens. We demonstrate several new capabilities, including: high-rate acquisition of bulb response functions from one smartphone photo; recognition of bulb type and phase from one or two images; and rendering of live flicker video, as if it came from a high speed global-shutter camera.",
"fno": "08368472",
"keywords": [
"Cameras",
"Delays",
"Optical Surface Waves",
"Spatiotemporal Phenomena",
"Light Sources",
"Lighting"
],
"authors": [
{
"affiliation": "Viterbi Faculty of Electrical Engineering, Technion - Israel Institute of Technology",
"fullName": "Mark Sheinin",
"givenName": "Mark",
"surname": "Sheinin",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Viterbi Faculty of Electrical Engineering, Technion - Israel Institute of Technology",
"fullName": "Yoav Y. Schechner",
"givenName": "Yoav Y.",
"surname": "Schechner",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Dept. of Computer Science, University of Toronto",
"fullName": "Kiriakos N. Kutulakos",
"givenName": "Kiriakos N.",
"surname": "Kutulakos",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iccp",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2018-05-01T00:00:00",
"pubType": "proceedings",
"pages": "1-12",
"year": "2018",
"issn": "2472-7636",
"isbn": "978-1-5386-2526-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08368471",
"articleId": "12OmNBhZ4pc",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08368473",
"articleId": "12OmNzBwGEy",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cvpr/2017/0457/0/0457c363",
"title": "Computational Imaging on the Electric Grid",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2017/0457c363/12OmNAoUTip",
"parentPublication": {
"id": "proceedings/cvpr/2017/0457/0",
"title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpads/2015/5785/0/5785a207",
"title": "An Adaptive and Compressive Data Gathering Scheme in Vehicular Sensor Networks",
"doi": null,
"abstractUrl": "/proceedings-article/icpads/2015/5785a207/12OmNB1NVNo",
"parentPublication": {
"id": "proceedings/icpads/2015/5785/0",
"title": "2015 IEEE 21st International Conference on Parallel and Distributed Systems (ICPADS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2009/4420/0/05459166",
"title": "Stereo from flickering caustics",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2009/05459166/12OmNwswg2Z",
"parentPublication": {
"id": "proceedings/iccv/2009/4420/0",
"title": "2009 IEEE 12th International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2012/4660/0/06402539",
"title": "VRCodes: Unobtrusive and active visual codes for interaction by exploiting rolling shutter",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2012/06402539/12OmNzkuKBF",
"parentPublication": {
"id": "proceedings/ismar/2012/4660/0",
"title": "2012 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/11/07523411",
"title": "Towards Kilo-Hertz 6-DoF Visual Tracking Using an Egocentric Cluster of Rolling Shutter Cameras",
"doi": null,
"abstractUrl": "/journal/tg/2016/11/07523411/13rRUwjXZSi",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2022/12/08658151",
"title": "Computational Imaging on the Electric Grid",
"doi": null,
"abstractUrl": "/journal/tp/2022/12/08658151/187Z9n3e5KE",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/oj/2022/01/09861714",
"title": "PowerFDNet: Deep Learning-Based Stealthy False Data Injection Attack Detection for AC-Model Transmission Systems",
"doi": null,
"abstractUrl": "/journal/oj/2022/01/09861714/1FWhSyQ7mAE",
"parentPublication": {
"id": "trans/oj",
"title": "IEEE Open Journal of the Computer Society",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/11/09873853",
"title": "Foveated Stochastic Lightcuts",
"doi": null,
"abstractUrl": "/journal/tg/2022/11/09873853/1GjwMIuxYUE",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccp/2019/3263/0/08747341",
"title": "Video from Stills: Lensless Imaging with Rolling Shutter",
"doi": null,
"abstractUrl": "/proceedings-article/iccp/2019/08747341/1bcJwMUIgh2",
"parentPublication": {
"id": "proceedings/iccp/2019/3263/0",
"title": "2019 IEEE International Conference on Computational Photography (ICCP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNAR1b0Z",
"title": "2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"acronym": "cvprw",
"groupId": "1001809",
"volume": "0",
"displayVolume": "0",
"year": "2017",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNAlvHSi",
"doi": "10.1109/CVPRW.2017.134",
"title": "EgoTracker: Pedestrian Tracking with Re-identification in Egocentric Videos",
"normalizedTitle": "EgoTracker: Pedestrian Tracking with Re-identification in Egocentric Videos",
"abstract": "We propose and analyze a novel framework for tracking a pedestrian in egocentric videos, which is needed for analyzing social gatherings recorded with a wearable camera. The constant camera and pedestrian movement makes this a challenging problem. The main challenges are natural head movement of wearer and target loss and reappearance in a later frame, due to frequent changes in field of view. By using the optical flow information specific to egocentric videos and also by modifying the learning process and sampling region of trackers which tracks by learning an SVM online, we show that re-identification is possible. The specific trackers chosen are STRUCK and MEEM.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We propose and analyze a novel framework for tracking a pedestrian in egocentric videos, which is needed for analyzing social gatherings recorded with a wearable camera. The constant camera and pedestrian movement makes this a challenging problem. The main challenges are natural head movement of wearer and target loss and reappearance in a later frame, due to frequent changes in field of view. By using the optical flow information specific to egocentric videos and also by modifying the learning process and sampling region of trackers which tracks by learning an SVM online, we show that re-identification is possible. The specific trackers chosen are STRUCK and MEEM.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We propose and analyze a novel framework for tracking a pedestrian in egocentric videos, which is needed for analyzing social gatherings recorded with a wearable camera. The constant camera and pedestrian movement makes this a challenging problem. The main challenges are natural head movement of wearer and target loss and reappearance in a later frame, due to frequent changes in field of view. By using the optical flow information specific to egocentric videos and also by modifying the learning process and sampling region of trackers which tracks by learning an SVM online, we show that re-identification is possible. The specific trackers chosen are STRUCK and MEEM.",
"fno": "0733a980",
"keywords": [
"Cameras",
"Support Vector Machines",
"Videos",
"Target Tracking",
"Optical Imaging",
"Optical Devices"
],
"authors": [
{
"affiliation": null,
"fullName": "Jyoti Nigam",
"givenName": "Jyoti",
"surname": "Nigam",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Renu M. Rameshan",
"givenName": "Renu M.",
"surname": "Rameshan",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvprw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2017-07-01T00:00:00",
"pubType": "proceedings",
"pages": "980-987",
"year": "2017",
"issn": "2160-7516",
"isbn": "978-1-5386-0733-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "0733a969",
"articleId": "12OmNyRg4xs",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "0733a988",
"articleId": "12OmNyz5JTq",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cvpr/2016/8851/0/8851c629",
"title": "Recognizing Micro-Actions and Reactions from Paired Egocentric Videos",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2016/8851c629/12OmNAS9zSy",
"parentPublication": {
"id": "proceedings/cvpr/2016/8851/0",
"title": "2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2014/5118/0/5118c537",
"title": "Temporal Segmentation of Egocentric Videos",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2014/5118c537/12OmNs59JN1",
"parentPublication": {
"id": "proceedings/cvpr/2014/5118/0",
"title": "2014 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2015/8391/0/8391e525",
"title": "Storyline Representation of Egocentric Videos with an Applications to Story-Based Search",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2015/8391e525/12OmNx8wTlB",
"parentPublication": {
"id": "proceedings/iccv/2015/8391/0",
"title": "2015 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2016/0641/0/07477707",
"title": "Discovering picturesque highlights from egocentric vacation videos",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2016/07477707/12OmNyL0Tuu",
"parentPublication": {
"id": "proceedings/wacv/2016/0641/0",
"title": "2016 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2016/0641/0/07477708",
"title": "Compact CNN for indexing egocentric videos",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2016/07477708/12OmNzE54Gp",
"parentPublication": {
"id": "proceedings/wacv/2016/0641/0",
"title": "2016 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2019/06/08353133",
"title": "Egocentric Meets Top-View",
"doi": null,
"abstractUrl": "/journal/tp/2019/06/08353133/13rRUxjQyiy",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2019/1975/0/197500a031",
"title": "EGO-SLAM: A Robust Monocular SLAM for Egocentric Videos",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2019/197500a031/18j8QSyEfja",
"parentPublication": {
"id": "proceedings/wacv/2019/1975/0",
"title": "2019 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200c300",
"title": "Anonymizing Egocentric Videos",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200c300/1BmHm8x3ZFC",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccnea/2022/9109/0/910900a389",
"title": "Pedestrian Detection and Target Tracking Based on Person Re-identification in Crowded Crowd",
"doi": null,
"abstractUrl": "/proceedings-article/iccnea/2022/910900a389/1HYvan9Eqha",
"parentPublication": {
"id": "proceedings/iccnea/2022/9109/0",
"title": "2022 International Conference on Computer Network, Electronic and Automation (ICCNEA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2023/06/09562265",
"title": "Generating Personalized Summaries of Day Long Egocentric Videos",
"doi": null,
"abstractUrl": "/journal/tp/2023/06/09562265/1xtOoRYTrG0",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNzQhP7Z",
"title": "2014 International Symposium on Optomechatronic Technologies (ISOT 2014)",
"acronym": "isot",
"groupId": "1002942",
"volume": "0",
"displayVolume": "0",
"year": "2014",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNBaBuPP",
"doi": "10.1109/ISOT.2014.40",
"title": "CubeSat Deformable Mirror Demonstration Mission",
"normalizedTitle": "CubeSat Deformable Mirror Demonstration Mission",
"abstract": "Coronagraphic space telescopes require wave front control systems for high-contrast imaging applications such as exoplanet direct imaging. High-actuator-count MEMS deformable mirrors (DM) are a key element of these wave front control systems, yet they have not been flown in space long enough to characterize their on-orbit performance. The MEMS Deformable Mirror Cube Sat Test bed is a conceptual nanosatellite demonstration of MEMS DM and wave front sensing technology. The test bed platform is a 3U Cube Sat bus. Of the 10 × 10 × 34.05 cm (3U) available volume, a 10 × 10 × 15 cm space is reserved for the optical payload. The main purpose of the payload is to characterize and calibrate the on-orbit performance of a MEMS deformable mirror over an extended period of time (months). Its design incorporates both a Shack Hartmann wave front sensor (internal laser illumination), and a focal plane sensor (used with an external aperture to image bright stars). We baseline a 32-actuator Boston Micromachines Mini deformable mirror for this mission, though the design is flexible and can be applied to mirrors from other vendors. We present the mission design and payload architecture and discuss the intended performance of the optical experiments.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Coronagraphic space telescopes require wave front control systems for high-contrast imaging applications such as exoplanet direct imaging. High-actuator-count MEMS deformable mirrors (DM) are a key element of these wave front control systems, yet they have not been flown in space long enough to characterize their on-orbit performance. The MEMS Deformable Mirror Cube Sat Test bed is a conceptual nanosatellite demonstration of MEMS DM and wave front sensing technology. The test bed platform is a 3U Cube Sat bus. Of the 10 × 10 × 34.05 cm (3U) available volume, a 10 × 10 × 15 cm space is reserved for the optical payload. The main purpose of the payload is to characterize and calibrate the on-orbit performance of a MEMS deformable mirror over an extended period of time (months). Its design incorporates both a Shack Hartmann wave front sensor (internal laser illumination), and a focal plane sensor (used with an external aperture to image bright stars). We baseline a 32-actuator Boston Micromachines Mini deformable mirror for this mission, though the design is flexible and can be applied to mirrors from other vendors. We present the mission design and payload architecture and discuss the intended performance of the optical experiments.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Coronagraphic space telescopes require wave front control systems for high-contrast imaging applications such as exoplanet direct imaging. High-actuator-count MEMS deformable mirrors (DM) are a key element of these wave front control systems, yet they have not been flown in space long enough to characterize their on-orbit performance. The MEMS Deformable Mirror Cube Sat Test bed is a conceptual nanosatellite demonstration of MEMS DM and wave front sensing technology. The test bed platform is a 3U Cube Sat bus. Of the 10 × 10 × 34.05 cm (3U) available volume, a 10 × 10 × 15 cm space is reserved for the optical payload. The main purpose of the payload is to characterize and calibrate the on-orbit performance of a MEMS deformable mirror over an extended period of time (months). Its design incorporates both a Shack Hartmann wave front sensor (internal laser illumination), and a focal plane sensor (used with an external aperture to image bright stars). We baseline a 32-actuator Boston Micromachines Mini deformable mirror for this mission, though the design is flexible and can be applied to mirrors from other vendors. We present the mission design and payload architecture and discuss the intended performance of the optical experiments.",
"fno": "07119404",
"keywords": [
"Mirrors",
"Adaptive Optics",
"Optical Imaging",
"Detectors",
"Payloads",
"Optical Sensors",
"Technology Advancement",
"Wavefront Sensing",
"MEMS Deformable Mirrors",
"Cube Sat"
],
"authors": [
{
"affiliation": null,
"fullName": "Anne Marinan",
"givenName": "Anne",
"surname": "Marinan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Kerri Cahoy",
"givenName": "Kerri",
"surname": "Cahoy",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "isot",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2014-11-01T00:00:00",
"pubType": "proceedings",
"pages": "134-138",
"year": "2014",
"issn": null,
"isbn": "978-1-4673-6752-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07119403",
"articleId": "12OmNwe2IvH",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07119405",
"articleId": "12OmNs0TKLP",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/isspit/2009/5949/0/05407571",
"title": "Reconstruction of microwave tomography data obtained using deformable mirrors",
"doi": null,
"abstractUrl": "/proceedings-article/isspit/2009/05407571/12OmNAXxXhG",
"parentPublication": {
"id": "proceedings/isspit/2009/5949/0",
"title": "2009 IEEE International Symposium on Signal Processing and Information Technology (ISSPIT 2009)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isot/2014/6752/0/07119422",
"title": "Interferometric Focusing of Excitation Light onto a Guide-Star",
"doi": null,
"abstractUrl": "/proceedings-article/isot/2014/07119422/12OmNBpEeTD",
"parentPublication": {
"id": "proceedings/isot/2014/6752/0",
"title": "2014 International Symposium on Optomechatronic Technologies (ISOT 2014)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iqec/2005/9240/0/01561075",
"title": "Demonstration of controlled-NOT gate using linear optics",
"doi": null,
"abstractUrl": "/proceedings-article/iqec/2005/01561075/12OmNCfSqU3",
"parentPublication": {
"id": "proceedings/iqec/2005/9240/0",
"title": "International Quantum Electronics Conference, 2005.",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/case/2006/0310/0/04120318",
"title": "Automation of Challenging Spatial-Temporal Biomedical Observations with the Adaptive Scanning Optical Microscope (ASOM)",
"doi": null,
"abstractUrl": "/proceedings-article/case/2006/04120318/12OmNwvDQwk",
"parentPublication": {
"id": "proceedings/case/2006/0310/0",
"title": "2006 IEEE International Conference on Automation Science and Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/greencom-ithingscpscom/2013/5046/0/06682377",
"title": "Simulation and Analysis of Turbulent Optical Wavefront Based on Zernike Polynomials",
"doi": null,
"abstractUrl": "/proceedings-article/greencom-ithingscpscom/2013/06682377/12OmNynJMJd",
"parentPublication": {
"id": "proceedings/greencom-ithingscpscom/2013/5046/0",
"title": "2013 IEEE International Conference on Green Computing and Communications (GreenCom) and IEEE Internet of Things(iThings) and IEEE Cyber, Physical and Social Computing(CPSCom)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bwcca/2014/4173/0/4173a371",
"title": "Optical Ray Tracing Based on Dijkstra Algorithm in Inhomogeneous Medium",
"doi": null,
"abstractUrl": "/proceedings-article/bwcca/2014/4173a371/12OmNzXFozK",
"parentPublication": {
"id": "proceedings/bwcca/2014/4173/0",
"title": "2014 Ninth International Conference on Broadband and Wireless Computing, Communication and Applications (BWCCA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/04/07829412",
"title": "Wide Field Of View Varifocal Near-Eye Display Using See-Through Deformable Membrane Mirrors",
"doi": null,
"abstractUrl": "/journal/tg/2017/04/07829412/13rRUwcS1D1",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2007/02/i0356",
"title": "Design Analysis of a High-Resolution Panoramic Camera Using Conventional Imagers and a Mirror Pyramid",
"doi": null,
"abstractUrl": "/journal/tp/2007/02/i0356/13rRUyfKIEg",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2021/3864/0/09428126",
"title": "SDAN: Squared Deformable Alignment Network for Learning Misaligned Optical Zoom",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2021/09428126/1uilR0tgfzW",
"parentPublication": {
"id": "proceedings/icme/2021/3864/0",
"title": "2021 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2022/10/09501506",
"title": "Multiple Video Frame Interpolation via Enhanced Deformable Separable Convolution",
"doi": null,
"abstractUrl": "/journal/tp/2022/10/09501506/1vDhRCLfqSc",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNs0kyru",
"title": "2007 IEEE Virtual Reality Conference",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2007",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNwwuE0T",
"doi": "10.1109/VR.2007.352462",
"title": "Laparoscopic Virtual Mirror New Interaction Paradigm for Monitor Based Augmented Reality",
"normalizedTitle": "Laparoscopic Virtual Mirror New Interaction Paradigm for Monitor Based Augmented Reality",
"abstract": "A major roadblock for using augmented reality in many medical and industrial applications is the fact that the user cannot take full advantage of the 3D virtual data. This usually requires the user to move the virtual object, which disturbs the real/virtual alignment, or to move his head around the real objects, which is not always possible and/or practical. This problem becomes more dramatic when a single camera is used for monitor based augmentation, such as in augmented laparoscopic surgery. In this paper we introduce an interaction and 3D visualization paradigm, which presents a new solution to this old problem. The interaction paradigm uses an interactive virtual mirror positioned into the augmented scene, which allows easy and complete interactive visualization of 3D virtual data. This paper focuses on the exemplary application of such visualization techniques to laparoscopic interventions. A large number of such interventions aims at regions inside a specific organ, e.g. blood vessels to be clipped for tumor resection. We use high-resolution intra-operative imaging data generated by a mobile C-arm with cone-beam CT imaging capability. Both the C-arm and the laparoscope are optically tracked and registered in a common world coordinate frame. After patient positioning, port placement, and carbon dioxide insufflation, a C-arm volume is reconstructed during patient exhalation and superimposed in real time on the laparoscopic live video without any need for an additional patient registration procedure. To overcome the missing perception of 3D depth and shape when rendering virtual volume data directly on top of the organ's surface view, we introduce the concept of a laparoscopic virtual mirror: A virtual reflection plane within the live laparoscopic video, which is able to visualize a reflected side view of the organ and its interior. \nThis enables the surgeon to observe the 3D structure of, for example, blood vessels by moving the virtual mirror within the augmented monocular view of the laparoscope.",
"abstracts": [
{
"abstractType": "Regular",
"content": "A major roadblock for using augmented reality in many medical and industrial applications is the fact that the user cannot take full advantage of the 3D virtual data. This usually requires the user to move the virtual object, which disturbs the real/virtual alignment, or to move his head around the real objects, which is not always possible and/or practical. This problem becomes more dramatic when a single camera is used for monitor based augmentation, such as in augmented laparoscopic surgery. In this paper we introduce an interaction and 3D visualization paradigm, which presents a new solution to this old problem. The interaction paradigm uses an interactive virtual mirror positioned into the augmented scene, which allows easy and complete interactive visualization of 3D virtual data. This paper focuses on the exemplary application of such visualization techniques to laparoscopic interventions. A large number of such interventions aims at regions inside a specific organ, e.g. blood vessels to be clipped for tumor resection. We use high-resolution intra-operative imaging data generated by a mobile C-arm with cone-beam CT imaging capability. Both the C-arm and the laparoscope are optically tracked and registered in a common world coordinate frame. After patient positioning, port placement, and carbon dioxide insufflation, a C-arm volume is reconstructed during patient exhalation and superimposed in real time on the laparoscopic live video without any need for an additional patient registration procedure. To overcome the missing perception of 3D depth and shape when rendering virtual volume data directly on top of the organ's surface view, we introduce the concept of a laparoscopic virtual mirror: A virtual reflection plane within the live laparoscopic video, which is able to visualize a reflected side view of the organ and its interior. \nThis enables the surgeon to observe the 3D structure of, for example, blood vessels by moving the virtual mirror within the augmented monocular view of the laparoscope.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "A major roadblock for using augmented reality in many medical and industrial applications is the fact that the user cannot take full advantage of the 3D virtual data. This usually requires the user to move the virtual object, which disturbs the real/virtual alignment, or to move his head around the real objects, which is not always possible and/or practical. This problem becomes more dramatic when a single camera is used for monitor based augmentation, such as in augmented laparoscopic surgery. In this paper we introduce an interaction and 3D visualization paradigm, which presents a new solution to this old problem. The interaction paradigm uses an interactive virtual mirror positioned into the augmented scene, which allows easy and complete interactive visualization of 3D virtual data. This paper focuses on the exemplary application of such visualization techniques to laparoscopic interventions. A large number of such interventions aims at regions inside a specific organ, e.g. blood vessels to be clipped for tumor resection. We use high-resolution intra-operative imaging data generated by a mobile C-arm with cone-beam CT imaging capability. Both the C-arm and the laparoscope are optically tracked and registered in a common world coordinate frame. After patient positioning, port placement, and carbon dioxide insufflation, a C-arm volume is reconstructed during patient exhalation and superimposed in real time on the laparoscopic live video without any need for an additional patient registration procedure. To overcome the missing perception of 3D depth and shape when rendering virtual volume data directly on top of the organ's surface view, we introduce the concept of a laparoscopic virtual mirror: A virtual reflection plane within the live laparoscopic video, which is able to visualize a reflected side view of the organ and its interior. \nThis enables the surgeon to observe the 3D structure of, for example, blood vessels by moving the virtual mirror within the augmented monocular view of the laparoscope.",
"fno": "04161004",
"keywords": [
"Augmented Reality",
"Computerised Tomography",
"Data Visualisation",
"Medical Computing",
"Rendering Computer Graphics",
"Surgery",
"User Interfaces",
"Laparoscopic Virtual Mirror",
"Interaction Paradigm",
"Monitor Based Augmented Reality",
"3 D Virtual Data",
"Virtual Objects",
"3 D Visualization",
"Mobile C Arm",
"Cone Beam CT Imaging",
"Carbon Dioxide Insufflation",
"Virtual Volume Data Rendering",
"Laparoscopes",
"Mirrors",
"Augmented Reality",
"Data Visualization",
"Biomedical Imaging",
"Blood Vessels",
"High Resolution Imaging",
"Optical Imaging",
"Biomedical Monitoring",
"Head",
"Interactive AR Visualization",
"Depth Perception",
"Medical Augmented Reality",
"Laparoscopic Surgery",
"H 5 1 Information Interfaces And Presentation Multimedia Information Systems Artificial",
"Augmented",
"And Virtual Realities",
"H 5 2 Information Interfaces And Presentation User Interfaces Interaction Styles",
"I 3 6 Computer Graphics Methodology And Techniques Interaction Techniques",
"J 3 Life And Medical Sciences"
],
"authors": [
{
"affiliation": "Chair for Computer Aided Medical Procedures and Augmented Reality (CAMP), Technische Universität München, Munich, Germany. e-mail: navab@cs.tum.edu",
"fullName": "Nassir Navab",
"givenName": "Nassir",
"surname": "Navab",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Chair for Computer Aided Medical Procedures and Augmented Reality (CAMP), Technische Universität München, Munich, Germany. e-mail: feuerste@cs.tum.edu",
"fullName": "Marco Feuerstein",
"givenName": "Marco",
"surname": "Feuerstein",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Chair for Computer Aided Medical Procedures and Augmented Reality (CAMP), Technische Universität München, Munich, Germany. e-mail: bichlmei@cs.tum.edu",
"fullName": "Christoph Bichlmeier",
"givenName": "Christoph",
"surname": "Bichlmeier",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2007-03-01T00:00:00",
"pubType": "proceedings",
"pages": "43-50",
"year": "2007",
"issn": "1087-8270",
"isbn": "1-4244-0905-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "04160987",
"articleId": "12OmNxI0KxE",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "04160989",
"articleId": "12OmNvAiSNY",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2005/8929/0/01492775",
"title": "Realistic occlusion effects in mirror-based co-located augmented reality systems",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2005/01492775/12OmNAYoKpz",
"parentPublication": {
"id": "proceedings/vr/2005/8929/0",
"title": "IEEE Virtual Reality 2005",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2017/6647/0/07892332",
"title": "Exploring non-reversing magic mirrors for screen-based augmented reality systems",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892332/12OmNAq3hBL",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2014/6184/0/06948434",
"title": "Computer-Assisted Laparoscopic myomectomy by augmenting the uterus with pre-operative MRI data",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2014/06948434/12OmNBPc8rR",
"parentPublication": {
"id": "proceedings/ismar/2014/6184/0",
"title": "2014 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2007/1749/0/04538836",
"title": "Laparoscopic Virtual Mirror for Understanding Vessel Structure Evaluation Study by Twelve Surgeons",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2007/04538836/12OmNqBtiSv",
"parentPublication": {
"id": "proceedings/ismar/2007/1749/0",
"title": "2007 6th IEEE and ACM International Symposium on Mixed and Augmented Reality",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2012/4660/0/06402565",
"title": "Augmented reality during angiography: Integration of a virtual mirror for improved 2D/3D visualization",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2012/06402565/12OmNyoiYXA",
"parentPublication": {
"id": "proceedings/ismar/2012/4660/0",
"title": "2012 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2010/9343/0/05643555",
"title": "Evaluation of the virtual mirror as a navigational aid for augmented reality driven minimally invasive procedures",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2010/05643555/12OmNzVXNWr",
"parentPublication": {
"id": "proceedings/ismar/2010/9343/0",
"title": "2010 IEEE International Symposium on Mixed and Augmented Reality",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ds-rt/2010/4251/0/4251a203",
"title": "Augmented Rendering of Makeup Features in a Smart Interactive Mirror System for Decision Support in Cosmetic Products Selection",
"doi": null,
"abstractUrl": "/proceedings-article/ds-rt/2010/4251a203/12OmNzcxZqI",
"parentPublication": {
"id": "proceedings/ds-rt/2010/4251/0",
"title": "Distributed Simulation and Real Time Applications, IEEE/ACM International Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/springsim/2019/8388/0/08732872",
"title": "Evaluation of Learning Curve and Peripheral Awareness Using a Novel Multiresolution Foveated Laparoscope",
"doi": null,
"abstractUrl": "/proceedings-article/springsim/2019/08732872/1aIRT7oZCik",
"parentPublication": {
"id": "proceedings/springsim/2019/8388/0",
"title": "2019 Spring Simulation Conference (SpringSim)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvrv/2017/2636/0/263600a288",
"title": "A Virtual Reality Video Stitching System Based on Mirror Pyramids",
"doi": null,
"abstractUrl": "/proceedings-article/icvrv/2017/263600a288/1ap5xH6IZfa",
"parentPublication": {
"id": "proceedings/icvrv/2017/2636/0",
"title": "2017 International Conference on Virtual Reality and Visualization (ICVRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvrv/2020/0497/0/049700a287",
"title": "A novel SLAM method for laparoscopic scene reconstruction with feature patch tracking",
"doi": null,
"abstractUrl": "/proceedings-article/icvrv/2020/049700a287/1vg7UqDNZIY",
"parentPublication": {
"id": "proceedings/icvrv/2020/0497/0",
"title": "2020 International Conference on Virtual Reality and Visualization (ICVRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNykCcdi",
"title": "2016 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"acronym": "cvprw",
"groupId": "1001809",
"volume": "0",
"displayVolume": "0",
"year": "2016",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNyjtNIo",
"doi": "10.1109/CVPRW.2016.15",
"title": "A Low-Cost Mirror-Based Active Perception System for Effective Collision Free Underwater Robotic Navigation",
"normalizedTitle": "A Low-Cost Mirror-Based Active Perception System for Effective Collision Free Underwater Robotic Navigation",
"abstract": "This ongoing research work presents a servo actuated mirror-based design that allows a fixed front-view visual system mounted in an underwater vehicle to extend its field of view by controlling its gaze. We are interested in the autonomous underwater exploration of coral reefs. This type of exploration must involve a cautious and collision-free navigation to avoid damaging the marine ecosystem. Generally, vision systems of underwater vehicles are carefully isolated with mechanical seals to prevent the water from entering. However, this fact causes a strictly dependence between the angle of view of the camera and the pose of the vehicle. Furthermore, the addition of a system to control camera orientation may result in a significantly reduction of useful load capacity and the movement of the vision system could carry undesirable trusting effects, especially at higher speeds. Our design of servo actuated mirror system changes the angle of view of the camera in two degrees of freedom: pan and tilt, and reaches viewing angles from the sides, bottom top and even rear views of the robot, thus enabling a more effective navigation with obstacle avoidance.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This ongoing research work presents a servo actuated mirror-based design that allows a fixed front-view visual system mounted in an underwater vehicle to extend its field of view by controlling its gaze. We are interested in the autonomous underwater exploration of coral reefs. This type of exploration must involve a cautious and collision-free navigation to avoid damaging the marine ecosystem. Generally, vision systems of underwater vehicles are carefully isolated with mechanical seals to prevent the water from entering. However, this fact causes a strictly dependence between the angle of view of the camera and the pose of the vehicle. Furthermore, the addition of a system to control camera orientation may result in a significantly reduction of useful load capacity and the movement of the vision system could carry undesirable trusting effects, especially at higher speeds. Our design of servo actuated mirror system changes the angle of view of the camera in two degrees of freedom: pan and tilt, and reaches viewing angles from the sides, bottom top and even rear views of the robot, thus enabling a more effective navigation with obstacle avoidance.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This ongoing research work presents a servo actuated mirror-based design that allows a fixed front-view visual system mounted in an underwater vehicle to extend its field of view by controlling its gaze. We are interested in the autonomous underwater exploration of coral reefs. This type of exploration must involve a cautious and collision-free navigation to avoid damaging the marine ecosystem. Generally, vision systems of underwater vehicles are carefully isolated with mechanical seals to prevent the water from entering. However, this fact causes a strictly dependence between the angle of view of the camera and the pose of the vehicle. Furthermore, the addition of a system to control camera orientation may result in a significantly reduction of useful load capacity and the movement of the vision system could carry undesirable trusting effects, especially at higher speeds. Our design of servo actuated mirror system changes the angle of view of the camera in two degrees of freedom: pan and tilt, and reaches viewing angles from the sides, bottom top and even rear views of the robot, thus enabling a more effective navigation with obstacle avoidance.",
"fno": "1437a061",
"keywords": [
"Mirrors",
"Cameras",
"Robot Vision Systems",
"Collision Avoidance",
"Navigation",
"Machine Vision"
],
"authors": [
{
"affiliation": null,
"fullName": "Noel Cortés-Pérez",
"givenName": "Noel",
"surname": "Cortés-Pérez",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "L. Abril Torres-Méndez",
"givenName": "L. Abril",
"surname": "Torres-Méndez",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvprw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2016-06-01T00:00:00",
"pubType": "proceedings",
"pages": "61-68",
"year": "2016",
"issn": "2160-7516",
"isbn": "978-1-5090-1437-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "1437a054",
"articleId": "12OmNBPc8zs",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "1437a069",
"articleId": "12OmNwErpsX",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iccvw/2011/0063/0/06130257",
"title": "Underwater sensing with omni-directional stereo camera",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2011/06130257/12OmNAFFdJc",
"parentPublication": {
"id": "proceedings/iccvw/2011/0063/0",
"title": "2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2013/2869/0/06671784",
"title": "See-through window vs. magic mirror: A comparison in supporting visual-motor tasks",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2013/06671784/12OmNAoUTa9",
"parentPublication": {
"id": "proceedings/ismar/2013/2869/0",
"title": "2013 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/robot/1991/2163/0/00131704",
"title": "Collision avoidance using omnidirectional image sensor (COPIS)",
"doi": null,
"abstractUrl": "/proceedings-article/robot/1991/00131704/12OmNBzAcjm",
"parentPublication": {
"id": "proceedings/robot/1991/2163/0",
"title": "Proceedings. 1991 IEEE International Conference on Robotics and Automation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cdc/2000/6638/3/00914273",
"title": "Sliding mode control of an underwater robotic manipulator",
"doi": null,
"abstractUrl": "/proceedings-article/cdc/2000/00914273/12OmNwpoFEi",
"parentPublication": {
"id": "proceedings/cdc/2000/6638/3",
"title": "Proceedings of the 39th IEEE Conference on Decision and Control",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2010/9343/0/05643555",
"title": "Evaluation of the virtual mirror as a navigational aid for augmented reality driven minimally invasive procedures",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2010/05643555/12OmNzVXNWr",
"parentPublication": {
"id": "proceedings/ismar/2010/9343/0",
"title": "2010 IEEE International Symposium on Mixed and Augmented Reality",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icceai/2022/6803/0/680300a456",
"title": "Underwater Sound Source Localization Based on Time-Reversal Mirror and Virtual Array",
"doi": null,
"abstractUrl": "/proceedings-article/icceai/2022/680300a456/1FUUmEuamvm",
"parentPublication": {
"id": "proceedings/icceai/2022/6803/0",
"title": "2022 International Conference on Computer Engineering and Artificial Intelligence (ICCEAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/11/09873973",
"title": "Projective Bisector Mirror (PBM): Concept and Rationale",
"doi": null,
"abstractUrl": "/journal/tg/2022/11/09873973/1GjwGs0MSQg",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600f931",
"title": "Learning Semantic Associations for Mirror Detection",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600f931/1H1m8sVi7e0",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdsba/2020/8164/0/816400a134",
"title": "Servo Control of Fast Steering Mirror Based on Improved Genetic Algorithm",
"doi": null,
"abstractUrl": "/proceedings-article/icdsba/2020/816400a134/1xeWrCnXcpG",
"parentPublication": {
"id": "proceedings/icdsba/2020/8164/0",
"title": "2020 4th Annual International Conference on Data Science and Business Analytics (ICDSBA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900d043",
"title": "Depth-Aware Mirror Segmentation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900d043/1yeLRme2vYI",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNyKJiwQ",
"title": "2013 IEEE International Conference on Computational Photography (ICCP)",
"acronym": "iccp",
"groupId": "1800125",
"volume": "0",
"displayVolume": "0",
"year": "2013",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNzwZ6i8",
"doi": "10.1109/ICCPhot.2013.6528311",
"title": "The omnipolar camera: A new approach to stereo immersive capture",
"normalizedTitle": "The omnipolar camera: A new approach to stereo immersive capture",
"abstract": "We introduce in this paper a camera setup for stereo immersive (omnistereo) capture. An omnistereo pair of images gives stereo information up to 360 degrees around a central observer. Previous methods to produce omnistereo images assume a static scene in order to stitch together multiple images captured by a stereo camera rotating on a fixed tripod. Our omnipolar camera setup uses a minimum of 3 cameras with fisheye lenses. The multiple epipoles are used as locations to stitch the images together and produce omnistereo images with no horizontal misalignments due to parallax. We show results of using 3 cameras to capture an unconstrained dynamic scene while the camera is travelling. The produced omnistereo videos are formatted to be displayed on a cylindrical screen or dome.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We introduce in this paper a camera setup for stereo immersive (omnistereo) capture. An omnistereo pair of images gives stereo information up to 360 degrees around a central observer. Previous methods to produce omnistereo images assume a static scene in order to stitch together multiple images captured by a stereo camera rotating on a fixed tripod. Our omnipolar camera setup uses a minimum of 3 cameras with fisheye lenses. The multiple epipoles are used as locations to stitch the images together and produce omnistereo images with no horizontal misalignments due to parallax. We show results of using 3 cameras to capture an unconstrained dynamic scene while the camera is travelling. The produced omnistereo videos are formatted to be displayed on a cylindrical screen or dome.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We introduce in this paper a camera setup for stereo immersive (omnistereo) capture. An omnistereo pair of images gives stereo information up to 360 degrees around a central observer. Previous methods to produce omnistereo images assume a static scene in order to stitch together multiple images captured by a stereo camera rotating on a fixed tripod. Our omnipolar camera setup uses a minimum of 3 cameras with fisheye lenses. The multiple epipoles are used as locations to stitch the images together and produce omnistereo images with no horizontal misalignments due to parallax. We show results of using 3 cameras to capture an unconstrained dynamic scene while the camera is travelling. The produced omnistereo videos are formatted to be displayed on a cylindrical screen or dome.",
"fno": "06528311",
"keywords": [
"Cameras",
"Lenses",
"Observers",
"Image Resolution",
"Mirrors",
"Zirconium"
],
"authors": [
{
"affiliation": "Dept. d'Inf. et Rech. Operationnelle, Univ. de Montreal, Montréal, QC, Canada",
"fullName": "V. Chapdelaine-Couture",
"givenName": "V.",
"surname": "Chapdelaine-Couture",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Dept. d'Inf. et Rech. Operationnelle, Univ. de Montreal, Montréal, QC, Canada",
"fullName": "S. Roy",
"givenName": "S.",
"surname": "Roy",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iccp",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2013-04-01T00:00:00",
"pubType": "proceedings",
"pages": "1-9",
"year": "2013",
"issn": null,
"isbn": "978-1-4673-6463-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "06528310",
"articleId": "12OmNwGZNI0",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "06528313",
"articleId": "12OmNz3bdGu",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iccvw/2011/0063/0/06130256",
"title": "3D environment measurement using binocular stereo and motion stereo by mobile robot with omnidirectional stereo camera",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2011/06130256/12OmNBl6EHn",
"parentPublication": {
"id": "proceedings/iccvw/2011/0063/0",
"title": "2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2005/8929/0/01492777",
"title": "Spherical stereo for the construction of immersive VR environment",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2005/01492777/12OmNBlofN0",
"parentPublication": {
"id": "proceedings/vr/2005/8929/0",
"title": "IEEE Virtual Reality 2005",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccce/2014/7635/0/7635a088",
"title": "Multi-stereo Camera System to Enhance the Position Accuracy of Image-Guided Surgery Markers",
"doi": null,
"abstractUrl": "/proceedings-article/iccce/2014/7635a088/12OmNqI04VA",
"parentPublication": {
"id": "proceedings/iccce/2014/7635/0",
"title": "2014 International Conference on Computer & Communication Engineering (ICCCE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/omnivis/2000/0704/0/07040054",
"title": "Automatic Disparity Control in Stereo Panoramas (OmniStereo)",
"doi": null,
"abstractUrl": "/proceedings-article/omnivis/2000/07040054/12OmNxaeu2M",
"parentPublication": {
"id": "proceedings/omnivis/2000/0704/0",
"title": "Omnidirectional Vision, Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/1990/2057/0/00139576",
"title": "Active surface reconstruction by integrating focus, vergence, stereo, and camera calibration",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/1990/00139576/12OmNym2c6Y",
"parentPublication": {
"id": "proceedings/iccv/1990/2057/0",
"title": "Proceedings Third International Conference on Computer Vision",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2011/1101/0/06126376",
"title": "Panoramic stereo video textures",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2011/06126376/12OmNzcPAsH",
"parentPublication": {
"id": "proceedings/iccv/2011/1101/0",
"title": "2011 IEEE International Conference on Computer Vision (ICCV 2011)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sitis/2007/3122/0/3122a979",
"title": "Obstacle Detection Using a Single Camera Stereo Sensor",
"doi": null,
"abstractUrl": "/proceedings-article/sitis/2007/3122a979/12OmNzkuKJ9",
"parentPublication": {
"id": "proceedings/sitis/2007/3122/0",
"title": "2007 Third International IEEE Conference on Signal-Image Technologies and Internet-Based System",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2016/8851/0/8851d755",
"title": "Panoramic Stereo Videos with a Single Camera",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2016/8851d755/12OmNzmclvZ",
"parentPublication": {
"id": "proceedings/cvpr/2016/8851/0",
"title": "2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdcsw/2012/4686/0/4686a088",
"title": "Measuring Distance with Mobile Phones Using Single-Camera Stereo Vision",
"doi": null,
"abstractUrl": "/proceedings-article/icdcsw/2012/4686a088/12OmNzxgHAL",
"parentPublication": {
"id": "proceedings/icdcsw/2012/4686/0",
"title": "2012 32nd International Conference on Distributed Computing Systems Workshops",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600m2962",
"title": "Uniform Subdivision of Omnidirectional Camera Space for Efficient Spherical Stereo Matching",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600m2962/1H1iRK3bg5O",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNrAdsuf",
"title": "2015 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"acronym": "ismar",
"groupId": "1000465",
"volume": "0",
"displayVolume": "0",
"year": "2015",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNvTTcga",
"doi": "10.1109/ISMAR.2015.61",
"title": "[POSTER] Avatar-Mediated Contact Interaction between Remote Users for Social Telepresence",
"normalizedTitle": "[POSTER] Avatar-Mediated Contact Interaction between Remote Users for Social Telepresence",
"abstract": "Social touch such as a handshake increases the sense of coexistence and closeness between remote users in a social telepresence environment, but creating such coordinated contact movements with a distant person is extremely difficult if given only visual feedback, without haptic feedback. This paper presents a method to enable hand-contact interaction between remote users in an avatar-mediated telepresence environment. The key approach is, while the avatar directly follows its owner's motion in normal conditions, it adjusts the pose to maintain contact with the other user when the two users attempt to make contact interaction. To this end, we develop classifiers to recognize the users' intention for the contact interaction. The contact classifier identifies whether the users try to initiate contact when they are not in contact, and the separation classifier identifies whether the two in contact attempt to break contact. The classifiers are trained based on a set of geometric distance features. During the contact phase, inverse kinematics is solved to determine the pose of the avatar's arm so as to initiate and maintain natural contact with the other user's hand. Our system is unique in that two remote users can perform real time hand contact interaction in a social telepresence environment.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Social touch such as a handshake increases the sense of coexistence and closeness between remote users in a social telepresence environment, but creating such coordinated contact movements with a distant person is extremely difficult if given only visual feedback, without haptic feedback. This paper presents a method to enable hand-contact interaction between remote users in an avatar-mediated telepresence environment. The key approach is, while the avatar directly follows its owner's motion in normal conditions, it adjusts the pose to maintain contact with the other user when the two users attempt to make contact interaction. To this end, we develop classifiers to recognize the users' intention for the contact interaction. The contact classifier identifies whether the users try to initiate contact when they are not in contact, and the separation classifier identifies whether the two in contact attempt to break contact. The classifiers are trained based on a set of geometric distance features. During the contact phase, inverse kinematics is solved to determine the pose of the avatar's arm so as to initiate and maintain natural contact with the other user's hand. Our system is unique in that two remote users can perform real time hand contact interaction in a social telepresence environment.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Social touch such as a handshake increases the sense of coexistence and closeness between remote users in a social telepresence environment, but creating such coordinated contact movements with a distant person is extremely difficult if given only visual feedback, without haptic feedback. This paper presents a method to enable hand-contact interaction between remote users in an avatar-mediated telepresence environment. The key approach is, while the avatar directly follows its owner's motion in normal conditions, it adjusts the pose to maintain contact with the other user when the two users attempt to make contact interaction. To this end, we develop classifiers to recognize the users' intention for the contact interaction. The contact classifier identifies whether the users try to initiate contact when they are not in contact, and the separation classifier identifies whether the two in contact attempt to break contact. The classifiers are trained based on a set of geometric distance features. During the contact phase, inverse kinematics is solved to determine the pose of the avatar's arm so as to initiate and maintain natural contact with the other user's hand. Our system is unique in that two remote users can perform real time hand contact interaction in a social telepresence environment.",
"fno": "7660a194",
"keywords": [
"Avatars",
"Kinematics",
"Real Time Systems",
"Support Vector Machines",
"Joints",
"Augmented Reality",
"Character Animation",
"Telepresence",
"Avatar Interaction"
],
"authors": [
{
"affiliation": null,
"fullName": "Jihye Oh",
"givenName": "Jihye",
"surname": "Oh",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Yeonjoon Kim",
"givenName": "Yeonjoon",
"surname": "Kim",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Taeil Jin",
"givenName": "Taeil",
"surname": "Jin",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Sukwon Lee",
"givenName": "Sukwon",
"surname": "Lee",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Youjin Lee",
"givenName": "Youjin",
"surname": "Lee",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Sung-Hee Lee",
"givenName": "Sung-Hee",
"surname": "Lee",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2015-09-01T00:00:00",
"pubType": "proceedings",
"pages": "194-195",
"year": "2015",
"issn": null,
"isbn": "978-1-4673-7660-0",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "7660a192",
"articleId": "12OmNCd2rAE",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "7660a196",
"articleId": "12OmNBuL1n1",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2016/0836/0/07504684",
"title": "MMSpace: Kinetically-augmented telepresence for small group-to-group conversations",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2016/07504684/12OmNvlg8fs",
"parentPublication": {
"id": "proceedings/vr/2016/0836/0",
"title": "2016 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/11/09873991",
"title": "Predict-and-Drive: Avatar Motion Adaption in Room-Scale Augmented Reality Telepresence with Heterogeneous Spaces",
"doi": null,
"abstractUrl": "/journal/tg/2022/11/09873991/1GjwGcGrRmg",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a503",
"title": "Studying “Avatar Transitions” in Augmented Reality: Influence on Sense of Embodiment and Physiological Activity",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a503/1J7W9twFolO",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798044",
"title": "Effect of Full Body Avatar in Augmented Reality Remote Collaboration",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798044/1cJ14GMFJdK",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797719",
"title": "The Effect of Avatar Appearance on Social Presence in an Augmented Reality Remote Collaboration",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797719/1cJ1dVsXQDS",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798152",
"title": "The Influence of Size in Augmented Reality Telepresence Avatars",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798152/1cJ1djEUmv6",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2020/5608/0/09089599",
"title": "An Optical Design for Avatar-User Co-axial Viewpoint Telepresence",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2020/09089599/1jIx8SwZIuQ",
"parentPublication": {
"id": "proceedings/vr/2020/5608/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2020/5608/0/09089654",
"title": "Effects of Locomotion Style and Body Visibility of a Telepresence Avatar",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2020/09089654/1jIxd00PzX2",
"parentPublication": {
"id": "proceedings/vr/2020/5608/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/07/09257094",
"title": "Output-Sensitive Avatar Representations for Immersive Telepresence",
"doi": null,
"abstractUrl": "/journal/tg/2022/07/09257094/1oFCABrJUmA",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/11/09523831",
"title": "Avatars for Teleconsultation: Effects of Avatar Embodiment Techniques on User Perception in 3D Asymmetric Telepresence",
"doi": null,
"abstractUrl": "/journal/tg/2021/11/09523831/1wpqru2GjIY",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1J7W6LmbCw0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"acronym": "ismar-adjunct",
"groupId": "9973799",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1J7WodvTPzy",
"doi": "10.1109/ISMAR-Adjunct57072.2022.00077",
"title": "Effects of Optical See-Through Displays on Self-Avatar Appearance in Augmented Reality",
"normalizedTitle": "Effects of Optical See-Through Displays on Self-Avatar Appearance in Augmented Reality",
"abstract": "Display technologies in the fields of virtual and augmented reality affect the appearance of human representations, such as avatars used in telepresence or entertainment applications, based on the user's current viewing conditions. With changing viewing conditions, it is possible that the perceived appearance of one's avatar changes in an unexpected or undesired manner, which may change user behavior towards these avatars and cause frustration in using the AR display. In this paper, we describe a user study (N=20) where participants saw themselves in a mirror standing next to their own avatar through use of a HoloLens 2 optical see-through head-mounted display. Participants were tasked to match their avatar's appearance to their own under two environment lighting conditions (200 lux and 2,000 lux). Our results showed that the intensity of environment lighting had a significant effect on participants selected skin colors for their avatars, where participants with dark skin colors tended to make their avatar's skin color lighter, nearly to the level of participants with light skin color. Further, in particular female participants made their avatar's hair color darker for the lighter environment lighting condition. We discuss our results with a view on technological limitations and effects on the diversity of avatar representations on optical see-through displays.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Display technologies in the fields of virtual and augmented reality affect the appearance of human representations, such as avatars used in telepresence or entertainment applications, based on the user's current viewing conditions. With changing viewing conditions, it is possible that the perceived appearance of one's avatar changes in an unexpected or undesired manner, which may change user behavior towards these avatars and cause frustration in using the AR display. In this paper, we describe a user study (N=20) where participants saw themselves in a mirror standing next to their own avatar through use of a HoloLens 2 optical see-through head-mounted display. Participants were tasked to match their avatar's appearance to their own under two environment lighting conditions (200 lux and 2,000 lux). Our results showed that the intensity of environment lighting had a significant effect on participants selected skin colors for their avatars, where participants with dark skin colors tended to make their avatar's skin color lighter, nearly to the level of participants with light skin color. Further, in particular female participants made their avatar's hair color darker for the lighter environment lighting condition. We discuss our results with a view on technological limitations and effects on the diversity of avatar representations on optical see-through displays.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Display technologies in the fields of virtual and augmented reality affect the appearance of human representations, such as avatars used in telepresence or entertainment applications, based on the user's current viewing conditions. With changing viewing conditions, it is possible that the perceived appearance of one's avatar changes in an unexpected or undesired manner, which may change user behavior towards these avatars and cause frustration in using the AR display. In this paper, we describe a user study (N=20) where participants saw themselves in a mirror standing next to their own avatar through use of a HoloLens 2 optical see-through head-mounted display. Participants were tasked to match their avatar's appearance to their own under two environment lighting conditions (200 lux and 2,000 lux). Our results showed that the intensity of environment lighting had a significant effect on participants selected skin colors for their avatars, where participants with dark skin colors tended to make their avatar's skin color lighter, nearly to the level of participants with light skin color. Further, in particular female participants made their avatar's hair color darker for the lighter environment lighting condition. We discuss our results with a view on technological limitations and effects on the diversity of avatar representations on optical see-through displays.",
"fno": "536500a352",
"keywords": [
"Augmented Reality",
"Avatars",
"Computer Games",
"Helmet Mounted Displays",
"Image Colour Analysis",
"Skin",
"Virtual Reality",
"AR Display",
"Augmented Reality",
"Avatar Changes",
"Avatar Representations",
"Dark Skin Colors",
"Display Technologies",
"Environment Lighting Conditions",
"Holo Lens 2 Optical See Through",
"Light Skin Color",
"Lighter Environment Lighting Condition",
"Optical See Through Displays",
"Participants Selected Skin Colors",
"Particular Female Participants",
"Perceived Appearance",
"Self Avatar Appearance",
"Telepresence",
"Undesired Manner",
"Unexpected Manner",
"User Behavior",
"Viewing Conditions",
"Telepresence",
"Head Mounted Displays",
"Optical Design",
"Avatars",
"Lighting",
"Color",
"Skin",
"Human Centered Computing Human Computer Interaction HCI HCI Design And Evaluation Methods User Studies",
"Human Centered Computing Human Computer Interaction HCI Interaction Paradigms Mixed Augmented Reality"
],
"authors": [
{
"affiliation": "Willamette University",
"fullName": "Meelad Doroodchi",
"givenName": "Meelad",
"surname": "Doroodchi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Montclair State University",
"fullName": "Priscilla Ramos",
"givenName": "Priscilla",
"surname": "Ramos",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Central Florida",
"fullName": "Austin Erickson",
"givenName": "Austin",
"surname": "Erickson",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Central Florida",
"fullName": "Hiroshi Furuya",
"givenName": "Hiroshi",
"surname": "Furuya",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Central Florida",
"fullName": "Juanita Benjamin",
"givenName": "Juanita",
"surname": "Benjamin",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Central Florida",
"fullName": "Gerd Bruder",
"givenName": "Gerd",
"surname": "Bruder",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Central Florida",
"fullName": "Gregory F. Welch",
"givenName": "Gregory F.",
"surname": "Welch",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar-adjunct",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-10-01T00:00:00",
"pubType": "proceedings",
"pages": "352-356",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-5365-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "536500a348",
"articleId": "1J7WmlCrYUo",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "536500a357",
"articleId": "1J7Wqx1gtHO",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2015/1727/0/07223379",
"title": "Avatar anthropomorphism and illusion of body ownership in VR",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2015/07223379/12OmNAWpyrk",
"parentPublication": {
"id": "proceedings/vr/2015/1727/0",
"title": "2015 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2013/4795/0/06549379",
"title": "Head motion animation using avatar gaze space",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2013/06549379/12OmNxRWI3d",
"parentPublication": {
"id": "proceedings/vr/2013/4795/0",
"title": "2013 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a770",
"title": "Emotional Empathy and Facial Mimicry of Avatar Faces",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a770/1CJdHd5yTSM",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a001",
"title": "A Cardboard-Based Virtual Reality Study on Self-Avatar Appearance and Breathing",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a001/1CJdXjsLKBG",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/11/09873991",
"title": "Predict-and-Drive: Avatar Motion Adaption in Room-Scale Augmented Reality Telepresence with Heterogeneous Spaces",
"doi": null,
"abstractUrl": "/journal/tg/2022/11/09873991/1GjwGcGrRmg",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a772",
"title": "Embodiment of an Avatar with Unnatural Arm Movements",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a772/1J7W9fEjd6g",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a686",
"title": "Exploring Augmented Reality Notification Placement while Communicating with Virtual Avatar",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a686/1J7WgWfFoOs",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797719",
"title": "The Effect of Avatar Appearance on Social Presence in an Augmented Reality Remote Collaboration",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797719/1cJ1dVsXQDS",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798152",
"title": "The Influence of Size in Augmented Reality Telepresence Avatars",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798152/1cJ1djEUmv6",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/07/08952604",
"title": "Effect of Avatar Appearance on Detection Thresholds for Remapped Hand Movements",
"doi": null,
"abstractUrl": "/journal/tg/2021/07/08952604/1gqqhpSZ19S",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNz5JC3u",
"title": "2007 2nd Joint EuroHaptics Conference and Symposium on Haptic Interfaces for Virtual Environments and Teleoperator Systems",
"acronym": "whc",
"groupId": "1001635",
"volume": "0",
"displayVolume": "0",
"year": "2007",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNro0IcJ",
"doi": "10.1109/WHC.2007.114",
"title": "Tilt Perception by Constant Tactile and Constant Proprioceptive Feedback through a Human System Interface",
"normalizedTitle": "Tilt Perception by Constant Tactile and Constant Proprioceptive Feedback through a Human System Interface",
"abstract": "Tilt perception through a haptic human system interface is experimentally investigated. Tactile feedback is provided by vibration motors and proprioceptive feedback by the Cybergrasp exoskeleton. Enriching mere vibrotactile feedback by additional constant force feedback has not been found to influence human tilt perception. Participants' verbal and haptic estimations of the displayed tilt were highly accurate. As expected, tilt estimations depend on the actual tilt",
"abstracts": [
{
"abstractType": "Regular",
"content": "Tilt perception through a haptic human system interface is experimentally investigated. Tactile feedback is provided by vibration motors and proprioceptive feedback by the Cybergrasp exoskeleton. Enriching mere vibrotactile feedback by additional constant force feedback has not been found to influence human tilt perception. Participants' verbal and haptic estimations of the displayed tilt were highly accurate. As expected, tilt estimations depend on the actual tilt",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Tilt perception through a haptic human system interface is experimentally investigated. Tactile feedback is provided by vibration motors and proprioceptive feedback by the Cybergrasp exoskeleton. Enriching mere vibrotactile feedback by additional constant force feedback has not been found to influence human tilt perception. Participants' verbal and haptic estimations of the displayed tilt were highly accurate. As expected, tilt estimations depend on the actual tilt",
"fno": "27380342",
"keywords": [
"Force Feedback",
"Haptic Interfaces",
"Human Computer Interaction",
"Tactile Proprioceptive Feedback",
"Constant Proprioceptive Feedback",
"Haptic Human System Interface",
"Vibration Motors",
"Cybergrasp Exoskeleton",
"Vibrotactile Feedback",
"Force Feedback",
"Human Tilt Perception",
"Haptic Estimations",
"Tilt Estimations",
"Fingers",
"Force Feedback",
"Haptic Interfaces",
"Displays",
"Position Measurement",
"Human Factors",
"Automatic Control",
"Psychology",
"Exoskeletons",
"Anisotropic Magnetoresistance"
],
"authors": [
{
"affiliation": "Universität der Bundeswehr München",
"fullName": "Franziska K.B. Freyberger",
"givenName": "Franziska K.B.",
"surname": "Freyberger",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Technische Universitat Munchen",
"fullName": "Martin Kuschel",
"givenName": "Martin",
"surname": "Kuschel",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Universitat der Bundeswehr Munchen",
"fullName": "Berthold Farber",
"givenName": "Berthold",
"surname": "Farber",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Technische Universitat Munchen",
"fullName": "Martin Buss",
"givenName": "Martin",
"surname": "Buss",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Carnegie Mellon University",
"fullName": "Roberta L. Klatzky",
"givenName": "Roberta L.",
"surname": "Klatzky",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "whc",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2007-03-01T00:00:00",
"pubType": "proceedings",
"pages": "342-347",
"year": "2007",
"issn": null,
"isbn": "0-7695-2738-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "04145186",
"articleId": "12OmNxVDuQ4",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "27380274",
"articleId": "12OmNweBUOK",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/haptics/2004/2112/0/21120208",
"title": "Can Haptic Feedback Improve the Perception of Self-Motion in Virtual Reality?",
"doi": null,
"abstractUrl": "/proceedings-article/haptics/2004/21120208/12OmNBzRNsV",
"parentPublication": {
"id": "proceedings/haptics/2004/2112/0",
"title": "Haptic Interfaces for Virtual Environment and Teleoperator Systems, International Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/whc/2009/3858/0/04810845",
"title": "Stiffness discrimination with visual and proprioceptive cues",
"doi": null,
"abstractUrl": "/proceedings-article/whc/2009/04810845/12OmNClQ0q5",
"parentPublication": {
"id": "proceedings/whc/2009/3858/0",
"title": "World Haptics Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/whc/2007/2738/0/04145197",
"title": "Role of vision on haptic length perception",
"doi": null,
"abstractUrl": "/proceedings-article/whc/2007/04145197/12OmNClQ0yd",
"parentPublication": {
"id": "proceedings/whc/2007/2738/0",
"title": "2007 2nd Joint EuroHaptics Conference and Symposium on Haptic Interfaces for Virtual Environments and Teleoperator Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/acii/2013/5048/0/5048a215",
"title": "EmotionAir: Perception of Emotions from Air Jet Based Tactile Stimulation",
"doi": null,
"abstractUrl": "/proceedings-article/acii/2013/5048a215/12OmNrMZpEF",
"parentPublication": {
"id": "proceedings/acii/2013/5048/0",
"title": "2013 Humaine Association Conference on Affective Computing and Intelligent Interaction (ACII)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/whc/2007/2738/0/27380409",
"title": "A fingertip haptic display for improving local perception of shape cues",
"doi": null,
"abstractUrl": "/proceedings-article/whc/2007/27380409/12OmNwcl7KO",
"parentPublication": {
"id": "proceedings/whc/2007/2738/0",
"title": "2007 2nd Joint EuroHaptics Conference and Symposium on Haptic Interfaces for Virtual Environments and Teleoperator Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/haptics/2008/2005/0/04479915",
"title": "Influence of visuomotor action on visual-haptic simultaneous perception: A psychophysical study",
"doi": null,
"abstractUrl": "/proceedings-article/haptics/2008/04479915/12OmNyQph4i",
"parentPublication": {
"id": "proceedings/haptics/2008/2005/0",
"title": "IEEE Haptics Symposium 2008",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2010/6237/0/05444791",
"title": "Influence of tactile feedback and presence on egocentric distance perception in virtual environments",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2010/05444791/12OmNyoAA64",
"parentPublication": {
"id": "proceedings/vr/2010/6237/0",
"title": "2010 IEEE Virtual Reality Conference (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/th/2013/04/tth2013040453",
"title": "Human Detection and Discrimination of Tactile Repeatability, Mechanical Backlash, and Temporal Delay in a Combined Tactile-Kinesthetic Haptic Display System",
"doi": null,
"abstractUrl": "/journal/th/2013/04/tth2013040453/13rRUyeCkau",
"parentPublication": {
"id": "trans/th",
"title": "IEEE Transactions on Haptics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798372",
"title": "Player Perception Augmentation for Beginners Using Visual and Haptic Feedback in Ball Game",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798372/1cJ0JDMI0ZW",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a575",
"title": "The Effect of the Virtual Object Size on Weight Perception Augmented with Pseudo-Haptic Feedback",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a575/1tnWwW9JGXC",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNwB2dXt",
"title": "2007 International Conference on Cyberworlds (CW'07)",
"acronym": "cw",
"groupId": "1000175",
"volume": "0",
"displayVolume": "0",
"year": "2007",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNwDj1fx",
"doi": "10.1109/CW.2007.17",
"title": "Perception of Compliant Environments through a Visual-Haptic Human System Interface",
"normalizedTitle": "Perception of Compliant Environments through a Visual-Haptic Human System Interface",
"abstract": "Perception of compliant environments through a human system interface with visual and proprioceptive feedback is investigated. Participants had to explore the virtual environment by gripping with two fingers. Haptically, compliance was generated by an admittance control scheme. Perception of compliance under conflicting multimodal information was analyzed using an adaptive staircase method. Inter alia, experiments showed that conflicts could be detected when compliances by both modalities differ more than 55%.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Perception of compliant environments through a human system interface with visual and proprioceptive feedback is investigated. Participants had to explore the virtual environment by gripping with two fingers. Haptically, compliance was generated by an admittance control scheme. Perception of compliance under conflicting multimodal information was analyzed using an adaptive staircase method. Inter alia, experiments showed that conflicts could be detected when compliances by both modalities differ more than 55%.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Perception of compliant environments through a human system interface with visual and proprioceptive feedback is investigated. Participants had to explore the virtual environment by gripping with two fingers. Haptically, compliance was generated by an admittance control scheme. Perception of compliance under conflicting multimodal information was analyzed using an adaptive staircase method. Inter alia, experiments showed that conflicts could be detected when compliances by both modalities differ more than 55%.",
"fno": "04390935",
"keywords": [
"Feedback",
"Haptic Interfaces",
"Human Computer Interaction",
"Mechanoception",
"Compliant Environments",
"Visual Haptic Human System Interface",
"Proprioceptive Feedback",
"Admittance Control Scheme",
"Multimodal Information",
"Haptic Interfaces",
"Feedback",
"Fingers",
"Psychology",
"Testing",
"Object Detection",
"Human Factors",
"Automatic Control",
"Virtual Environment",
"Admittance",
"Multimodal Integration",
"Boundaries Of Integration",
"Psychophysics",
"Data Reduction",
"Compliance"
],
"authors": [
{
"affiliation": "Univ. der Bundeswehr Munchen, Munich",
"fullName": "Franziska K.B. Freyberger",
"givenName": "Franziska K.B.",
"surname": "Freyberger",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Tech. Univ. Munchen, Munich",
"fullName": "Martin Kuschel",
"givenName": "Martin",
"surname": "Kuschel",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Tech. Univ. Munchen, Munich",
"fullName": "Martin Buss",
"givenName": "Martin",
"surname": "Buss",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Univ. der Bundeswehr Munchen, Munich",
"fullName": "Berthold Farber",
"givenName": "Berthold",
"surname": "Farber",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2007-10-01T00:00:00",
"pubType": "proceedings",
"pages": "314-321",
"year": "2007",
"issn": null,
"isbn": "0-7695-3005-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "30050179",
"articleId": "12OmNzC5SOV",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "30050187",
"articleId": "12OmNCdTeP8",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/whc/2007/2738/0/04145197",
"title": "Role of vision on haptic length perception",
"doi": null,
"abstractUrl": "/proceedings-article/whc/2007/04145197/12OmNClQ0yd",
"parentPublication": {
"id": "proceedings/whc/2007/2738/0",
"title": "2007 2nd Joint EuroHaptics Conference and Symposium on Haptic Interfaces for Virtual Environments and Teleoperator Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/whc/2007/2738/0/27380342",
"title": "Tilt Perception by Constant Tactile and Constant Proprioceptive Feedback through a Human System Interface",
"doi": null,
"abstractUrl": "/proceedings-article/whc/2007/27380342/12OmNro0IcJ",
"parentPublication": {
"id": "proceedings/whc/2007/2738/0",
"title": "2007 2nd Joint EuroHaptics Conference and Symposium on Haptic Interfaces for Virtual Environments and Teleoperator Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/haptics/2008/2005/0/04479917",
"title": "Visual-Haptic Perception of Compliance: Fusion of Visual and Haptic Information",
"doi": null,
"abstractUrl": "/proceedings-article/haptics/2008/04479917/12OmNvAAth8",
"parentPublication": {
"id": "proceedings/haptics/2008/2005/0",
"title": "IEEE Haptics Symposium 2008",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cbms/1999/0234/0/02340029",
"title": "Development of Stereoscopic-Haptic Virtual Environments",
"doi": null,
"abstractUrl": "/proceedings-article/cbms/1999/02340029/12OmNwHz090",
"parentPublication": {
"id": "proceedings/cbms/1999/0234/0",
"title": "Proceedings 12th IEEE Symposium on Computer-Based Medical Systems (Cat. No.99CB36365)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/whc/2007/2738/0/27380409",
"title": "A fingertip haptic display for improving local perception of shape cues",
"doi": null,
"abstractUrl": "/proceedings-article/whc/2007/27380409/12OmNwcl7KO",
"parentPublication": {
"id": "proceedings/whc/2007/2738/0",
"title": "2007 2nd Joint EuroHaptics Conference and Symposium on Haptic Interfaces for Virtual Environments and Teleoperator Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/whc/2007/2738/0/04145153",
"title": "Texture Gradients and Perceptual Constancy under Haptic Exploration",
"doi": null,
"abstractUrl": "/proceedings-article/whc/2007/04145153/12OmNyUnEDQ",
"parentPublication": {
"id": "proceedings/whc/2007/2738/0",
"title": "2007 2nd Joint EuroHaptics Conference and Symposium on Haptic Interfaces for Virtual Environments and Teleoperator Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/th/2009/04/tth2009040189",
"title": "Cues for Haptic Perception of Compliance",
"doi": null,
"abstractUrl": "/journal/th/2009/04/tth2009040189/13rRUEgarsN",
"parentPublication": {
"id": "trans/th",
"title": "IEEE Transactions on Haptics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/th/2010/04/tth2010040234",
"title": "Combination and Integration in the Perception of Visual-Haptic Compliance Information",
"doi": null,
"abstractUrl": "/journal/th/2010/04/tth2010040234/13rRUwd9CGb",
"parentPublication": {
"id": "trans/th",
"title": "IEEE Transactions on Haptics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/th/2011/01/tth2011010028",
"title": "Perception and Haptic Rendering of Friction Moments",
"doi": null,
"abstractUrl": "/journal/th/2011/01/tth2011010028/13rRUygBw7j",
"parentPublication": {
"id": "trans/th",
"title": "IEEE Transactions on Haptics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798214",
"title": "Human Perception of a Haptic Shape-changing Interface with Variable Rigidity and Size",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798214/1cJ0QSLRO6Y",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNAYXWAF",
"title": "2016 IEEE Virtual Reality (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2016",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNyqzLXv",
"doi": "10.1109/VR.2016.7504686",
"title": "A lightweight electrotactile feedback device for grasp improvement in immersive virtual environments",
"normalizedTitle": "A lightweight electrotactile feedback device for grasp improvement in immersive virtual environments",
"abstract": "An immersive virtual environment is the ideal platform for the planning and training of on-orbit servicing missions, as it provides a flexible and safe environment. In such kind of virtual assembly simulation, grasping virtual objects is one of the most common and natural interactions. However, unlike grasping objects in the real world, it is a non-trivial task in virtual environments, where the primary feedback is visual only. A lot of research investigated ways to provide haptic feedback, such as force, vibrational and electrotactile feedback. Such devices, however, are usually uncomfortable and hard to integrate in projection-based immersive YR systems. In this paper, we present a novel, small and lightweight electro-tactile feedback device, specifically designed for immersive virtual environments. It consists of a small tactor with eight electrodes for each finger and a signal generator attached to the user's hand or arm. Our device can be easily integrated with an existing optical finger tracking system. The study presented in this paper assesses the feasibility and usability of the interaction device. An experiment was conducted in a repeated measures design using the electrotactile feedback modality as independent variable. As benchmark, we chose three typical assembly tasks of a YR simulation for satellite on-orbit servicing missions, including pressing a button, switching a lever switch, and pulling a module from its slot. Results show that electrotactile feedback improved the user's grasping in our virtual on-orbit servicing scenario. The task completion time was significantly lower for all three tasks and the precision of the user's interaction was higher. The workload reported by the participants was significantly lower when using electrotactile feedback. Additionally, users were more confident with their performance while completing the tasks with electrotactile feedback. 
We describe the device, outline the user study and report the results.",
"abstracts": [
{
"abstractType": "Regular",
"content": "An immersive virtual environment is the ideal platform for the planning and training of on-orbit servicing missions, as it provides a flexible and safe environment. In such kind of virtual assembly simulation, grasping virtual objects is one of the most common and natural interactions. However, unlike grasping objects in the real world, it is a non-trivial task in virtual environments, where the primary feedback is visual only. A lot of research investigated ways to provide haptic feedback, such as force, vibrational and electrotactile feedback. Such devices, however, are usually uncomfortable and hard to integrate in projection-based immersive YR systems. In this paper, we present a novel, small and lightweight electro-tactile feedback device, specifically designed for immersive virtual environments. It consists of a small tactor with eight electrodes for each finger and a signal generator attached to the user's hand or arm. Our device can be easily integrated with an existing optical finger tracking system. The study presented in this paper assesses the feasibility and usability of the interaction device. An experiment was conducted in a repeated measures design using the electrotactile feedback modality as independent variable. As benchmark, we chose three typical assembly tasks of a YR simulation for satellite on-orbit servicing missions, including pressing a button, switching a lever switch, and pulling a module from its slot. Results show that electrotactile feedback improved the user's grasping in our virtual on-orbit servicing scenario. The task completion time was significantly lower for all three tasks and the precision of the user's interaction was higher. The workload reported by the participants was significantly lower when using electrotactile feedback. Additionally, users were more confident with their performance while completing the tasks with electrotactile feedback. 
We describe the device, outline the user study and report the results.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "An immersive virtual environment is the ideal platform for the planning and training of on-orbit servicing missions, as it provides a flexible and safe environment. In such kind of virtual assembly simulation, grasping virtual objects is one of the most common and natural interactions. However, unlike grasping objects in the real world, it is a non-trivial task in virtual environments, where the primary feedback is visual only. A lot of research investigated ways to provide haptic feedback, such as force, vibrational and electrotactile feedback. Such devices, however, are usually uncomfortable and hard to integrate in projection-based immersive YR systems. In this paper, we present a novel, small and lightweight electro-tactile feedback device, specifically designed for immersive virtual environments. It consists of a small tactor with eight electrodes for each finger and a signal generator attached to the user's hand or arm. Our device can be easily integrated with an existing optical finger tracking system. The study presented in this paper assesses the feasibility and usability of the interaction device. An experiment was conducted in a repeated measures design using the electrotactile feedback modality as independent variable. As benchmark, we chose three typical assembly tasks of a YR simulation for satellite on-orbit servicing missions, including pressing a button, switching a lever switch, and pulling a module from its slot. Results show that electrotactile feedback improved the user's grasping in our virtual on-orbit servicing scenario. The task completion time was significantly lower for all three tasks and the precision of the user's interaction was higher. The workload reported by the participants was significantly lower when using electrotactile feedback. Additionally, users were more confident with their performance while completing the tasks with electrotactile feedback. 
We describe the device, outline the user study and report the results.",
"fno": "07504686",
"keywords": [
"Artificial Satellites",
"Haptic Interfaces",
"Human Computer Interaction",
"Virtual Reality",
"Task Completion Time",
"Virtual On Orbit Servicing Scenario",
"Module Pulling",
"Lever Switch",
"Button Pressing",
"Satellite On Orbit Servicing Missions",
"VR Simulation",
"Electrotactile Feedback Modality",
"Repeated Measure Design",
"Interaction Device Feasibility Assessment",
"Interaction Device Usability Assessment",
"Optical Finger Tracking System",
"User Arm",
"User Hand",
"Signal Generator",
"Electrodes",
"Tactor",
"Immersive Virtual Environments",
"Virtual Object Grasping",
"Virtual Assembly Simulation",
"On Orbit Servicing Mission Training",
"On Orbit Servicing Mission Planning",
"Lightweight Electrotactile Feedback Device",
"Electrodes",
"Virtual Environments",
"Grasping",
"Skin",
"Thumb",
"Voltage Control",
"B 4 2 Input Output Devices Channels And Controllers",
"H 3 4 Systems And Software Performance Evaluation",
"I 3 7 Three Dimensional Graphics And Realism Virtual Reality"
],
"authors": [
{
"affiliation": "German Aerospace Center",
"fullName": "Johannes Hummel",
"givenName": "Johannes",
"surname": "Hummel",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "German Aerospace Center",
"fullName": "Janki Dodiya",
"givenName": "Janki",
"surname": "Dodiya",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "German Aerospace Center",
"fullName": "German Aerospace Center",
"givenName": "German Aerospace",
"surname": "Center",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Kaiserslautern, Germany",
"fullName": "Laura Eckardt",
"givenName": "Laura",
"surname": "Eckardt",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "German Aerospace Center",
"fullName": "Robin Wolff",
"givenName": "Robin",
"surname": "Wolff",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "German Aerospace Center",
"fullName": "Andreas Gerndf",
"givenName": "Andreas",
"surname": "Gerndf",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Visual Computing Institute, RWTH Aachen University, Germany, and JARA - High Performance Computing",
"fullName": "Torsten W. Kuhlen",
"givenName": "Torsten W.",
"surname": "Kuhlen",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2016-03-01T00:00:00",
"pubType": "proceedings",
"pages": "39-48",
"year": "2016",
"issn": "2375-5334",
"isbn": "978-1-5090-0836-0",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07504685",
"articleId": "12OmNB1eJxj",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07504687",
"articleId": "12OmNy2Jt9E",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/wacvw/2017/4941/0/07912207",
"title": "Measuring Grasp Posture Using an Embedded Camera",
"doi": null,
"abstractUrl": "/proceedings-article/wacvw/2017/07912207/12OmNBqMDvx",
"parentPublication": {
"id": "proceedings/wacvw/2017/4941/0",
"title": "2017 IEEE Winter Applications of Computer Vision Workshops (WACVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2014/2871/0/06802075",
"title": "Design and evaluation of visual feedback for virtual grasp",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2014/06802075/12OmNzC5SMj",
"parentPublication": {
"id": "proceedings/vr/2014/2871/0",
"title": "2014 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2013/6097/0/06550202",
"title": "An evaluation of two simple methods for representing heaviness in immersive virtual environments",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2013/06550202/12OmNzVXO16",
"parentPublication": {
"id": "proceedings/3dui/2013/6097/0",
"title": "2013 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/th/2014/02/06615899",
"title": "HyVE—Hybrid Vibro-Electrotactile Stimulation—Is an Efficient Approach to Multi-Channel Sensory Feedback",
"doi": null,
"abstractUrl": "/journal/th/2014/02/06615899/13rRUwj7cpl",
"parentPublication": {
"id": "trans/th",
"title": "IEEE Transactions on Haptics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/08/08392385",
"title": "Realtime Hand-Object Interaction Using Learned Grasp Space for Virtual Environments",
"doi": null,
"abstractUrl": "/journal/tg/2019/08/08392385/13rRUx0geq3",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/06/07159098",
"title": "Design and Evaluation of Visual Interpenetration Cues in Virtual Grasping",
"doi": null,
"abstractUrl": "/journal/tg/2016/06/07159098/13rRUxBJhvx",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/th/2014/03/06822601",
"title": "Analysis of Human Grasping Behavior: Object Characteristics and Grasp Type",
"doi": null,
"abstractUrl": "/journal/th/2014/03/06822601/13rRUxBrGhb",
"parentPublication": {
"id": "trans/th",
"title": "IEEE Transactions on Haptics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798155",
"title": "Grasping objects in immersive Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798155/1cJ0SxJIrrW",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2020/5608/0/09089499",
"title": "Precise and realistic grasping and manipulation in Virtual Reality without force feedback",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2020/09089499/1jIx82saxUY",
"parentPublication": {
"id": "proceedings/vr/2020/5608/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2021/1298/0/129800a391",
"title": "A Grasp on Reality: Understanding Grasping Patterns for Object Interaction in Real and Virtual Environments",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2021/129800a391/1yeQOxGvsPK",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2021/1298/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1cI6akLvAuQ",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2019",
"__typename": "ProceedingType"
},
"article": {
"id": "1cJ0Mqz1QOI",
"doi": "10.1109/VR.2019.8798026",
"title": "Evaluation of Visual Perception Manipulation in Virtual Reality Training Environments to Improve Golf Performance",
"normalizedTitle": "Evaluation of Visual Perception Manipulation in Virtual Reality Training Environments to Improve Golf Performance",
"abstract": "In the real world, prior research in the field of perception and action has shown that individuals visually perceive objects differently based on their actual performance of an action on those objects, especially for sporting activities. For example, a golfer who performs well on putting a ball into the hole, will perceive that ball as larger (easier to hit) and the hole as larger (easier to put the ball in). We asked the following research question, can manipulation of visual perception of objects influence actual performance in the real world? Virtual objects are easily manipulated in Virtual Reality environments, therefore we investigated the use of Virtual Reality training where the properties of objects, such as size, were manipulated to influence perception on a golf putting task. In this paper, we present the results of our experimental user study. Putting performance increased after virtual reality training exposure when virtual objects were larger (perceived as easier to hit) and decreased when virtual objects were smaller (more difficult to hit). Our research has the potential to broaden the study of how virtual reality training can be used to improve sports training in a unique way.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In the real world, prior research in the field of perception and action has shown that individuals visually perceive objects differently based on their actual performance of an action on those objects, especially for sporting activities. For example, a golfer who performs well on putting a ball into the hole, will perceive that ball as larger (easier to hit) and the hole as larger (easier to put the ball in). We asked the following research question, can manipulation of visual perception of objects influence actual performance in the real world? Virtual objects are easily manipulated in Virtual Reality environments, therefore we investigated the use of Virtual Reality training where the properties of objects, such as size, were manipulated to influence perception on a golf putting task. In this paper, we present the results of our experimental user study. Putting performance increased after virtual reality training exposure when virtual objects were larger (perceived as easier to hit) and decreased when virtual objects were smaller (more difficult to hit). Our research has the potential to broaden the study of how virtual reality training can be used to improve sports training in a unique way.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In the real world, prior research in the field of perception and action has shown that individuals visually perceive objects differently based on their actual performance of an action on those objects, especially for sporting activities. For example, a golfer who performs well on putting a ball into the hole, will perceive that ball as larger (easier to hit) and the hole as larger (easier to put the ball in). We asked the following research question, can manipulation of visual perception of objects influence actual performance in the real world? Virtual objects are easily manipulated in Virtual Reality environments, therefore we investigated the use of Virtual Reality training where the properties of objects, such as size, were manipulated to influence perception on a golf putting task. In this paper, we present the results of our experimental user study. Putting performance increased after virtual reality training exposure when virtual objects were larger (perceived as easier to hit) and decreased when virtual objects were smaller (more difficult to hit). Our research has the potential to broaden the study of how virtual reality training can be used to improve sports training in a unique way.",
"fno": "08798026",
"keywords": [
"Computer Based Training",
"Sport",
"Virtual Reality",
"Visual Perception",
"Virtual Objects",
"Sports Training",
"Visual Perception Manipulation",
"Golf Putting Task",
"Putting Performance",
"Virtual Reality Training Environments",
"Golf Performance Improvement",
"Training",
"Task Analysis",
"Sports",
"Visualization",
"Virtual Environments",
"Human Computer Interaction",
"Virtual Reality Training",
"Perception And Action",
"Visual Manipulation",
"Virtual Environments",
"Sports Training",
"Change In Size Of Virtual Objects",
"Sports Performance",
"Golf Task",
"CCS Human Centered Computing Human Computer Interaction HCI Interaction Paradigms Virtual Reality",
"Human Centered Computing CCS Human Computer Interaction HCI Empirical Studies In HCI",
"CCS Software And Its Engineering Software Organization And Properties Virtual Worlds Software Virtual Worlds Training Simulation"
],
"authors": [
{
"affiliation": "University of Wyoming",
"fullName": "Anushka Godse",
"givenName": "Anushka",
"surname": "Godse",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Wyoming",
"fullName": "Rajiv Khadka",
"givenName": "Rajiv",
"surname": "Khadka",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Wyoming, Idaho National Laboratory",
"fullName": "Amy Banic",
"givenName": "Amy",
"surname": "Banic",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2019-03-01T00:00:00",
"pubType": "proceedings",
"pages": "1807-1812",
"year": "2019",
"issn": null,
"isbn": "978-1-7281-1377-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08798056",
"articleId": "1cJ0Ld02sXS",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08797962",
"articleId": "1cJ0Qo05MTm",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/computationworld/2009/3862/0/3862a460",
"title": "Depth Perception within Virtual Environments: A Comparative Study Between Wide Screen Stereoscopic Displays and Head Mounted Devices",
"doi": null,
"abstractUrl": "/proceedings-article/computationworld/2009/3862a460/12OmNwJPN1e",
"parentPublication": {
"id": "proceedings/computationworld/2009/3862/0",
"title": "Future Computing, Service Computation, Cognitive, Adaptive, Content, Patterns, Computation World",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/04/07829434",
"title": "The Martian: Examining Human Physical Judgments across Virtual Gravity Fields",
"doi": null,
"abstractUrl": "/journal/tg/2017/04/07829434/13rRUxjQyvr",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2022/9617/0/961700a711",
"title": "Kicking in Virtual Reality: The Influence of Foot Visibility on the Shooting Experience and Accuracy",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2022/961700a711/1CJc667vUJ2",
"parentPublication": {
"id": "proceedings/vr/2022/9617/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aiars/2022/5457/0/545700a078",
"title": "Virtual Shooting Action Simulation System Based on Intelligent VR Technology",
"doi": null,
"abstractUrl": "/proceedings-article/aiars/2022/545700a078/1J2XP0ZiAsE",
"parentPublication": {
"id": "proceedings/aiars/2022/5457/0",
"title": "2022 International Conference on Artificial Intelligence and Autonomous Robot Systems (AIARS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a845",
"title": "Virtual Reality Sonification Training System Can Improve a Novice's Forehand Return of Serve in Tennis",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a845/1J7Wlv8mvKM",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798056",
"title": "Physical Objects in AR Games – Offering a Tangible Experience",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798056/1cJ0Ld02sXS",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798309",
"title": "Improve the Decision-making Skill of Basketball Players by an Action-aware VR Training System",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798309/1cJ1adrlIoo",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2020/5608/0/09089620",
"title": "Virtual Big Heads: Analysis of Human Perception and Comfort of Head Scales in Social Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2020/09089620/1jIxaTvTkm4",
"parentPublication": {
"id": "proceedings/vr/2020/5608/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090452",
"title": "Detection Thresholds of Tactile Perception in Virtual Environments",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090452/1jIxnAwbrSo",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090419",
"title": "Prop-Based Egocentric and Exocentric Virtual Object Storage Techniques",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090419/1jIxnZlNJok",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1cI6akLvAuQ",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2019",
"__typename": "ProceedingType"
},
"article": {
"id": "1cJ0QSLRO6Y",
"doi": "10.1109/VR.2019.8798214",
"title": "Human Perception of a Haptic Shape-changing Interface with Variable Rigidity and Size",
"normalizedTitle": "Human Perception of a Haptic Shape-changing Interface with Variable Rigidity and Size",
"abstract": "This paper studies the characteristics of the human perception of a haptic shape-changing interface, capable of altering its size and rigidity simultaneously for presenting characteristics of virtual objects physically. The haptic interface is composed of an array of computer-controlled balloons, with two mechanisms, one for changing size and one for changing rigidity. We manufactured two balloons and conducted psychophysical experiments with twenty subjects to measure perceived sensory thresholds and haptic perception of the change of size and rigidity. The results show that subjects can correctly discriminate different conditions with an acceptable level of accuracy. Our results also suggest that the proposed system can present an ample range of rigidities and variations of the size in a way that is compatible with the human haptic perception of physical materials. Currently, shape-changing interfaces do not hold a defined position in the current VR / AR research. Our results provide basic knowledge for developing novel types of haptic interfaces that can enhance the haptic perception of virtual objects, allowing rich embodied interactions, and synchronize the virtual and the physical world through computationally-controlled materiality.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper studies the characteristics of the human perception of a haptic shape-changing interface, capable of altering its size and rigidity simultaneously for presenting characteristics of virtual objects physically. The haptic interface is composed of an array of computer-controlled balloons, with two mechanisms, one for changing size and one for changing rigidity. We manufactured two balloons and conducted psychophysical experiments with twenty subjects to measure perceived sensory thresholds and haptic perception of the change of size and rigidity. The results show that subjects can correctly discriminate different conditions with an acceptable level of accuracy. Our results also suggest that the proposed system can present an ample range of rigidities and variations of the size in a way that is compatible with the human haptic perception of physical materials. Currently, shape-changing interfaces do not hold a defined position in the current VR / AR research. Our results provide basic knowledge for developing novel types of haptic interfaces that can enhance the haptic perception of virtual objects, allowing rich embodied interactions, and synchronize the virtual and the physical world through computationally-controlled materiality.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper studies the characteristics of the human perception of a haptic shape-changing interface, capable of altering its size and rigidity simultaneously for presenting characteristics of virtual objects physically. The haptic interface is composed of an array of computer-controlled balloons, with two mechanisms, one for changing size and one for changing rigidity. We manufactured two balloons and conducted psychophysical experiments with twenty subjects to measure perceived sensory thresholds and haptic perception of the change of size and rigidity. The results show that subjects can correctly discriminate different conditions with an acceptable level of accuracy. Our results also suggest that the proposed system can present an ample range of rigidities and variations of the size in a way that is compatible with the human haptic perception of physical materials. Currently, shape-changing interfaces do not hold a defined position in the current VR / AR research. Our results provide basic knowledge for developing novel types of haptic interfaces that can enhance the haptic perception of virtual objects, allowing rich embodied interactions, and synchronize the virtual and the physical world through computationally-controlled materiality.",
"fno": "08798214",
"keywords": [
"Haptic Interfaces",
"Virtual Reality",
"Visual Perception",
"Human Perception",
"Haptic Shape Changing Interface",
"Virtual Objects",
"Haptic Interface",
"Computer Controlled Balloons",
"Human Haptic Perception",
"Shape Changing Interfaces",
"Variable Rigidity",
"Haptic Interfaces",
"Indexes",
"Virtual Reality",
"Measurement",
"Task Analysis",
"Informatics",
"Human Computer Interaction",
"Human Centered Computing — Human Computer Interaction HCI — Interaction Devices — Haptic Devices ' Treemaps",
"Human Centered Computing — Human Computer Interaction HCI — Empirical Studies In HCI"
],
"authors": [
{
"affiliation": "Empowerment Informatics, University of Tsukuba",
"fullName": "Alberto Boem",
"givenName": "Alberto",
"surname": "Boem",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Virtual Reality Lab, University of Tsukuba",
"fullName": "Yuuki Enzaki",
"givenName": "Yuuki",
"surname": "Enzaki",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Empowerment Informatics, University of Tsukuba",
"fullName": "Hiroaki Yano",
"givenName": "Hiroaki",
"surname": "Yano",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Empowerment Informatics, University of Tsukuba",
"fullName": "Hiroo Iwata",
"givenName": "Hiroo",
"surname": "Iwata",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2019-03-01T00:00:00",
"pubType": "proceedings",
"pages": "858-859",
"year": "2019",
"issn": null,
"isbn": "978-1-7281-1377-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08797679",
"articleId": "1cJ187OlfvG",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08798238",
"articleId": "1cJ0Qw94bi8",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/haptics/2006/0226/0/02260054",
"title": "Development of a High-resolution Surface Type Haptic interface for Rigidity Distribution Rendering",
"doi": null,
"abstractUrl": "/proceedings-article/haptics/2006/02260054/12OmNC8dglW",
"parentPublication": {
"id": "proceedings/haptics/2006/0226/0",
"title": "2006 14th Symposium on Haptic Interfaces for Virtual Environment and Teleoperator Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/haptics/2008/2005/0/04479917",
"title": "Visual-Haptic Perception of Compliance: Fusion of Visual and Haptic Information",
"doi": null,
"abstractUrl": "/proceedings-article/haptics/2008/04479917/12OmNvAAth8",
"parentPublication": {
"id": "proceedings/haptics/2008/2005/0",
"title": "IEEE Haptics Symposium 2008",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/haptic/2006/0226/0/01627135",
"title": "Development of a High-resolution Surface Type Haptic interface for Rigidity Distribution Rendering",
"doi": null,
"abstractUrl": "/proceedings-article/haptic/2006/01627135/12OmNxZ2Gld",
"parentPublication": {
"id": "proceedings/haptic/2006/0226/0",
"title": "Haptic Interfaces for Virtual Environment and Teleoperator Systems, International Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/th/2017/04/08012404",
"title": "Encountered-Type Haptic Interface for Representation of Shape and Rigidity of 3D Virtual Objects",
"doi": null,
"abstractUrl": "/journal/th/2017/04/08012404/13rRUwbaqV1",
"parentPublication": {
"id": "trans/th",
"title": "IEEE Transactions on Haptics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/th/2010/04/tth2010040234",
"title": "Combination and Integration in the Perception of Visual-Haptic Compliance Information",
"doi": null,
"abstractUrl": "/journal/th/2010/04/tth2010040234/13rRUwd9CGb",
"parentPublication": {
"id": "trans/th",
"title": "IEEE Transactions on Haptics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/04/07833030",
"title": "Shifty: A Weight-Shifting Dynamic Passive Haptic Proxy to Enhance Object Perception in Virtual Reality",
"doi": null,
"abstractUrl": "/journal/tg/2017/04/07833030/13rRUwgQpqL",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/11/09873974",
"title": "Action-Specific Perception & Performance on a Fitts's Law Task in Virtual Reality: The Role of Haptic Feedback",
"doi": null,
"abstractUrl": "/journal/tg/2022/11/09873974/1GjwIr0uAfu",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797809",
"title": "Peripersonal Visual-Haptic Size Estimation in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797809/1cJ0IzCt7by",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a575",
"title": "The Effect of the Virtual Object Size on Weight Perception Augmented with Pseudo-Haptic Feedback",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a575/1tnWwW9JGXC",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1cI6akLvAuQ",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2019",
"__typename": "ProceedingType"
},
"article": {
"id": "1cJ1dKraais",
"doi": "10.1109/VR.2019.8797744",
"title": "Effects of Stereoscopic Viewing and Haptic Feedback, Sensory-Motor Congruence and Calibration on Near-Field Fine Motor Perception-Action Coordination in Virtual Reality",
"normalizedTitle": "Effects of Stereoscopic Viewing and Haptic Feedback, Sensory-Motor Congruence and Calibration on Near-Field Fine Motor Perception-Action Coordination in Virtual Reality",
"abstract": "We present an empirical evaluation on how stereoscopic viewing and haptic feedback deferentially affects fine motor perception-action coordination in a pick-and-place task in Virtual Reality (VR). The factors considered were stereoscopic viewing, haptic feedback, sensory-motor congruence and mismatch, and calibration on perception-action coordination in near field fine motor task performance in VR. Quantitative measures of placement error, distance, collision, and time to complete trials were recorded and analyzed. Overall, we found that participants' manual dexterous task performance was enhanced in the presence of both stereoscopic viewing and haptic feedback. However, we found that time to complete task was greatly enhanced by the presence of haptic feedback, and economy and efficiency of movement of the end effector as well as the manipulated object was enhanced by the presence of both haptic feedback and stereoscopic viewing. Whereas, number of collisions and placement accuracy were greatly enhanced by the presence of stereoscopic viewing in near-field fine motor perception-action coordination. Our research additionally shows that mismatch in sensory-motor stimuli can detrimentally affect the number of collisions, and efficiency of end effector and object movements in near-field fine motor activities, and can be further negatively affected by the absence of haptic feedback and stereoscopic viewing. In spite of reduced cue situations in VR, and the absence or presence of stereoscopic viewing and haptic feedback, we found that participants tend to calibrate or adapt their perception-action coordination rapidly with a set of at least 5 trials.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We present an empirical evaluation on how stereoscopic viewing and haptic feedback deferentially affects fine motor perception-action coordination in a pick-and-place task in Virtual Reality (VR). The factors considered were stereoscopic viewing, haptic feedback, sensory-motor congruence and mismatch, and calibration on perception-action coordination in near field fine motor task performance in VR. Quantitative measures of placement error, distance, collision, and time to complete trials were recorded and analyzed. Overall, we found that participants' manual dexterous task performance was enhanced in the presence of both stereoscopic viewing and haptic feedback. However, we found that time to complete task was greatly enhanced by the presence of haptic feedback, and economy and efficiency of movement of the end effector as well as the manipulated object was enhanced by the presence of both haptic feedback and stereoscopic viewing. Whereas, number of collisions and placement accuracy were greatly enhanced by the presence of stereoscopic viewing in near-field fine motor perception-action coordination. Our research additionally shows that mismatch in sensory-motor stimuli can detrimentally affect the number of collisions, and efficiency of end effector and object movements in near-field fine motor activities, and can be further negatively affected by the absence of haptic feedback and stereoscopic viewing. In spite of reduced cue situations in VR, and the absence or presence of stereoscopic viewing and haptic feedback, we found that participants tend to calibrate or adapt their perception-action coordination rapidly with a set of at least 5 trials.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We present an empirical evaluation on how stereoscopic viewing and haptic feedback deferentially affects fine motor perception-action coordination in a pick-and-place task in Virtual Reality (VR). The factors considered were stereoscopic viewing, haptic feedback, sensory-motor congruence and mismatch, and calibration on perception-action coordination in near field fine motor task performance in VR. Quantitative measures of placement error, distance, collision, and time to complete trials were recorded and analyzed. Overall, we found that participants' manual dexterous task performance was enhanced in the presence of both stereoscopic viewing and haptic feedback. However, we found that time to complete task was greatly enhanced by the presence of haptic feedback, and economy and efficiency of movement of the end effector as well as the manipulated object was enhanced by the presence of both haptic feedback and stereoscopic viewing. Whereas, number of collisions and placement accuracy were greatly enhanced by the presence of stereoscopic viewing in near-field fine motor perception-action coordination. Our research additionally shows that mismatch in sensory-motor stimuli can detrimentally affect the number of collisions, and efficiency of end effector and object movements in near-field fine motor activities, and can be further negatively affected by the absence of haptic feedback and stereoscopic viewing. In spite of reduced cue situations in VR, and the absence or presence of stereoscopic viewing and haptic feedback, we found that participants tend to calibrate or adapt their perception-action coordination rapidly with a set of at least 5 trials.",
"fno": "08797744",
"keywords": [
"Feedback",
"Haptic Interfaces",
"Human Factors",
"Stereo Image Processing",
"Virtual Reality",
"Visual Perception",
"Stereoscopic Viewing",
"Sensory Motor Congruence",
"Near Field Fine Motor Perception Action Coordination",
"Virtual Reality",
"Field Fine Motor Task Performance",
"Sensory Motor Stimuli",
"Near Field Fine Motor Activities",
"Haptic Feedback",
"End Effector",
"Object Movement",
"Haptic Interfaces",
"Stereo Image Processing",
"Task Analysis",
"Surgery",
"Three Dimensional Displays",
"Training",
"Solid Modeling",
"Human Centered Computing — Visualization — Visualization Design And Evaluation Methods"
],
"authors": [
{
"affiliation": "Clemson University",
"fullName": "David Brickler",
"givenName": "David",
"surname": "Brickler",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Clemson University",
"fullName": "Matias Volonte",
"givenName": "Matias",
"surname": "Volonte",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Clemson University",
"fullName": "Jeffrey W. Bertrand",
"givenName": "Jeffrey W.",
"surname": "Bertrand",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Clemson University",
"fullName": "Andrew T. Duchowski",
"givenName": "Andrew T.",
"surname": "Duchowski",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Clemson University",
"fullName": "Sabarish V. Babu",
"givenName": "Sabarish V.",
"surname": "Babu",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2019-03-01T00:00:00",
"pubType": "proceedings",
"pages": "28-37",
"year": "2019",
"issn": null,
"isbn": "978-1-7281-1377-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08797974",
"articleId": "1cJ0NcRFX5m",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08798300",
"articleId": "1cJ0MMhliKI",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/avss/2011/0844/0/06027293",
"title": "Stereoscopic viewing facilitates the perception of crowds",
"doi": null,
"abstractUrl": "/proceedings-article/avss/2011/06027293/12OmNBEpnya",
"parentPublication": {
"id": "proceedings/avss/2011/0844/0",
"title": "2011 8th IEEE International Conference on Advanced Video and Signal Based Surveillance (AVSS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2012/1611/0/06238904",
"title": "The measurement of eyestrain caused from diverse binocular disparities, viewing time and display sizes in watching stereoscopic 3D content",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2012/06238904/12OmNqJHFuT",
"parentPublication": {
"id": "proceedings/cvprw/2012/1611/0",
"title": "2012 IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/whc/2007/2738/0/04145145",
"title": "Haptic Feedback Enhances Force Skill Learning",
"doi": null,
"abstractUrl": "/proceedings-article/whc/2007/04145145/12OmNrNh0Ci",
"parentPublication": {
"id": "proceedings/whc/2007/2738/0",
"title": "2007 2nd Joint EuroHaptics Conference and Symposium on Haptic Interfaces for Virtual Environments and Teleoperator Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/haptics/2008/2005/0/04479998",
"title": "A Haptic Interface with Motor/Brake System for Colonoscopy Simulation",
"doi": null,
"abstractUrl": "/proceedings-article/haptics/2008/04479998/12OmNscxj1I",
"parentPublication": {
"id": "proceedings/haptics/2008/2005/0",
"title": "IEEE Haptics Symposium 2008",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2016/0842/0/07460069",
"title": "A part-task haptic simulator for ophthalmic surgical training",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2016/07460069/12OmNwAKCQd",
"parentPublication": {
"id": "proceedings/3dui/2016/0842/0",
"title": "2016 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cbms/1999/0234/0/02340029",
"title": "Development of Stereoscopic-Haptic Virtual Environments",
"doi": null,
"abstractUrl": "/proceedings-article/cbms/1999/02340029/12OmNwHz090",
"parentPublication": {
"id": "proceedings/cbms/1999/0234/0",
"title": "Proceedings 12th IEEE Symposium on Computer-Based Medical Systems (Cat. No.99CB36365)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446227",
"title": "Towards Evaluating the Effects of Stereoscopic Viewing and Haptic Interaction on Perception-Action Coordination",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446227/13bd1eTtWZ2",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/th/2014/02/06701132",
"title": "Motor Learning Perspectives on Haptic Training for the Upper Extremities",
"doi": null,
"abstractUrl": "/journal/th/2014/02/06701132/13rRUNvyaf8",
"parentPublication": {
"id": "trans/th",
"title": "IEEE Transactions on Haptics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/th/2011/03/tth2011030210",
"title": "Tool Contact Acceleration Feedback for Telerobotic Surgery",
"doi": null,
"abstractUrl": "/journal/th/2011/03/tth2011030210/13rRUwcS1D6",
"parentPublication": {
"id": "trans/th",
"title": "IEEE Transactions on Haptics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/05/07118242",
"title": "Towards An Understanding of Mobile Touch Navigation in a Stereoscopic Viewing Environment for 3D Data Exploration",
"doi": null,
"abstractUrl": "/journal/tg/2016/05/07118242/13rRUxlgxOk",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1jIxhEnA8IE",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"acronym": "vrw",
"groupId": "1836626",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1jIxnAwbrSo",
"doi": "10.1109/VRW50115.2020.00156",
"title": "Detection Thresholds of Tactile Perception in Virtual Environments",
"normalizedTitle": "Detection Thresholds of Tactile Perception in Virtual Environments",
"abstract": "We investigate the detection thresholds of tactile perception in virtual environments. We estimate the absolute detection thresholds for electrovibration stimuli under different parameters of excitation signals (waveform and frequency). Experimental results and subjective comments suggest that participants’ tactile sensitivities to electrovibration stimuli are decreased in virtual environments. The frequency significantly affects the absolute detection thresholds, and lower voltages can be perceived under 60Hz for the square wave than the sinusoidal wave. Our findings provide a foundation to future studies aiming at modelling and simulating desired tactile perception of objects in virtual reality.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We investigate the detection thresholds of tactile perception in virtual environments. We estimate the absolute detection thresholds for electrovibration stimuli under different parameters of excitation signals (waveform and frequency). Experimental results and subjective comments suggest that participants’ tactile sensitivities to electrovibration stimuli are decreased in virtual environments. The frequency significantly affects the absolute detection thresholds, and lower voltages can be perceived under 60Hz for the square wave than the sinusoidal wave. Our findings provide a foundation to future studies aiming at modelling and simulating desired tactile perception of objects in virtual reality.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We investigate the detection thresholds of tactile perception in virtual environments. We estimate the absolute detection thresholds for electrovibration stimuli under different parameters of excitation signals (waveform and frequency). Experimental results and subjective comments suggest that participants’ tactile sensitivities to electrovibration stimuli are decreased in virtual environments. The frequency significantly affects the absolute detection thresholds, and lower voltages can be perceived under 60Hz for the square wave than the sinusoidal wave. Our findings provide a foundation to future studies aiming at modelling and simulating desired tactile perception of objects in virtual reality.",
"fno": "09090452",
"keywords": [
"Virtual Environments",
"Haptic Interfaces",
"Sensitivity",
"Tactile Sensors",
"Human Computer Interaction",
"Virtual Reality",
"Human Centered Computing",
"Human Computer Interaction HCI",
"Interaction Paradigms",
"Virtual Reality",
"Human Centered Computing",
"Human Computer Interaction HCI",
"Interaction Devices",
"Haptic Devices"
],
"authors": [
{
"affiliation": "Beijing Institute of Technology,Beijing Engineering Research Center of Mixed Reality and Advanced Display, School of Optics and Photonics",
"fullName": "Lu Zhao",
"givenName": "Lu",
"surname": "Zhao",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Beijing Institute of Technology,Beijing Engineering Research Center of Mixed Reality and Advanced Display, School of Optics and Photonics",
"fullName": "Yue Liu",
"givenName": "Yue",
"surname": "Liu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Beijing Institute of Technology,Beijing Engineering Research Center of Mixed Reality and Advanced Display, School of Optics and Photonics",
"fullName": "Dejiang Ye",
"givenName": "Dejiang",
"surname": "Ye",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Beijing Institute of Technology,Beijing Engineering Research Center of Mixed Reality and Advanced Display, School of Optics and Photonics",
"fullName": "Zhuoluo Ma",
"givenName": "Zhuoluo",
"surname": "Ma",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vrw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-03-01T00:00:00",
"pubType": "proceedings",
"pages": "610-611",
"year": "2020",
"issn": null,
"isbn": "978-1-7281-6532-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "09090575",
"articleId": "1jIxjTlmHOU",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09090605",
"articleId": "1jIxlXjkMuc",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2014/2871/0/06802106",
"title": "An ungrounded tactile feedback device to portray force and torque-like interactions in virtual environments",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2014/06802106/12OmNAg7jYE",
"parentPublication": {
"id": "proceedings/vr/2014/2871/0",
"title": "2014 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/svr/2014/4261/0/4261a230",
"title": "Tactile Interface for Navigation in Underground Mines",
"doi": null,
"abstractUrl": "/proceedings-article/svr/2014/4261a230/12OmNqBtiLI",
"parentPublication": {
"id": "proceedings/svr/2014/4261/0",
"title": "2014 XVI Symposium on Virtual and Augmented Reality (SVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2011/0039/0/05759481",
"title": "Design of a tactile display to support materials perception in virtual environments",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2011/05759481/12OmNs59JKh",
"parentPublication": {
"id": "proceedings/vr/2011/0039/0",
"title": "2011 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ism/2009/3890/0/3890a425",
"title": "Touchable Video and Tactile Audio",
"doi": null,
"abstractUrl": "/proceedings-article/ism/2009/3890a425/12OmNxTmHIC",
"parentPublication": {
"id": "proceedings/ism/2009/3890/0",
"title": "2009 11th IEEE International Symposium on Multimedia",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2010/6237/0/05444791",
"title": "Influence of tactile feedback and presence on egocentric distance perception in virtual environments",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2010/05444791/12OmNyoAA64",
"parentPublication": {
"id": "proceedings/vr/2010/6237/0",
"title": "2010 IEEE Virtual Reality Conference (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446569",
"title": "Spatial Asynchronous Visuo-Tactile Stimuli Influence Ownership of Virtual Wings",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446569/13bd1ftOBCI",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/th/2016/04/07476866",
"title": "Vibrotactile Force Perception – Absolute and Differential Thresholds and External Influences",
"doi": null,
"abstractUrl": "/journal/th/2016/04/07476866/13rRUxAASTl",
"parentPublication": {
"id": "trans/th",
"title": "IEEE Transactions on Haptics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a588",
"title": "Effects of Tactile Feedback on Conceptual Understanding of Electromagnetism in a Virtual Reality Experience",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a588/1J7W7VSYX84",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798036",
"title": "Vibro-tactile Feedback for Real-world Awareness in Immersive Virtual Environments",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798036/1cJ15HGOeqc",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/05/09382899",
"title": "Tactile Perceptual Thresholds of Electrovibration in VR",
"doi": null,
"abstractUrl": "/journal/tg/2021/05/09382899/1saZD2BiBMc",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNyQYtf2",
"title": "2017 International Conference on 3D Vision (3DV)",
"acronym": "3dv",
"groupId": "1800494",
"volume": "0",
"displayVolume": "0",
"year": "2017",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNAqCtOp",
"doi": "10.1109/3DV.2017.00029",
"title": "Fast Incremental Bundle Adjustment with Covariance Recovery",
"normalizedTitle": "Fast Incremental Bundle Adjustment with Covariance Recovery",
"abstract": "Efficient algorithms exist to obtain a sparse 3D representation of the environment. Bundle adjustment (BA) and structure from motion (SFM) are techniques used to estimate both the camera poses and the set of sparse points in the environment. Many applications require such reconstruction to be performed online, while acquiring the data, and produce an updated result every step. Furthermore, using active feedback about the quality of the reconstruction can help selecting the best views to increase the accuracy as well as to maintain a reasonable size of the collected data. This paper provides novel and efficient solutions to solving the associated NLS incrementally, and to compute not only the optimal solution, but also the associated uncertainty. The proposed technique highly increases the efficiency of the incremental BA solver for long camera trajectory applications, and provides extremely fast covariance recovery.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Efficient algorithms exist to obtain a sparse 3D representation of the environment. Bundle adjustment (BA) and structure from motion (SFM) are techniques used to estimate both the camera poses and the set of sparse points in the environment. Many applications require such reconstruction to be performed online, while acquiring the data, and produce an updated result every step. Furthermore, using active feedback about the quality of the reconstruction can help selecting the best views to increase the accuracy as well as to maintain a reasonable size of the collected data. This paper provides novel and efficient solutions to solving the associated NLS incrementally, and to compute not only the optimal solution, but also the associated uncertainty. The proposed technique highly increases the efficiency of the incremental BA solver for long camera trajectory applications, and provides extremely fast covariance recovery.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Efficient algorithms exist to obtain a sparse 3D representation of the environment. Bundle adjustment (BA) and structure from motion (SFM) are techniques used to estimate both the camera poses and the set of sparse points in the environment. Many applications require such reconstruction to be performed online, while acquiring the data, and produce an updated result every step. Furthermore, using active feedback about the quality of the reconstruction can help selecting the best views to increase the accuracy as well as to maintain a reasonable size of the collected data. This paper provides novel and efficient solutions to solving the associated NLS incrementally, and to compute not only the optimal solution, but also the associated uncertainty. The proposed technique highly increases the efficiency of the incremental BA solver for long camera trajectory applications, and provides extremely fast covariance recovery.",
"fno": "261001a175",
"keywords": [
"Cameras",
"Computational Complexity",
"Covariance Matrices",
"Image Reconstruction",
"Image Representation",
"Least Squares Approximations",
"Pose Estimation",
"Fast Incremental Bundle Adjustment",
"Sparse 3 D Representation",
"SFM",
"Sparse Points",
"Active Feedback",
"Optimal Solution",
"Incremental BA Solver",
"Long Camera Trajectory Applications",
"Extremely Fast Covariance Recovery",
"Structure From Motion",
"NLS",
"Covariance Recovery",
"Cameras",
"Uncertainty",
"Simultaneous Localization And Mapping",
"Bundle Adjustment",
"Sparse Matrices",
"Noise Measurement",
"Structure From Motion",
"Bundle Adjustment",
"Incremental Solvers",
"Schur Complement"
],
"authors": [
{
"affiliation": null,
"fullName": "Viorela Ila",
"givenName": "Viorela",
"surname": "Ila",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Lukas Polok",
"givenName": "Lukas",
"surname": "Polok",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Marek Solony",
"givenName": "Marek",
"surname": "Solony",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Klemen Istenic",
"givenName": "Klemen",
"surname": "Istenic",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "3dv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2017-10-01T00:00:00",
"pubType": "proceedings",
"pages": "175-184",
"year": "2017",
"issn": "2475-7888",
"isbn": "978-1-5386-2610-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "261001a165",
"articleId": "12OmNzTH0VQ",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "261001a185",
"articleId": "12OmNzmtWvT",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cvpr/2012/1226/0/181P2A31",
"title": "Rolling shutter bundle adjustment",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2012/181P2A31/12OmNAsk4zp",
"parentPublication": {
"id": "proceedings/cvpr/2012/1226/0",
"title": "2012 IEEE Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icris/2018/6580/0/658001a048",
"title": "The Comparison between FTF-VO and MF-VO for High Accuracy Mobile Robot Localization",
"doi": null,
"abstractUrl": "/proceedings-article/icris/2018/658001a048/12OmNz2kqeN",
"parentPublication": {
"id": "proceedings/icris/2018/6580/0",
"title": "2018 International Conference on Robots & Intelligent System (ICRIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200g230",
"title": "Learning to Bundle-adjust: A Graph Network Approach to Faster Optimization of Bundle Adjustment for Vehicular SLAM",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200g230/1BmEFEemuFG",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/11/09873988",
"title": "CoLi-BA: Compact Linearization based Solver for Bundle Adjustment",
"doi": null,
"abstractUrl": "/journal/tg/2022/11/09873988/1GjwJTUXo6A",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fccm/2019/1131/0/113100a100",
"title": "π-BA: Bundle Adjustment Acceleration on Embedded FPGAs with Co-observation Optimization",
"doi": null,
"abstractUrl": "/proceedings-article/fccm/2019/113100a100/1aPv1CATXTW",
"parentPublication": {
"id": "proceedings/fccm/2019/1131/0",
"title": "2019 IEEE 27th Annual International Symposium on Field-Programmable Custom Computing Machines (FCCM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tc/2020/07/09055175",
"title": "<inline-formula><tex-math notation=\"LaTeX\">Z_$\\pi$_Z</tex-math></inline-formula>-BA: Bundle Adjustment Hardware Accelerator Based on Distribution of 3D-Point Observations",
"doi": null,
"abstractUrl": "/journal/tc/2020/07/09055175/1iHrnARikhi",
"parentPublication": {
"id": "trans/tc",
"title": "IEEE Transactions on Computers",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2020/6553/0/09093265",
"title": "Two-Grid Preconditioned Solver for Bundle Adjustment",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2020/09093265/1jPbpxzpGdG",
"parentPublication": {
"id": "proceedings/wacv/2020/6553/0",
"title": "2020 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2020/8508/0/850800a136",
"title": "An Efficient Planar Bundle Adjustment Algorithm",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2020/850800a136/1pysxyD7bDG",
"parentPublication": {
"id": "proceedings/ismar/2020/8508/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2020/8128/0/812800a140",
"title": "Distributed Photometric Bundle Adjustment",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2020/812800a140/1qyxmS4qxIA",
"parentPublication": {
"id": "proceedings/3dv/2020/8128/0",
"title": "2020 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900l1718",
"title": "Square Root Bundle Adjustment for Large-Scale Reconstruction",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900l1718/1yeI4knbTmU",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNwdbV00",
"title": "2012 IEEE Conference on Computer Vision and Pattern Recognition",
"acronym": "cvpr",
"groupId": "1000147",
"volume": "0",
"displayVolume": "0",
"year": "2012",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNAsk4zp",
"doi": "10.1109/CVPR.2012.6247831",
"title": "Rolling shutter bundle adjustment",
"normalizedTitle": "Rolling shutter bundle adjustment",
"abstract": "This paper introduces a bundle adjustment (BA) method that obtains accurate structure and motion from rolling shutter (RS) video sequences: RSBA. When a classical BA algorithm processes a rolling shutter video, the resultant camera trajectory is brittle, and complete failures are not uncommon. We exploit the temporal continuity of the camera motion to define residuals of image point trajectories with respect to the camera trajectory. We compare the camera trajectories from RSBA to those from classical BA, and from classical BA on rectified videos. The comparisons are done on real video sequences from an iPhone 4, with ground truth obtained from a global shutter camera, rigidly mounted to the iPhone 4. Compared to classical BA, the rolling shutter model requires just six extra parameters. It also degrades the sparsity of the system Jacobian slightly, but as we demonstrate, the increase in computation time is moderate. Decisive advantages are that RSBA succeeds in cases where competing methods diverge, and consistently produces more accurate results.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper introduces a bundle adjustment (BA) method that obtains accurate structure and motion from rolling shutter (RS) video sequences: RSBA. When a classical BA algorithm processes a rolling shutter video, the resultant camera trajectory is brittle, and complete failures are not uncommon. We exploit the temporal continuity of the camera motion to define residuals of image point trajectories with respect to the camera trajectory. We compare the camera trajectories from RSBA to those from classical BA, and from classical BA on rectified videos. The comparisons are done on real video sequences from an iPhone 4, with ground truth obtained from a global shutter camera, rigidly mounted to the iPhone 4. Compared to classical BA, the rolling shutter model requires just six extra parameters. It also degrades the sparsity of the system Jacobian slightly, but as we demonstrate, the increase in computation time is moderate. Decisive advantages are that RSBA succeeds in cases where competing methods diverge, and consistently produces more accurate results.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper introduces a bundle adjustment (BA) method that obtains accurate structure and motion from rolling shutter (RS) video sequences: RSBA. When a classical BA algorithm processes a rolling shutter video, the resultant camera trajectory is brittle, and complete failures are not uncommon. We exploit the temporal continuity of the camera motion to define residuals of image point trajectories with respect to the camera trajectory. We compare the camera trajectories from RSBA to those from classical BA, and from classical BA on rectified videos. The comparisons are done on real video sequences from an iPhone 4, with ground truth obtained from a global shutter camera, rigidly mounted to the iPhone 4. Compared to classical BA, the rolling shutter model requires just six extra parameters. It also degrades the sparsity of the system Jacobian slightly, but as we demonstrate, the increase in computation time is moderate. Decisive advantages are that RSBA succeeds in cases where competing methods diverge, and consistently produces more accurate results.",
"fno": "181P2A31",
"keywords": [
"Video Signal Processing",
"Cameras",
"Image Sequences",
"Jacobian System",
"Rolling Shutter Bundle Adjustment",
"Bundle Adjustment Method",
"RSBA",
"Temporal Continuity",
"Image Point Trajectories",
"Rectified Videos",
"I Phone 4",
"Global Shutter Camera",
"Cameras",
"Jacobian Matrices",
"Barium",
"Trajectory",
"Indexes",
"Solid Modeling",
"Geometry"
],
"authors": [
{
"affiliation": "Dept. of Electr. Eng., Linkoping Univ., Linkoping, Sweden",
"fullName": "E. Ringaby",
"givenName": "E.",
"surname": "Ringaby",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Dept. of Electr. Eng., Linkoping Univ., Linkoping, Sweden",
"fullName": "M. Felsberg",
"givenName": "M.",
"surname": "Felsberg",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Dept. of Electr. Eng., Linkoping Univ., Linkoping, Sweden",
"fullName": "J. Hedborg",
"givenName": "J.",
"surname": "Hedborg",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Dept. of Electr. Eng., Linkoping Univ., Linkoping, Sweden",
"fullName": "P-E Forssen",
"givenName": "P-E",
"surname": "Forssen",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2012-06-01T00:00:00",
"pubType": "proceedings",
"pages": "1434-1441",
"year": "2012",
"issn": "1063-6919",
"isbn": "978-1-4673-1226-4",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "180P2A30",
"articleId": "12OmNx7ouTk",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "182P2A32",
"articleId": "12OmNyrZLAd",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cvpr/2015/6964/0/07298760",
"title": "Rolling shutter motion deblurring",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2015/07298760/12OmNAkWvdx",
"parentPublication": {
"id": "proceedings/cvpr/2015/6964/0",
"title": "2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2017/2610/0/261001a175",
"title": "Fast Incremental Bundle Adjustment with Covariance Recovery",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2017/261001a175/12OmNAqCtOp",
"parentPublication": {
"id": "proceedings/3dv/2017/2610/0",
"title": "2017 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2013/2840/0/2840a465",
"title": "Rolling Shutter Stereo",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2013/2840a465/12OmNqFrGvu",
"parentPublication": {
"id": "proceedings/iccv/2013/2840/0",
"title": "2013 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2016/8851/0/8851d337",
"title": "Sparse to Dense 3D Reconstruction from Rolling Shutter Images",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2016/8851d337/12OmNvAAtiF",
"parentPublication": {
"id": "proceedings/cvpr/2016/8851/0",
"title": "2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2018/4886/0/488601a903",
"title": "Minimal Solvers for Monocular Rolling Shutter Compensation Under Ackermann Motion",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2018/488601a903/12OmNwDACeB",
"parentPublication": {
"id": "proceedings/wacv/2018/4886/0",
"title": "2018 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2019/04/08325527",
"title": "Accurate 3D Reconstruction from Small Motion Clip for Rolling Shutter Cameras",
"doi": null,
"abstractUrl": "/journal/tp/2019/04/08325527/13rRUxlgxXO",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2020/06/08621045",
"title": "Rolling Shutter Camera Absolute Pose",
"doi": null,
"abstractUrl": "/journal/tp/2020/06/08621045/17D45XERmmI",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200e521",
"title": "SUNet: Symmetric Undistortion Network for Rolling Shutter Correction",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200e521/1BmL5Z4I1wY",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2020/6553/0/09093265",
"title": "Two-Grid Preconditioned Solver for Bundle Adjustment",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2020/09093265/1jPbpxzpGdG",
"parentPublication": {
"id": "proceedings/wacv/2020/6553/0",
"title": "2020 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2020/8508/0/850800a136",
"title": "An Efficient Planar Bundle Adjustment Algorithm",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2020/850800a136/1pysxyD7bDG",
"parentPublication": {
"id": "proceedings/ismar/2020/8508/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNy9Prj1",
"title": "2017 IEEE International Conference on Computer Vision Workshop (ICCVW)",
"acronym": "iccvw",
"groupId": "1800041",
"volume": "0",
"displayVolume": "0",
"year": "2017",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNrJiCX9",
"doi": "10.1109/ICCVW.2017.72",
"title": "A Factorization Approach for Enabling Structure-from-Motion/SLAM Using Integer Arithmetic",
"normalizedTitle": "A Factorization Approach for Enabling Structure-from-Motion/SLAM Using Integer Arithmetic",
"abstract": "SLAM and SfM algorithms typically involve minimization of a cost-function by non-linear least-squares methods. The matrices involved are typically very poorly conditioned, making the procedure sensitive to numerical precision effects. Ensuring accuracy therefore entails the use of high-precision floating-point data-types for representation and compute. In this paper, a square-root filtering approach to EKF-based SfM is presented and is shown to be capable of operating with lower-precision arithmetic than the EKF, while sacrificing only a little in accuracy. Specifically, we demonstrate a prototype that is capable of operating with integer arithmetic rather than floating-point - the first such implementation to the best of our knowledge. This is important given the increasing need to implement advanced vision-based capabilities on low-power embedded and mobile processors, some of which might not even support floating-point arithmetic for reasons of cost and power. Furthermore, an evaluation of the computational complexity shows that the proposed approach typically requires fewer computations than the EKF in practice, resulting in an algorithm that is both numerically more robust and computationally less intensive.",
"abstracts": [
{
"abstractType": "Regular",
"content": "SLAM and SfM algorithms typically involve minimization of a cost-function by non-linear least-squares methods. The matrices involved are typically very poorly conditioned, making the procedure sensitive to numerical precision effects. Ensuring accuracy therefore entails the use of high-precision floating-point data-types for representation and compute. In this paper, a square-root filtering approach to EKF-based SfM is presented and is shown to be capable of operating with lower-precision arithmetic than the EKF, while sacrificing only a little in accuracy. Specifically, we demonstrate a prototype that is capable of operating with integer arithmetic rather than floating-point - the first such implementation to the best of our knowledge. This is important given the increasing need to implement advanced vision-based capabilities on low-power embedded and mobile processors, some of which might not even support floating-point arithmetic for reasons of cost and power. Furthermore, an evaluation of the computational complexity shows that the proposed approach typically requires fewer computations than the EKF in practice, resulting in an algorithm that is both numerically more robust and computationally less intensive.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "SLAM and SfM algorithms typically involve minimization of a cost-function by non-linear least-squares methods. The matrices involved are typically very poorly conditioned, making the procedure sensitive to numerical precision effects. Ensuring accuracy therefore entails the use of high-precision floating-point data-types for representation and compute. In this paper, a square-root filtering approach to EKF-based SfM is presented and is shown to be capable of operating with lower-precision arithmetic than the EKF, while sacrificing only a little in accuracy. Specifically, we demonstrate a prototype that is capable of operating with integer arithmetic rather than floating-point - the first such implementation to the best of our knowledge. This is important given the increasing need to implement advanced vision-based capabilities on low-power embedded and mobile processors, some of which might not even support floating-point arithmetic for reasons of cost and power. Furthermore, an evaluation of the computational complexity shows that the proposed approach typically requires fewer computations than the EKF in practice, resulting in an algorithm that is both numerically more robust and computationally less intensive.",
"fno": "1034a554",
"keywords": [
"Covariance Matrices",
"Simultaneous Localization And Mapping",
"Jacobian Matrices",
"Real Time Systems",
"Kalman Filters",
"Mobile Communication",
"Program Processors"
],
"authors": [
{
"affiliation": null,
"fullName": "Nilesh A. Ahuja",
"givenName": "Nilesh A.",
"surname": "Ahuja",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Mahesh Subedar",
"givenName": "Mahesh",
"surname": "Subedar",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Omesh Tickoo",
"givenName": "Omesh",
"surname": "Tickoo",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Yeongseon Lee",
"givenName": "Yeongseon",
"surname": "Lee",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iccvw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2017-10-01T00:00:00",
"pubType": "proceedings",
"pages": "554-562",
"year": "2017",
"issn": "2473-9944",
"isbn": "978-1-5386-1034-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "1034a545",
"articleId": "12OmNzmLxOI",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "1034a563",
"articleId": "12OmNzcPAC1",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/case/2012/0430/0/06386480",
"title": "Fast randomized planner for SLAM automation",
"doi": null,
"abstractUrl": "/proceedings-article/case/2012/06386480/12OmNqzu6R9",
"parentPublication": {
"id": "proceedings/case/2012/0430/0",
"title": "2012 IEEE International Conference on Automation Science and Engineering (CASE 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/crv/2015/1986/0/1986a023",
"title": "The Battle for Filter Supremacy: A Comparative Study of the Multi-State Constraint Kalman Filter and the Sliding Window Filter",
"doi": null,
"abstractUrl": "/proceedings-article/crv/2015/1986a023/12OmNrNh0Ep",
"parentPublication": {
"id": "proceedings/crv/2015/1986/0",
"title": "2015 12th Conference on Computer and Robot Vision (CRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ci/2013/3194/0/06855911",
"title": "Multilayer Perceptron Use in a Mapping Task by Cooperating Robots",
"doi": null,
"abstractUrl": "/proceedings-article/ci/2013/06855911/12OmNvy256d",
"parentPublication": {
"id": "proceedings/ci/2013/3194/0",
"title": "2013 BRICS Congress on Computational Intelligence & 11th Brazilian Congress on Computational Intelligence (BRICS-CCI & CBIC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icseng/2017/0610/0/0610a057",
"title": "Performance Comparison of Extended Kalman Filter and Unscented Kalman Filter for the Control Moment Gyroscope Inverted Pendulum",
"doi": null,
"abstractUrl": "/proceedings-article/icseng/2017/0610a057/12OmNyQ7FJH",
"parentPublication": {
"id": "proceedings/icseng/2017/0610/0",
"title": "2017 25th International Conference on Systems Engineering (ICSEng)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2011/0063/0/06130262",
"title": "Adapting a real-time monocular visual SLAM from conventional to omnidirectional cameras",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2011/06130262/12OmNzVoBv6",
"parentPublication": {
"id": "proceedings/iccvw/2011/0063/0",
"title": "2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/crv/2017/2818/0/2818a321",
"title": "Developing a Cubature Multi-state Constraint Kalman Filter for Visual-Inertial Navigation System",
"doi": null,
"abstractUrl": "/proceedings-article/crv/2017/2818a321/12OmNzhnaa2",
"parentPublication": {
"id": "proceedings/crv/2017/2818/0",
"title": "2017 14th Conference on Computer and Robot Vision (CRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/inciscos/2018/7612/0/761200a049",
"title": "Robust MPC Tuning by Quadratic Weights Online Estimation of the Cost Function through Extended Kalman Filter",
"doi": null,
"abstractUrl": "/proceedings-article/inciscos/2018/761200a049/17D45Xh13rl",
"parentPublication": {
"id": "proceedings/inciscos/2018/7612/0",
"title": "2018 International Conference on Information Systems and Computer Science (INCISCOS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/td/2022/11/09709160",
"title": "NITI: Training Integer Neural Networks Using Integer-Only Arithmetic",
"doi": null,
"abstractUrl": "/journal/td/2022/11/09709160/1AR0ut9OJqw",
"parentPublication": {
"id": "trans/td",
"title": "IEEE Transactions on Parallel & Distributed Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200h808",
"title": "Integer-arithmetic-only Certified Robustness for Quantized Neural Networks",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200h808/1BmJM3AA9HO",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/11/09873988",
"title": "CoLi-BA: Compact Linearization based Solver for Bundle Adjustment",
"doi": null,
"abstractUrl": "/journal/tg/2022/11/09873988/1GjwJTUXo6A",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "17D45VtKirt",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"acronym": "cvpr",
"groupId": "1000147",
"volume": "0",
"displayVolume": "0",
"year": "2018",
"__typename": "ProceedingType"
},
"article": {
"id": "17D45XH89pm",
"doi": "10.1109/CVPR.2018.00211",
"title": "ICE-BA: Incremental, Consistent and Efficient Bundle Adjustment for Visual-Inertial SLAM",
"normalizedTitle": "ICE-BA: Incremental, Consistent and Efficient Bundle Adjustment for Visual-Inertial SLAM",
"abstract": "Modern visual-inertial SLAM (VI-SLAM) achieves higher accuracy and robustness than pure visual SLAM, thanks to the complementariness of visual features and inertial measurements. However, jointly using visual and inertial measurements to optimize SLAM objective functions is a problem of high computational complexity. In many VI-SLAM applications, the conventional optimization solvers can only use a very limited number of recent measurements for real time pose estimation, at the cost of suboptimal localization accuracy. In this work, we renovate the numerical solver for VI-SLAM. Compared to conventional solvers, our proposal provides an exact solution with significantly higher computational efficiency. Our solver allows us to use remarkably larger number of measurements to achieve higher accuracy and robustness. Furthermore, our method resolves the global consistency problem that is unaddressed by many state-of-the-art SLAM systems: to guarantee the minimization of re-projection function and inertial constraint function during loop closure. Experiments demonstrate our novel formulation renders lower localization error and more than 10x speedup compared to alternatives. We release the source code of our implementation to benefit the community1.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Modern visual-inertial SLAM (VI-SLAM) achieves higher accuracy and robustness than pure visual SLAM, thanks to the complementariness of visual features and inertial measurements. However, jointly using visual and inertial measurements to optimize SLAM objective functions is a problem of high computational complexity. In many VI-SLAM applications, the conventional optimization solvers can only use a very limited number of recent measurements for real time pose estimation, at the cost of suboptimal localization accuracy. In this work, we renovate the numerical solver for VI-SLAM. Compared to conventional solvers, our proposal provides an exact solution with significantly higher computational efficiency. Our solver allows us to use remarkably larger number of measurements to achieve higher accuracy and robustness. Furthermore, our method resolves the global consistency problem that is unaddressed by many state-of-the-art SLAM systems: to guarantee the minimization of re-projection function and inertial constraint function during loop closure. Experiments demonstrate our novel formulation renders lower localization error and more than 10x speedup compared to alternatives. We release the source code of our implementation to benefit the community1.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Modern visual-inertial SLAM (VI-SLAM) achieves higher accuracy and robustness than pure visual SLAM, thanks to the complementariness of visual features and inertial measurements. However, jointly using visual and inertial measurements to optimize SLAM objective functions is a problem of high computational complexity. In many VI-SLAM applications, the conventional optimization solvers can only use a very limited number of recent measurements for real time pose estimation, at the cost of suboptimal localization accuracy. In this work, we renovate the numerical solver for VI-SLAM. Compared to conventional solvers, our proposal provides an exact solution with significantly higher computational efficiency. Our solver allows us to use remarkably larger number of measurements to achieve higher accuracy and robustness. Furthermore, our method resolves the global consistency problem that is unaddressed by many state-of-the-art SLAM systems: to guarantee the minimization of re-projection function and inertial constraint function during loop closure. Experiments demonstrate our novel formulation renders lower localization error and more than 10x speedup compared to alternatives. We release the source code of our implementation to benefit the community1.",
"fno": "642000b974",
"keywords": [
"Computational Complexity",
"Mobile Robots",
"Optimisation",
"Pose Estimation",
"Robot Vision",
"SLAM Robots",
"Higher Computational Efficiency",
"Remarkably Larger Number",
"Global Consistency Problem",
"State Of The Art SLAM Systems",
"Inertial Constraint Function",
"Modern Visual Inertial SLAM",
"Pure Visual SLAM",
"Visual Features",
"Inertial Measurements",
"SLAM Objective Functions",
"High Computational Complexity",
"VI SLAM Applications",
"Conventional Optimization Solvers",
"Recent Measurements",
"Suboptimal Localization Accuracy",
"Numerical Solver",
"Simultaneous Localization And Mapping",
"Visualization",
"Optimization",
"Task Analysis",
"Microsoft Windows",
"Time Measurement",
"Cameras"
],
"authors": [
{
"affiliation": null,
"fullName": "Haomin Liu",
"givenName": "Haomin",
"surname": "Liu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Mingyu Chen",
"givenName": "Mingyu",
"surname": "Chen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Guofeng Zhang",
"givenName": "Guofeng",
"surname": "Zhang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Hujun Bao",
"givenName": "Hujun",
"surname": "Bao",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Yingze Bao",
"givenName": "Yingze",
"surname": "Bao",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2018-06-01T00:00:00",
"pubType": "proceedings",
"pages": "1974-1982",
"year": "2018",
"issn": null,
"isbn": "978-1-5386-6420-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "642000b965",
"articleId": "17D45VObpQ4",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "642000b983",
"articleId": "17D45VVho4Q",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icarsc/2016/2255/0/07781977",
"title": "Indoor SLAM for Micro Aerial Vehicles Control Using Monocular Camera and Sensor Fusion",
"doi": null,
"abstractUrl": "/proceedings-article/icarsc/2016/07781977/12OmNCfjesr",
"parentPublication": {
"id": "proceedings/icarsc/2016/2255/0",
"title": "2016 International Conference on Autonomous Robot Systems and Competitions (ICARSC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/crv/2016/2491/0/2491a456",
"title": "Texture-Aware SLAM Using Stereo Imagery and Inertial Information",
"doi": null,
"abstractUrl": "/proceedings-article/crv/2016/2491a456/12OmNx1IwdI",
"parentPublication": {
"id": "proceedings/crv/2016/2491/0",
"title": "2016 13th Conference on Computer and Robot Vision (CRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2015/9711/0/5720a148",
"title": "Fusion of Inertial and Visual Measurements for RGB-D SLAM on Mobile Devices",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2015/5720a148/12OmNy7h36Q",
"parentPublication": {
"id": "proceedings/iccvw/2015/9711/0",
"title": "2015 IEEE International Conference on Computer Vision Workshop (ICCVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2018/7459/0/745900a037",
"title": "Visual-Inertial SLAM Initialization: A General Linear Formulation and a Gravity-Observing Non-Linear Optimization",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2018/745900a037/17D45XuDNEt",
"parentPublication": {
"id": "proceedings/ismar/2018/7459/0",
"title": "2018 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2019/1975/0/197500a210",
"title": "A Comparative Analysis of Visual-Inertial SLAM for Assisted Wayfinding of the Visually Impaired",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2019/197500a210/18j8P4rWFdm",
"parentPublication": {
"id": "proceedings/wacv/2019/1975/0",
"title": "2019 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2019/1975/0/197500a031",
"title": "EGO-SLAM: A Robust Monocular SLAM for Egocentric Videos",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2019/197500a031/18j8QSyEfja",
"parentPublication": {
"id": "proceedings/wacv/2019/1975/0",
"title": "2019 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2021/8808/0/09412466",
"title": "AV-SLAM: Autonomous Vehicle SLAM with Gravity Direction Initialization",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2021/09412466/1tmj8Cfx70Y",
"parentPublication": {
"id": "proceedings/icpr/2021/8808/0",
"title": "2020 25th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2021/0191/0/019100c559",
"title": "DC-VINS: Dynamic Camera Visual Inertial Navigation System with Online Calibration",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2021/019100c559/1yNi2d28Ej6",
"parentPublication": {
"id": "proceedings/iccvw/2021/0191/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision Workshops (ICCVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2021/4899/0/489900c066",
"title": "Visual SLAM for Asteroid Relative Navigation",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2021/489900c066/1yVA0JPsfJK",
"parentPublication": {
"id": "proceedings/cvprw/2021/4899/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2021/1298/0/129800a171",
"title": "COVINS: Visual-Inertial SLAM for Centralized Collaboration",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2021/129800a171/1yeQzCgd2GQ",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2021/1298/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1BmEezmpGrm",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"acronym": "iccv",
"groupId": "1000149",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1BmEFEemuFG",
"doi": "10.1109/ICCV48922.2021.00619",
"title": "Learning to Bundle-adjust: A Graph Network Approach to Faster Optimization of Bundle Adjustment for Vehicular SLAM",
"normalizedTitle": "Learning to Bundle-adjust: A Graph Network Approach to Faster Optimization of Bundle Adjustment for Vehicular SLAM",
"abstract": "Bundle adjustment (BA) occupies a large portion of the execution time of SfM and visual SLAM. Local BA over the latest several keyframes plays a crucial role in visual SLAM. Its execution time should be sufficiently short for robust tracking; this is especially critical for embedded systems with a limited computational resource. This study proposes a learning-based bundle adjuster using a graph network. It works faster and can be used instead of conventional optimization-based BA. The graph network operates on a graph consisting of the nodes of keyframes and landmarks and the edges representing the landmarks’ visibility. The graph network receives the parameters’ initial values as inputs and predicts their updates to the optimal values. It internally uses an intermediate representation of inputs which we design inspired by the normal equation of the Levenberg-Marquardt method. It is trained using the sum of reprojection errors as a loss function. The experiments show that the proposed method outputs parameter estimates with slightly inferior accuracy in 1/60–1/10 of time compared with the conventional BA.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Bundle adjustment (BA) occupies a large portion of the execution time of SfM and visual SLAM. Local BA over the latest several keyframes plays a crucial role in visual SLAM. Its execution time should be sufficiently short for robust tracking; this is especially critical for embedded systems with a limited computational resource. This study proposes a learning-based bundle adjuster using a graph network. It works faster and can be used instead of conventional optimization-based BA. The graph network operates on a graph consisting of the nodes of keyframes and landmarks and the edges representing the landmarks’ visibility. The graph network receives the parameters’ initial values as inputs and predicts their updates to the optimal values. It internally uses an intermediate representation of inputs which we design inspired by the normal equation of the Levenberg-Marquardt method. It is trained using the sum of reprojection errors as a loss function. The experiments show that the proposed method outputs parameter estimates with slightly inferior accuracy in 1/60–1/10 of time compared with the conventional BA.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Bundle adjustment (BA) occupies a large portion of the execution time of SfM and visual SLAM. Local BA over the latest several keyframes plays a crucial role in visual SLAM. Its execution time should be sufficiently short for robust tracking; this is especially critical for embedded systems with a limited computational resource. This study proposes a learning-based bundle adjuster using a graph network. It works faster and can be used instead of conventional optimization-based BA. The graph network operates on a graph consisting of the nodes of keyframes and landmarks and the edges representing the landmarks’ visibility. The graph network receives the parameters’ initial values as inputs and predicts their updates to the optimal values. It internally uses an intermediate representation of inputs which we design inspired by the normal equation of the Levenberg-Marquardt method. It is trained using the sum of reprojection errors as a loss function. The experiments show that the proposed method outputs parameter estimates with slightly inferior accuracy in 1/60–1/10 of time compared with the conventional BA.",
"fno": "281200g230",
"keywords": [
"Bundle Adjustment",
"Training",
"Visualization",
"Simultaneous Localization And Mapping",
"Neural Networks",
"Estimation",
"Mathematical Models",
"Stereo",
"3 D From Multiview And Other Sensors",
"Machine Learning Architectures And Formulations"
],
"authors": [
{
"affiliation": "Socionext Inc",
"fullName": "Tetsuya Tanaka",
"givenName": "Tetsuya",
"surname": "Tanaka",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Socionext Inc",
"fullName": "Yukihiro Sasagawa",
"givenName": "Yukihiro",
"surname": "Sasagawa",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Tohoku University / RIKEN Center for AIP,Graduate School of Information Sciences",
"fullName": "Takayuki Okatani",
"givenName": "Takayuki",
"surname": "Okatani",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iccv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-10-01T00:00:00",
"pubType": "proceedings",
"pages": "6230-6239",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-2812-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "281200g219",
"articleId": "1BmEybxUSnC",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "281200g240",
"articleId": "1BmKNdrgGze",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/3dv/2017/2610/0/261001a175",
"title": "Fast Incremental Bundle Adjustment with Covariance Recovery",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2017/261001a175/12OmNAqCtOp",
"parentPublication": {
"id": "proceedings/3dv/2017/2610/0",
"title": "2017 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icris/2018/6580/0/658001a048",
"title": "The Comparison between FTF-VO and MF-VO for High Accuracy Mobile Robot Localization",
"doi": null,
"abstractUrl": "/proceedings-article/icris/2018/658001a048/12OmNz2kqeN",
"parentPublication": {
"id": "proceedings/icris/2018/6580/0",
"title": "2018 International Conference on Robots & Intelligent System (ICRIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2010/4109/0/4109a290",
"title": "Fast Odometry Integration in Local Bundle Adjustment-Based Visual SLAM",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2010/4109a290/12OmNzcPAf8",
"parentPublication": {
"id": "proceedings/icpr/2010/4109/0",
"title": "Pattern Recognition, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2019/1975/0/197500a031",
"title": "EGO-SLAM: A Robust Monocular SLAM for Egocentric Videos",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2019/197500a031/18j8QSyEfja",
"parentPublication": {
"id": "proceedings/wacv/2019/1975/0",
"title": "2019 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/11/09873988",
"title": "CoLi-BA: Compact Linearization based Solver for Bundle Adjustment",
"doi": null,
"abstractUrl": "/journal/tg/2022/11/09873988/1GjwJTUXo6A",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2019/3293/0/329300a134",
"title": "BAD SLAM: Bundle Adjusted Direct RGB-D SLAM",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2019/329300a134/1gyr8GIX9E4",
"parentPublication": {
"id": "proceedings/cvpr/2019/3293/0",
"title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800c413",
"title": "Bundle Adjustment on a Graph Processor",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800c413/1m3osvztWI8",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2020/8508/0/850800a136",
"title": "An Efficient Planar Bundle Adjustment Algorithm",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2020/850800a136/1pysxyD7bDG",
"parentPublication": {
"id": "proceedings/ismar/2020/8508/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2020/8128/0/812800a140",
"title": "Distributed Photometric Bundle Adjustment",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2020/812800a140/1qyxmS4qxIA",
"parentPublication": {
"id": "proceedings/3dv/2020/8128/0",
"title": "2020 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900l1718",
"title": "Square Root Bundle Adjustment for Large-Scale Reconstruction",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900l1718/1yeI4knbTmU",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1pystLSz19C",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"acronym": "ismar",
"groupId": "1000465",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1pysxyD7bDG",
"doi": "10.1109/ISMAR50242.2020.00035",
"title": "An Efficient Planar Bundle Adjustment Algorithm",
"normalizedTitle": "An Efficient Planar Bundle Adjustment Algorithm",
"abstract": "This paper presents an efficient algorithm for the least-squares problem using the point-to-plane cost, which aims to jointly optimize depth sensor poses and plane parameters for 3D reconstruction. We call this least-squares problem Planar Bundle Adjustment (PBA), due to the similarity between this problem and the original Bundle Adjustment (BA) in visual reconstruction. As planes ubiquitously exist in the man-made environment, they are generally used as landmarks in SLAM algorithms for various depth sensors. PBA is important to reduce drift and improve the quality of the map. However, directly adopting the well-established BA framework in visual reconstruction will result in a very inefficient solution for PBA. This is because a 3D point only has one observation at a camera pose. In contrast, a depth sensor can record hundreds of points in a plane at a time, which results in a very large nonlinear least-squares problem even for a small-scale space. The main contribution of this paper is an efficient solution for the PBA problem using the point-to-plane cost. We introduce a reduced Jacobian matrix and a reduced residual vector, and prove that they can replace the original Jacobian matrix and residual vector in the generally adopted Levenberg-Marquardt (LM) algorithm. This significantly reduces the computational cost. Besides, when planes are combined with other features for 3D reconstruction, the reduced Jacobian matrix and residual vector can also replace the corresponding parts derived from planes. Our experimental results show that our algorithm can significantly reduce the computational time compared to the solution using the traditional BA framework. In addition, our algorithm is faster, more accurate, and more robust to initialization errors compared to the start-of-the-art solution using the plane-to-plane cost [3].",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper presents an efficient algorithm for the least-squares problem using the point-to-plane cost, which aims to jointly optimize depth sensor poses and plane parameters for 3D reconstruction. We call this least-squares problem Planar Bundle Adjustment (PBA), due to the similarity between this problem and the original Bundle Adjustment (BA) in visual reconstruction. As planes ubiquitously exist in the man-made environment, they are generally used as landmarks in SLAM algorithms for various depth sensors. PBA is important to reduce drift and improve the quality of the map. However, directly adopting the well-established BA framework in visual reconstruction will result in a very inefficient solution for PBA. This is because a 3D point only has one observation at a camera pose. In contrast, a depth sensor can record hundreds of points in a plane at a time, which results in a very large nonlinear least-squares problem even for a small-scale space. The main contribution of this paper is an efficient solution for the PBA problem using the point-to-plane cost. We introduce a reduced Jacobian matrix and a reduced residual vector, and prove that they can replace the original Jacobian matrix and residual vector in the generally adopted Levenberg-Marquardt (LM) algorithm. This significantly reduces the computational cost. Besides, when planes are combined with other features for 3D reconstruction, the reduced Jacobian matrix and residual vector can also replace the corresponding parts derived from planes. Our experimental results show that our algorithm can significantly reduce the computational time compared to the solution using the traditional BA framework. In addition, our algorithm is faster, more accurate, and more robust to initialization errors compared to the start-of-the-art solution using the plane-to-plane cost [3].",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper presents an efficient algorithm for the least-squares problem using the point-to-plane cost, which aims to jointly optimize depth sensor poses and plane parameters for 3D reconstruction. We call this least-squares problem Planar Bundle Adjustment (PBA), due to the similarity between this problem and the original Bundle Adjustment (BA) in visual reconstruction. As planes ubiquitously exist in the man-made environment, they are generally used as landmarks in SLAM algorithms for various depth sensors. PBA is important to reduce drift and improve the quality of the map. However, directly adopting the well-established BA framework in visual reconstruction will result in a very inefficient solution for PBA. This is because a 3D point only has one observation at a camera pose. In contrast, a depth sensor can record hundreds of points in a plane at a time, which results in a very large nonlinear least-squares problem even for a small-scale space. The main contribution of this paper is an efficient solution for the PBA problem using the point-to-plane cost. We introduce a reduced Jacobian matrix and a reduced residual vector, and prove that they can replace the original Jacobian matrix and residual vector in the generally adopted Levenberg-Marquardt (LM) algorithm. This significantly reduces the computational cost. Besides, when planes are combined with other features for 3D reconstruction, the reduced Jacobian matrix and residual vector can also replace the corresponding parts derived from planes. Our experimental results show that our algorithm can significantly reduce the computational time compared to the solution using the traditional BA framework. In addition, our algorithm is faster, more accurate, and more robust to initialization errors compared to the start-of-the-art solution using the plane-to-plane cost [3].",
"fno": "850800a136",
"keywords": [
"Backpropagation",
"Cameras",
"Image Reconstruction",
"Jacobian Matrices",
"Least Squares Approximations",
"Mobile Robots",
"Neural Nets",
"Optimisation",
"Pose Estimation",
"SLAM Robots",
"Reduced Jacobian Matrix",
"Plane To Plane Cost",
"Efficient Planar Bundle Adjustment Algorithm",
"Point To Plane Cost",
"Depth Sensor Poses",
"Plane Parameters",
"Visual Reconstruction",
"SLAM Algorithms",
"Inefficient Solution",
"PBA Problem",
"Reduced Residual Vector",
"Generally Adopted Levenberg Marquardt Algorithm",
"Computational Cost",
"Original Bundle Adjustment",
"Least Squares Problem Planar Bundle Adjustment",
"Jacobian Matrices",
"Bundle Adjustment",
"Visualization",
"Simultaneous Localization And Mapping",
"Runtime",
"Computational Efficiency",
"Optimization",
"Bundle Adjustment",
"Nonlinear Optimization",
"SLAM",
"Depth Sensor"
],
"authors": [
{
"affiliation": "Magic Leap",
"fullName": "Lipu Zhou",
"givenName": "Lipu",
"surname": "Zhou",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Magic Leap",
"fullName": "Daniel Koppel",
"givenName": "Daniel",
"surname": "Koppel",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Magic Leap",
"fullName": "Hul Ju",
"givenName": "Hul",
"surname": "Ju",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Magic Leap",
"fullName": "Frank Steinbruecker",
"givenName": "Frank",
"surname": "Steinbruecker",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Carnegie Mellon University",
"fullName": "Michael Kaess",
"givenName": "Michael",
"surname": "Kaess",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-11-01T00:00:00",
"pubType": "proceedings",
"pages": "136-145",
"year": "2020",
"issn": "1554-7868",
"isbn": "978-1-7281-8508-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "850800a127",
"articleId": "1pysv4MOhNK",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "850800a146",
"articleId": "1pysy3NAjaU",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/3dv/2017/2610/0/261001a175",
"title": "Fast Incremental Bundle Adjustment with Covariance Recovery",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2017/261001a175/12OmNAqCtOp",
"parentPublication": {
"id": "proceedings/3dv/2017/2610/0",
"title": "2017 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2012/1226/0/181P2A31",
"title": "Rolling shutter bundle adjustment",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2012/181P2A31/12OmNAsk4zp",
"parentPublication": {
"id": "proceedings/cvpr/2012/1226/0",
"title": "2012 IEEE Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2018/8425/0/842500a616",
"title": "Multi-planar Monocular Reconstruction of Manhattan Indoor Scenes",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2018/842500a616/17D45XvMcbo",
"parentPublication": {
"id": "proceedings/3dv/2018/8425/0",
"title": "2018 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200g230",
"title": "Learning to Bundle-adjust: A Graph Network Approach to Faster Optimization of Bundle Adjustment for Vehicular SLAM",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200g230/1BmEFEemuFG",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/11/09873988",
"title": "CoLi-BA: Compact Linearization based Solver for Bundle Adjustment",
"doi": null,
"abstractUrl": "/journal/tg/2022/11/09873988/1GjwJTUXo6A",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fccm/2019/1131/0/113100a100",
"title": "π-BA: Bundle Adjustment Acceleration on Embedded FPGAs with Co-observation Optimization",
"doi": null,
"abstractUrl": "/proceedings-article/fccm/2019/113100a100/1aPv1CATXTW",
"parentPublication": {
"id": "proceedings/fccm/2019/1131/0",
"title": "2019 IEEE 27th Annual International Symposium on Field-Programmable Custom Computing Machines (FCCM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tc/2020/07/09055175",
"title": "<inline-formula><tex-math notation=\"LaTeX\">Z_$\\pi$_Z</tex-math></inline-formula>-BA: Bundle Adjustment Hardware Accelerator Based on Distribution of 3D-Point Observations",
"doi": null,
"abstractUrl": "/journal/tc/2020/07/09055175/1iHrnARikhi",
"parentPublication": {
"id": "trans/tc",
"title": "IEEE Transactions on Computers",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2020/6553/0/09093265",
"title": "Two-Grid Preconditioned Solver for Bundle Adjustment",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2020/09093265/1jPbpxzpGdG",
"parentPublication": {
"id": "proceedings/wacv/2020/6553/0",
"title": "2020 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800c413",
"title": "Bundle Adjustment on a Graph Processor",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800c413/1m3osvztWI8",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2020/8128/0/812800a140",
"title": "Distributed Photometric Bundle Adjustment",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2020/812800a140/1qyxmS4qxIA",
"parentPublication": {
"id": "proceedings/3dv/2020/8128/0",
"title": "2020 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1qyxi3OgORy",
"title": "2020 International Conference on 3D Vision (3DV)",
"acronym": "3dv",
"groupId": "1800494",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1qyxmS4qxIA",
"doi": "10.1109/3DV50981.2020.00024",
"title": "Distributed Photometric Bundle Adjustment",
"normalizedTitle": "Distributed Photometric Bundle Adjustment",
"abstract": "In this paper we demonstrate that global photometric bundle adjustment (PBA) over all past keyframes can significantly improve the global accuracy of a monocular SLAM map compared to geometric techniques such as pose-graph optimization or traditional (geometric) bundle adjustment. However, PBA is computationally expensive in runtime, and memory usage can be prohibitively high. In order to address this scalability issue, we formulate PBA as an approximate consensus program. Due to its decomposable structure, the problem can be solved with block coordinate descent in parallel across multiple independent workers, each having lower requirements on memory and computational resources. For improved accuracy and convergence, we propose a novel gauge aware consensus update. Our experiments on real-world data show an average error reduction of 62% compared to odometry and 33% compared to intermediate pose-graph optimization, and that compared to the central optimization on a single machine, our distributed PBA achieves competitive pose-accuracy and cost.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this paper we demonstrate that global photometric bundle adjustment (PBA) over all past keyframes can significantly improve the global accuracy of a monocular SLAM map compared to geometric techniques such as pose-graph optimization or traditional (geometric) bundle adjustment. However, PBA is computationally expensive in runtime, and memory usage can be prohibitively high. In order to address this scalability issue, we formulate PBA as an approximate consensus program. Due to its decomposable structure, the problem can be solved with block coordinate descent in parallel across multiple independent workers, each having lower requirements on memory and computational resources. For improved accuracy and convergence, we propose a novel gauge aware consensus update. Our experiments on real-world data show an average error reduction of 62% compared to odometry and 33% compared to intermediate pose-graph optimization, and that compared to the central optimization on a single machine, our distributed PBA achieves competitive pose-accuracy and cost.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this paper we demonstrate that global photometric bundle adjustment (PBA) over all past keyframes can significantly improve the global accuracy of a monocular SLAM map compared to geometric techniques such as pose-graph optimization or traditional (geometric) bundle adjustment. However, PBA is computationally expensive in runtime, and memory usage can be prohibitively high. In order to address this scalability issue, we formulate PBA as an approximate consensus program. Due to its decomposable structure, the problem can be solved with block coordinate descent in parallel across multiple independent workers, each having lower requirements on memory and computational resources. For improved accuracy and convergence, we propose a novel gauge aware consensus update. Our experiments on real-world data show an average error reduction of 62% compared to odometry and 33% compared to intermediate pose-graph optimization, and that compared to the central optimization on a single machine, our distributed PBA achieves competitive pose-accuracy and cost.",
"fno": "812800a140",
"keywords": [
"Gradient Methods",
"Optimisation",
"Photometry",
"SLAM Robots",
"Solid Modelling",
"Distributed PBA",
"Global Photometric Bundle Adjustment",
"Global Accuracy",
"Monocular SLAM Map",
"Memory Usage",
"Scalability Issue",
"Approximate Consensus Program",
"Decomposable Structure",
"Computational Resources",
"Convergence",
"Gauge Aware Consensus Update",
"Distributed Photometric Bundle Adjustment",
"Block Coordinate Descent",
"Optimization",
"Simultaneous Localization And Mapping",
"Cameras",
"Bundle Adjustment",
"Brightness",
"Real Time Systems",
"Memory Management",
"Photometric Bundle Adjustment",
"SLAM",
"Structure From Motion",
"Direct Method",
"Distributed Optimization",
"Mapping",
"Splitting Method",
"Penalty Method",
"Loop Closure",
"Odometry",
"Consensus Optimization"
],
"authors": [
{
"affiliation": "Technical University of Munich",
"fullName": "Nikolaus Demmel",
"givenName": "Nikolaus",
"surname": "Demmel",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Technical University of Munich",
"fullName": "Maolin Gao",
"givenName": "Maolin",
"surname": "Gao",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Technical University of Munich",
"fullName": "Emanuel Laude",
"givenName": "Emanuel",
"surname": "Laude",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Technical University of Munich",
"fullName": "Tao Wu",
"givenName": "Tao",
"surname": "Wu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Technical University of Munich",
"fullName": "Daniel Cremers",
"givenName": "Daniel",
"surname": "Cremers",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "3dv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-11-01T00:00:00",
"pubType": "proceedings",
"pages": "140-149",
"year": "2020",
"issn": null,
"isbn": "978-1-7281-8128-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "812800a130",
"articleId": "1qyxkH3kcJW",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "812800a150",
"articleId": "1qyxk8duo12",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/3dv/2017/2610/0/261001a175",
"title": "Fast Incremental Bundle Adjustment with Covariance Recovery",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2017/261001a175/12OmNAqCtOp",
"parentPublication": {
"id": "proceedings/3dv/2017/2610/0",
"title": "2017 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2012/1226/0/181P2A31",
"title": "Rolling shutter bundle adjustment",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2012/181P2A31/12OmNAsk4zp",
"parentPublication": {
"id": "proceedings/cvpr/2012/1226/0",
"title": "2012 IEEE Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dimpvt/2012/4873/0/4873a348",
"title": "Towards Bundle Adjustment with GIS Constraints for Online Geo-Localization of a Vehicle in Urban Center",
"doi": null,
"abstractUrl": "/proceedings-article/3dimpvt/2012/4873a348/12OmNyQGS7j",
"parentPublication": {
"id": "proceedings/3dimpvt/2012/4873/0",
"title": "2012 Second International Conference on 3D Imaging, Modeling, Processing, Visualization & Transmission",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2010/4109/0/4109a290",
"title": "Fast Odometry Integration in Local Bundle Adjustment-Based Visual SLAM",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2010/4109a290/12OmNzcPAf8",
"parentPublication": {
"id": "proceedings/icpr/2010/4109/0",
"title": "Pattern Recognition, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2020/02/08365816",
"title": "Distributed Very Large Scale Bundle Adjustment by Global Camera Consensus",
"doi": null,
"abstractUrl": "/journal/tp/2020/02/08365816/13rRUwInvCe",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2018/6420/0/642000b876",
"title": "pOSE: Pseudo Object Space Error for Initialization-Free Bundle Adjustment",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2018/642000b876/17D45VVho3g",
"parentPublication": {
"id": "proceedings/cvpr/2018/6420/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200g230",
"title": "Learning to Bundle-adjust: A Graph Network Approach to Faster Optimization of Bundle Adjustment for Vehicular SLAM",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200g230/1BmEFEemuFG",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800c413",
"title": "Bundle Adjustment on a Graph Processor",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800c413/1m3osvztWI8",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2020/8508/0/850800a136",
"title": "An Efficient Planar Bundle Adjustment Algorithm",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2020/850800a136/1pysxyD7bDG",
"parentPublication": {
"id": "proceedings/ismar/2020/8508/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900l1718",
"title": "Square Root Bundle Adjustment for Large-Scale Reconstruction",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900l1718/1yeI4knbTmU",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1yeHGyRsuys",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"acronym": "cvpr",
"groupId": "1000147",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1yeI4knbTmU",
"doi": "10.1109/CVPR46437.2021.01155",
"title": "Square Root Bundle Adjustment for Large-Scale Reconstruction",
"normalizedTitle": "Square Root Bundle Adjustment for Large-Scale Reconstruction",
"abstract": "We propose a new formulation for the bundle adjustment problem which relies on nullspace marginalization of landmark variables by QR decomposition. Our approach, which we call square root bundle adjustment, is algebraically equivalent to the commonly used Schur complement trick, improves the numeric stability of computations, and allows for solving large-scale bundle adjustment problems with single-precision floating-point numbers. We show in real-world experiments with the BAL datasets that even in single precision the proposed solver achieves on average equally accurate solutions compared to Schur complement solvers using double precision. It runs significantly faster, but can require larger amounts of memory on dense problems. The proposed formulation relies on simple linear algebra operations and opens the way for efficient implementations of bundle adjustment on hardware platforms optimized for single-precision linear algebra processing.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We propose a new formulation for the bundle adjustment problem which relies on nullspace marginalization of landmark variables by QR decomposition. Our approach, which we call square root bundle adjustment, is algebraically equivalent to the commonly used Schur complement trick, improves the numeric stability of computations, and allows for solving large-scale bundle adjustment problems with single-precision floating-point numbers. We show in real-world experiments with the BAL datasets that even in single precision the proposed solver achieves on average equally accurate solutions compared to Schur complement solvers using double precision. It runs significantly faster, but can require larger amounts of memory on dense problems. The proposed formulation relies on simple linear algebra operations and opens the way for efficient implementations of bundle adjustment on hardware platforms optimized for single-precision linear algebra processing.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We propose a new formulation for the bundle adjustment problem which relies on nullspace marginalization of landmark variables by QR decomposition. Our approach, which we call square root bundle adjustment, is algebraically equivalent to the commonly used Schur complement trick, improves the numeric stability of computations, and allows for solving large-scale bundle adjustment problems with single-precision floating-point numbers. We show in real-world experiments with the BAL datasets that even in single precision the proposed solver achieves on average equally accurate solutions compared to Schur complement solvers using double precision. It runs significantly faster, but can require larger amounts of memory on dense problems. The proposed formulation relies on simple linear algebra operations and opens the way for efficient implementations of bundle adjustment on hardware platforms optimized for single-precision linear algebra processing.",
"fno": "450900l1718",
"keywords": [
"Floating Point Arithmetic",
"Image Processing",
"Linear Algebra",
"Numerical Stability",
"Schur Complement Solvers",
"Single Precision Linear Algebra Processing",
"Square Root Bundle Adjustment",
"Large Scale Reconstruction",
"Bundle Adjustment Problem",
"Schur Complement Trick",
"Large Scale Bundle Adjustment Problems",
"Single Precision Floating Point Numbers",
"QR Decomposition",
"Numeric Stability",
"BAL Datasets",
"Bundle Adjustment",
"Computer Vision",
"Simultaneous Localization And Mapping",
"Memory Management",
"Linear Algebra",
"Libraries",
"Hardware"
],
"authors": [
{
"affiliation": "Technical University of Munich",
"fullName": "Nikolaus Demmel",
"givenName": "Nikolaus",
"surname": "Demmel",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Technical University of Munich",
"fullName": "Christiane Sommer",
"givenName": "Christiane",
"surname": "Sommer",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Technical University of Munich",
"fullName": "Daniel Cremers",
"givenName": "Daniel",
"surname": "Cremers",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Technical University of Munich",
"fullName": "Vladyslav Usenko",
"givenName": "Vladyslav",
"surname": "Usenko",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-06-01T00:00:00",
"pubType": "proceedings",
"pages": "11718-11727",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-4509-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [
{
"id": "1yeI4gvCrAs",
"name": "pcvpr202145090-09577956s1-mm_450900l1718.zip",
"size": "1.47 MB",
"location": "https://www.computer.org/csdl/api/v1/extra/pcvpr202145090-09577956s1-mm_450900l1718.zip",
"__typename": "WebExtraType"
}
],
"adjacentArticles": {
"previous": {
"fno": "450900l1708",
"articleId": "1yeHS9oJtPW",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "450900l1728",
"articleId": "1yeHX163Xnq",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/3dv/2017/2610/0/261001a175",
"title": "Fast Incremental Bundle Adjustment with Covariance Recovery",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2017/261001a175/12OmNAqCtOp",
"parentPublication": {
"id": "proceedings/3dv/2017/2610/0",
"title": "2017 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2004/2128/3/212830902",
"title": "3D Model Reconstruction by Constrained Bundle Adjustment",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2004/212830902/12OmNCvLXXS",
"parentPublication": {
"id": "proceedings/icpr/2004/2128/3",
"title": "Pattern Recognition, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200g230",
"title": "Learning to Bundle-adjust: A Graph Network Approach to Faster Optimization of Bundle Adjustment for Vehicular SLAM",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200g230/1BmEFEemuFG",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200n3240",
"title": "Square Root Marginalization for Sliding-Window Bundle Adjustment",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200n3240/1BmGJ6kP0EE",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/11/09873988",
"title": "CoLi-BA: Compact Linearization based Solver for Bundle Adjustment",
"doi": null,
"abstractUrl": "/journal/tg/2022/11/09873988/1GjwJTUXo6A",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icis/2019/0801/0/08940181",
"title": "Parallel Bundle Adjustment of High-Resolution Satellite Imagery",
"doi": null,
"abstractUrl": "/proceedings-article/icis/2019/08940181/1gjRQt11ako",
"parentPublication": {
"id": "proceedings/icis/2019/0801/0",
"title": "2019 IEEE/ACIS 18th International Conference on Computer and Information Science (ICIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2022/02/09151354",
"title": "Spatiotemporal Bundle Adjustment for Dynamic 3D Human Reconstruction in the Wild",
"doi": null,
"abstractUrl": "/journal/tp/2022/02/09151354/1lPCkW5UbPG",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2020/8508/0/850800a136",
"title": "An Efficient Planar Bundle Adjustment Algorithm",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2020/850800a136/1pysxyD7bDG",
"parentPublication": {
"id": "proceedings/ismar/2020/8508/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2020/8128/0/812800a663",
"title": "Visualizing Spectral Bundle Adjustment Uncertainty",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2020/812800a663/1qyxkDucGpG",
"parentPublication": {
"id": "proceedings/3dv/2020/8128/0",
"title": "2020 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2020/8128/0/812800a140",
"title": "Distributed Photometric Bundle Adjustment",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2020/812800a140/1qyxmS4qxIA",
"parentPublication": {
"id": "proceedings/3dv/2020/8128/0",
"title": "2020 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNwdbUZa",
"title": "2010 13th IEEE International Conference on Computational Science and Engineering",
"acronym": "cse",
"groupId": "1002115",
"volume": "0",
"displayVolume": "0",
"year": "2010",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNA0dMRS",
"doi": "10.1109/CSE.2010.66",
"title": "Augmented Reality System Design and Scenario Study for Location-Based Adaptive Mobile Learning",
"normalizedTitle": "Augmented Reality System Design and Scenario Study for Location-Based Adaptive Mobile Learning",
"abstract": "Augmented Reality allows the user to see the virtual objects superimposed upon or composited with the real world. This paper presented the system design of the Multi-Object Oriented Augmented Reality (MOOAR) system for location-based adaptive mobile learning environment and the scenario study. Moreover, the detailed rationales behind the MOOAR system are also discussed in this paper. The implementation of the MOOAR system is described with the designed scenario. Furthermore, the expected results of the scenario study are shown in this paper to demonstrate the advantages of using Augmented Reality in location-based adaptive mobile learning.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Augmented Reality allows the user to see the virtual objects superimposed upon or composited with the real world. This paper presented the system design of the Multi-Object Oriented Augmented Reality (MOOAR) system for location-based adaptive mobile learning environment and the scenario study. Moreover, the detailed rationales behind the MOOAR system are also discussed in this paper. The implementation of the MOOAR system is described with the designed scenario. Furthermore, the expected results of the scenario study are shown in this paper to demonstrate the advantages of using Augmented Reality in location-based adaptive mobile learning.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Augmented Reality allows the user to see the virtual objects superimposed upon or composited with the real world. This paper presented the system design of the Multi-Object Oriented Augmented Reality (MOOAR) system for location-based adaptive mobile learning environment and the scenario study. Moreover, the detailed rationales behind the MOOAR system are also discussed in this paper. The implementation of the MOOAR system is described with the designed scenario. Furthermore, the expected results of the scenario study are shown in this paper to demonstrate the advantages of using Augmented Reality in location-based adaptive mobile learning.",
"fno": "4323a020",
"keywords": [
"Augmented Reality Mobile Augmented Reality System Location Based Adaptive Mobile Learning"
],
"authors": [
{
"affiliation": null,
"fullName": "William Chang",
"givenName": "William",
"surname": "Chang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Qing Tan",
"givenName": "Qing",
"surname": "Tan",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cse",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2010-12-01T00:00:00",
"pubType": "proceedings",
"pages": "20-27",
"year": "2010",
"issn": null,
"isbn": "978-0-7695-4323-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "4323a014",
"articleId": "12OmNqI04CH",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "4323a028",
"articleId": "12OmNxWcH0L",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icsc/2016/0662/0/0662a358",
"title": "Mobile Augmented Reality Authoring Tool",
"doi": null,
"abstractUrl": "/proceedings-article/icsc/2016/0662a358/12OmNAXglVC",
"parentPublication": {
"id": "proceedings/icsc/2016/0662/0",
"title": "2016 IEEE Tenth International Conference on Semantic Computing (ICSC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2014/4103/0/4103a211",
"title": "Development of Adaptive Information Visualization Systems with Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2014/4103a211/12OmNBzRNpJ",
"parentPublication": {
"id": "proceedings/iv/2014/4103/0",
"title": "2014 18th International Conference on Information Visualisation (IV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/synasc/2010/4324/0/4324a502",
"title": "Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/synasc/2010/4324a502/12OmNyOq55c",
"parentPublication": {
"id": "proceedings/synasc/2010/4324/0",
"title": "2010 12th International Symposium on Symbolic and Numeric Algorithms for Scientific Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmcs/1999/0253/1/02539195",
"title": "Haptics in Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/icmcs/1999/02539195/12OmNyQ7G3s",
"parentPublication": {
"id": "proceedings/icmcs/1999/0253/1",
"title": "Multimedia Computing and Systems, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icalt/2010/4055/0/4055a450",
"title": "Multi-Object Oriented Augmented Reality for Location-Based Adaptive Mobile Learning",
"doi": null,
"abstractUrl": "/proceedings-article/icalt/2010/4055a450/12OmNyUWR9U",
"parentPublication": {
"id": "proceedings/icalt/2010/4055/0",
"title": "Advanced Learning Technologies, IEEE International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2014/6184/0/06948444",
"title": "[Poster] Contextually panned and zoomed augmented reality interactions using COTS heads up displays",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2014/06948444/12OmNzcPAaG",
"parentPublication": {
"id": "proceedings/ismar/2014/6184/0",
"title": "2014 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/06/07435333",
"title": "Towards Pervasive Augmented Reality: Context-Awareness in Augmented Reality",
"doi": null,
"abstractUrl": "/journal/tg/2017/06/07435333/13rRUwfZBVq",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/pc/2010/01/mpc2010010005",
"title": "Gaming and Augmented Reality Come to Location-Based Services",
"doi": null,
"abstractUrl": "/magazine/pc/2010/01/mpc2010010005/13rRUy08MBx",
"parentPublication": {
"id": "mags/pc",
"title": "IEEE Pervasive Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2018/7592/0/08699316",
"title": "Inverse Augmented Reality: A Virtual Agent's Perspective",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699316/19F1UA1hw40",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2018/7592/0",
"title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797973",
"title": "A Comparison of Desktop and Augmented Reality Scenario Based Training Authoring Tools",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797973/1cJ0S2MS49O",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNy3iFul",
"title": "2014 18th International Conference on Information Visualisation (IV)",
"acronym": "iv",
"groupId": "1000370",
"volume": "0",
"displayVolume": "0",
"year": "2014",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNBzRNpJ",
"doi": "10.1109/IV.2014.52",
"title": "Development of Adaptive Information Visualization Systems with Augmented Reality",
"normalizedTitle": "Development of Adaptive Information Visualization Systems with Augmented Reality",
"abstract": "Augmented Reality combined with adaptive hypermedia plays an important role on providing effective information visualization systems. In this paper, we propose a comprehensive architecture model in order to provide adaptive information visualization systems with augmented reality. We also provide a novel visual metaphor for real-valued, low-dimensional data with optimal values for each feature inspired on the pseudo-flower metaphor.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Augmented Reality combined with adaptive hypermedia plays an important role on providing effective information visualization systems. In this paper, we propose a comprehensive architecture model in order to provide adaptive information visualization systems with augmented reality. We also provide a novel visual metaphor for real-valued, low-dimensional data with optimal values for each feature inspired on the pseudo-flower metaphor.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Augmented Reality combined with adaptive hypermedia plays an important role on providing effective information visualization systems. In this paper, we propose a comprehensive architecture model in order to provide adaptive information visualization systems with augmented reality. We also provide a novel visual metaphor for real-valued, low-dimensional data with optimal values for each feature inspired on the pseudo-flower metaphor.",
"fno": "4103a211",
"keywords": [
"Adaptation Models",
"Data Visualization",
"Navigation",
"Augmented Reality",
"Visualization",
"Adaptive Systems",
"Image Color Analysis",
"System Architecture",
"Augmented Reality",
"Adaptive Systems",
"Information Visualization"
],
"authors": [
{
"affiliation": null,
"fullName": "Ezequiel R. Zorzal",
"givenName": "Ezequiel R.",
"surname": "Zorzal",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Celso A.R. de Sousa",
"givenName": "Celso A.R. de",
"surname": "Sousa",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Alexandre Cardoso",
"givenName": "Alexandre",
"surname": "Cardoso",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Claudio Kirner",
"givenName": "Claudio",
"surname": "Kirner",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Edgard A. Lamounier",
"givenName": "Edgard A.",
"surname": "Lamounier",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Marcos G. Quiles",
"givenName": "Marcos G.",
"surname": "Quiles",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2014-07-01T00:00:00",
"pubType": "proceedings",
"pages": "211-216",
"year": "2014",
"issn": "1550-6037",
"isbn": "978-1-4799-4103-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "4103a202",
"articleId": "12OmNwxlrhT",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "4103a217",
"articleId": "12OmNAXPxYG",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iv/2012/4771/0/4771a445",
"title": "Augmented Reality Technology and Art: The Analysis and Visualization of Evolving Conceptual Models",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2012/4771a445/12OmNAObbCb",
"parentPublication": {
"id": "proceedings/iv/2012/4771/0",
"title": "2012 16th International Conference on Information Visualisation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2012/4660/0/06402589",
"title": "Supervised classification for customized intraoperative augmented reality visualization",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2012/06402589/12OmNAS9zPH",
"parentPublication": {
"id": "proceedings/ismar/2012/4660/0",
"title": "2012 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2007/2900/0/29000156",
"title": "Coordinated and Multiple Views in Augmented Reality Environment",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2007/29000156/12OmNs4S8Jo",
"parentPublication": {
"id": "proceedings/iv/2007/2900/0",
"title": "2007 11th International Conference Information Visualization (IV '07)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cbi/2014/5779/2/5779b033",
"title": "Towards Big Data Visualization for Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/cbi/2014/5779b033/12OmNwwd2TL",
"parentPublication": {
"id": "proceedings/cbi/2014/5779/2",
"title": "2014 IEEE 16th Conference on Business Informatics (CBI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dvis/2014/6826/0/07160099",
"title": "Spatial augmented reality — A tool for 3D data visualization",
"doi": null,
"abstractUrl": "/proceedings-article/3dvis/2014/07160099/12OmNxxvAN7",
"parentPublication": {
"id": "proceedings/3dvis/2014/6826/0",
"title": "2014 IEEE VIS International Workshop on 3DVis (3DVis)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2005/8929/0/01492802",
"title": "A study of depth visualization techniques for virtual annotations in augmented reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2005/01492802/12OmNyo1nO1",
"parentPublication": {
"id": "proceedings/vr/2005/8929/0",
"title": "IEEE Virtual Reality 2005",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a907",
"title": "ViCollAR: A Novel System for 3D Data Visualization using Collaborative Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a907/1J7Whap3vRS",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/09/09060980",
"title": "Visualization Techniques in Augmented Reality: A Taxonomy, Methods and Patterns",
"doi": null,
"abstractUrl": "/journal/tg/2021/09/09060980/1iRo7RmpTa0",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/csci/2019/5584/0/558400b269",
"title": "Augmented Reality for Big Data Visualization: A Review",
"doi": null,
"abstractUrl": "/proceedings-article/csci/2019/558400b269/1jdDXnKC1Vu",
"parentPublication": {
"id": "proceedings/csci/2019/5584/0",
"title": "2019 International Conference on Computational Science and Computational Intelligence (CSCI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/02/09223669",
"title": "Personal Augmented Reality for Information Visualization on Large Interactive Displays",
"doi": null,
"abstractUrl": "/journal/tg/2021/02/09223669/1nV6cy8Xk5i",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNvs4vpU",
"title": "2015 IEEE 15th International Conference on Advanced Learning Technologies (ICALT)",
"acronym": "icalt",
"groupId": "1000009",
"volume": "0",
"displayVolume": "0",
"year": "2015",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNqBbHAA",
"doi": "10.1109/ICALT.2015.105",
"title": "Augmented Reality Laboratory for High School Electrochemistry Course",
"normalizedTitle": "Augmented Reality Laboratory for High School Electrochemistry Course",
"abstract": "The purpose of this study was to investigate the effects of types of augmented reality and guiding strategy on senior high school students' performance and motivation of electrochemistry concepts. The participants were 152 freshmen of senior high school. A 2X2 quasi-experimental design was employed and the independent variables were type of augmented reality (static-AR vs. Dynamic-AR) and type of guiding strategy (procedure-guided vs. Question-guided). Two types of augmented reality were employed, including the static augmented reality and the dynamic augmented reality, and two types of guiding strategies were cooperated, including procedure-guided strategy and question-guided strategy. The dependent variables were learning performance and motivation. The results revealed that (a) while receiving the static augmented reality learning, the procedural guidance group achieved better learning application performance than the question guidance group, (b) as for the knowledge understanding performance, the static augmented reality group outperformed the dynamic augmented reality group, and the procedural guidance group outperformed the question guidance group, and (c) students showed positive motivation toward learning Chemistry no matter which augmented reality type they used, especially students who used the static augmented reality revealed higher motivation than those who used the dynamic augmented reality.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The purpose of this study was to investigate the effects of types of augmented reality and guiding strategy on senior high school students' performance and motivation of electrochemistry concepts. The participants were 152 freshmen of senior high school. A 2X2 quasi-experimental design was employed and the independent variables were type of augmented reality (static-AR vs. Dynamic-AR) and type of guiding strategy (procedure-guided vs. Question-guided). Two types of augmented reality were employed, including the static augmented reality and the dynamic augmented reality, and two types of guiding strategies were cooperated, including procedure-guided strategy and question-guided strategy. The dependent variables were learning performance and motivation. The results revealed that (a) while receiving the static augmented reality learning, the procedural guidance group achieved better learning application performance than the question guidance group, (b) as for the knowledge understanding performance, the static augmented reality group outperformed the dynamic augmented reality group, and the procedural guidance group outperformed the question guidance group, and (c) students showed positive motivation toward learning Chemistry no matter which augmented reality type they used, especially students who used the static augmented reality revealed higher motivation than those who used the dynamic augmented reality.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The purpose of this study was to investigate the effects of types of augmented reality and guiding strategy on senior high school students' performance and motivation of electrochemistry concepts. The participants were 152 freshmen of senior high school. A 2X2 quasi-experimental design was employed and the independent variables were type of augmented reality (static-AR vs. Dynamic-AR) and type of guiding strategy (procedure-guided vs. Question-guided). Two types of augmented reality were employed, including the static augmented reality and the dynamic augmented reality, and two types of guiding strategies were cooperated, including procedure-guided strategy and question-guided strategy. The dependent variables were learning performance and motivation. The results revealed that (a) while receiving the static augmented reality learning, the procedural guidance group achieved better learning application performance than the question guidance group, (b) as for the knowledge understanding performance, the static augmented reality group outperformed the dynamic augmented reality group, and the procedural guidance group outperformed the question guidance group, and (c) students showed positive motivation toward learning Chemistry no matter which augmented reality type they used, especially students who used the static augmented reality revealed higher motivation than those who used the dynamic augmented reality.",
"fno": "7334a132",
"keywords": [
"Augmented Reality",
"Chemicals",
"Ions",
"Games",
"Education",
"Animation",
"Experiential Learning",
"Augmented Reality",
"Gamification",
"Learning Strategies"
],
"authors": [
{
"affiliation": null,
"fullName": "Ming-Puu Chen",
"givenName": "Ming-Puu",
"surname": "Chen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Ban-Chieh Liao",
"givenName": "Ban-Chieh",
"surname": "Liao",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icalt",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2015-07-01T00:00:00",
"pubType": "proceedings",
"pages": "132-136",
"year": "2015",
"issn": null,
"isbn": "978-1-4673-7334-0",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "7334a127",
"articleId": "12OmNzQzqjh",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "7334a137",
"articleId": "12OmNzsrwj0",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icalt/2012/4702/0/4702a728",
"title": "School of the Future: Using Augmented Reality for Contextual Information and Navigation in Academic Buildings",
"doi": null,
"abstractUrl": "/proceedings-article/icalt/2012/4702a728/12OmNAkWvcI",
"parentPublication": {
"id": "proceedings/icalt/2012/4702/0",
"title": "Advanced Learning Technologies, IEEE International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iiai-aai/2016/8985/0/8985a353",
"title": "A Context-Aware Progressive Inquiry-Based Augmented Reality System to Improving Students' Investigation Learning Abilities for High School Geography Courses",
"doi": null,
"abstractUrl": "/proceedings-article/iiai-aai/2016/8985a353/12OmNvKePIH",
"parentPublication": {
"id": "proceedings/iiai-aai/2016/8985/0",
"title": "2016 5th IIAI International Congress on Advanced Applied Informatics (IIAI-AAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icalt/2017/3870/0/3870a469",
"title": "An Empirical Study of the Use of an Augmented Reality Simulator in a Face-to-Face Physics Course",
"doi": null,
"abstractUrl": "/proceedings-article/icalt/2017/3870a469/12OmNvUaNfj",
"parentPublication": {
"id": "proceedings/icalt/2017/3870/0",
"title": "2017 IEEE 17th International Conference on Advanced Learning Technologies (ICALT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2012/4660/0/06402558",
"title": "3D referencing techniques for physical objects in shared augmented reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2012/06402558/12OmNxj239f",
"parentPublication": {
"id": "proceedings/ismar/2012/4660/0",
"title": "2012 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/eitt/2014/4231/0/06982557",
"title": "Design Research and Practice of Augmented Reality Textbook",
"doi": null,
"abstractUrl": "/proceedings-article/eitt/2014/06982557/12OmNxwENQg",
"parentPublication": {
"id": "proceedings/eitt/2014/4231/0",
"title": "2014 International Conference of Educational Innovation through Technology (EITT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iiai-aai/2016/8985/0/8985b180",
"title": "Teachers' and Students' Perceptions toward Augmented Reality Materials",
"doi": null,
"abstractUrl": "/proceedings-article/iiai-aai/2016/8985b180/12OmNyKa66B",
"parentPublication": {
"id": "proceedings/iiai-aai/2016/8985/0",
"title": "2016 5th IIAI International Congress on Advanced Applied Informatics (IIAI-AAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmcs/1999/0253/1/02539195",
"title": "Haptics in Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/icmcs/1999/02539195/12OmNyQ7G3s",
"parentPublication": {
"id": "proceedings/icmcs/1999/0253/1",
"title": "Multimedia Computing and Systems, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2015/1727/0/07223429",
"title": "Augmented reality maintenance demonstrator and associated modelling",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2015/07223429/12OmNylKAXJ",
"parentPublication": {
"id": "proceedings/vr/2015/1727/0",
"title": "2015 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/06/07435333",
"title": "Towards Pervasive Augmented Reality: Context-Awareness in Augmented Reality",
"doi": null,
"abstractUrl": "/journal/tg/2017/06/07435333/13rRUwfZBVq",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/laclo/2021/2358/0/235800a397",
"title": "Augmented Reality Application “HardwareAR” to improve the learning of internal components of a computer",
"doi": null,
"abstractUrl": "/proceedings-article/laclo/2021/235800a397/1BzW96qPGs8",
"parentPublication": {
"id": "proceedings/laclo/2021/2358/0",
"title": "2021 XVI Latin American Conference on Learning Technologies (LACLO)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNvsm6yS",
"title": "2015 IEEE International Symposium on Mixed and Augmented Reality Workshops (ISMARW)",
"acronym": "ismarw",
"groupId": "1810084",
"volume": "0",
"displayVolume": "0",
"year": "2015",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNyyO8PQ",
"doi": "10.1109/ISMARW.2015.23",
"title": "Efficient Texture-less Object Detection for Augmented Reality Guidance",
"normalizedTitle": "Efficient Texture-less Object Detection for Augmented Reality Guidance",
"abstract": "Real-time scalable detection of texture-less objects in 2D images is a highly relevant task for augmented reality applications such as assembly guidance. The paper presents a purely edge-based method based on the approach of Damen et al. (2012) [5]. The proposed method exploits the recent structured edge detector by Dollár and Zitnick (2013) [8], which uses supervised examples for improved object outline detection. It was experimentally shown to yield consistently better results than the standard Canny edge detector. The work has identified two other areas of improvement over the original method; proposing a Hough-based tracing, bringing a speed-up of more than 5 times, and a search for edgelets in stripes instead of wedges, achieving improved performance especially at lower rates of false positives per image. Experimental evaluation proves the proposed method to be faster and more robust. The method is also demonstrated to be suitable to support an augmented reality application for assembly guidance.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Real-time scalable detection of texture-less objects in 2D images is a highly relevant task for augmented reality applications such as assembly guidance. The paper presents a purely edge-based method based on the approach of Damen et al. (2012) [5]. The proposed method exploits the recent structured edge detector by Dollár and Zitnick (2013) [8], which uses supervised examples for improved object outline detection. It was experimentally shown to yield consistently better results than the standard Canny edge detector. The work has identified two other areas of improvement over the original method; proposing a Hough-based tracing, bringing a speed-up of more than 5 times, and a search for edgelets in stripes instead of wedges, achieving improved performance especially at lower rates of false positives per image. Experimental evaluation proves the proposed method to be faster and more robust. The method is also demonstrated to be suitable to support an augmented reality application for assembly guidance.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Real-time scalable detection of texture-less objects in 2D images is a highly relevant task for augmented reality applications such as assembly guidance. The paper presents a purely edge-based method based on the approach of Damen et al. (2012) [5]. The proposed method exploits the recent structured edge detector by Dollár and Zitnick (2013) [8], which uses supervised examples for improved object outline detection. It was experimentally shown to yield consistently better results than the standard Canny edge detector. The work has identified two other areas of improvement over the original method; proposing a Hough-based tracing, bringing a speed-up of more than 5 times, and a search for edgelets in stripes instead of wedges, achieving improved performance especially at lower rates of false positives per image. Experimental evaluation proves the proposed method to be faster and more robust. The method is also demonstrated to be suitable to support an augmented reality application for assembly guidance.",
"fno": "8471a081",
"keywords": [
"Image Edge Detection",
"Detectors",
"Constellation Diagram",
"Shape",
"Augmented Reality",
"Complexity Theory",
"Object Detection"
],
"authors": [
{
"affiliation": null,
"fullName": "Tomas Hodan",
"givenName": "Tomas",
"surname": "Hodan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Dima Damen",
"givenName": "Dima",
"surname": "Damen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Walterio Mayol-Cuevas",
"givenName": "Walterio",
"surname": "Mayol-Cuevas",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Jiri Matas",
"givenName": "Jiri",
"surname": "Matas",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismarw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2015-09-01T00:00:00",
"pubType": "proceedings",
"pages": "81-86",
"year": "2015",
"issn": null,
"isbn": "978-1-4673-8471-1",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "8471a075",
"articleId": "12OmNxRF73U",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "8471a087",
"articleId": "12OmNwtEECd",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iv/1999/0210/0/02100032",
"title": "Virtual Reality and Augmented Reality as a Training Tool for Assembly Tasks",
"doi": null,
"abstractUrl": "/proceedings-article/iv/1999/02100032/12OmNAObbyR",
"parentPublication": {
"id": "proceedings/iv/1999/0210/0",
"title": "1999 IEEE International Conference on Information Visualization (Cat. No. PR00210)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccms/2010/3941/1/3941a133",
"title": "Key Technique of Assembly System in an Augmented Reality Environment",
"doi": null,
"abstractUrl": "/proceedings-article/iccms/2010/3941a133/12OmNqC2uYI",
"parentPublication": {
"id": "proceedings/iccms/2010/3941/1",
"title": "Computer Modeling and Simulation, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ibica/2011/4606/0/4606a316",
"title": "Design and Implement Augmented Reality for Supporting Driving Visual Guidance",
"doi": null,
"abstractUrl": "/proceedings-article/ibica/2011/4606a316/12OmNzahbSj",
"parentPublication": {
"id": "proceedings/ibica/2011/4606/0",
"title": "Innovations in Bio-inspired Computing and Applications, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/resacs/2018/8410/0/841000a044",
"title": "Towards Context-Aware Process Guidance in Cyber-Physical Systems with Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/resacs/2018/841000a044/17D45XzbnJN",
"parentPublication": {
"id": "proceedings/resacs/2018/8410/0",
"title": "2018 4th International Workshop on Requirements Engineering for Self-Adaptive, Collaborative, and Cyber Physical Systems (RESACS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/11/09873961",
"title": "Arigatō: Effects of Adaptive Guidance on Engagement and Performance in Augmented Reality Learning Environments",
"doi": null,
"abstractUrl": "/journal/tg/2022/11/09873961/1GjwJIK4xB6",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2022/5325/0/532500a441",
"title": "Label Guidance based Object Locating in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2022/532500a441/1JrRbIVIzPG",
"parentPublication": {
"id": "proceedings/ismar/2022/5325/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvrv/2017/2636/0/263600a433",
"title": "Mixed Reality Application: A Framework of Markerless Assembly Guidance System with Hololens Glass",
"doi": null,
"abstractUrl": "/proceedings-article/icvrv/2017/263600a433/1ap5zkHZ9OU",
"parentPublication": {
"id": "proceedings/icvrv/2017/2636/0",
"title": "2017 International Conference on Virtual Reality and Visualization (ICVRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2019/4765/0/476500a004",
"title": "A Scalable and Long-Term Wearable Augmented Reality System for Order Picking",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2019/476500a004/1gysmqM7SJW",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2019/4765/0",
"title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/12/09199570",
"title": "Comparing Non-Visual and Visual Guidance Methods for Narrow Field of View Augmented Reality Displays",
"doi": null,
"abstractUrl": "/journal/tg/2020/12/09199570/1ncgoC1SEMw",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/svr/2020/9231/0/923100a189",
"title": "Manual PCB assembly using Augmented Reality towards Total Quality",
"doi": null,
"abstractUrl": "/proceedings-article/svr/2020/923100a189/1oZBzP5SgGk",
"parentPublication": {
"id": "proceedings/svr/2020/9231/0",
"title": "2020 22nd Symposium on Virtual and Augmented Reality (SVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNzTH0Hp",
"title": "2015 2nd International Conference on Information Science and Security (ICISS)",
"acronym": "iciss",
"groupId": "1001575",
"volume": "0",
"displayVolume": "0",
"year": "2015",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNzIUg42",
"doi": "10.1109/ICISSEC.2015.7371036",
"title": "The Effects of Learning Style on Mobile Augmented-Reality-Facilitated English Vocabulary Learning",
"normalizedTitle": "The Effects of Learning Style on Mobile Augmented-Reality-Facilitated English Vocabulary Learning",
"abstract": "The affordance of mobile-based learning, including supporting a more personalized, authentic, situated learning are obvious. Research also revealed that mobile learning had positive effects on Second Language (L2) learning. However, individual differences on learning styles and prior knowledge could significantly affect the learning outcomes. This study aimed to investigate the effects of learning styles (field independence/dependence, FI/FD) and prior English proficiency (high/low) in a mobile augmented reality (AR) facilitated English vocabulary learning. Target subjects were elementary school children learning English as L2 and learning objective was set to memorize/understanding a set of ten English vocabulary. An experiment was done with self-developed AR-facilitated instruction. The results indicated that FD learners benefitted significantly better from the mobile AR instruction on learning outcome; there was a marginal significant difference between high and low English proficiency learners on learning outcome; and neither learning styles nor prior English proficiency affected learning motivation. These findings indicated that individual differences should be considered while mobile AR L2 vocabulary instruction was applied.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The affordance of mobile-based learning, including supporting a more personalized, authentic, situated learning are obvious. Research also revealed that mobile learning had positive effects on Second Language (L2) learning. However, individual differences on learning styles and prior knowledge could significantly affect the learning outcomes. This study aimed to investigate the effects of learning styles (field independence/dependence, FI/FD) and prior English proficiency (high/low) in a mobile augmented reality (AR) facilitated English vocabulary learning. Target subjects were elementary school children learning English as L2 and learning objective was set to memorize/understanding a set of ten English vocabulary. An experiment was done with self-developed AR-facilitated instruction. The results indicated that FD learners benefitted significantly better from the mobile AR instruction on learning outcome; there was a marginal significant difference between high and low English proficiency learners on learning outcome; and neither learning styles nor prior English proficiency affected learning motivation. These findings indicated that individual differences should be considered while mobile AR L2 vocabulary instruction was applied.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The affordance of mobile-based learning, including supporting a more personalized, authentic, situated learning are obvious. Research also revealed that mobile learning had positive effects on Second Language (L2) learning. However, individual differences on learning styles and prior knowledge could significantly affect the learning outcomes. This study aimed to investigate the effects of learning styles (field independence/dependence, FI/FD) and prior English proficiency (high/low) in a mobile augmented reality (AR) facilitated English vocabulary learning. Target subjects were elementary school children learning English as L2 and learning objective was set to memorize/understanding a set of ten English vocabulary. An experiment was done with self-developed AR-facilitated instruction. The results indicated that FD learners benefitted significantly better from the mobile AR instruction on learning outcome; there was a marginal significant difference between high and low English proficiency learners on learning outcome; and neither learning styles nor prior English proficiency affected learning motivation. These findings indicated that individual differences should be considered while mobile AR L2 vocabulary instruction was applied.",
"fno": "07371036",
"keywords": [
"Vocabulary",
"Mobile Communication",
"Mobile Handsets",
"Augmented Reality",
"Learning Systems",
"Cities And Towns",
"Context"
],
"authors": [
{
"affiliation": null,
"fullName": "Cheng-Ping Chen",
"givenName": "Cheng-Ping",
"surname": "Chen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Chang-Hwa Wang",
"givenName": "Chang-Hwa",
"surname": "Wang",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iciss",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2015-12-01T00:00:00",
"pubType": "proceedings",
"pages": "1-4",
"year": "2015",
"issn": null,
"isbn": "978-1-4673-8611-1",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07371035",
"articleId": "12OmNxecRU1",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07371037",
"articleId": "12OmNBNM99w",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iset/2017/3031/0/08005387",
"title": "Using Augmented Reality to Teach Kindergarten Students English Vocabulary",
"doi": null,
"abstractUrl": "/proceedings-article/iset/2017/08005387/12OmNCdBDT9",
"parentPublication": {
"id": "proceedings/iset/2017/3031/0",
"title": "2017 International Symposium on Educational Technology (ISET)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iiai-aai/2015/9957/0/07373933",
"title": "Investigating the Effects of Mobile Learning with Cross-Age Peer Tutoring in English Learning",
"doi": null,
"abstractUrl": "/proceedings-article/iiai-aai/2015/07373933/12OmNqJHFEy",
"parentPublication": {
"id": "proceedings/iiai-aai/2015/9957/0",
"title": "2015 IIAI 4th International Congress on Advanced Applied Informatics (IIAI-AAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wmute/2012/4662/0/4662a232",
"title": "Using Personalized VLS on Mobile English Vocabulary Learning",
"doi": null,
"abstractUrl": "/proceedings-article/wmute/2012/4662a232/12OmNwnYG4m",
"parentPublication": {
"id": "proceedings/wmute/2012/4662/0",
"title": "IEEE International Conference on Wireless, Mobile, and Ubiquitous Technology in Education",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icalt/2014/4038/0/4038a431",
"title": "Mobile-Based AR Application Helps to Promote EFL Children's Vocabulary Study",
"doi": null,
"abstractUrl": "/proceedings-article/icalt/2014/4038a431/12OmNxX3uPg",
"parentPublication": {
"id": "proceedings/icalt/2014/4038/0",
"title": "2014 IEEE 14th International Conference on Advanced Learning Technologies (ICALT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/11/08457524",
"title": "ARbis Pictus: A Study of Vocabulary Learning with Augmented Reality",
"doi": null,
"abstractUrl": "/journal/tg/2018/11/08457524/14M3E0wp25a",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icise-ie/2021/3829/0/382900b092",
"title": "Application of Mobile Learning Software in College English Vocabulary Teaching",
"doi": null,
"abstractUrl": "/proceedings-article/icise-ie/2021/382900b092/1C8FSCi7omA",
"parentPublication": {
"id": "proceedings/icise-ie/2021/3829/0",
"title": "2021 2nd International Conference on Information Science and Education (ICISE-IE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icalt/2022/9519/0/951900a256",
"title": "Using Thematic English Learning and Augmented Reality to Enhance Vocabulary Learning Motivation and Enjoyment of Elementary School Students",
"doi": null,
"abstractUrl": "/proceedings-article/icalt/2022/951900a256/1FUUfSsF2es",
"parentPublication": {
"id": "proceedings/icalt/2022/9519/0",
"title": "2022 International Conference on Advanced Learning Technologies (ICALT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/11/09872027",
"title": "VocabulARy: Learning Vocabulary in AR Supported by Keyword Visualisations",
"doi": null,
"abstractUrl": "/journal/tg/2022/11/09872027/1GhRUPatDmU",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icalt/2020/6090/0/09155848",
"title": "Impact of Gender on Motivation, Engagement and Interaction Behavior in Mobile assisted learning of English",
"doi": null,
"abstractUrl": "/proceedings-article/icalt/2020/09155848/1m1j37LJymY",
"parentPublication": {
"id": "proceedings/icalt/2020/6090/0",
"title": "2020 IEEE 20th International Conference on Advanced Learning Technologies (ICALT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icalt/2020/6090/0/09155873",
"title": "Application of Educational Robots in the Elderly English Vocabulary Learning",
"doi": null,
"abstractUrl": "/proceedings-article/icalt/2020/09155873/1m1j43t88G4",
"parentPublication": {
"id": "proceedings/icalt/2020/6090/0",
"title": "2020 IEEE 20th International Conference on Advanced Learning Technologies (ICALT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNyxXlsu",
"title": "Innovations in Bio-inspired Computing and Applications, International Conference on",
"acronym": "ibica",
"groupId": "1800621",
"volume": "0",
"displayVolume": "0",
"year": "2011",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNzahbSj",
"doi": "10.1109/IBICA.2011.84",
"title": "Design and Implement Augmented Reality for Supporting Driving Visual Guidance",
"normalizedTitle": "Design and Implement Augmented Reality for Supporting Driving Visual Guidance",
"abstract": "It is an important issue that let drivers obtain driving information easily. There are many advanced electronic devices used for driving safety assistance. During driving a car, the driver receives the information passed by these systems. But with more functionality, more and more information will be generated and make the driving environment very complicated that makes it very difficult for the drivers to digest and response to all of the information. This highlights the importance of integrating all of the driving information. Augmented Reality (AR) with Head-Up Displays (HUDs) has recently attracted the attention in the field of automotive research. In this work, we design and implement AR for supporting driving visual guidance. We integrate driving information from Controller Area Network (CAN), Global Positioning System (GPS), navigation system, and other related information and use HUD technology to project it to the windscreen. This technique improves driver's situation awareness by dynamically combining more information imaging, called Point of Interest (POI), with global maps. The driver can easily see his/her driving guidance without having to turn his/her head off the driving direction using our augmented reality guidance, drivers can drive more easily.",
"abstracts": [
{
"abstractType": "Regular",
"content": "It is an important issue that let drivers obtain driving information easily. There are many advanced electronic devices used for driving safety assistance. During driving a car, the driver receives the information passed by these systems. But with more functionality, more and more information will be generated and make the driving environment very complicated that makes it very difficult for the drivers to digest and response to all of the information. This highlights the importance of integrating all of the driving information. Augmented Reality (AR) with Head-Up Displays (HUDs) has recently attracted the attention in the field of automotive research. In this work, we design and implement AR for supporting driving visual guidance. We integrate driving information from Controller Area Network (CAN), Global Positioning System (GPS), navigation system, and other related information and use HUD technology to project it to the windscreen. This technique improves driver's situation awareness by dynamically combining more information imaging, called Point of Interest (POI), with global maps. The driver can easily see his/her driving guidance without having to turn his/her head off the driving direction using our augmented reality guidance, drivers can drive more easily.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "It is an important issue that let drivers obtain driving information easily. There are many advanced electronic devices used for driving safety assistance. During driving a car, the driver receives the information passed by these systems. But with more functionality, more and more information will be generated and make the driving environment very complicated that makes it very difficult for the drivers to digest and response to all of the information. This highlights the importance of integrating all of the driving information. Augmented Reality (AR) with Head-Up Displays (HUDs) has recently attracted the attention in the field of automotive research. In this work, we design and implement AR for supporting driving visual guidance. We integrate driving information from Controller Area Network (CAN), Global Positioning System (GPS), navigation system, and other related information and use HUD technology to project it to the windscreen. This technique improves driver's situation awareness by dynamically combining more information imaging, called Point of Interest (POI), with global maps. The driver can easily see his/her driving guidance without having to turn his/her head off the driving direction using our augmented reality guidance, drivers can drive more easily.",
"fno": "4606a316",
"keywords": [
"Augmented Reality",
"Head Up Display",
"Controller Area Network",
"Global Positioning System",
"Point Of Interest"
],
"authors": [
{
"affiliation": null,
"fullName": "Jyh-Horng Lin",
"givenName": "Jyh-Horng",
"surname": "Lin",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Cheng-Min Lin",
"givenName": "Cheng-Min",
"surname": "Lin",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Chyi-Ren Dow",
"givenName": "Chyi-Ren",
"surname": "Dow",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Cheng-Qian Wang",
"givenName": "Cheng-Qian",
"surname": "Wang",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ibica",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2011-12-01T00:00:00",
"pubType": "proceedings",
"pages": "316-319",
"year": "2011",
"issn": null,
"isbn": "978-0-7695-4606-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "4606a313",
"articleId": "12OmNzX6coa",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "4606a320",
"articleId": "12OmNBQ2W0O",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icitcs/2016/3765/0/07740328",
"title": "Effects of the Displaying Augmented-Reality Information on the Driving Behavior of the Drivers with Specific Psychological Characteristics",
"doi": null,
"abstractUrl": "/proceedings-article/icitcs/2016/07740328/12OmNCmGNYy",
"parentPublication": {
"id": "proceedings/icitcs/2016/3765/0",
"title": "2016 6th International Conference on IT Convergence and Security (ICITCS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icicta/2012/4637/0/4637a589",
"title": "Traffic Route Dynamic Guidance Based on Coupling of Time Recursive and Artificial Neuron Network",
"doi": null,
"abstractUrl": "/proceedings-article/icicta/2012/4637a589/12OmNrAv3Mn",
"parentPublication": {
"id": "proceedings/icicta/2012/4637/0",
"title": "Intelligent Computation Technology and Automation, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icis/2015/8679/0/07166640",
"title": "Analysis of driving behaviors based on GMM by using driving simulator with navigation plugin",
"doi": null,
"abstractUrl": "/proceedings-article/icis/2015/07166640/12OmNwp74r9",
"parentPublication": {
"id": "proceedings/icis/2015/8679/0",
"title": "2015 IEEE/ACIS 14th International Conference on Computer and Information Science (ICIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icoip/2010/4252/2/4252b680",
"title": "Traffic Route Dynamic Guidance Based on Coupling of Time Recursive and Artificial Neuron Network",
"doi": null,
"abstractUrl": "/proceedings-article/icoip/2010/4252b680/12OmNxGSm12",
"parentPublication": {
"id": null,
"title": null,
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/greencom-ithingscpscom/2013/5046/0/06682280",
"title": "Real-Time Vehicle Route Guidance Based on Connected Vehicles",
"doi": null,
"abstractUrl": "/proceedings-article/greencom-ithingscpscom/2013/06682280/12OmNzb7Zgv",
"parentPublication": {
"id": "proceedings/greencom-ithingscpscom/2013/5046/0",
"title": "2013 IEEE International Conference on Green Computing and Communications (GreenCom) and IEEE Internet of Things(iThings) and IEEE Cyber, Physical and Social Computing(CPSCom)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/11/08466859",
"title": "Augmented Reality Interface Design Approaches for Goal-directed and Stimulus-driven Driving Tasks",
"doi": null,
"abstractUrl": "/journal/tg/2018/11/08466859/14M3E5b55mM",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iiai-aai/2021/2420/0/242000a333",
"title": "Reducing Traffic Accidents Using a V2V System that Disseminates Dangerous-Driving Information",
"doi": null,
"abstractUrl": "/proceedings-article/iiai-aai/2021/242000a333/1Eb2BKIdA2s",
"parentPublication": {
"id": "proceedings/iiai-aai/2021/2420/0",
"title": "2021 10th International Congress on Advanced Applied Informatics (IIAI-AAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aivr/2022/5725/0/572500a144",
"title": "Comparative experiment of attention prompting methods using VR driving simulator",
"doi": null,
"abstractUrl": "/proceedings-article/aivr/2022/572500a144/1KmFetCHntS",
"parentPublication": {
"id": "proceedings/aivr/2022/5725/0",
"title": "2022 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090511",
"title": "Evaluating Automotive Augmented Reality Head-up Display Effects on Driver Performance and Distraction",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090511/1jIxviTG03C",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icalt/2021/4106/0/410600a376",
"title": "An Analysis of Augmented Reality Aided Vehicle Operation",
"doi": null,
"abstractUrl": "/proceedings-article/icalt/2021/410600a376/1vK065QPpkY",
"parentPublication": {
"id": "proceedings/icalt/2021/4106/0",
"title": "2021 International Conference on Advanced Learning Technologies (ICALT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "17D45VtKiqB",
"title": "2018 4th International Workshop on Requirements Engineering for Self-Adaptive, Collaborative, and Cyber Physical Systems (RESACS)",
"acronym": "resacs",
"groupId": "1828045",
"volume": "0",
"displayVolume": "0",
"year": "2018",
"__typename": "ProceedingType"
},
"article": {
"id": "17D45XzbnJN",
"doi": "10.1109/RESACS.2018.00013",
"title": "Towards Context-Aware Process Guidance in Cyber-Physical Systems with Augmented Reality",
"normalizedTitle": "Towards Context-Aware Process Guidance in Cyber-Physical Systems with Augmented Reality",
"abstract": "Assembly, configuration, maintenance, and repair processes in cyber-physical systems (e.g., a press line in a plant) comprise a multitude of complex tasks, whose execution needs to be controlled, coordinated and monitored. Amongst others, a process-centric guidance of users (e.g. service operators) is required, taking the high variability in the assembly of cyber-physical systems (e.g. press line variability) into account. Moreover, the tasks to be performed along these processes may be related to physical components, sensors and actuators, which need to be properly recognized, integrated and operated. In order to digitize cyber-physical processes as well as to guide users in a process-centric way, therefore, we suggest integrating process management technology, sensor/actuator interfaces, and augmented reality techniques. The paper discusses fundamental requirements for such an integration and presents an approach for process-centric user guidance that combines context and process management with augmented reality enhanced tasks. For evaluation purposes, we analyzed the cyber-physical processes of pharmaceutical packaging machines and implemented selected ones based on the approach. Overall, we are able to demonstrate the usefulness of context-aware process management for the flexible support of cyber-physical processes in the Industrial Internet of Things.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Assembly, configuration, maintenance, and repair processes in cyber-physical systems (e.g., a press line in a plant) comprise a multitude of complex tasks, whose execution needs to be controlled, coordinated and monitored. Amongst others, a process-centric guidance of users (e.g. service operators) is required, taking the high variability in the assembly of cyber-physical systems (e.g. press line variability) into account. Moreover, the tasks to be performed along these processes may be related to physical components, sensors and actuators, which need to be properly recognized, integrated and operated. In order to digitize cyber-physical processes as well as to guide users in a process-centric way, therefore, we suggest integrating process management technology, sensor/actuator interfaces, and augmented reality techniques. The paper discusses fundamental requirements for such an integration and presents an approach for process-centric user guidance that combines context and process management with augmented reality enhanced tasks. For evaluation purposes, we analyzed the cyber-physical processes of pharmaceutical packaging machines and implemented selected ones based on the approach. Overall, we are able to demonstrate the usefulness of context-aware process management for the flexible support of cyber-physical processes in the Industrial Internet of Things.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Assembly, configuration, maintenance, and repair processes in cyber-physical systems (e.g., a press line in a plant) comprise a multitude of complex tasks, whose execution needs to be controlled, coordinated and monitored. Amongst others, a process-centric guidance of users (e.g. service operators) is required, taking the high variability in the assembly of cyber-physical systems (e.g. press line variability) into account. Moreover, the tasks to be performed along these processes may be related to physical components, sensors and actuators, which need to be properly recognized, integrated and operated. In order to digitize cyber-physical processes as well as to guide users in a process-centric way, therefore, we suggest integrating process management technology, sensor/actuator interfaces, and augmented reality techniques. The paper discusses fundamental requirements for such an integration and presents an approach for process-centric user guidance that combines context and process management with augmented reality enhanced tasks. For evaluation purposes, we analyzed the cyber-physical processes of pharmaceutical packaging machines and implemented selected ones based on the approach. Overall, we are able to demonstrate the usefulness of context-aware process management for the flexible support of cyber-physical processes in the Industrial Internet of Things.",
"fno": "841000a044",
"keywords": [
"Assembling",
"Augmented Reality",
"Internet Of Things",
"Maintenance Engineering",
"Production Engineering Computing",
"Ubiquitous Computing",
"Cyber Physical Systems",
"Repair Processes",
"Process Centric User Guidance",
"Context Aware Process Management",
"Augmented Reality",
"Context Aware Process Guidance",
"Maintenance",
"Assembly",
"Sensor Actuator Interfaces",
"Pharmaceutical Packaging Machines",
"Industrial Internet Of Things",
"Maintenance Engineering",
"Task Analysis",
"Presses",
"Sensors",
"Augmented Reality",
"Actuators",
"Cyber Physical System",
"Context Aware Process Management",
"Augmented Reality Enhanced Process",
"Cyber Physical Process"
],
"authors": [
{
"affiliation": "Inst. of Databases & Inf. Syst., Ulm Univ., Ulm, Germany",
"fullName": "Klaus Kammerer",
"givenName": "Klaus",
"surname": "Kammerer",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Inst. of Databases & Inf. Syst., Ulm Univ., Ulm, Germany",
"fullName": "Rüdiger Pryss",
"givenName": "Rüdiger",
"surname": "Pryss",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Uhlmann Pac-Syst. GmbH & Co. KG, Laupheim, Germany",
"fullName": "Kevin Sommer",
"givenName": "Kevin",
"surname": "Sommer",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Inst. of Databases & Inf. Syst., Ulm Univ., Ulm, Germany",
"fullName": "Manfred Reichert",
"givenName": "Manfred",
"surname": "Reichert",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "resacs",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2018-08-01T00:00:00",
"pubType": "proceedings",
"pages": "44-51",
"year": "2018",
"issn": null,
"isbn": "978-1-5386-8410-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "841000a036",
"articleId": "17D45WnnFY4",
"__typename": "AdjacentArticleType"
},
"next": null,
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/greencom-ithingscpscom/2013/5046/0/06682157",
"title": "Formal Specification of Cyber Physical Systems: Three Case Studies Based on Clock Theory",
"doi": null,
"abstractUrl": "/proceedings-article/greencom-ithingscpscom/2013/06682157/12OmNwx3Qdr",
"parentPublication": {
"id": "proceedings/greencom-ithingscpscom/2013/5046/0",
"title": "2013 IEEE International Conference on Green Computing and Communications (GreenCom) and IEEE Internet of Things(iThings) and IEEE Cyber, Physical and Social Computing(CPSCom)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdcs/2017/1792/0/1792c589",
"title": "Tracking Information Flow in Cyber-Physical Systems",
"doi": null,
"abstractUrl": "/proceedings-article/icdcs/2017/1792c589/12OmNxw5B27",
"parentPublication": {
"id": "proceedings/icdcs/2017/1792/0",
"title": "2017 IEEE 37th International Conference on Distributed Computing Systems (ICDCS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hpcc/2016/4297/0/07828535",
"title": "A Marker-Based Cyber-Physical Augmented-Reality Indoor Guidance System for Smart Campuses",
"doi": null,
"abstractUrl": "/proceedings-article/hpcc/2016/07828535/12OmNy2Jt1s",
"parentPublication": {
"id": "proceedings/hpcc/2016/4297/0",
"title": "2016 IEEE 18th International Conference on High-Performance Computing and Communications, IEEE 14th International Conference on Smart City, and IEEE 2nd International Conference on Data Science and Systems (HPCC/SmartCity/DSS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/csf/2017/3217/0/3217a438",
"title": "A Formal Approach to Cyber-Physical Attacks",
"doi": null,
"abstractUrl": "/proceedings-article/csf/2017/3217a438/12OmNy6ZrXz",
"parentPublication": {
"id": "proceedings/csf/2017/3217/0",
"title": "2017 IEEE 30th Computer Security Foundations Symposium (CSF)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cyswater/2018/6744/0/674401a016",
"title": "Reducing Vulnerability to Cyber-Physical Attacks in Water Distribution Networks",
"doi": null,
"abstractUrl": "/proceedings-article/cyswater/2018/674401a016/12OmNzmclpG",
"parentPublication": {
"id": "proceedings/cyswater/2018/6744/0",
"title": "2018 International Workshop on Cyber-physical Systems for Smart Water Networks (CySWater)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tm/2017/03/07469860",
"title": "Ubii: Physical World Interaction Through Augmented Reality",
"doi": null,
"abstractUrl": "/journal/tm/2017/03/07469860/13rRUxASuca",
"parentPublication": {
"id": "trans/tm",
"title": "IEEE Transactions on Mobile Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/co/2014/02/mco2014020070",
"title": "Challenges in Engineering Cyber-Physical Systems",
"doi": null,
"abstractUrl": "/magazine/co/2014/02/mco2014020070/13rRUyuegkj",
"parentPublication": {
"id": "mags/co",
"title": "Computer",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/percomw/2011/938/0/05766958",
"title": "Spatial augmented reality support for design of complex physical environments",
"doi": null,
"abstractUrl": "/proceedings-article/percomw/2011/05766958/17D45WrVg9j",
"parentPublication": {
"id": "proceedings/percomw/2011/938/0",
"title": "2011 IEEE International Conference on Pervasive Computing and Communications Workshops (PerCom Workshops 2011). PerCom-Workshops 2011: 2011 IEEE International Conference on Pervasive Computing and Communications Workshops (PERCOM Workshops 2011)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2020/7675/0/767500a129",
"title": "A Sense of Quality for Augmented Reality Assisted Process Guidance",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2020/767500a129/1pBMg8t6jkY",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2020/7675/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/nca/2020/8326/0/09306741",
"title": "Cyber-Resilience Evaluation of Cyber-Physical Systems",
"doi": null,
"abstractUrl": "/proceedings-article/nca/2020/09306741/1q8YmU7cJvW",
"parentPublication": {
"id": "proceedings/nca/2020/8326/0",
"title": "2020 IEEE 19th International Symposium on Network Computing and Applications (NCA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1m1j0uKS6ZO",
"title": "2020 IEEE 20th International Conference on Advanced Learning Technologies (ICALT)",
"acronym": "icalt",
"groupId": "1000009",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1m1j7NOETSg",
"doi": "10.1109/ICALT49669.2020.00104",
"title": "Effects of Augmented Reality Assisted Learning Materials on Students’ Learning Outcomes",
"normalizedTitle": "Effects of Augmented Reality Assisted Learning Materials on Students’ Learning Outcomes",
"abstract": "This study explored the effects of augmented reality (AR) on students' learning outcomes. The participants of this study were 66 seventh-grade senior high school students from two classes in northern Taiwan. A quasi-experimental design with a control group (problem-based learning; PBL) and an experimental group (PBL with AR) was utilized. The results showed that the students who learned with the PBL and AR-assisted learning materials performed better than those who used the PBL approach in terms of answering questions that required their cognitive abilities of knowledge and comprehension, which are identified as lower cognitive ability.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This study explored the effects of augmented reality (AR) on students' learning outcomes. The participants of this study were 66 seventh-grade senior high school students from two classes in northern Taiwan. A quasi-experimental design with a control group (problem-based learning; PBL) and an experimental group (PBL with AR) was utilized. The results showed that the students who learned with the PBL and AR-assisted learning materials performed better than those who used the PBL approach in terms of answering questions that required their cognitive abilities of knowledge and comprehension, which are identified as lower cognitive ability.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This study explored the effects of augmented reality (AR) on students' learning outcomes. The participants of this study were 66 seventh-grade senior high school students from two classes in northern Taiwan. A quasi-experimental design with a control group (problem-based learning; PBL) and an experimental group (PBL with AR) was utilized. The results showed that the students who learned with the PBL and AR-assisted learning materials performed better than those who used the PBL approach in terms of answering questions that required their cognitive abilities of knowledge and comprehension, which are identified as lower cognitive ability.",
"fno": "09155919",
"keywords": [
"Augmented Reality",
"Cognition",
"Computer Aided Instruction",
"Teaching",
"Augmented Reality Assisted Learning Materials",
"Seventh Grade Senior High School Students",
"Quasiexperimental Design",
"AR Assisted Learning Materials",
"PBL Approach",
"Problem Based Learning",
"Cognitive Ability",
"Augmented Reality",
"Education",
"Taxonomy",
"Computers",
"Magnetic Fields",
"Conferences",
"Communications Technology",
"Problem Based Learning",
"Augmented Reality",
"Bloom X 2019 S Taxonomy"
],
"authors": [
{
"affiliation": "National Taipei University of Technology,Teacher Education Center,Taipei,Taiwan",
"fullName": "Pei-Shan Tsai",
"givenName": "Pei-Shan",
"surname": "Tsai",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Graduate Institute of Technological and Vocational Education, National Taipei University of Technology,Taipei,Taiwan",
"fullName": "Jie-Cun Chen",
"givenName": "Jie-Cun",
"surname": "Chen",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icalt",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-07-01T00:00:00",
"pubType": "proceedings",
"pages": "325-326",
"year": "2020",
"issn": null,
"isbn": "978-1-7281-6090-0",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "09155942",
"articleId": "1m1j30sq9DW",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09155829",
"articleId": "1m1j34dsPqU",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ismar/2016/3641/0/3641a107",
"title": "The Influence of using Augmented Reality on Textbook Support for Learners of Different Learning Styles",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2016/3641a107/12OmNBzAciw",
"parentPublication": {
"id": "proceedings/ismar/2016/3641/0",
"title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iiai-aai/2016/8985/0/8985b180",
"title": "Teachers' and Students' Perceptions toward Augmented Reality Materials",
"doi": null,
"abstractUrl": "/proceedings-article/iiai-aai/2016/8985b180/12OmNyKa66B",
"parentPublication": {
"id": "proceedings/iiai-aai/2016/8985/0",
"title": "2016 5th IIAI International Congress on Advanced Applied Informatics (IIAI-AAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/eitt/2017/0629/0/0629a081",
"title": "A PBL Teaching Model Based on Mobile Devices to Improve Primary School Students' Meta-Cognitive Awareness and Learning Achievement",
"doi": null,
"abstractUrl": "/proceedings-article/eitt/2017/0629a081/12OmNzlly0s",
"parentPublication": {
"id": "proceedings/eitt/2017/0629/0",
"title": "2017 International Conference of Educational Innovation through Technology (EITT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ictcs/2017/0527/0/0527a283",
"title": "Innovation in Education via Problem Based Learning from Complexity to Simplicity",
"doi": null,
"abstractUrl": "/proceedings-article/ictcs/2017/0527a283/12OmNzsJ7sK",
"parentPublication": {
"id": "proceedings/ictcs/2017/0527/0",
"title": "2017 International Conference on New Trends in Computing Sciences (ICTCS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fie/2018/1174/0/08658841",
"title": "Assessing the Development of Student Outcomes in Project-based Learning Engineering Design and Entrepreneurship Courses",
"doi": null,
"abstractUrl": "/proceedings-article/fie/2018/08658841/18j982KwtXy",
"parentPublication": {
"id": "proceedings/fie/2018/1174/0",
"title": "2018 IEEE Frontiers in Education Conference (FIE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09766081",
"title": "How Augmented Reality (AR) Can Help and Hinder Collaborative Learning: A Study of AR in Electromagnetism Education",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09766081/1D34HQ1zUNa",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icalt/2022/9519/0/951900a349",
"title": "The Effect of Role Assignment on Students’ Collaborative Inquiry-based Learning in Augmented Reality Environment",
"doi": null,
"abstractUrl": "/proceedings-article/icalt/2022/951900a349/1FUUe1UnEGc",
"parentPublication": {
"id": "proceedings/icalt/2022/9519/0",
"title": "2022 International Conference on Advanced Learning Technologies (ICALT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aivr/2022/5725/0/572500a260",
"title": "Fostering students’ engineering competence by adopting augmented reality: a proposed randomized controlled trial study",
"doi": null,
"abstractUrl": "/proceedings-article/aivr/2022/572500a260/1KmFf85Xpkc",
"parentPublication": {
"id": "proceedings/aivr/2022/5725/0",
"title": "2022 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icalt/2020/6090/0/09155934",
"title": "Seeing the unseen: user experience and technology acceptance in Augmented Reality science literacy",
"doi": null,
"abstractUrl": "/proceedings-article/icalt/2020/09155934/1m1j7K6aCfS",
"parentPublication": {
"id": "proceedings/icalt/2020/6090/0",
"title": "2020 IEEE 20th International Conference on Advanced Learning Technologies (ICALT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2020/7675/0/767500a070",
"title": "Intention to use an interactive AR app for engineering education",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2020/767500a070/1pBMfFVyWsM",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2020/7675/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNxV4itF",
"title": "2017 IEEE Virtual Reality (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2017",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNCcKQmq",
"doi": "10.1109/VR.2017.7892261",
"title": "The AR-Rift 2 prototype",
"normalizedTitle": "The AR-Rift 2 prototype",
"abstract": "Video see-through augmented reality (VSAR) is an effective way of combing real and virtual scenes for head-mounted human computer interfaces. In this paper we present the AR-Rift 2 system, a cost-effective prototype VSAR system based around the Oculus Rift CV1 head-mounted display (HMD). Current consumer camera systems however typically have latencies far higher than the rendering pipeline of current consumer HMDs. They also have lower update rate than the display. We thus measure the latency of the video and implement a simple image-warping method to ensure smooth movement of the video.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Video see-through augmented reality (VSAR) is an effective way of combing real and virtual scenes for head-mounted human computer interfaces. In this paper we present the AR-Rift 2 system, a cost-effective prototype VSAR system based around the Oculus Rift CV1 head-mounted display (HMD). Current consumer camera systems however typically have latencies far higher than the rendering pipeline of current consumer HMDs. They also have lower update rate than the display. We thus measure the latency of the video and implement a simple image-warping method to ensure smooth movement of the video.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Video see-through augmented reality (VSAR) is an effective way of combing real and virtual scenes for head-mounted human computer interfaces. In this paper we present the AR-Rift 2 system, a cost-effective prototype VSAR system based around the Oculus Rift CV1 head-mounted display (HMD). Current consumer camera systems however typically have latencies far higher than the rendering pipeline of current consumer HMDs. They also have lower update rate than the display. We thus measure the latency of the video and implement a simple image-warping method to ensure smooth movement of the video.",
"fno": "07892261",
"keywords": [
"Augmented Reality",
"Prototypes",
"Streaming Media",
"Resists",
"Lenses",
"Webcams",
"Augmented Reality",
"Latency",
"Image Based Rendering"
],
"authors": [
{
"affiliation": "Department of Computer Science, University College London, United Kingdom",
"fullName": "Anthony Steed",
"givenName": "Anthony",
"surname": "Steed",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Department of Computer Science, University College London, United Kingdom",
"fullName": "Yonathan Widya Adipradana",
"givenName": "Yonathan Widya",
"surname": "Adipradana",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Department of Computer Science, University College London, United Kingdom",
"fullName": "Sebastian Friston",
"givenName": "Sebastian",
"surname": "Friston",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2017-01-01T00:00:00",
"pubType": "proceedings",
"pages": "231-232",
"year": "2017",
"issn": "2375-5334",
"isbn": "978-1-5090-6647-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07892260",
"articleId": "12OmNvpNIkl",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07892262",
"articleId": "12OmNzlD98A",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ismarw/2016/3740/0/07836518",
"title": "AR Tabletop Interface using a Head-Mounted Projector",
"doi": null,
"abstractUrl": "/proceedings-article/ismarw/2016/07836518/12OmNyoiYW4",
"parentPublication": {
"id": "proceedings/ismarw/2016/3740/0",
"title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cts/2016/2300/0/07871013",
"title": "Immersive Telerobotics Using the Oculus Rift and the 5DT Ultra Data Glove",
"doi": null,
"abstractUrl": "/proceedings-article/cts/2016/07871013/12OmNzA6GIG",
"parentPublication": {
"id": "proceedings/cts/2016/2300/0",
"title": "2016 International Conference on Collaboration Technologies and Systems (CTS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2017/6647/0/07892302",
"title": "Estimating the motion-to-photon latency in head mounted displays",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892302/12OmNznkKb4",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tq/2021/02/08675340",
"title": "Immersive Virtual Reality Attacks and the Human Joystick",
"doi": null,
"abstractUrl": "/journal/tq/2021/02/08675340/18K0AX3AgRW",
"parentPublication": {
"id": "trans/tq",
"title": "IEEE Transactions on Dependable and Secure Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2018/7592/0/08699200",
"title": "Effective Free Field of View Scene Exploration in VR and AR",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699200/19F1SrRS4vK",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2018/7592/0",
"title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2018/7592/0/08699305",
"title": "A First-Person Mentee Second-Person Mentor AR Interface for Surgical Telementoring",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699305/19F1TZ6RppS",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2018/7592/0",
"title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2018/7592/0/08699226",
"title": "A Virtual Boarding System of an Autonomous Vehicle for Investigating the Effect of an AR Display on Passenger Comfort",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699226/19F1TgkuQaQ",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2018/7592/0",
"title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797966",
"title": "A Mixed Presence Collaborative Mixed Reality System",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797966/1cJ19fldjVu",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090580",
"title": "A Study on the Effects of Head Mounted Displays Movement and Image Movement on Virtual Reality Sickness",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090580/1jIxns5TwxG",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2020/8508/0/850800a289",
"title": "AR Interfaces for Mid-Air 6-DoF Alignment: Ergonomics-Aware Design and Evaluation",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2020/850800a289/1pysuoUYBhK",
"parentPublication": {
"id": "proceedings/ismar/2020/8508/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNzX6ceq",
"title": "2017 International Symposium on Educational Technology (ISET)",
"acronym": "iset",
"groupId": "1812564",
"volume": "0",
"displayVolume": "0",
"year": "2017",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNCdBDT9",
"doi": "10.1109/ISET.2017.20",
"title": "Using Augmented Reality to Teach Kindergarten Students English Vocabulary",
"normalizedTitle": "Using Augmented Reality to Teach Kindergarten Students English Vocabulary",
"abstract": "Augmented Reality (AR) is a technology that augments the real physical world with computer-generated 3D virtual objects such that the users can interact with them using the screen of their mobile devices. This paper studies how to effectively use AR to enhance the learning experience of kindergarten students, while addressing parents' concern that a long-time usage of electronic devices may affect their child's health. We developed an AR mobile application prototype to teach kindergarten students English vocabulary in an interactive and attractive way. It allows kindergarten students to learn English vocabulary in any place and at any time using a mobile device. To address the parents' concern on health, we integrate a monitoring system into the application, which allows the parents to monitor their child's usage and stop the application in real time online. Preliminary evaluation shows that the effectiveness of the application is satisfactory. It is beneficial to use augmented reality for early childhood education if the usage time of the students is well monitored.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Augmented Reality (AR) is a technology that augments the real physical world with computer-generated 3D virtual objects such that the users can interact with them using the screen of their mobile devices. This paper studies how to effectively use AR to enhance the learning experience of kindergarten students, while addressing parents' concern that a long-time usage of electronic devices may affect their child's health. We developed an AR mobile application prototype to teach kindergarten students English vocabulary in an interactive and attractive way. It allows kindergarten students to learn English vocabulary in any place and at any time using a mobile device. To address the parents' concern on health, we integrate a monitoring system into the application, which allows the parents to monitor their child's usage and stop the application in real time online. Preliminary evaluation shows that the effectiveness of the application is satisfactory. It is beneficial to use augmented reality for early childhood education if the usage time of the students is well monitored.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Augmented Reality (AR) is a technology that augments the real physical world with computer-generated 3D virtual objects such that the users can interact with them using the screen of their mobile devices. This paper studies how to effectively use AR to enhance the learning experience of kindergarten students, while addressing parents' concern that a long-time usage of electronic devices may affect their child's health. We developed an AR mobile application prototype to teach kindergarten students English vocabulary in an interactive and attractive way. It allows kindergarten students to learn English vocabulary in any place and at any time using a mobile device. To address the parents' concern on health, we integrate a monitoring system into the application, which allows the parents to monitor their child's usage and stop the application in real time online. Preliminary evaluation shows that the effectiveness of the application is satisfactory. It is beneficial to use augmented reality for early childhood education if the usage time of the students is well monitored.",
"fno": "08005387",
"keywords": [
"Augmented Reality",
"Computer Aided Instruction",
"Human Computer Interaction",
"Mobile Computing",
"Teaching",
"Vocabulary",
"Augmented Reality",
"Kindergarten Student",
"English Vocabulary Teaching",
"Computer Generated 3 D Virtual Objects",
"Mobile Devices",
"Learning Experience",
"Electronic Devices",
"AR Mobile Application Prototype",
"Monitoring System",
"Early Childhood Education",
"Vocabulary",
"Mobile Handsets",
"Prototypes",
"Pediatrics",
"Monitoring",
"Games",
"Education",
"Augmented Reality",
"English Learning",
"Early Childhood Education",
"Educational Games",
"Parental Perspectives"
],
"authors": [
{
"affiliation": null,
"fullName": "Lap-Kei Lee",
"givenName": "Lap-Kei",
"surname": "Lee",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Cheuk-Him Chau",
"givenName": "Cheuk-Him",
"surname": "Chau",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Chun-Hin Chau",
"givenName": "Chun-Hin",
"surname": "Chau",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Chun-Tim Ng",
"givenName": "Chun-Tim",
"surname": "Ng",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iset",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2017-06-01T00:00:00",
"pubType": "proceedings",
"pages": "53-57",
"year": "2017",
"issn": null,
"isbn": "978-1-5090-3031-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08005386",
"articleId": "12OmNCm7BJU",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08005388",
"articleId": "12OmNCeK28r",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icalt/2015/7334/0/7334a166",
"title": "Using NAO Humanoid Robot in Kindergarten: A Proposed System",
"doi": null,
"abstractUrl": "/proceedings-article/icalt/2015/7334a166/12OmNC8uRtM",
"parentPublication": {
"id": "proceedings/icalt/2015/7334/0",
"title": "2015 IEEE 15th International Conference on Advanced Learning Technologies (ICALT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icalt/2014/4038/0/4038a431",
"title": "Mobile-Based AR Application Helps to Promote EFL Children's Vocabulary Study",
"doi": null,
"abstractUrl": "/proceedings-article/icalt/2014/4038a431/12OmNxX3uPg",
"parentPublication": {
"id": "proceedings/icalt/2014/4038/0",
"title": "2014 IEEE 14th International Conference on Advanced Learning Technologies (ICALT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iciss/2015/8611/0/07371036",
"title": "The Effects of Learning Style on Mobile Augmented-Reality-Facilitated English Vocabulary Learning",
"doi": null,
"abstractUrl": "/proceedings-article/iciss/2015/07371036/12OmNzIUg42",
"parentPublication": {
"id": "proceedings/iciss/2015/8611/0",
"title": "2015 2nd International Conference on Information Science and Security (ICISS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/11/08457524",
"title": "ARbis Pictus: A Study of Vocabulary Learning with Augmented Reality",
"doi": null,
"abstractUrl": "/journal/tg/2018/11/08457524/14M3E0wp25a",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icalt/2022/9519/0/951900a256",
"title": "Using Thematic English Learning and Augmented Reality to Enhance Vocabulary Learning Motivation and Enjoyment of Elementary School Students",
"doi": null,
"abstractUrl": "/proceedings-article/icalt/2022/951900a256/1FUUfSsF2es",
"parentPublication": {
"id": "proceedings/icalt/2022/9519/0",
"title": "2022 International Conference on Advanced Learning Technologies (ICALT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/chase/2022/9476/0/947600a154",
"title": "Poster: Design of AI-Powered Augmented Reality Games for Autistic Children",
"doi": null,
"abstractUrl": "/proceedings-article/chase/2022/947600a154/1JjykxsvM2s",
"parentPublication": {
"id": "proceedings/chase/2022/9476/0",
"title": "2022 IEEE/ACM Conference on Connected Health: Applications, Systems and Engineering Technologies (CHASE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2019/4765/0/476500a431",
"title": "Augmenting Communication Between Hearing Parents and Deaf Children",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2019/476500a431/1gyslDmdEJy",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2019/4765/0",
"title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icbdie/2020/5900/0/09150212",
"title": "Survey on the Application of Social Networking Services in Home-Kindergarten Communication——An Example of the Main Urban Area of Chongqing",
"doi": null,
"abstractUrl": "/proceedings-article/icbdie/2020/09150212/1lPGNL0nHMc",
"parentPublication": {
"id": "proceedings/icbdie/2020/5900/0",
"title": "2020 International Conference on Big Data and Informatization Education (ICBDIE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icaie/2020/6659/0/665900a347",
"title": "Research on Problems and Countermeasures of English Education Activities in Kindergartens --A case of kindergarten A in Dazu District of Chongqing",
"doi": null,
"abstractUrl": "/proceedings-article/icaie/2020/665900a347/1oZBLANsUda",
"parentPublication": {
"id": "proceedings/icaie/2020/6659/0",
"title": "2020 International Conference on Artificial Intelligence and Education (ICAIE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mlbdbi/2020/9638/0/963800a313",
"title": "Kindergarten Big Data System Solution Architecture",
"doi": null,
"abstractUrl": "/proceedings-article/mlbdbi/2020/963800a313/1rxhyYLIeA0",
"parentPublication": {
"id": "proceedings/mlbdbi/2020/9638/0",
"title": "2020 2nd International Conference on Machine Learning, Big Data and Business Intelligence (MLBDBI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNzcxZeH",
"title": "Proceedings of the 2003 International Conference on Machine Learning and Cybernetics",
"acronym": "icmlc",
"groupId": "1000424",
"volume": "5",
"displayVolume": "5",
"year": "2003",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNsd6viv",
"doi": "10.1109/ICMLC.2003.1260128",
"title": "Keyword spotting method based on speech feature space trace matching",
"normalizedTitle": "Keyword spotting method based on speech feature space trace matching",
"abstract": "Keyword spotting (KWS) has been an active research area in recent years. Based on the theory of feature space trace time normalization, an efficient keyword spotting method is proposed in this paper, with the effect of the fragment trace length probed. Experiments show that the performance of this method is close to that of manual spotting, having some practicability.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Keyword spotting (KWS) has been an active research area in recent years. Based on the theory of feature space trace time normalization, an efficient keyword spotting method is proposed in this paper, with the effect of the fragment trace length probed. Experiments show that the performance of this method is close to that of manual spotting, having some practicability.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Keyword spotting (KWS) has been an active research area in recent years. Based on the theory of feature space trace time normalization, an efficient keyword spotting method is proposed in this paper, with the effect of the fragment trace length probed. Experiments show that the performance of this method is close to that of manual spotting, having some practicability.",
"fno": "01260128",
"keywords": [
"Speaker Recognition",
"Speech Processing",
"Keyword Spotting Method",
"KWS Method",
"Speech Feature Space Trace Matching",
"Feature Space Trace Time Normalization",
"Fragment Trace Length",
"Manual Spotting",
"Speech Recognition",
"Hidden Markov Models",
"Speech Enhancement",
"Background Noise",
"Neural Networks",
"Testing",
"Speech Processing",
"Signal Processing",
"Vocabulary",
"Monitoring"
],
"authors": [
{
"affiliation": "Dept. of Comput. Sci. & Eng., Shanghai Jiao Tong Univ., China",
"fullName": "Ya-Dong Wu",
"givenName": null,
"surname": "Ya-Dong Wu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Dept. of Comput. Sci. & Eng., Shanghai Jiao Tong Univ., China",
"fullName": "Bao-Long Liu",
"givenName": null,
"surname": "Bao-Long Liu",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icmlc",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2003-01-01T00:00:00",
"pubType": "proceedings",
"pages": "3188,3189,3190,3191,3192",
"year": "2003",
"issn": null,
"isbn": "0-7803-7865-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "01260127",
"articleId": "12OmNC9lEFl",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "01260129",
"articleId": "12OmNwF0BTg",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icassp/2002/7402/4/05745682",
"title": "A keyword spotting method based on speech feature space trace matching",
"doi": null,
"abstractUrl": "/proceedings-article/icassp/2002/05745682/12OmNASraZ6",
"parentPublication": {
"id": "proceedings/icassp/2002/7402/4",
"title": "Proceedings of International Conference on Acoustics, Speech and Signal Processing (CASSP'02)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aici/2009/3816/2/3816b361",
"title": "A Keyword Spotting Based Sports Type Determination System",
"doi": null,
"abstractUrl": "/proceedings-article/aici/2009/3816b361/12OmNqBtiDn",
"parentPublication": {
"id": "proceedings/aici/2009/3816/2",
"title": "2009 International Conference on Artificial Intelligence and Computational Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdar/2017/3586/1/3586a714",
"title": "Ensembles for Graph-Based Keyword Spotting in Historical Handwritten Documents",
"doi": null,
"abstractUrl": "/proceedings-article/icdar/2017/3586a714/12OmNwErpy6",
"parentPublication": {
"id": "proceedings/icdar/2017/3586/1",
"title": "2017 14th IAPR International Conference on Document Analysis and Recognition (ICDAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdar/2015/1805/0/07333858",
"title": "Probabilistic interpretation and improvements to the HMM-filler for handwritten keyword spotting",
"doi": null,
"abstractUrl": "/proceedings-article/icdar/2015/07333858/12OmNwc3wAa",
"parentPublication": {
"id": "proceedings/icdar/2015/1805/0",
"title": "2015 13th International Conference on Document Analysis and Recognition (ICDAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icassp/1995/2431/1/00479533",
"title": "Keyword spotting using supervised/unsupervised competitive learning",
"doi": null,
"abstractUrl": "/proceedings-article/icassp/1995/00479533/12OmNyQ7G4v",
"parentPublication": {
"id": "proceedings/icassp/1995/2431/1",
"title": "Acoustics, Speech, and Signal Processing, IEEE International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icicic/2007/2882/0/28820550",
"title": "A Novel Instance Matching Based Unsupervised Keyword Spotting System",
"doi": null,
"abstractUrl": "/proceedings-article/icicic/2007/28820550/12OmNykTNjH",
"parentPublication": {
"id": "proceedings/icicic/2007/2882/0",
"title": "2007 Second International Conference on Innovative Computing, Information and Control",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icii/2001/7010/3/00983089",
"title": "Utterance verification for spontaneous Mandarin speech keyword spotting",
"doi": null,
"abstractUrl": "/proceedings-article/icii/2001/00983089/12OmNzE54B2",
"parentPublication": {
"id": "proceedings/icii/2001/7010/3",
"title": "2001 International Conferences on Info-tech and Info-net. Proceedings",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dsd/2019/2862/0/286200a313",
"title": "Keyword Spotting using Time-Domain Features in a Temporal Convolutional Network",
"doi": null,
"abstractUrl": "/proceedings-article/dsd/2019/286200a313/1ehBOsWEkEw",
"parentPublication": {
"id": "proceedings/dsd/2019/2862/0",
"title": "2019 22nd Euromicro Conference on Digital System Design (DSD)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2019/3293/0/329300m2650",
"title": "An Alternative Deep Feature Approach to Line Level Keyword Spotting",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2019/329300m2650/1gys3sAmr7i",
"parentPublication": {
"id": "proceedings/cvpr/2019/3293/0",
"title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/si/2021/12/09591243",
"title": "Efficient Execution of Temporal Convolutional Networks for Embedded Keyword Spotting",
"doi": null,
"abstractUrl": "/journal/si/2021/12/09591243/1y2Fydjbryw",
"parentPublication": {
"id": "trans/si",
"title": "IEEE Transactions on Very Large Scale Integration (VLSI) Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNCdk2YF",
"title": "2013 IEEE 13th International Conference on Advanced Learning Technologies (ICALT)",
"acronym": "icalt",
"groupId": "1000009",
"volume": "0",
"displayVolume": "0",
"year": "2013",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNwDAC7F",
"doi": "10.1109/ICALT.2013.73",
"title": "Game-Based Micro-learning Approach for Language Vocabulary Acquisition Using LingoSnacks",
"normalizedTitle": "Game-Based Micro-learning Approach for Language Vocabulary Acquisition Using LingoSnacks",
"abstract": "Acquisition of new vocabulary is an important element for language learning but it requires repeated and varied exposure to the new words and their usage. This paper reports the experience of designing and developing a game-based micro-learning platform (named LingoSnacks) for interactive learning of Arabic vocabulary. The LingoSnacks learning platform provides an environment of authoring learning content and delivering it to the learner in game-like interactive learning activities. Empirical testing results from students who used LingoSnacks indicate that the participants were able to increase their rate of vocabulary acquisition as the number of new vocabulary that they can recognize, recall and retain was significantly higher that participants who just used conventional lessons in a classroom.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Acquisition of new vocabulary is an important element for language learning but it requires repeated and varied exposure to the new words and their usage. This paper reports the experience of designing and developing a game-based micro-learning platform (named LingoSnacks) for interactive learning of Arabic vocabulary. The LingoSnacks learning platform provides an environment of authoring learning content and delivering it to the learner in game-like interactive learning activities. Empirical testing results from students who used LingoSnacks indicate that the participants were able to increase their rate of vocabulary acquisition as the number of new vocabulary that they can recognize, recall and retain was significantly higher that participants who just used conventional lessons in a classroom.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Acquisition of new vocabulary is an important element for language learning but it requires repeated and varied exposure to the new words and their usage. This paper reports the experience of designing and developing a game-based micro-learning platform (named LingoSnacks) for interactive learning of Arabic vocabulary. The LingoSnacks learning platform provides an environment of authoring learning content and delivering it to the learner in game-like interactive learning activities. Empirical testing results from students who used LingoSnacks indicate that the participants were able to increase their rate of vocabulary acquisition as the number of new vocabulary that they can recognize, recall and retain was significantly higher that participants who just used conventional lessons in a classroom.",
"fno": "5009a235",
"keywords": [
"Vocabulary",
"Mobile Communication",
"Usability",
"Games",
"Speech",
"Multimedia Communication",
"Memory Management",
"Lingo Snacks",
"Micro Learning",
"Mobile Assisted Language Learning MALL"
],
"authors": [
{
"affiliation": null,
"fullName": "Abdelkarim Erradi",
"givenName": "Abdelkarim",
"surname": "Erradi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Hind Almerekhi",
"givenName": "Hind",
"surname": "Almerekhi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Sajeda Nahia",
"givenName": "Sajeda",
"surname": "Nahia",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icalt",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2013-07-01T00:00:00",
"pubType": "proceedings",
"pages": "235-237",
"year": "2013",
"issn": null,
"isbn": "978-0-7695-5009-1",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "5009a232",
"articleId": "12OmNBSBk9H",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "5009a238",
"articleId": "12OmNB1wkMp",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iiai-aai/2016/8985/0/8985a381",
"title": "Computer Assisted Vocabulary Learning: Examining English Language Learners' Vocabulary Notebooks",
"doi": null,
"abstractUrl": "/proceedings-article/iiai-aai/2016/8985a381/12OmNCfjeBX",
"parentPublication": {
"id": "proceedings/iiai-aai/2016/8985/0",
"title": "2016 5th IIAI International Congress on Advanced Applied Informatics (IIAI-AAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icalt/2015/7334/0/7334a168",
"title": "A Mobile-Phone Camera Text-Recognition Game as an Alternative Assessment in Vocabulary Instruction for Learning Indonesian as a Foreign Language Classroom",
"doi": null,
"abstractUrl": "/proceedings-article/icalt/2015/7334a168/12OmNrH1PG8",
"parentPublication": {
"id": "proceedings/icalt/2015/7334/0",
"title": "2015 IEEE 15th International Conference on Advanced Learning Technologies (ICALT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iiai-aai/2017/0621/0/0621a649",
"title": "Effects of Collaborative Multimedia Annotations on Elementary School Students' Vocabulary Learning Performance",
"doi": null,
"abstractUrl": "/proceedings-article/iiai-aai/2017/0621a649/12OmNvHoQou",
"parentPublication": {
"id": "proceedings/iiai-aai/2017/0621/0",
"title": "2017 6th IIAI International Congress on Advanced Applied Informatics (IIAI-AAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icalt/2014/4038/0/4038a209",
"title": "A Learning Version of Memory Match Game",
"doi": null,
"abstractUrl": "/proceedings-article/icalt/2014/4038a209/12OmNwe2Iyj",
"parentPublication": {
"id": "proceedings/icalt/2014/4038/0",
"title": "2014 IEEE 14th International Conference on Advanced Learning Technologies (ICALT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icalt/2007/2916/0/29160348",
"title": "Constructing the game-based learning environment on handheld devices to facilitate English vocabulary building",
"doi": null,
"abstractUrl": "/proceedings-article/icalt/2007/29160348/12OmNyO8tOs",
"parentPublication": {
"id": "proceedings/icalt/2007/2916/0",
"title": "2007 International Conference on Advanced Learning Technologies",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iiai-aai/2017/0621/0/0621a661",
"title": "Effects of Design Factors of Game-Based English Vocabulary Learning APP on Learning Performance, Sustained Attention, Emotional State, and Memory Retention",
"doi": null,
"abstractUrl": "/proceedings-article/iiai-aai/2017/0621a661/12OmNzw8j6x",
"parentPublication": {
"id": "proceedings/iiai-aai/2017/0621/0",
"title": "2017 6th IIAI International Congress on Advanced Applied Informatics (IIAI-AAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wmute/2008/3108/0/3108a205",
"title": "The Game-Based Constructive Learning Environment to Increase English Vocabulary Acquisition: Implementing a Wireless Crossword Fan-Tan Game (WiCFG) as an Example",
"doi": null,
"abstractUrl": "/proceedings-article/wmute/2008/3108a205/12OmNzwpUbz",
"parentPublication": {
"id": "proceedings/wmute/2008/3108/0",
"title": "IEEE International Conference on Wireless, Mobile, and Ubiquitous Technology in Education",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icekim/2022/1666/0/166600a442",
"title": "An Empirical Study of the Co-relations Between Dictionary Use and Vocabulary Acquisition",
"doi": null,
"abstractUrl": "/proceedings-article/icekim/2022/166600a442/1KpBw1K2OVW",
"parentPublication": {
"id": "proceedings/icekim/2022/1666/0",
"title": "2022 3rd International Conference on Education, Knowledge and Information Management (ICEKIM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ijcime/2019/5586/0/558600a383",
"title": "The Influence of Embodied Interactive Action Games on Second Language Vocabulary Acquisition",
"doi": null,
"abstractUrl": "/proceedings-article/ijcime/2019/558600a383/1j9wAx1TVRe",
"parentPublication": {
"id": "proceedings/ijcime/2019/5586/0",
"title": "2019 International Joint Conference on Information, Media and Engineering (IJCIME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icalt/2020/6090/0/09155645",
"title": "L<sup>2</sup>- A Mini Game for Learning Indian Language Vocabulary",
"doi": null,
"abstractUrl": "/proceedings-article/icalt/2020/09155645/1m1j7VNYVQQ",
"parentPublication": {
"id": "proceedings/icalt/2020/6090/0",
"title": "2020 IEEE 20th International Conference on Advanced Learning Technologies (ICALT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNBNM94a",
"title": "2014 IEEE 14th International Conference on Advanced Learning Technologies (ICALT)",
"acronym": "icalt",
"groupId": "1000009",
"volume": "0",
"displayVolume": "0",
"year": "2014",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNxX3uPg",
"doi": "10.1109/ICALT.2014.129",
"title": "Mobile-Based AR Application Helps to Promote EFL Children's Vocabulary Study",
"normalizedTitle": "Mobile-Based AR Application Helps to Promote EFL Children's Vocabulary Study",
"abstract": "The advancement of mobile device is influencing the learning activities markedly. In this study, we attempt to use augmented reality (AR) technology to design and develop mobile-based English learning software for pre-school children in order to solve the problem of bored students and teachers' non-standard pronunciation, the mobile learning system is able to present the learning materials including virtual pictures, the meaning and pronunciation of words. A vivid picture will emerge when using mobile camera to identify an English word on card, which improves children's interests in learning. The 40 pre-school children who participated in this research were assigned to an experimental and a control group. From the pre-tests and post-tests as well as the interview of the English teacher, it was found that the students learning with mobile-based AR software had greater learning achievement than control group ones. Thus the teacher hold positive attitude to this software.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The advancement of mobile device is influencing the learning activities markedly. In this study, we attempt to use augmented reality (AR) technology to design and develop mobile-based English learning software for pre-school children in order to solve the problem of bored students and teachers' non-standard pronunciation, the mobile learning system is able to present the learning materials including virtual pictures, the meaning and pronunciation of words. A vivid picture will emerge when using mobile camera to identify an English word on card, which improves children's interests in learning. The 40 pre-school children who participated in this research were assigned to an experimental and a control group. From the pre-tests and post-tests as well as the interview of the English teacher, it was found that the students learning with mobile-based AR software had greater learning achievement than control group ones. Thus the teacher hold positive attitude to this software.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The advancement of mobile device is influencing the learning activities markedly. In this study, we attempt to use augmented reality (AR) technology to design and develop mobile-based English learning software for pre-school children in order to solve the problem of bored students and teachers' non-standard pronunciation, the mobile learning system is able to present the learning materials including virtual pictures, the meaning and pronunciation of words. A vivid picture will emerge when using mobile camera to identify an English word on card, which improves children's interests in learning. The 40 pre-school children who participated in this research were assigned to an experimental and a control group. From the pre-tests and post-tests as well as the interview of the English teacher, it was found that the students learning with mobile-based AR software had greater learning achievement than control group ones. Thus the teacher hold positive attitude to this software.",
"fno": "4038a431",
"keywords": [
"Mobile Communication",
"Mobile Handsets",
"Augmented Reality",
"Software",
"Vocabulary",
"Educational Institutions",
"EFL English As A Foreign Language",
"Augmented Reality",
"Mobile Platform",
"Pre School Children"
],
"authors": [
{
"affiliation": null,
"fullName": "Junjie He",
"givenName": "Junjie",
"surname": "He",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Jiali Ren",
"givenName": "Jiali",
"surname": "Ren",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Gaoxia Zhu",
"givenName": "Gaoxia",
"surname": "Zhu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Su Cai",
"givenName": "Su",
"surname": "Cai",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Guang Chen",
"givenName": "Guang",
"surname": "Chen",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icalt",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2014-07-01T00:00:00",
"pubType": "proceedings",
"pages": "431-433",
"year": "2014",
"issn": null,
"isbn": "978-1-4799-4038-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "4038a426",
"articleId": "12OmNxjBfkQ",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "4038a434",
"articleId": "12OmNxwWoI7",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iset/2017/3031/0/08005387",
"title": "Using Augmented Reality to Teach Kindergarten Students English Vocabulary",
"doi": null,
"abstractUrl": "/proceedings-article/iset/2017/08005387/12OmNCdBDT9",
"parentPublication": {
"id": "proceedings/iset/2017/3031/0",
"title": "2017 International Symposium on Educational Technology (ISET)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iiai-aai/2015/9957/0/07373933",
"title": "Investigating the Effects of Mobile Learning with Cross-Age Peer Tutoring in English Learning",
"doi": null,
"abstractUrl": "/proceedings-article/iiai-aai/2015/07373933/12OmNqJHFEy",
"parentPublication": {
"id": "proceedings/iiai-aai/2015/9957/0",
"title": "2015 IIAI 4th International Congress on Advanced Applied Informatics (IIAI-AAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/eitt/2017/0629/0/0629a316",
"title": "Android App Development for Teaching Reduced Forms of EFL Listening Comprehension to Decrease Cognitive Load",
"doi": null,
"abstractUrl": "/proceedings-article/eitt/2017/0629a316/12OmNvAAtqr",
"parentPublication": {
"id": "proceedings/eitt/2017/0629/0",
"title": "2017 International Conference of Educational Innovation through Technology (EITT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wmute/2012/4662/0/4662a232",
"title": "Using Personalized VLS on Mobile English Vocabulary Learning",
"doi": null,
"abstractUrl": "/proceedings-article/wmute/2012/4662a232/12OmNwnYG4m",
"parentPublication": {
"id": "proceedings/wmute/2012/4662/0",
"title": "IEEE International Conference on Wireless, Mobile, and Ubiquitous Technology in Education",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iciss/2015/8611/0/07371036",
"title": "The Effects of Learning Style on Mobile Augmented-Reality-Facilitated English Vocabulary Learning",
"doi": null,
"abstractUrl": "/proceedings-article/iciss/2015/07371036/12OmNzIUg42",
"parentPublication": {
"id": "proceedings/iciss/2015/8611/0",
"title": "2015 2nd International Conference on Information Science and Security (ICISS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wmute/2012/4662/0/4662a102",
"title": "How We Can Entwine In-class Vocabulary Learning with Out-class One in English Course for Japanese EFL Learners",
"doi": null,
"abstractUrl": "/proceedings-article/wmute/2012/4662a102/12OmNzn38Ut",
"parentPublication": {
"id": "proceedings/wmute/2012/4662/0",
"title": "IEEE International Conference on Wireless, Mobile, and Ubiquitous Technology in Education",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icise-ie/2021/3829/0/382900b092",
"title": "Application of Mobile Learning Software in College English Vocabulary Teaching",
"doi": null,
"abstractUrl": "/proceedings-article/icise-ie/2021/382900b092/1C8FSCi7omA",
"parentPublication": {
"id": "proceedings/icise-ie/2021/3829/0",
"title": "2021 2nd International Conference on Information Science and Education (ICISE-IE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/itei/2021/8050/0/805000a167",
"title": "A study of frequency-based and mobile terminal-based deep learning of college English vocabulary",
"doi": null,
"abstractUrl": "/proceedings-article/itei/2021/805000a167/1CzeJ1uZTNK",
"parentPublication": {
"id": "proceedings/itei/2021/8050/0",
"title": "2021 3rd International Conference on Internet Technology and Educational Informization (ITEI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icalt/2022/9519/0/951900a256",
"title": "Using Thematic English Learning and Augmented Reality to Enhance Vocabulary Learning Motivation and Enjoyment of Elementary School Students",
"doi": null,
"abstractUrl": "/proceedings-article/icalt/2022/951900a256/1FUUfSsF2es",
"parentPublication": {
"id": "proceedings/icalt/2022/9519/0",
"title": "2022 International Conference on Advanced Learning Technologies (ICALT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2019/4765/0/476500a150",
"title": "Words in Kitchen: An Instance of Leveraging Virtual Reality Technology to Learn Vocabulary",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2019/476500a150/1gysnPldm9O",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2019/4765/0",
"title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNAolGQD",
"title": "Proceedings of International Conference on Acoustics, Speech and Signal Processing (CASSP'02)",
"acronym": "icassp",
"groupId": "1000002",
"volume": "1",
"displayVolume": "1",
"year": "2002",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNzEmFEf",
"doi": "10.1109/ICASSP.2002.5743875",
"title": "Transcription of out-of-vocabulary words in large vocabulary speech recognition based on phoneme-to-grapheme conversion",
"normalizedTitle": "Transcription of out-of-vocabulary words in large vocabulary speech recognition based on phoneme-to-grapheme conversion",
"abstract": "In this paper, we describe a method to enhance the readability of the textual output in a large vocabulary continuous speech recognizer when out-of-vocabulary words occur.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this paper, we describe a method to enhance the readability of the textual output in a large vocabulary continuous speech recognizer when out-of-vocabulary words occur.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this paper, we describe a method to enhance the readability of the textual output in a large vocabulary continuous speech recognizer when out-of-vocabulary words occur.",
"fno": "05743875",
"keywords": [
"Speech",
"Speech Recognition",
"Vocabulary",
"Error Analysis"
],
"authors": [
{
"affiliation": "CNTS Language Technology Group, University of Antwerp, Belgium",
"fullName": "Bart Decadt",
"givenName": "Bart",
"surname": "Decadt",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "ESAT - PSI, Katholieke Universiteit Leuven, Belgium",
"fullName": "Jacques Duchateau",
"givenName": "Jacques",
"surname": "Duchateau",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "CNTS Language Technology Group, University of Antwerp, Belgium",
"fullName": "Walter Daelemans",
"givenName": "Walter",
"surname": "Daelemans",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "ESAT - PSI, Katholieke Universiteit Leuven, Belgium",
"fullName": "Patrick Wambacq",
"givenName": "Patrick",
"surname": "Wambacq",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icassp",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2002-05-01T00:00:00",
"pubType": "proceedings",
"pages": "I-861-I-864",
"year": "2002",
"issn": "1520-6149",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "05743874",
"articleId": "12OmNwE9Ot8",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "05743876",
"articleId": "12OmNrF2DG5",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/das/2014/3244/0/3244a111",
"title": "The RWTH Large Vocabulary Arabic Handwriting Recognition System",
"doi": null,
"abstractUrl": "/proceedings-article/das/2014/3244a111/12OmNBQ2W25",
"parentPublication": {
"id": "proceedings/das/2014/3244/0",
"title": "2014 11th IAPR International Workshop on Document Analysis Systems (DAS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icassp/1988/9999/0/00196527",
"title": "Large-vocabulary speaker-independent continuous speech recognition using HMM",
"doi": null,
"abstractUrl": "/proceedings-article/icassp/1988/00196527/12OmNCbU3cW",
"parentPublication": {
"id": "proceedings/icassp/1988/9999/0",
"title": "ICASSP-88., International Conference on Acoustics, Speech, and Signal Processing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icassp/2004/8484/1/01326093",
"title": "Hybrid language models for out of vocabulary word detection in large vocabulary conversational speech recognition",
"doi": null,
"abstractUrl": "/proceedings-article/icassp/2004/01326093/12OmNvjyxFr",
"parentPublication": {
"id": "proceedings/icassp/2004/8484/1",
"title": "2004 IEEE International Conference on Acoustics, Speech, and Signal Processing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icassp/1988/9999/0/00196628",
"title": "Acoustic Markov models used in the Tangora speech recognition system",
"doi": null,
"abstractUrl": "/proceedings-article/icassp/1988/00196628/12OmNxdm4Co",
"parentPublication": {
"id": "proceedings/icassp/1988/9999/0",
"title": "ICASSP-88., International Conference on Acoustics, Speech, and Signal Processing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icassp/1988/9999/0/00196626",
"title": "Obtaining candidate words by polling in a large vocabulary speech recognition system",
"doi": null,
"abstractUrl": "/proceedings-article/icassp/1988/00196626/12OmNyRPgOH",
"parentPublication": {
"id": "proceedings/icassp/1988/9999/0",
"title": "ICASSP-88., International Conference on Acoustics, Speech, and Signal Processing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icassp/1988/9999/0/00196612",
"title": "Phoneme modelling using continuous mixture densities",
"doi": null,
"abstractUrl": "/proceedings-article/icassp/1988/00196612/12OmNzUgdfA",
"parentPublication": {
"id": "proceedings/icassp/1988/9999/0",
"title": "ICASSP-88., International Conference on Acoustics, Speech, and Signal Processing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icassp/1988/9999/0/00196631",
"title": "Modeling acoustic-phonetic detail in an HMM-based large vocabulary speech recognizer",
"doi": null,
"abstractUrl": "/proceedings-article/icassp/1988/00196631/12OmNzYwcan",
"parentPublication": {
"id": "proceedings/icassp/1988/9999/0",
"title": "ICASSP-88., International Conference on Acoustics, Speech, and Signal Processing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icassp/2004/8484/1/01325970",
"title": "Vocabulary-independent search in spontaneous speech",
"doi": null,
"abstractUrl": "/proceedings-article/icassp/2004/01325970/12OmNzt0IG5",
"parentPublication": {
"id": "proceedings/icassp/2004/8484/1",
"title": "2004 IEEE International Conference on Acoustics, Speech, and Signal Processing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/11/09872027",
"title": "VocabulARy: Learning Vocabulary in AR Supported by Keyword Visualisations",
"doi": null,
"abstractUrl": "/journal/tg/2022/11/09872027/1GhRUPatDmU",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ewdts/2019/1003/0/08884449",
"title": "Comparison Of Grapheme-to-Phoneme Conversions For Spoken Document Retrieval",
"doi": null,
"abstractUrl": "/proceedings-article/ewdts/2019/08884449/1eEUZR2P3IA",
"parentPublication": {
"id": "proceedings/ewdts/2019/1003/0",
"title": "2019 IEEE East-West Design & Test Symposium (EWDTS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1CJcAaH6aYg",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"acronym": "vrw",
"groupId": "1836626",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1CJd5Up2emQ",
"doi": "10.1109/VRW55335.2022.00182",
"title": "A Tangible Augmented Reality Programming Learning Environment for Textual Languages",
"normalizedTitle": "A Tangible Augmented Reality Programming Learning Environment for Textual Languages",
"abstract": "We present a novel Tangible Augmented Reality Programming Learning Environment system that uses a head-mounted display (HMD) and physical manipulatives for teaching an Object-Oriented Programming (OOP) language. The system supports student understanding/recollection of terms, and construction of statements by enabling access to code components, terminology, and programming hints. It is designed to use the affordances of Augmented Reality (AR) and Tangible User Interfaces (TUIs) to provide a virtual workspace encouraging natural interaction with learning material. An interactive AR code template for physical manipulatives provides a building and testing environment for learners to practice statement construction and computational skills. The system bolsters active learning with a localised AR program visualisation and HMD-anchored AR glossary.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We present a novel Tangible Augmented Reality Programming Learning Environment system that uses a head-mounted display (HMD) and physical manipulatives for teaching an Object-Oriented Programming (OOP) language. The system supports student understanding/recollection of terms, and construction of statements by enabling access to code components, terminology, and programming hints. It is designed to use the affordances of Augmented Reality (AR) and Tangible User Interfaces (TUIs) to provide a virtual workspace encouraging natural interaction with learning material. An interactive AR code template for physical manipulatives provides a building and testing environment for learners to practice statement construction and computational skills. The system bolsters active learning with a localised AR program visualisation and HMD-anchored AR glossary.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We present a novel Tangible Augmented Reality Programming Learning Environment system that uses a head-mounted display (HMD) and physical manipulatives for teaching an Object-Oriented Programming (OOP) language. The system supports student understanding/recollection of terms, and construction of statements by enabling access to code components, terminology, and programming hints. It is designed to use the affordances of Augmented Reality (AR) and Tangible User Interfaces (TUIs) to provide a virtual workspace encouraging natural interaction with learning material. An interactive AR code template for physical manipulatives provides a building and testing environment for learners to practice statement construction and computational skills. The system bolsters active learning with a localised AR program visualisation and HMD-anchored AR glossary.",
"fno": "840200a662",
"keywords": [
"Augmented Reality",
"Computer Aided Instruction",
"Computer Science Education",
"Helmet Mounted Displays",
"Object Oriented Programming",
"Program Visualisation",
"Teaching",
"User Interfaces",
"Virtual Reality",
"Head Mounted Display",
"Physical Manipulatives",
"Object Oriented Programming Language",
"Programming Hints",
"Tangible User Interfaces",
"Learning Material",
"Interactive AR Code Template",
"Building",
"Testing Environment",
"Statement Construction",
"Computational Skills",
"Active Learning",
"Localised AR Program Visualisation",
"Novel Tangible Augmented Reality Programming Learning Environment System",
"Visualization",
"Codes",
"Three Dimensional Displays",
"Terminology",
"Conferences",
"Resists",
"User Interfaces",
"Applied Computing X 2014 Education X 2014 Interactive Learning Environments",
"Human Centered Computing X 2014 Interaction Design"
],
"authors": [
{
"affiliation": "School of Information Technology and Mathematical Sciences, University of South Australia,Mawson Lakes,SA,Australia,5095",
"fullName": "Dmitry Resnyansky",
"givenName": "Dmitry",
"surname": "Resnyansky",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "School of Information Technology and Mathematical Sciences, University of South Australia,Mawson Lakes,SA,Australia,5095",
"fullName": "Mark Billinghurst",
"givenName": "Mark",
"surname": "Billinghurst",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "School of Information Technology and Mathematical Sciences, University of South Australia,Mawson Lakes,SA,Australia,5095",
"fullName": "Gun Lee",
"givenName": "Gun",
"surname": "Lee",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vrw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-03-01T00:00:00",
"pubType": "proceedings",
"pages": "662-663",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-8402-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [
{
"id": "1CJd5PusDNC",
"name": "pvrw202284020-09757625s1-mm_840200a662.zip",
"size": "7.39 MB",
"location": "https://www.computer.org/csdl/api/v1/extra/pvrw202284020-09757625s1-mm_840200a662.zip",
"__typename": "WebExtraType"
}
],
"adjacentArticles": {
"previous": {
"fno": "840200a660",
"articleId": "1CJcC7q0PRu",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "840200a664",
"articleId": "1CJfp3SA9Ko",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/isuvr/2010/4124/0/4124a040",
"title": "ARtalet: Tangible User Interface Based Immersive Augmented Reality Authoring Tool for Digilog Book",
"doi": null,
"abstractUrl": "/proceedings-article/isuvr/2010/4124a040/12OmNrEL2B8",
"parentPublication": {
"id": "proceedings/isuvr/2010/4124/0",
"title": "International Symposium on Ubiquitous Virtual Reality",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2007/1749/0/04538825",
"title": "A 3D Flexible and Tangible Magic Lens in Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2007/04538825/12OmNwNwzHD",
"parentPublication": {
"id": "proceedings/ismar/2007/1749/0",
"title": "2007 6th IEEE and ACM International Symposium on Mixed and Augmented Reality",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2007/1749/0/04538824",
"title": "Visual Hints for Tangible Gestures in Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2007/04538824/12OmNyS6RCa",
"parentPublication": {
"id": "proceedings/ismar/2007/1749/0",
"title": "2007 6th IEEE and ACM International Symposium on Mixed and Augmented Reality",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08448289",
"title": "Performance Envelopes of in-Air Direct and Smartwatch Indirect Control for Head-Mounted Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08448289/13bd1fZBGcE",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2010/01/ttg2010010004",
"title": "Opportunistic Tangible User Interfaces for Augmented Reality",
"doi": null,
"abstractUrl": "/journal/tg/2010/01/ttg2010010004/13rRUwvT9gn",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2022/9617/0/961700a812",
"title": "Tangiball: Foot-Enabled Embodied Tangible Interaction with a Ball in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2022/961700a812/1CJczvrAl0Y",
"parentPublication": {
"id": "proceedings/vr/2022/9617/0",
"title": "2022 IEEE on Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a584",
"title": "Investigating Display Position of a Head-Fixed Augmented Reality Notification for Dual-task",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a584/1CJd297BiDu",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a958",
"title": "[DC] A Tangible Augmented Reality Programming Learning Environment (TARPLE) for Active, Guided Learning",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a958/1CJeVLWmIgw",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2020/5608/0/09089453",
"title": "A Tangible Spherical Proxy for Object Manipulation in Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2020/09089453/1jIxguSW9va",
"parentPublication": {
"id": "proceedings/vr/2020/5608/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2020/7675/0/767500a024",
"title": "Catching the Drone - A Tangible Augmented Reality Game in Superhuman Sports",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2020/767500a024/1pBMeMETmdW",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2020/7675/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1dPofs0eSGI",
"title": "2019 IEEE 21st International Conference on High Performance Computing and Communications; IEEE 17th International Conference on Smart City; IEEE 5th International Conference on Data Science and Systems (HPCC/SmartCity/DSS)",
"acronym": "hpcc-smartcity-dss",
"groupId": "1002461",
"volume": "0",
"displayVolume": "0",
"year": "2019",
"__typename": "ProceedingType"
},
"article": {
"id": "1dPowf6VBvO",
"doi": "10.1109/HPCC/SmartCity/DSS.2019.00309",
"title": "Word Image Representation Based on Sequence to Sequence Model with Attention Mechanism for Out-of-Vocabulary Keyword Spotting",
"normalizedTitle": "Word Image Representation Based on Sequence to Sequence Model with Attention Mechanism for Out-of-Vocabulary Keyword Spotting",
"abstract": "To realize keyword spotting by means of query-by-example, learning efficient representation for word images is an essential issue. However, the amount of vocabulary at the training stage is often far less than the complete vocabulary of a certain language in various learning based representation approaches. Thus, unseen vocabularies might be taken as query keywords which may not exist in training set. Therefore, out-of-vocabulary (OOV) is frequently occurred in keyword spotting. In this paper, a sequence to sequence model with attention mechanism has been proposed to generate representation vectors of word images for solving the problem of OOV. After that, similarities can be calculated between each word image and a given query keyword image on their representation vectors. And then, a ranking list can be formed in descending order of the similarities for a collection of word images. Experimental results demonstrate that the proposed representation approach can be competent for the task of OOV keyword spotting and outperforms various baseline and state-of-the-art methods.",
"abstracts": [
{
"abstractType": "Regular",
"content": "To realize keyword spotting by means of query-by-example, learning efficient representation for word images is an essential issue. However, the amount of vocabulary at the training stage is often far less than the complete vocabulary of a certain language in various learning based representation approaches. Thus, unseen vocabularies might be taken as query keywords which may not exist in training set. Therefore, out-of-vocabulary (OOV) is frequently occurred in keyword spotting. In this paper, a sequence to sequence model with attention mechanism has been proposed to generate representation vectors of word images for solving the problem of OOV. After that, similarities can be calculated between each word image and a given query keyword image on their representation vectors. And then, a ranking list can be formed in descending order of the similarities for a collection of word images. Experimental results demonstrate that the proposed representation approach can be competent for the task of OOV keyword spotting and outperforms various baseline and state-of-the-art methods.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "To realize keyword spotting by means of query-by-example, learning efficient representation for word images is an essential issue. However, the amount of vocabulary at the training stage is often far less than the complete vocabulary of a certain language in various learning based representation approaches. Thus, unseen vocabularies might be taken as query keywords which may not exist in training set. Therefore, out-of-vocabulary (OOV) is frequently occurred in keyword spotting. In this paper, a sequence to sequence model with attention mechanism has been proposed to generate representation vectors of word images for solving the problem of OOV. After that, similarities can be calculated between each word image and a given query keyword image on their representation vectors. And then, a ranking list can be formed in descending order of the similarities for a collection of word images. Experimental results demonstrate that the proposed representation approach can be competent for the task of OOV keyword spotting and outperforms various baseline and state-of-the-art methods.",
"fno": "205800c224",
"keywords": [
"Image Representation",
"Learning Artificial Intelligence",
"Optical Character Recognition",
"Query Processing",
"Vocabulary",
"Word Image Representation",
"Attention Mechanism",
"Query By Example",
"Complete Vocabulary",
"Unseen Vocabularies",
"Representation Vectors",
"Representation Approach",
"OOV Keyword Spotting",
"Sequence To Sequence Model",
"Learning Based Representation",
"Query Keyword Image",
"Out Of Vocabulary Keyword Spotting",
"Visualization",
"Task Analysis",
"Logic Gates",
"Training",
"Vocabulary",
"Image Segmentation",
"Decoding",
"Long Short Term Memory Out Of Vocabulary Query By Example Representation Vector Attention Mechanism"
],
"authors": [
{
"affiliation": "Inner Mongolia University",
"fullName": "Hongxi Wei",
"givenName": "Hongxi",
"surname": "Wei",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Inner Mongolia University",
"fullName": "Yanke Kang",
"givenName": "Yanke",
"surname": "Kang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Inner Mongolia University",
"fullName": "Hui Zhang",
"givenName": "Hui",
"surname": "Zhang",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "hpcc-smartcity-dss",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2019-08-01T00:00:00",
"pubType": "proceedings",
"pages": "2224-2231",
"year": "2019",
"issn": null,
"isbn": "978-1-7281-2058-4",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "205800c218",
"articleId": "1dPogq0Hiyk",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "205800c232",
"articleId": "1dPotuQry6s",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/das/2016/1792/0/1792a411",
"title": "Keyword Spotting in Handwritten Documents Using Projections of Oriented Gradients",
"doi": null,
"abstractUrl": "/proceedings-article/das/2016/1792a411/12OmNAObbMs",
"parentPublication": {
"id": "proceedings/das/2016/1792/0",
"title": "2016 12th IAPR Workshop on Document Analysis Systems (DAS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2014/5209/0/5209c035",
"title": "Word-Graph-Based Handwriting Keyword Spotting of Out-of-Vocabulary Queries",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2014/5209c035/12OmNrNh0NW",
"parentPublication": {
"id": "proceedings/icpr/2014/5209/0",
"title": "2014 22nd International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icassp/2004/8484/1/01326093",
"title": "Hybrid language models for out of vocabulary word detection in large vocabulary conversational speech recognition",
"doi": null,
"abstractUrl": "/proceedings-article/icassp/2004/01326093/12OmNvjyxFr",
"parentPublication": {
"id": "proceedings/icassp/2004/8484/1",
"title": "2004 IEEE International Conference on Acoustics, Speech, and Signal Processing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdar/2017/3586/1/3586a971",
"title": "Assisted Transcription of Historical Documents by Keyword Spotting: A Performance Model",
"doi": null,
"abstractUrl": "/proceedings-article/icdar/2017/3586a971/12OmNvxKu2i",
"parentPublication": {
"id": "proceedings/icdar/2017/3586/1",
"title": "2017 14th IAPR International Conference on Document Analysis and Recognition (ICDAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2012/2216/0/06460134",
"title": "Multilingual word spotting in offline handwritten documents",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2012/06460134/12OmNwBT1mv",
"parentPublication": {
"id": "proceedings/icpr/2012/2216/0",
"title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icfhr/2014/4335/0/06981017",
"title": "Word-Graph and Character-Lattice Combination for KWS in Handwritten Documents",
"doi": null,
"abstractUrl": "/proceedings-article/icfhr/2014/06981017/12OmNxEBzgX",
"parentPublication": {
"id": "proceedings/icfhr/2014/4335/0",
"title": "2014 14th International Conference on Frontiers in Handwriting Recognition (ICFHR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icfhr/2016/0981/0/0981a283",
"title": "Zoning Aggregated Hypercolumns for Keyword Spotting",
"doi": null,
"abstractUrl": "/proceedings-article/icfhr/2016/0981a283/12OmNxwENtF",
"parentPublication": {
"id": "proceedings/icfhr/2016/0981/0",
"title": "2016 15th International Conference on Frontiers in Handwriting Recognition (ICFHR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icfhr/2012/2262/0/06424459",
"title": "Keyword Spotting Framework Using Dynamic Background Model",
"doi": null,
"abstractUrl": "/proceedings-article/icfhr/2012/06424459/12OmNz5s0P2",
"parentPublication": {
"id": "proceedings/icfhr/2012/2262/0",
"title": "2012 International Conference on Frontiers in Handwriting Recognition (ICFHR 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2022/9062/0/09956211",
"title": "Robust Representations for Keyword Spotting Systems",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2022/09956211/1IHpVFHAxdC",
"parentPublication": {
"id": "proceedings/icpr/2022/9062/0",
"title": "2022 26th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ictai/2020/9228/0/922800a413",
"title": "Deep Features Representation of Word Image for Keyword Spotting in Historical Mongolian Document Images",
"doi": null,
"abstractUrl": "/proceedings-article/ictai/2020/922800a413/1pP3wNsnjdC",
"parentPublication": {
"id": "proceedings/ictai/2020/9228/0",
"title": "2020 IEEE 32nd International Conference on Tools with Artificial Intelligence (ICTAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1gyshXRzHpK",
"title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"acronym": "ismar-adjunct",
"groupId": "1810084",
"volume": "0",
"displayVolume": "0",
"year": "2019",
"__typename": "ProceedingType"
},
"article": {
"id": "1gysm0mzZlK",
"doi": "10.1109/ISMAR-Adjunct.2019.00024",
"title": "AR Tips: Augmented First-Person View Task Instruction Videos",
"normalizedTitle": "AR Tips: Augmented First-Person View Task Instruction Videos",
"abstract": "This research investigates applying Augmented Reality (AR) visualisation of spatial cues in first-person view task instruction videos. Instructional videos are becoming popular, and are not only used in formal education and training, but even in everyday life as more people seek for how-to videos when they need help with instructions. However, video clips are 2D visualisation of the task space, sometimes making it hard for the viewer to follow and match the objects in the video to those in the real-world task space. We propose augmenting task instruction videos with 3D visualisation of spatial cues to overcome this problem, focusing on creating and viewing first-person view instruction videos. As a proof of concept, we designed and implemented a prototype system, called AR Tips, which allows users to capture and watch first-person view instructional videos on a wearable AR device, augmented with 3D visual cues shown in-situ at the task environment. Initial feedback from potential end users indicate that the prototype system is very easy to use and could be applied to various scenarios.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This research investigates applying Augmented Reality (AR) visualisation of spatial cues in first-person view task instruction videos. Instructional videos are becoming popular, and are not only used in formal education and training, but even in everyday life as more people seek for how-to videos when they need help with instructions. However, video clips are 2D visualisation of the task space, sometimes making it hard for the viewer to follow and match the objects in the video to those in the real-world task space. We propose augmenting task instruction videos with 3D visualisation of spatial cues to overcome this problem, focusing on creating and viewing first-person view instruction videos. As a proof of concept, we designed and implemented a prototype system, called AR Tips, which allows users to capture and watch first-person view instructional videos on a wearable AR device, augmented with 3D visual cues shown in-situ at the task environment. Initial feedback from potential end users indicate that the prototype system is very easy to use and could be applied to various scenarios.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This research investigates applying Augmented Reality (AR) visualisation of spatial cues in first-person view task instruction videos. Instructional videos are becoming popular, and are not only used in formal education and training, but even in everyday life as more people seek for how-to videos when they need help with instructions. However, video clips are 2D visualisation of the task space, sometimes making it hard for the viewer to follow and match the objects in the video to those in the real-world task space. We propose augmenting task instruction videos with 3D visualisation of spatial cues to overcome this problem, focusing on creating and viewing first-person view instruction videos. As a proof of concept, we designed and implemented a prototype system, called AR Tips, which allows users to capture and watch first-person view instructional videos on a wearable AR device, augmented with 3D visual cues shown in-situ at the task environment. Initial feedback from potential end users indicate that the prototype system is very easy to use and could be applied to various scenarios.",
"fno": "476500a034",
"keywords": [
"Augmented Reality",
"Data Visualisation",
"Augmented Reality Visualisation",
"Spatial Cues",
"Formal Education",
"Video Clips",
"Real World Task Space",
"Creating Viewing First Person View Instruction Videos",
"First Person View Instructional Videos",
"3 D Visual Cues",
"Task Environment",
"Augmented First Person View Task Instruction Videos",
"Videos",
"Task Analysis",
"Visualization",
"Prototypes",
"Augmented Reality",
"Three Dimensional Displays",
"Annotations",
"Augmented Task Guidance",
"Instructional Video",
"Spatial Cue"
],
"authors": [
{
"affiliation": "University of South Australia",
"fullName": "Gun Lee",
"givenName": "Gun",
"surname": "Lee",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of South Australia",
"fullName": "Seungjun Ahn",
"givenName": "Seungjun",
"surname": "Ahn",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Colorado School of Mines",
"fullName": "William Hoff",
"givenName": "William",
"surname": "Hoff",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of South Australia",
"fullName": "Mark Billinghurst",
"givenName": "Mark",
"surname": "Billinghurst",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar-adjunct",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2019-10-01T00:00:00",
"pubType": "proceedings",
"pages": "34-36",
"year": "2019",
"issn": null,
"isbn": "978-1-7281-4765-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "476500a028",
"articleId": "1gysmKtgeju",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "476500a037",
"articleId": "1gyskTjecsE",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/svr/2012/4725/0/4725a116",
"title": "From VR to AR: Adding AR Functionality to an Existing VR Software Framework",
"doi": null,
"abstractUrl": "/proceedings-article/svr/2012/4725a116/12OmNAYoKsE",
"parentPublication": {
"id": "proceedings/svr/2012/4725/0",
"title": "2012 14th Symposium on Virtual and Augmented Reality",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismarw/2016/3740/0/07836460",
"title": "An Augmented Reality Guide for Assisting Forklift Operation",
"doi": null,
"abstractUrl": "/proceedings-article/ismarw/2016/07836460/12OmNvwTGFS",
"parentPublication": {
"id": "proceedings/ismarw/2016/3740/0",
"title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismarw/2016/3740/0/07836523",
"title": "Human Attention and fatigue for AR Head-Up Displays",
"doi": null,
"abstractUrl": "/proceedings-article/ismarw/2016/07836523/12OmNwFidbp",
"parentPublication": {
"id": "proceedings/ismarw/2016/3740/0",
"title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/latice/2015/9967/0/9967a022",
"title": "Teaching High School Computer Science with Videos of Historical Figures -- An Augmented Reality Approach",
"doi": null,
"abstractUrl": "/proceedings-article/latice/2015/9967a022/12OmNzUxOcF",
"parentPublication": {
"id": "proceedings/latice/2015/9967/0",
"title": "2015 International Conference on Learning and Teaching in Computing and Engineering (LaTiCE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2018/7592/0/08699229",
"title": "The Effect of AR Based Emotional Interaction Among Personified Physical Objects in Manual Operation",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699229/19F1LS1YWuA",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2018/7592/0",
"title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2018/7592/0/08699328",
"title": "Compact Object Representation of a Non-Rigid Object for Real-Time Tracking in AR Systems",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699328/19F1QGFHn8Y",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2018/7592/0",
"title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2020/7675/0/767500a039",
"title": "Designing a Multitasking Interface for Object-aware AR applications",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2020/767500a039/1pBMfjaOy08",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2020/7675/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2020/8508/0/850800a498",
"title": "Enhancing First-Person View Task Instruction Videos with Augmented Reality Cues",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2020/850800a498/1pyswTqrkZ2",
"parentPublication": {
"id": "proceedings/ismar/2020/8508/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2021/1298/0/129800a159",
"title": "Exploring the Effect of Visual Cues on Eye Gaze During AR-Guided Picking and Assembly Tasks",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2021/129800a159/1yeQM18rD7G",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2021/1298/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccst/2021/4254/0/425400a034",
"title": "Current Status and Prospects of Mobile AR Applications",
"doi": null,
"abstractUrl": "/proceedings-article/iccst/2021/425400a034/1ziP9KHT7Vu",
"parentPublication": {
"id": "proceedings/iccst/2021/4254/0",
"title": "2021 International Conference on Culture-oriented Science & Technology (ICCST)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1t90eDjCtkk",
"title": "2021 Asia-Pacific Conference on Communications Technology and Computer Science (ACCTCS)",
"acronym": "acctcs",
"groupId": "1841365",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1t90i2jxGZG",
"doi": "10.1109/ACCTCS52002.2021.00075",
"title": "Research on image text generation based on word2vec visual vocabulary attention",
"normalizedTitle": "Research on image text generation based on word2vec visual vocabulary attention",
"abstract": "A method of image text generation based on the combination of word2vec keyword extraction and attention mechanism is proposed. First, the co-occurring words with visual entities in the description set were extracted for each image in the dataset; Then the similarity was calculated for the extracted keywords, the similar words were filtered out to expand the keyword list, and the words in the vocabulary were retained to create new descriptions for the images. Finally, the test set images were combined with attention mechanism to generate description text. The experiments prove that the method proposed in this paper can achieve automatic annotation of images and can effectively solve the attention diffusion problem in the process of image text generation.",
"abstracts": [
{
"abstractType": "Regular",
"content": "A method of image text generation based on the combination of word2vec keyword extraction and attention mechanism is proposed. First, the co-occurring words with visual entities in the description set were extracted for each image in the dataset; Then the similarity was calculated for the extracted keywords, the similar words were filtered out to expand the keyword list, and the words in the vocabulary were retained to create new descriptions for the images. Finally, the test set images were combined with attention mechanism to generate description text. The experiments prove that the method proposed in this paper can achieve automatic annotation of images and can effectively solve the attention diffusion problem in the process of image text generation.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "A method of image text generation based on the combination of word2vec keyword extraction and attention mechanism is proposed. First, the co-occurring words with visual entities in the description set were extracted for each image in the dataset; Then the similarity was calculated for the extracted keywords, the similar words were filtered out to expand the keyword list, and the words in the vocabulary were retained to create new descriptions for the images. Finally, the test set images were combined with attention mechanism to generate description text. The experiments prove that the method proposed in this paper can achieve automatic annotation of images and can effectively solve the attention diffusion problem in the process of image text generation.",
"fno": "153800a344",
"keywords": [
"Deep Learning Artificial Intelligence",
"Image Retrieval",
"Natural Language Processing",
"Text Analysis",
"Test Set Images",
"Attention Diffusion Problem",
"Automatic Annotation",
"Description Text",
"Keyword List",
"Description Set",
"Visual Entities",
"Attention Mechanism",
"Word 2 Vec Keyword Extraction",
"Word 2 Vec Visual Vocabulary Attention",
"Image Text Generation",
"Computer Science",
"Vocabulary",
"Visualization",
"Annotations",
"Communications Technology",
"Word 2 Vec",
"Image 2 Text",
"Image Captions",
"Attention"
],
"authors": [
{
"affiliation": "Intelligent Information Processing Lab., Yanbian University,Dept. of Computer Science & Technology,Yanji,Jilin,China",
"fullName": "Danyang Li",
"givenName": "Danyang",
"surname": "Li",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Intelligent Information Processing Lab., Yanbian University,Dept. of Computer Science & Technology,Yanji,Jilin,China",
"fullName": "Yahui Zhao",
"givenName": "Yahui",
"surname": "Zhao",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Intelligent Information Processing Lab., Yanbian University,Dept. of Computer Science & Technology,Yanji,Jilin,China",
"fullName": "Rongyi Cui",
"givenName": "Rongyi",
"surname": "Cui",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Intelligent Information Processing Lab., Yanbian University,Dept. of Computer Science & Technology,Yanji,Jilin,China",
"fullName": "Linlin Zhao",
"givenName": "Linlin",
"surname": "Zhao",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "acctcs",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-01-01T00:00:00",
"pubType": "proceedings",
"pages": "344-348",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-1538-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "153800a338",
"articleId": "1t90kmg9PC8",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "153800a349",
"articleId": "1t90icEtWHm",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icdar/2001/1263/0/12630260",
"title": "On the Influence of Vocabulary Size and Language Models in Unconstrained Handwritten Text Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/icdar/2001/12630260/12OmNB8kHYj",
"parentPublication": {
"id": "proceedings/icdar/2001/1263/0",
"title": "Proceedings of Sixth International Conference on Document Analysis and Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdar/2003/1960/2/196021101",
"title": "Offline Recognition of Large Vocabulary Cursive Handwritten Text",
"doi": null,
"abstractUrl": "/proceedings-article/icdar/2003/196021101/12OmNBqv2dB",
"parentPublication": {
"id": "proceedings/icdar/2003/1960/2",
"title": "Seventh International Conference on Document Analysis and Recognition, 2003. Proceedings.",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccicc/2016/3846/0/07862087",
"title": "A novel method for document summarization using Word2Vec",
"doi": null,
"abstractUrl": "/proceedings-article/iccicc/2016/07862087/12OmNClQ0so",
"parentPublication": {
"id": "proceedings/iccicc/2016/3846/0",
"title": "2016 IEEE 15th International Conference on Cognitive Informatics & Cognitive Computing (ICCI*CC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200g784",
"title": "Toward a Visual Concept Vocabulary for GAN Latent Space",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200g784/1BmH8Z43Guc",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/11/09872027",
"title": "VocabulARy: Learning Vocabulary in AR Supported by Keyword Visualisations",
"doi": null,
"abstractUrl": "/journal/tg/2022/11/09872027/1GhRUPatDmU",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ccict/2022/7224/0/722400a236",
"title": "Abstractive Text Summarization Using Attention-based Stacked LSTM",
"doi": null,
"abstractUrl": "/proceedings-article/ccict/2022/722400a236/1HpDT9jZauI",
"parentPublication": {
"id": "proceedings/ccict/2022/7224/0",
"title": "2022 Fifth International Conference on Computational Intelligence and Communication Technologies (CCICT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdar/2019/3014/0/301400a669",
"title": "A Multi-oriented Chinese Keyword Spotter Guided by Text Line Detection",
"doi": null,
"abstractUrl": "/proceedings-article/icdar/2019/301400a669/1h81uCWWl2w",
"parentPublication": {
"id": "proceedings/icdar/2019/3014/0",
"title": "2019 International Conference on Document Analysis and Recognition (ICDAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800l1422",
"title": "On Vocabulary Reliance in Scene Text Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800l1422/1m3nQz9XRyo",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pacificvis/2021/3931/0/393100a206",
"title": "KeywordMap: Attention-based Visual Exploration for Keyword Analysis",
"doi": null,
"abstractUrl": "/proceedings-article/pacificvis/2021/393100a206/1tTtpeWwWuQ",
"parentPublication": {
"id": "proceedings/pacificvis/2021/3931/0",
"title": "2021 IEEE 14th Pacific Visualization Symposium (PacificVis)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icwcsg/2021/2598/0/259800a561",
"title": "Research on Text Information Mining Technology of Substation Inspection Based on Improved Jieba",
"doi": null,
"abstractUrl": "/proceedings-article/icwcsg/2021/259800a561/1yQBmgCP5pm",
"parentPublication": {
"id": "proceedings/icwcsg/2021/2598/0",
"title": "2021 International Conference on Wireless Communications and Smart Grid (ICWCSG)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNvAS4s4",
"title": "Proceedings 1992 IEEE International Conference on Robotics and Automation",
"acronym": "robot",
"groupId": "1000639",
"volume": "0",
"displayVolume": "0",
"year": "1992",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNC8MsH7",
"doi": "10.1109/ROBOT.1992.220032",
"title": "Modeling of a computer-controlled zoom lens",
"normalizedTitle": "Modeling of a computer-controlled zoom lens",
"abstract": "The authors present calibration techniques to determine the relationships that map the parameters associated with a camera viewpoint to the parameters that can actually be controlled in a reconfigurable vision system. These relationships can be used to achieve the desired values of the camera viewpoint parameters by setting the controllable parameters to the appropriate values. The sensor setup consisted of a camera lens with zoom, focus, and aperture control. The calibration techniques were applied to the H 6*12.5 R Fujinon zoom lens and experimental results are shown.<>",
"abstracts": [
{
"abstractType": "Regular",
"content": "The authors present calibration techniques to determine the relationships that map the parameters associated with a camera viewpoint to the parameters that can actually be controlled in a reconfigurable vision system. These relationships can be used to achieve the desired values of the camera viewpoint parameters by setting the controllable parameters to the appropriate values. The sensor setup consisted of a camera lens with zoom, focus, and aperture control. The calibration techniques were applied to the H 6*12.5 R Fujinon zoom lens and experimental results are shown.<>",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The authors present calibration techniques to determine the relationships that map the parameters associated with a camera viewpoint to the parameters that can actually be controlled in a reconfigurable vision system. These relationships can be used to achieve the desired values of the camera viewpoint parameters by setting the controllable parameters to the appropriate values. The sensor setup consisted of a camera lens with zoom, focus, and aperture control. The calibration techniques were applied to the H 6*12.5 R Fujinon zoom lens and experimental results are shown.",
"fno": "00220032",
"keywords": [
"Calibration",
"Cameras",
"Computerised Control",
"Image Sensors",
"Computer Controlled Zoom Lens",
"Calibration",
"Reconfigurable Vision System",
"H 6 12 5 R Fujinon Zoom Lens",
"Lenses",
"Optical Sensors",
"Optical Refraction",
"Optical Variables Control",
"Apertures",
"Machine Vision",
"Calibration",
"Cameras",
"Computer Vision",
"Sensor Systems"
],
"authors": [
{
"affiliation": "IBM Thomas J. Watson Res. Center, Yorktown Heights, NY, USA",
"fullName": "K. Tarabanis",
"givenName": "K.",
"surname": "Tarabanis",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "IBM Thomas J. Watson Res. Center, Yorktown Heights, NY, USA",
"fullName": "R.Y. Tsai",
"givenName": "R.Y.",
"surname": "Tsai",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "IBM Thomas J. Watson Res. Center, Yorktown Heights, NY, USA",
"fullName": "D.S. Goodman",
"givenName": "D.S.",
"surname": "Goodman",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "robot",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "1992-01-01T00:00:00",
"pubType": "proceedings",
"pages": "1545,1546,1547,1548,1549,1550,1551",
"year": "1992",
"issn": null,
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "00220031",
"articleId": "12OmNzTYBUE",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "00220033",
"articleId": "12OmNyLiuwo",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iwar/1999/0359/0/03590103",
"title": "Registration with a Zoom Lens Camera for Augmented Reality Applications",
"doi": null,
"abstractUrl": "/proceedings-article/iwar/1999/03590103/12OmNBkfRhh",
"parentPublication": {
"id": "proceedings/iwar/1999/0359/0",
"title": "Augmented Reality, International Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isot/2014/6752/0/07119403",
"title": "A Compact, Large-Aperture Tunable Lens with Adaptive Spherical Correction",
"doi": null,
"abstractUrl": "/proceedings-article/isot/2014/07119403/12OmNwe2IvH",
"parentPublication": {
"id": "proceedings/isot/2014/6752/0",
"title": "2014 International Symposium on Optomechatronic Technologies (ISOT 2014)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2000/0662/1/06621403",
"title": "A Neural Optimization Framework for Zoom Lens Camera Calibration",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2000/06621403/12OmNxFsmJB",
"parentPublication": {
"id": "proceedings/cvpr/2000/0662/1",
"title": "Proceedings IEEE Conference on Computer Vision and Pattern Recognition. CVPR 2000 (Cat. No.PR00662)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2000/0750/4/07504495",
"title": "Camera Calibration with a Motorized Zoom Lens",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2000/07504495/12OmNxcMSfo",
"parentPublication": {
"id": "proceedings/icpr/2000/0750/4",
"title": "Pattern Recognition, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2017/0457/0/0457a125",
"title": "A Practical Method for Fully Automatic Intrinsic Camera Calibration Using Directionally Encoded Light",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2017/0457a125/12OmNxxdZCj",
"parentPublication": {
"id": "proceedings/cvpr/2017/0457/0",
"title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/1991/2148/0/00139763",
"title": "A stereoscopic camera employing a single main lens",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/1991/00139763/12OmNz5JCfI",
"parentPublication": {
"id": "proceedings/cvpr/1991/2148/0",
"title": "Proceedings. 1991 IEEE Computer Society Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/case/2007/1153/0/04341832",
"title": "Modeling the Variation of the Intrinsic Parameters of an Automatic Zoom Camera System using Moving Least-Squares",
"doi": null,
"abstractUrl": "/proceedings-article/case/2007/04341832/12OmNzvhvDb",
"parentPublication": {
"id": "proceedings/case/2007/1153/0",
"title": "3rd Annual IEEE Conference on Automation Science and Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/1996/11/i1105",
"title": "Some Aspects of Zoom Lens Camera Calibration",
"doi": null,
"abstractUrl": "/journal/tp/1996/11/i1105/13rRUNvgz5p",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2013/08/ttp2013081994",
"title": "Keeping a Pan-Tilt-Zoom Camera Calibrated",
"doi": null,
"abstractUrl": "/journal/tp/2013/08/ttp2013081994/13rRUxDItio",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797912",
"title": "Edible Lens Made of Agar",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797912/1cJ1gTRZdIs",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNynsbwy",
"title": "IEEE International Conference on Computational Photography (ICCP)",
"acronym": "iccp",
"groupId": "1800125",
"volume": "0",
"displayVolume": "0",
"year": "2009",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNvxsSTw",
"doi": "10.1109/ICCPHOT.2009.5559009",
"title": "Image destabilization: Programmable defocus using lens and sensor motion",
"normalizedTitle": "Image destabilization: Programmable defocus using lens and sensor motion",
"abstract": "We propose a novel camera setup in which both the lens and the sensor are perturbed during the exposure. We analyze the defocus effects produced by such a setup, and use it to demonstrate new methods for simulating a lens with a larger effective aperture size (i.e., shallower depth of field) and methods for achieving approximately depth-independent defocus blur size. We achieve exaggerated, programmable, and pleasing bokeh with relatively small aperture sizes such as those found on cell phone cameras. Destabilizing the standard alignment of the sensor and lens allows us to introduce programmable defocus effects and achieve greater flexibility in the image capture process.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We propose a novel camera setup in which both the lens and the sensor are perturbed during the exposure. We analyze the defocus effects produced by such a setup, and use it to demonstrate new methods for simulating a lens with a larger effective aperture size (i.e., shallower depth of field) and methods for achieving approximately depth-independent defocus blur size. We achieve exaggerated, programmable, and pleasing bokeh with relatively small aperture sizes such as those found on cell phone cameras. Destabilizing the standard alignment of the sensor and lens allows us to introduce programmable defocus effects and achieve greater flexibility in the image capture process.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We propose a novel camera setup in which both the lens and the sensor are perturbed during the exposure. We analyze the defocus effects produced by such a setup, and use it to demonstrate new methods for simulating a lens with a larger effective aperture size (i.e., shallower depth of field) and methods for achieving approximately depth-independent defocus blur size. We achieve exaggerated, programmable, and pleasing bokeh with relatively small aperture sizes such as those found on cell phone cameras. Destabilizing the standard alignment of the sensor and lens allows us to introduce programmable defocus effects and achieve greater flexibility in the image capture process.",
"fno": "05559009",
"keywords": [
"Cameras",
"Focusing",
"Image Restoration",
"Lenses",
"Image Destabilization",
"Programmable Defocus",
"Lens Aperture Size",
"Sensor Motion",
"Depth Independent Defocus Blur Size",
"Programmable Defocus Effects",
"Image Capture Process",
"Lenses",
"Apertures",
"Cameras",
"Equations",
"Synchronization",
"Photography",
"Mathematical Model"
],
"authors": [
{
"affiliation": "MIT Media Lab, USA",
"fullName": "Ankit Mohan",
"givenName": "Ankit",
"surname": "Mohan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Brown University, USA",
"fullName": "Douglas Lanman",
"givenName": "Douglas",
"surname": "Lanman",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "MIT Media Lab, USA",
"fullName": "Shinsaku Hiura",
"givenName": "Shinsaku",
"surname": "Hiura",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "MIT Media Lab, USA",
"fullName": "Ramesh Raskar",
"givenName": "Ramesh",
"surname": "Raskar",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iccp",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2009-04-01T00:00:00",
"pubType": "proceedings",
"pages": "1-8",
"year": "2009",
"issn": null,
"isbn": "978-1-4244-4534-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "05559012",
"articleId": "12OmNyRPgVx",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "05559010",
"articleId": "12OmNBiygwn",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/robot/1992/2720/0/00220032",
"title": "Modeling of a computer-controlled zoom lens",
"doi": null,
"abstractUrl": "/proceedings-article/robot/1992/00220032/12OmNC8MsH7",
"parentPublication": {
"id": "proceedings/robot/1992/2720/0",
"title": "Proceedings 1992 IEEE International Conference on Robotics and Automation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2015/8332/0/8332a326",
"title": "Optimal Camera Parameters for Depth from Defocus",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2015/8332a326/12OmNC8MsHb",
"parentPublication": {
"id": "proceedings/3dv/2015/8332/0",
"title": "2015 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2013/3022/0/3022a037",
"title": "External Mask Based Depth and Light Field Camera",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2013/3022a037/12OmNCctfnA",
"parentPublication": {
"id": "proceedings/iccvw/2013/3022/0",
"title": "2013 IEEE International Conference on Computer Vision Workshops (ICCVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/crv/2016/2491/0/2491a281",
"title": "Blur Calibration for Depth from Defocus",
"doi": null,
"abstractUrl": "/proceedings-article/crv/2016/2491a281/12OmNs59JP3",
"parentPublication": {
"id": "proceedings/crv/2016/2491/0",
"title": "2016 13th Conference on Computer and Robot Vision (CRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2016/5407/0/5407a370",
"title": "Video Depth-from-Defocus",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2016/5407a370/12OmNy5hRoj",
"parentPublication": {
"id": "proceedings/3dv/2016/5407/0",
"title": "2016 Fourth International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccp/2009/4534/0/05559018",
"title": "What are good apertures for defocus deblurring?",
"doi": null,
"abstractUrl": "/proceedings-article/iccp/2009/05559018/12OmNzWx050",
"parentPublication": {
"id": "proceedings/iccp/2009/4534/0",
"title": "IEEE International Conference on Computational Photography (ICCP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2019/1975/0/197500b337",
"title": "Defocus Magnification Using Conditional Adversarial Networks",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2019/197500b337/18j8PMi7sNa",
"parentPublication": {
"id": "proceedings/wacv/2019/1975/0",
"title": "2019 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2022/6946/0/694600q6283",
"title": "Learning to Deblur using Light Field Generated and Real Defocus Images",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600q6283/1H1j506VWA8",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2020/8128/0/812800b206",
"title": "A Novel Depth from Defocus Framework Based on a Thick Lens Camera Model",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2020/812800b206/1qyxoyCLZbW",
"parentPublication": {
"id": "proceedings/3dv/2020/8128/0",
"title": "2020 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccp/2021/1952/0/09466261",
"title": "Depth from Defocus with Learned Optics for Imaging and Occlusion-aware Depth Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/iccp/2021/09466261/1uSSVCuXE7C",
"parentPublication": {
"id": "proceedings/iccp/2021/1952/0",
"title": "2021 IEEE International Conference on Computational Photography (ICCP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNBzRNsl",
"title": "2014 Fifth International Symposium on Electronic System Design (ISED)",
"acronym": "ised",
"groupId": "1800292",
"volume": "0",
"displayVolume": "0",
"year": "2014",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNwdtw8Y",
"doi": "10.1109/ISED.2014.13",
"title": "Plasmonic Lens Based on Elliptically Tapered Metallic Nano Slits",
"normalizedTitle": "Plasmonic Lens Based on Elliptically Tapered Metallic Nano Slits",
"abstract": "Plasmonic lens using elliptically tapered nanoslits in a gold film to focus light is proposed. Two lenses with designed focal lengths of 2μm and 4μm respectively at 650nm were studied. Simulation results show that the intensity at the focal point of the proposed lens is enhanced as compared to a conventional plasmonic planar lens.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Plasmonic lens using elliptically tapered nanoslits in a gold film to focus light is proposed. Two lenses with designed focal lengths of 2μm and 4μm respectively at 650nm were studied. Simulation results show that the intensity at the focal point of the proposed lens is enhanced as compared to a conventional plasmonic planar lens.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Plasmonic lens using elliptically tapered nanoslits in a gold film to focus light is proposed. Two lenses with designed focal lengths of 2μm and 4μm respectively at 650nm were studied. Simulation results show that the intensity at the focal point of the proposed lens is enhanced as compared to a conventional plasmonic planar lens.",
"fno": "6965a025",
"keywords": [
"Lenses",
"Plasmons",
"Optical Films",
"Focusing",
"Couplings",
"Optical Waveguides",
"Optimized Production Technology",
"Nanophotonic Device",
"Surface Plasmon Polaritons",
"Extraordinary Optical Transmission",
"Optical Phase Front Control",
"Metallic Grating"
],
"authors": [
{
"affiliation": null,
"fullName": "U. Aparna",
"givenName": "U.",
"surname": "Aparna",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "H.S. Mruthyunjaya",
"givenName": "H.S.",
"surname": "Mruthyunjaya",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "M. Sathish Kumar",
"givenName": "M. Sathish",
"surname": "Kumar",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ised",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2014-12-01T00:00:00",
"pubType": "proceedings",
"pages": "25-28",
"year": "2014",
"issn": null,
"isbn": "978-1-4799-6965-4",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "6965a020",
"articleId": "12OmNwbukfy",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "6965a029",
"articleId": "12OmNx38vLp",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iqec/2005/9240/0/01561007",
"title": "Guiding properties of index-guided two-dimensional optical waveguides",
"doi": null,
"abstractUrl": "/proceedings-article/iqec/2005/01561007/12OmNA0dMH0",
"parentPublication": {
"id": "proceedings/iqec/2005/9240/0",
"title": "International Quantum Electronics Conference, 2005.",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iqec/2005/9240/0/01561029",
"title": "Ultrafast nonlinear optics in metallic photonic crystals",
"doi": null,
"abstractUrl": "/proceedings-article/iqec/2005/01561029/12OmNApLGOE",
"parentPublication": {
"id": "proceedings/iqec/2005/9240/0",
"title": "International Quantum Electronics Conference, 2005.",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isot/2014/6752/0/07119451",
"title": "Electro-optic Effect in Polydimethylsiloxane-Cellulose Nanocrystal Composite for Reconfigurable Lens",
"doi": null,
"abstractUrl": "/proceedings-article/isot/2014/07119451/12OmNBUS7aB",
"parentPublication": {
"id": "proceedings/isot/2014/6752/0",
"title": "2014 International Symposium on Optomechatronic Technologies (ISOT 2014)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2017/1032/0/1032a966",
"title": "Focal Track: Depth and Accommodation with Oscillating Lens Deformation",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2017/1032a966/12OmNBrlPxE",
"parentPublication": {
"id": "proceedings/iccv/2017/1032/0",
"title": "2017 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isot/2014/6752/0/07119403",
"title": "A Compact, Large-Aperture Tunable Lens with Adaptive Spherical Correction",
"doi": null,
"abstractUrl": "/proceedings-article/isot/2014/07119403/12OmNwe2IvH",
"parentPublication": {
"id": "proceedings/isot/2014/6752/0",
"title": "2014 International Symposium on Optomechatronic Technologies (ISOT 2014)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/uic-atc-scalcom/2015/7211/0/07518459",
"title": "Tunable Plasmon-Induced Transparency in Plasmonic Bus Waveguide with Side-Coupled Nano-cylinder Cavity",
"doi": null,
"abstractUrl": "/proceedings-article/uic-atc-scalcom/2015/07518459/12OmNwogh9X",
"parentPublication": {
"id": "proceedings/uic-atc-scalcom/2015/7211/0",
"title": "2015 IEEE 12th Intl Conf on Ubiquitous Intelligence and Computing and 2015 IEEE 12th Intl Conf on Autonomic and Trusted Computing and 2015 IEEE 15th Intl Conf on Scalable Computing and Communications and Its Associated Workshops (UIC-ATC-ScalCom)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/rtcsa/2014/3953/0/06910552",
"title": "A plasmonic refractive index sensor based on a MIM waveguide with a side-coupled nanodisk resonator",
"doi": null,
"abstractUrl": "/proceedings-article/rtcsa/2014/06910552/12OmNxFsmsX",
"parentPublication": {
"id": "proceedings/rtcsa/2014/3953/0",
"title": "2014 IEEE 20th International Conference on Embedded and Real-Time Computing Systems and Applications (RTCSA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iqec/2005/9240/0/01560941",
"title": "Chip-scale photonics with plasmonic components",
"doi": null,
"abstractUrl": "/proceedings-article/iqec/2005/01560941/12OmNxvNZWw",
"parentPublication": {
"id": "proceedings/iqec/2005/9240/0",
"title": "International Quantum Electronics Conference, 2005.",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ssst/1990/2038/0/00138219",
"title": "Propagation characteristics and losses in graded tapered slab optimal coupler",
"doi": null,
"abstractUrl": "/proceedings-article/ssst/1990/00138219/12OmNzWx06q",
"parentPublication": {
"id": "proceedings/ssst/1990/2038/0",
"title": "Proceedings The Twenty-Second Southeastern Symposium on System Theory",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/eqec/2005/8973/0/01567514",
"title": "Near-field-light lens for nano-focusing of atoms",
"doi": null,
"abstractUrl": "/proceedings-article/eqec/2005/01567514/12OmNzvz6Ip",
"parentPublication": {
"id": "proceedings/eqec/2005/8973/0",
"title": "2005 European Quantum Electronics Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNAR1b0Z",
"title": "2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"acronym": "cvprw",
"groupId": "1001809",
"volume": "0",
"displayVolume": "0",
"year": "2017",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNwtn3En",
"doi": "10.1109/CVPRW.2017.223",
"title": "Optimizing the Lens Selection Process for Multi-focus Plenoptic Cameras and Numerical Evaluation",
"normalizedTitle": "Optimizing the Lens Selection Process for Multi-focus Plenoptic Cameras and Numerical Evaluation",
"abstract": "The last years have seen a quick rise of digital photography. Plenoptic cameras provide extended capabilities with respect to previous models. Multi-focus cameras enlarge the depth-of-field of the pictures using different focal lengths in the lens composing the array, but questions still arise on how to select and use these lenses. In this work a further insight on the lens selection was made, and a novel method was developed in order to choose the best available lens combination for the disparity estimation. We test different lens combinations, ranking them based on the error and the number of different lenses used, creating a mapping function that relates the virtual depth with the combination that achieves the best result. The results are then organized in a look up table that can be tuned to trade off between performances and accuracy. This allows for fast and accurate lens selection. Moreover, new synthetic images with respective ground truth are provided, in order to confirm that this work performs better than the current state of the art in efficiency and accuracy of the results.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The last years have seen a quick rise of digital photography. Plenoptic cameras provide extended capabilities with respect to previous models. Multi-focus cameras enlarge the depth-of-field of the pictures using different focal lengths in the lens composing the array, but questions still arise on how to select and use these lenses. In this work a further insight on the lens selection was made, and a novel method was developed in order to choose the best available lens combination for the disparity estimation. We test different lens combinations, ranking them based on the error and the number of different lenses used, creating a mapping function that relates the virtual depth with the combination that achieves the best result. The results are then organized in a look up table that can be tuned to trade off between performances and accuracy. This allows for fast and accurate lens selection. Moreover, new synthetic images with respective ground truth are provided, in order to confirm that this work performs better than the current state of the art in efficiency and accuracy of the results.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The last years have seen a quick rise of digital photography. Plenoptic cameras provide extended capabilities with respect to previous models. Multi-focus cameras enlarge the depth-of-field of the pictures using different focal lengths in the lens composing the array, but questions still arise on how to select and use these lenses. In this work a further insight on the lens selection was made, and a novel method was developed in order to choose the best available lens combination for the disparity estimation. We test different lens combinations, ranking them based on the error and the number of different lenses used, creating a mapping function that relates the virtual depth with the combination that achieves the best result. The results are then organized in a look up table that can be tuned to trade off between performances and accuracy. This allows for fast and accurate lens selection. Moreover, new synthetic images with respective ground truth are provided, in order to confirm that this work performs better than the current state of the art in efficiency and accuracy of the results.",
"fno": "0733b763",
"keywords": [
"Lenses",
"Cameras",
"Estimation",
"Calibration",
"Sensor Arrays",
"Image Color Analysis",
"Conferences"
],
"authors": [
{
"affiliation": null,
"fullName": "Luca Palmieri",
"givenName": "Luca",
"surname": "Palmieri",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Reinhard Koch",
"givenName": "Reinhard",
"surname": "Koch",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvprw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2017-07-01T00:00:00",
"pubType": "proceedings",
"pages": "1763-1774",
"year": "2017",
"issn": "2160-7516",
"isbn": "978-1-5386-0733-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "0733b754",
"articleId": "12OmNC4eSug",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "0733b775",
"articleId": "12OmNzVoBO6",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iccp/2010/7023/0/05585092",
"title": "Rich image capture with plenoptic cameras",
"doi": null,
"abstractUrl": "/proceedings-article/iccp/2010/05585092/12OmNAYGls3",
"parentPublication": {
"id": "proceedings/iccp/2010/7023/0",
"title": "2010 IEEE International Conference on Computational Photography (ICCP 2010)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccp/2012/1662/0/06215210",
"title": "Fourier Slice Super-resolution in plenoptic cameras",
"doi": null,
"abstractUrl": "/proceedings-article/iccp/2012/06215210/12OmNwEJ0Kt",
"parentPublication": {
"id": "proceedings/iccp/2012/1662/0",
"title": "2012 IEEE International Conference on Computational Photography (ICCP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2014/4308/0/4308a455",
"title": "Dictionary Learning Based Color Demosaicing for Plenoptic Cameras",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2014/4308a455/12OmNwFzO2X",
"parentPublication": {
"id": "proceedings/cvprw/2014/4308/0",
"title": "2014 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2017/1032/0/1032a957",
"title": "Corner-Based Geometric Calibration of Multi-focus Plenoptic Cameras",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2017/1032a957/12OmNy5R3sS",
"parentPublication": {
"id": "proceedings/iccv/2017/1032/0",
"title": "2017 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/1992/02/i0099",
"title": "Single Lens Stereo with a Plenoptic Camera",
"doi": null,
"abstractUrl": "/journal/tp/1992/02/i0099/13rRUwdIOT2",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2011/01/mcg2011010062",
"title": "Using Focused Plenoptic Cameras for Rich Image Capture",
"doi": null,
"abstractUrl": "/magazine/cg/2011/01/mcg2011010062/13rRUyfKIKD",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2018/8425/0/842500a286",
"title": "Calibrating Light-Field Cameras Using Plenoptic Disc Features",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2018/842500a286/17D45VtKisQ",
"parentPublication": {
"id": "proceedings/3dv/2018/8425/0",
"title": "2018 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2019/9552/0/955200a115",
"title": "Blind Calibration for Focused Plenoptic Cameras",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2019/955200a115/1cdOJf1lggo",
"parentPublication": {
"id": "proceedings/icme/2019/9552/0",
"title": "2019 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800c542",
"title": "Blur Aware Calibration of Multi-Focus Plenoptic Camera",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800c542/1m3nIj7S0cU",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2021/2688/0/268800b125",
"title": "Ray Tracing-Guided Design of Plenoptic Cameras",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2021/268800b125/1zWEpFekVbi",
"parentPublication": {
"id": "proceedings/3dv/2021/2688/0",
"title": "2021 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1cI6akLvAuQ",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2019",
"__typename": "ProceedingType"
},
"article": {
"id": "1cJ1gTRZdIs",
"doi": "10.1109/VR.2019.8797912",
"title": "Edible Lens Made of Agar",
"normalizedTitle": "Edible Lens Made of Agar",
"abstract": "In this paper, we propose an edible lens made from foodstuffs. The optical lens is used for forming an optical system. It is expected that it will be possible to make optical systems edible by preparing an edible lens that has the same function as the conventional optical lens. In order to realize this, prototypes of edible lens made of agar were developed, because the agar had been reported as a material to form edible retroreflector materials. Furthermore, we investigated its optical performance.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this paper, we propose an edible lens made from foodstuffs. The optical lens is used for forming an optical system. It is expected that it will be possible to make optical systems edible by preparing an edible lens that has the same function as the conventional optical lens. In order to realize this, prototypes of edible lens made of agar were developed, because the agar had been reported as a material to form edible retroreflector materials. Furthermore, we investigated its optical performance.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this paper, we propose an edible lens made from foodstuffs. The optical lens is used for forming an optical system. It is expected that it will be possible to make optical systems edible by preparing an edible lens that has the same function as the conventional optical lens. In order to realize this, prototypes of edible lens made of agar were developed, because the agar had been reported as a material to form edible retroreflector materials. Furthermore, we investigated its optical performance.",
"fno": "08797912",
"keywords": [
"Food Processing Industry",
"Lenses",
"Retroreflectors",
"Optical System",
"Edible Lens",
"Agar",
"Optical Lens",
"Foodstuffs",
"Retroreflector Materials",
"Lenses",
"Prototypes",
"Biomedical Optical Imaging",
"Optical Refraction",
"Optical Variables Control",
"Image Resolution",
"Optical Imaging",
"Edible",
"Lens",
"Agar",
"Hardware",
"Emerging Technologies",
"Analysis And Design Of Emerging Devices And Systems",
"Emerging Tools And Methodologies"
],
"authors": [
{
"affiliation": "Gunma University",
"fullName": "Miyu Nomura",
"givenName": "Miyu",
"surname": "Nomura",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Gunma University",
"fullName": "Hiromasa Oku",
"givenName": "Hiromasa",
"surname": "Oku",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2019-03-01T00:00:00",
"pubType": "proceedings",
"pages": "1104-1105",
"year": "2019",
"issn": null,
"isbn": "978-1-7281-1377-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08798145",
"articleId": "1cJ0YZ9Bfgs",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08797966",
"articleId": "1cJ19fldjVu",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iccv/2017/1032/0/1032d867",
"title": "Parameter-Free Lens Distortion Calibration of Central Cameras",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2017/1032d867/12OmNB1eJyc",
"parentPublication": {
"id": "proceedings/iccv/2017/1032/0",
"title": "2017 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isot/2014/6752/0/07119451",
"title": "Electro-optic Effect in Polydimethylsiloxane-Cellulose Nanocrystal Composite for Reconfigurable Lens",
"doi": null,
"abstractUrl": "/proceedings-article/isot/2014/07119451/12OmNBUS7aB",
"parentPublication": {
"id": "proceedings/isot/2014/6752/0",
"title": "2014 International Symposium on Optomechatronic Technologies (ISOT 2014)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2012/1611/0/06239195",
"title": "Single lens off-chip cellphone microscopy",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2012/06239195/12OmNBsue51",
"parentPublication": {
"id": "proceedings/cvprw/2012/1611/0",
"title": "2012 IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/robot/1992/2720/0/00220032",
"title": "Modeling of a computer-controlled zoom lens",
"doi": null,
"abstractUrl": "/proceedings-article/robot/1992/00220032/12OmNC8MsH7",
"parentPublication": {
"id": "proceedings/robot/1992/2720/0",
"title": "Proceedings 1992 IEEE International Conference on Robotics and Automation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iri/2014/5880/0/07051914",
"title": "Towards ray optics formalization of optical imaging systems",
"doi": null,
"abstractUrl": "/proceedings-article/iri/2014/07051914/12OmNvq5jzp",
"parentPublication": {
"id": "proceedings/iri/2014/5880/0",
"title": "2014 IEEE International Conference on Information Reuse and Integration (IRI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icip/1994/6952/2/00413504",
"title": "Calibration for peripheral attenuation in intensity images",
"doi": null,
"abstractUrl": "/proceedings-article/icip/1994/00413504/12OmNvsDHJ5",
"parentPublication": {
"id": "proceedings/icip/1994/6952/2",
"title": "Proceedings of 1st International Conference on Image Processing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2015/8391/0/8391a612",
"title": "Self-Calibration of Optical Lenses",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2015/8391a612/12OmNyQ7FPm",
"parentPublication": {
"id": "proceedings/iccv/2015/8391/0",
"title": "2015 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bwcca/2014/4173/0/4173a371",
"title": "Optical Ray Tracing Based on Dijkstra Algorithm in Inhomogeneous Medium",
"doi": null,
"abstractUrl": "/proceedings-article/bwcca/2014/4173a371/12OmNzXFozK",
"parentPublication": {
"id": "proceedings/bwcca/2014/4173/0",
"title": "2014 Ninth International Conference on Broadband and Wireless Computing, Communication and Applications (BWCCA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798075",
"title": "Edible Retroreflector Made of Candy",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798075/1cJ0Sk2qCgU",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/05/09384477",
"title": "Lenslet VR: Thin, Flat and Wide-FOV Virtual Reality Display Using Fresnel Lens and Lenslet Array",
"doi": null,
"abstractUrl": "/journal/tg/2021/05/09384477/1scDuWhBPY4",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1jPbbHBGDHq",
"title": "2020 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"acronym": "wacv",
"groupId": "1000040",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1jPbDql8Ptu",
"doi": "10.1109/WACV45572.2020.9093558",
"title": "Online Lens Motion Smoothing for Video Autofocus",
"normalizedTitle": "Online Lens Motion Smoothing for Video Autofocus",
"abstract": "Autofocus (AF) is the process of moving the camera's lens such that desired scene content is in focus. AF for single image capture is a well-studied research topic and most modern cameras have hardware support that allows quick lens movements to optimize image sharpness. How to best perform AF for video is less clear. Conventional wisdom would suggest that each temporal frame should be as sharp as possible. However, unlike single image capture, the effects of the lens movement is visible in the captured video. As a result, there are two parameters to consider in AF for video: sharpness and lens movement. In this paper, we show that users preferred videos with smooth lens movement, even if it results in less overall sharpness. Based on this observation, we propose two novel AF algorithms for video that strive for both smooth lens movement and sharp scene content. Specifically, we introduce (1) a bidirectional long short-term memory (BLSTM) module trained on smooth lens trajectories and (2) a simple weighted moving average (WMA) method that factors in prior lens motion. Both of these methods have demonstrated excellent results in terms of reducing lens movements (up to 64% reduction) without greatly affecting the sharpness (less than 5.2% change in sharpness). Moreover, videos produced using our methods are more preferred by users over conventional AF that aims only for maximizing sharpness.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Autofocus (AF) is the process of moving the camera's lens such that desired scene content is in focus. AF for single image capture is a well-studied research topic and most modern cameras have hardware support that allows quick lens movements to optimize image sharpness. How to best perform AF for video is less clear. Conventional wisdom would suggest that each temporal frame should be as sharp as possible. However, unlike single image capture, the effects of the lens movement is visible in the captured video. As a result, there are two parameters to consider in AF for video: sharpness and lens movement. In this paper, we show that users preferred videos with smooth lens movement, even if it results in less overall sharpness. Based on this observation, we propose two novel AF algorithms for video that strive for both smooth lens movement and sharp scene content. Specifically, we introduce (1) a bidirectional long short-term memory (BLSTM) module trained on smooth lens trajectories and (2) a simple weighted moving average (WMA) method that factors in prior lens motion. Both of these methods have demonstrated excellent results in terms of reducing lens movements (up to 64% reduction) without greatly affecting the sharpness (less than 5.2% change in sharpness). Moreover, videos produced using our methods are more preferred by users over conventional AF that aims only for maximizing sharpness.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Autofocus (AF) is the process of moving the camera's lens such that desired scene content is in focus. AF for single image capture is a well-studied research topic and most modern cameras have hardware support that allows quick lens movements to optimize image sharpness. How to best perform AF for video is less clear. Conventional wisdom would suggest that each temporal frame should be as sharp as possible. However, unlike single image capture, the effects of the lens movement is visible in the captured video. As a result, there are two parameters to consider in AF for video: sharpness and lens movement. In this paper, we show that users preferred videos with smooth lens movement, even if it results in less overall sharpness. Based on this observation, we propose two novel AF algorithms for video that strive for both smooth lens movement and sharp scene content. Specifically, we introduce (1) a bidirectional long short-term memory (BLSTM) module trained on smooth lens trajectories and (2) a simple weighted moving average (WMA) method that factors in prior lens motion. Both of these methods have demonstrated excellent results in terms of reducing lens movements (up to 64% reduction) without greatly affecting the sharpness (less than 5.2% change in sharpness). Moreover, videos produced using our methods are more preferred by users over conventional AF that aims only for maximizing sharpness.",
"fno": "09093558",
"keywords": [
"Cameras",
"Image Capture",
"Image Motion Analysis",
"Lenses",
"Moving Average Processes",
"Recurrent Neural Nets",
"Smoothing Methods",
"Video Signal Processing",
"Online Lens Motion Smoothing",
"Video Autofocus",
"Camera Lens",
"Scene Content",
"Single Image Capture",
"Image Sharpness",
"Captured Video",
"Smooth Lens Movement",
"Sharp Scene Content",
"Smooth Lens Trajectories",
"Simple Weighted Moving Average Method",
"Lens Movements",
"Bidirectional Long Short Term Memory",
"Lenses",
"Smoothing Methods",
"Clocks",
"Face",
"Cameras",
"Trajectory",
"Image Capture"
],
"authors": [
{
"affiliation": "York University,Toronto",
"fullName": "Abdullah Abuolaim",
"givenName": "Abdullah",
"surname": "Abuolaim",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "York University,Toronto",
"fullName": "Michael S. Brown",
"givenName": "Michael S.",
"surname": "Brown",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "wacv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-03-01T00:00:00",
"pubType": "proceedings",
"pages": "147-155",
"year": "2020",
"issn": null,
"isbn": "978-1-7281-6553-0",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "09093643",
"articleId": "1jPbwJf3Ta0",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09093366",
"articleId": "1jPbu1QgJb2",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/bigcomp/2016/8796/0/07425799",
"title": "Recursive motion smoothing for online video stabilization in wide-area surveillance",
"doi": null,
"abstractUrl": "/proceedings-article/bigcomp/2016/07425799/12OmNqAU6na",
"parentPublication": {
"id": "proceedings/bigcomp/2016/8796/0",
"title": "2016 International Conference on Big Data and Smart Computing (BigComp)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccp/2011/707/0/05753127",
"title": "Motion invariance and custom blur from lens motion",
"doi": null,
"abstractUrl": "/proceedings-article/iccp/2011/05753127/12OmNqIzh8Z",
"parentPublication": {
"id": "proceedings/iccp/2011/707/0",
"title": "IEEE International Conference on Computational Photography (ICCP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccp/2009/4534/0/05559009",
"title": "Image destabilization: Programmable defocus using lens and sensor motion",
"doi": null,
"abstractUrl": "/proceedings-article/iccp/2009/05559009/12OmNvxsSTw",
"parentPublication": {
"id": "proceedings/iccp/2009/4534/0",
"title": "IEEE International Conference on Computational Photography (ICCP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ic3/2014/5172/0/06897146",
"title": "Using WebGL to implement a glass lens in Online Labs",
"doi": null,
"abstractUrl": "/proceedings-article/ic3/2014/06897146/12OmNwDSdlO",
"parentPublication": {
"id": "proceedings/ic3/2014/5172/0",
"title": "2014 Seventh International Conference on Contemporary Computing (IC3)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/irc/2017/6724/0/07926545",
"title": "Bezier Curve-Based Smoothing for Path Planner with Curvature Constraint",
"doi": null,
"abstractUrl": "/proceedings-article/irc/2017/07926545/12OmNxFJXCN",
"parentPublication": {
"id": "proceedings/irc/2017/6724/0",
"title": "2017 First IEEE International Conference on Robotic Computing (IRC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/date/2006/1/1/01657016",
"title": "Lens Aberration Aware Timing-Driven Placement",
"doi": null,
"abstractUrl": "/proceedings-article/date/2006/01657016/12OmNxZ2Glb",
"parentPublication": {
"id": "proceedings/date/2006/1/1",
"title": "2006 Design, Automation and Test in Europe",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2015/8391/0/8391d209",
"title": "Adaptive Exponential Smoothing for Online Filtering of Pixel Prediction Maps",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2015/8391d209/12OmNyQ7FHa",
"parentPublication": {
"id": "proceedings/iccv/2015/8391/0",
"title": "2015 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2010/7846/0/05571184",
"title": "Hand Motion Recognition and Visualisation for Direct Sign Writing",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2010/05571184/12OmNzahbU7",
"parentPublication": {
"id": "proceedings/iv/2010/7846/0",
"title": "2010 14th International Conference Information Visualisation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200m2106",
"title": "Video Geo-Localization Employing Geo-Temporal Feature Learning and GPS Trajectory Smoothing",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200m2106/1BmLrWSqWze",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fas*w/2019/2406/0/240600a080",
"title": "Learning and Sharing for Improved k-Coverage in Smart Camera Networks",
"doi": null,
"abstractUrl": "/proceedings-article/fas*w/2019/240600a080/1ckrvvtgRz2",
"parentPublication": {
"id": "proceedings/fas*w/2019/2406/0",
"title": "2019 IEEE 4th International Workshops on Foundations and Applications of Self* Systems (FAS*W)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1r5431kxiAE",
"title": "2020 IEEE 19th International Conference on Trust, Security and Privacy in Computing and Communications (TrustCom)",
"acronym": "trustcom",
"groupId": "1800729",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1r547dOl85i",
"doi": "10.1109/TrustCom50675.2020.00172",
"title": "Explicitly Privacy-Aware Space Usage Analysis",
"normalizedTitle": "Explicitly Privacy-Aware Space Usage Analysis",
"abstract": "Surveillance in private and public spaces provides observers with information that can enhance protection and efficiency but usually infringes upon the privacy of the individuals and groups. These informational privacy risks are centered on users' perceived and design-induced threats. They cannot be removed completely but can be minimized using suitable anonymization techniques. To minimize the users' informational privacy threats, we designed a privacy-aware surveillance system that gives the users leverage over the anonymization filters, to physically adjust the opaqueness of the camera lens used in the prototype according to their privacy requirements. We implement our prototype in the context of office space surveillance, where the proposed solution considers privacy requirements in such environments to improve users' trust in the surveillance system and reduce their privacy concerns.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Surveillance in private and public spaces provides observers with information that can enhance protection and efficiency but usually infringes upon the privacy of the individuals and groups. These informational privacy risks are centered on users' perceived and design-induced threats. They cannot be removed completely but can be minimized using suitable anonymization techniques. To minimize the users' informational privacy threats, we designed a privacy-aware surveillance system that gives the users leverage over the anonymization filters, to physically adjust the opaqueness of the camera lens used in the prototype according to their privacy requirements. We implement our prototype in the context of office space surveillance, where the proposed solution considers privacy requirements in such environments to improve users' trust in the surveillance system and reduce their privacy concerns.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Surveillance in private and public spaces provides observers with information that can enhance protection and efficiency but usually infringes upon the privacy of the individuals and groups. These informational privacy risks are centered on users' perceived and design-induced threats. They cannot be removed completely but can be minimized using suitable anonymization techniques. To minimize the users' informational privacy threats, we designed a privacy-aware surveillance system that gives the users leverage over the anonymization filters, to physically adjust the opaqueness of the camera lens used in the prototype according to their privacy requirements. We implement our prototype in the context of office space surveillance, where the proposed solution considers privacy requirements in such environments to improve users' trust in the surveillance system and reduce their privacy concerns.",
"fno": "438000b285",
"keywords": [
"Data Privacy",
"Surveillance",
"Privacy Aware Space Usage Analysis",
"Private Spaces",
"Public Spaces",
"Informational Privacy Risks",
"Design Induced Threats",
"Suitable Anonymization Techniques",
"Privacy Aware Surveillance System",
"Anonymization Filters",
"Privacy Requirements",
"Office Space Surveillance",
"Privacy Concerns",
"Privacy",
"Surveillance",
"Prototypes",
"Observers",
"Cameras",
"Security",
"Lenses",
"Privacy",
"Perceived Privacy",
"Surveillance",
"Camera Based",
"Office Space",
"Privacy Aware"
],
"authors": [
{
"affiliation": "Institute of Computer Science, University of St. Gallen, Switzerland",
"fullName": "Sanjiv S. Jha",
"givenName": "Sanjiv S.",
"surname": "Jha",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Institute of Computer Science, University of St. Gallen, Switzerland",
"fullName": "Simon Mayer",
"givenName": "Simon",
"surname": "Mayer",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Technology Studies, University of St. Gallen, Switzerland",
"fullName": "Tanja Schneider",
"givenName": "Tanja",
"surname": "Schneider",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "trustcom",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-12-01T00:00:00",
"pubType": "proceedings",
"pages": "1285-1290",
"year": "2020",
"issn": null,
"isbn": "978-1-6654-0392-4",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "438000b276",
"articleId": "1r545I4aCIg",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "438000b291",
"articleId": "1r54fWnYmiY",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iotdi/2018/6312/0/631201a296",
"title": "Poster Abstract: Preserving IoT Privacy in Sharing Economy Via Smart Contract",
"doi": null,
"abstractUrl": "/proceedings-article/iotdi/2018/631201a296/12OmNAoUTud",
"parentPublication": {
"id": "proceedings/iotdi/2018/6312/0",
"title": "2018 IEEE/ACM Third International Conference on Internet-of-Things Design and Implementation (IoTDI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/msn/2016/5696/0/07950226",
"title": "User-Demand-Oriented Privacy-Preservation in Video Delivering",
"doi": null,
"abstractUrl": "/proceedings-article/msn/2016/07950226/12OmNBqdrac",
"parentPublication": {
"id": "proceedings/msn/2016/5696/0",
"title": "2016 12th International Conference on Mobile Ad-Hoc and Sensor Networks (MSN)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2014/5209/0/06977427",
"title": "Anonymous Camera for Privacy Protection",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2014/06977427/12OmNqFJhNU",
"parentPublication": {
"id": "proceedings/icpr/2014/5209/0",
"title": "2014 22nd International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pst/2012/2323/0/20943000V02",
"title": "Privacy invasion in business environments",
"doi": null,
"abstractUrl": "/proceedings-article/pst/2012/20943000V02/12OmNrYlmL9",
"parentPublication": {
"id": "proceedings/pst/2012/2323/0",
"title": "2012 Tenth Annual International Conference on Privacy, Security and Trust",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ssiri/2009/3758/0/3758a291",
"title": "Implementation of the Privacy Protection in Video Surveillance System",
"doi": null,
"abstractUrl": "/proceedings-article/ssiri/2009/3758a291/12OmNvDqsPd",
"parentPublication": {
"id": "proceedings/ssiri/2009/3758/0",
"title": "Secure System Integration and Reliability Improvement",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2008/2174/0/04761616",
"title": "Context aware privacy in visual surveillance",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2008/04761616/12OmNwDACq3",
"parentPublication": {
"id": "proceedings/icpr/2008/2174/0",
"title": "ICPR 2008 19th International Conference on Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/avss/2017/2939/0/08078532",
"title": "PASS: Privacy aware secure signature scheme for surveillance systems",
"doi": null,
"abstractUrl": "/proceedings-article/avss/2017/08078532/12OmNxFJXPA",
"parentPublication": {
"id": "proceedings/avss/2017/2939/0",
"title": "2017 14th IEEE International Conference on Advanced Video and Signal Based Surveillance (AVSS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/avss/2013/0703/0/06636659",
"title": "Enforcing privacy through usage-controlled video surveillance",
"doi": null,
"abstractUrl": "/proceedings-article/avss/2013/06636659/12OmNz61dFV",
"parentPublication": {
"id": "proceedings/avss/2013/0703/0",
"title": "2013 10th IEEE International Conference on Advanced Video and Signal Based Surveillance (AVSS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ec/2016/03/07181657",
"title": "Lossless ROI Privacy Protection of H.264/AVC Compressed Surveillance Videos",
"doi": null,
"abstractUrl": "/journal/ec/2016/03/07181657/13rRUNvgyZN",
"parentPublication": {
"id": "trans/ec",
"title": "IEEE Transactions on Emerging Topics in Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tq/2022/01/09035391",
"title": "Using Metrics Suites to Improve the Measurement of Privacy in Graphs",
"doi": null,
"abstractUrl": "/journal/tq/2022/01/09035391/1iaePndvbS8",
"parentPublication": {
"id": "trans/tq",
"title": "IEEE Transactions on Dependable and Secure Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNCcbEdf",
"title": "2017 Seventh International Conference on Affective Computing and Intelligent Interaction (ACII)",
"acronym": "acii",
"groupId": "1002992",
"volume": "0",
"displayVolume": "0",
"year": "2017",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNqzcvMI",
"doi": "10.1109/ACII.2017.8273647",
"title": "GIFGIF+: Collecting emotional animated GIFs with clustered multi-task learning",
"normalizedTitle": "GIFGIF+: Collecting emotional animated GIFs with clustered multi-task learning",
"abstract": "Animated GIFs are widely used on the Internet to express emotions, but their automatic analysis is largely unexplored. Existing GIF datasets with emotion labels are too small for training contemporary machine learning models, so we propose a semi-automatic method to collect emotional animated GIFs from the Internet with the least amount of human labor. The method trains weak emotion recognizers on labeled data, and uses them to sort a large quantity of unlabeled GIFs. We found that by exploiting the clustered structure of emotions, the number of GIFs a labeler needs to check can be greatly reduced. Using the proposed method, a dataset called GIFGIF+ with 23,544 GIFs over 17 emotions was created, which provides a promising platform for affective computing research.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Animated GIFs are widely used on the Internet to express emotions, but their automatic analysis is largely unexplored. Existing GIF datasets with emotion labels are too small for training contemporary machine learning models, so we propose a semi-automatic method to collect emotional animated GIFs from the Internet with the least amount of human labor. The method trains weak emotion recognizers on labeled data, and uses them to sort a large quantity of unlabeled GIFs. We found that by exploiting the clustered structure of emotions, the number of GIFs a labeler needs to check can be greatly reduced. Using the proposed method, a dataset called GIFGIF+ with 23,544 GIFs over 17 emotions was created, which provides a promising platform for affective computing research.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Animated GIFs are widely used on the Internet to express emotions, but their automatic analysis is largely unexplored. Existing GIF datasets with emotion labels are too small for training contemporary machine learning models, so we propose a semi-automatic method to collect emotional animated GIFs from the Internet with the least amount of human labor. The method trains weak emotion recognizers on labeled data, and uses them to sort a large quantity of unlabeled GIFs. We found that by exploiting the clustered structure of emotions, the number of GIFs a labeler needs to check can be greatly reduced. Using the proposed method, a dataset called GIFGIF+ with 23,544 GIFs over 17 emotions was created, which provides a promising platform for affective computing research.",
"fno": "08273647",
"keywords": [
"Computer Animation",
"Emotion Recognition",
"Learning Artificial Intelligence",
"Pattern Clustering",
"Emotional Animated GI Fs",
"Clustered Multitask Learning",
"GIF Datasets",
"Emotion Labels",
"Weak Emotion Recognizers",
"Unlabeled GI Fs",
"Emotion Expression",
"Contemporary Machine Learning Models",
"Affective Computing Research",
"GIFGIF Dataset",
"Emotion Recognition",
"Visualization",
"Videos",
"Internet",
"Standards",
"Affective Computing",
"Pipelines"
],
"authors": [
{
"affiliation": "Media Lab, Massachusetts Institute of Technology, Cambridge, MA, USA",
"fullName": "Weixuan Chen",
"givenName": "Weixuan",
"surname": "Chen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Media Lab, Massachusetts Institute of Technology, Cambridge, MA, USA",
"fullName": "Ognjen Oggi Rudovic",
"givenName": "Ognjen Oggi",
"surname": "Rudovic",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Media Lab, Massachusetts Institute of Technology, Cambridge, MA, USA",
"fullName": "Rosalind W. Picard",
"givenName": "Rosalind W.",
"surname": "Picard",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "acii",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2017-10-01T00:00:00",
"pubType": "proceedings",
"pages": "410-417",
"year": "2017",
"issn": "2156-8111",
"isbn": "978-1-5386-0563-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08273646",
"articleId": "12OmNCmGNZp",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08273648",
"articleId": "12OmNzZEAqX",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icdm/2009/3895/0/3895a699",
"title": "Joint Emotion-Topic Modeling for Social Affective Text Mining",
"doi": null,
"abstractUrl": "/proceedings-article/icdm/2009/3895a699/12OmNqBbHEo",
"parentPublication": {
"id": "proceedings/icdm/2009/3895/0",
"title": "2009 Ninth IEEE International Conference on Data Mining",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2016/8851/0/8851b001",
"title": "Video2GIF: Automatic Generation of Animated GIFs from Video",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2016/8851b001/12OmNwF0BHO",
"parentPublication": {
"id": "proceedings/cvpr/2016/8851/0",
"title": "2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/csse/2008/3336/1/3336a459",
"title": "The Research on User Modeling for Personalized Affective Computing",
"doi": null,
"abstractUrl": "/proceedings-article/csse/2008/3336a459/12OmNxFJXVm",
"parentPublication": {
"id": "proceedings/csse/2008/3336/1",
"title": "Computer Science and Software Engineering, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/incos/2011/4579/0/4579a068",
"title": "Endowing e-Learning Systems with Emotion Awareness",
"doi": null,
"abstractUrl": "/proceedings-article/incos/2011/4579a068/12OmNzUPpdY",
"parentPublication": {
"id": "proceedings/incos/2011/4579/0",
"title": "Intelligent Networking and Collaborative Systems, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/acii/2013/5048/0/5048a460",
"title": "Perception of Emotional Gaits Using Avatar Animation of Real and Artificially Synthesized Gaits",
"doi": null,
"abstractUrl": "/proceedings-article/acii/2013/5048a460/12OmNzWx07H",
"parentPublication": {
"id": "proceedings/acii/2013/5048/0",
"title": "2013 Humaine Association Conference on Affective Computing and Intelligent Interaction (ACII)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ism/2016/4571/0/4571a367",
"title": "Predicting Perceived Emotions in Animated GIFs with 3D Convolutional Neural Networks",
"doi": null,
"abstractUrl": "/proceedings-article/ism/2016/4571a367/12OmNzvhvv0",
"parentPublication": {
"id": "proceedings/ism/2016/4571/0",
"title": "2016 IEEE International Symposium on Multimedia (ISM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ta/2015/02/07029058",
"title": "Predicting Mood from Punctual Emotion Annotations on Videos",
"doi": null,
"abstractUrl": "/journal/ta/2015/02/07029058/13rRUB7a1e8",
"parentPublication": {
"id": "trans/ta",
"title": "IEEE Transactions on Affective Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ta/2017/01/07374697",
"title": "MSP-IMPROV: An Acted Corpus of Dyadic Interactions to Study Emotion Perception",
"doi": null,
"abstractUrl": "/journal/ta/2017/01/07374697/13rRUxlgy25",
"parentPublication": {
"id": "trans/ta",
"title": "IEEE Transactions on Affective Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2019/9552/0/955200b090",
"title": "Human-Centered Emotion Recognition in Animated GIFs",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2019/955200b090/1cdOVZXbdo4",
"parentPublication": {
"id": "proceedings/icme/2019/9552/0",
"title": "2019 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ta/5555/01/09612052",
"title": "Quantifying Emotional Similarity in Speech",
"doi": null,
"abstractUrl": "/journal/ta/5555/01/09612052/1yrD2kuc1oI",
"parentPublication": {
"id": "trans/ta",
"title": "IEEE Transactions on Affective Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNBNM93Y",
"title": "Pervasive Computing, Signal Porcessing and Applications, International Conference on",
"acronym": "pcspa",
"groupId": "1800186",
"volume": "0",
"displayVolume": "0",
"year": "2010",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNyRg4zT",
"doi": "10.1109/PCSPA.2010.182",
"title": "A HMM-based Fuzzy Computing Model for Emotional Speech Recognition",
"normalizedTitle": "A HMM-based Fuzzy Computing Model for Emotional Speech Recognition",
"abstract": "Existing emotional speech recognition applications usually distinguish between a small number of emotions in speech. However this set of so called basic emotions in speech varies from one application to another depending on their according needs. In order to support such differing application needs an emotional speech model based on the fuzzy emotion hypercube is presented. In addition to existing models it supports also the recognition of derived emotions which are combinations of basic emotions in speech. We show the application of this model by a prosody based Hidden Markov Models(HMM). The approach is based on standard speech recognition technology using hidden semi-continuous Markov models. Both the selection of features and the design of the recognition system are addressed.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Existing emotional speech recognition applications usually distinguish between a small number of emotions in speech. However this set of so called basic emotions in speech varies from one application to another depending on their according needs. In order to support such differing application needs an emotional speech model based on the fuzzy emotion hypercube is presented. In addition to existing models it supports also the recognition of derived emotions which are combinations of basic emotions in speech. We show the application of this model by a prosody based Hidden Markov Models(HMM). The approach is based on standard speech recognition technology using hidden semi-continuous Markov models. Both the selection of features and the design of the recognition system are addressed.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Existing emotional speech recognition applications usually distinguish between a small number of emotions in speech. However this set of so called basic emotions in speech varies from one application to another depending on their according needs. In order to support such differing application needs an emotional speech model based on the fuzzy emotion hypercube is presented. In addition to existing models it supports also the recognition of derived emotions which are combinations of basic emotions in speech. We show the application of this model by a prosody based Hidden Markov Models(HMM). The approach is based on standard speech recognition technology using hidden semi-continuous Markov models. Both the selection of features and the design of the recognition system are addressed.",
"fno": "4180a731",
"keywords": [
"Hidden Markov Models HMM",
"Fuzzy Emotion Model",
"Emotion Computing",
"Emotional Speech Recognition"
],
"authors": [
{
"affiliation": null,
"fullName": "Yuqiang Qin",
"givenName": "Yuqiang",
"surname": "Qin",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Xueying Zhang",
"givenName": "Xueying",
"surname": "Zhang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Hui Ying",
"givenName": "Hui",
"surname": "Ying",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "pcspa",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2010-09-01T00:00:00",
"pubType": "proceedings",
"pages": "731-734",
"year": "2010",
"issn": null,
"isbn": "978-0-7695-4180-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "4180a727",
"articleId": "12OmNzBOhKp",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "4180a735",
"articleId": "12OmNwF0C6N",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/csie/2009/3507/7/3507g225",
"title": "Multi-level Speech Emotion Recognition Based on HMM and ANN",
"doi": null,
"abstractUrl": "/proceedings-article/csie/2009/3507g225/12OmNBBQZpU",
"parentPublication": {
"id": "proceedings/csie/2009/3507/7",
"title": "Computer Science and Information Engineering, World Congress on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/kse/2012/4760/0/4760a151",
"title": "A Study on Prosody of Vietnamese Emotional Speech",
"doi": null,
"abstractUrl": "/proceedings-article/kse/2012/4760a151/12OmNBiPRBv",
"parentPublication": {
"id": "proceedings/kse/2012/4760/0",
"title": "Knowledge and Systems Engineering, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmtma/2009/3583/1/3583a418",
"title": "A Study on Emotional Feature Analysis and Recognition in Speech Signal",
"doi": null,
"abstractUrl": "/proceedings-article/icmtma/2009/3583a418/12OmNBqMDgm",
"parentPublication": {
"id": "proceedings/icmtma/2009/3583/3",
"title": "2009 International Conference on Measuring Technology and Mechatronics Automation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/rvsp/2013/3184/0/3184a253",
"title": "Building a Recognition System of Speech Emotion and Emotional States",
"doi": null,
"abstractUrl": "/proceedings-article/rvsp/2013/3184a253/12OmNrY3LDM",
"parentPublication": {
"id": "proceedings/rvsp/2013/3184/0",
"title": "2013 Second International Conference on Robot, Vision and Signal Processing (RVSP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/csie/2009/3507/7/3507g243",
"title": "HMM-Based Uyghur Continuous Speech Recognition System",
"doi": null,
"abstractUrl": "/proceedings-article/csie/2009/3507g243/12OmNviZljM",
"parentPublication": {
"id": "proceedings/csie/2009/3507/7",
"title": "Computer Science and Information Engineering, World Congress on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ems/2016/4971/0/07920226",
"title": "Recognizing Emotional State Changes Using Speech Processing",
"doi": null,
"abstractUrl": "/proceedings-article/ems/2016/07920226/12OmNxEjY1T",
"parentPublication": {
"id": "proceedings/ems/2016/4971/0",
"title": "2016 European Modelling Symposium (EMS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmtma/2010/3962/3/3962e449",
"title": "Speech Recognition System Based on Integrating Feature and HMM",
"doi": null,
"abstractUrl": "/proceedings-article/icmtma/2010/3962e449/12OmNxb5hyf",
"parentPublication": {
"id": "proceedings/icmtma/2010/3962/3",
"title": "2010 International Conference on Measuring Technology and Mechatronics Automation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/avss/2005/9385/0/01577304",
"title": "Features extraction and selection for emotional speech classification",
"doi": null,
"abstractUrl": "/proceedings-article/avss/2005/01577304/12OmNy2rRSB",
"parentPublication": {
"id": "proceedings/avss/2005/9385/0",
"title": "IEEE Conference on Advanced Video and Signal Based Surveillance, 2005.",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dmdcm/2011/4413/0/4413a107",
"title": "Creating Emotional Speech for Conversational Agents",
"doi": null,
"abstractUrl": "/proceedings-article/dmdcm/2011/4413a107/12OmNya72wB",
"parentPublication": {
"id": "proceedings/dmdcm/2011/4413/0",
"title": "Digital Media and Digital Content Management, Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ta/2022/01/08859243",
"title": "MES-P: An Emotional Tonal Speech Dataset in Mandarin with Distal and Proximal Labels",
"doi": null,
"abstractUrl": "/journal/ta/2022/01/08859243/1dR0QzhXnIA",
"parentPublication": {
"id": "trans/ta",
"title": "IEEE Transactions on Affective Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "17D45VtKir6",
"title": "2018 24th International Conference on Pattern Recognition (ICPR)",
"acronym": "icpr",
"groupId": "1000545",
"volume": "0",
"displayVolume": "0",
"year": "2018",
"__typename": "ProceedingType"
},
"article": {
"id": "17D45VW8bql",
"doi": "10.1109/ICPR.2018.8545211",
"title": "Self-Talk: Responses to Users' Opinions and Challenges in Human Computer Dialog",
"normalizedTitle": "Self-Talk: Responses to Users' Opinions and Challenges in Human Computer Dialog",
"abstract": "People like to be, or partly, encouraged when their opinions or challenges are supported by listeners, even the listeners are robots. Encouraging responses from the robot which seem to get users' points potentially improve users' feeling in human computer dialog. According to this hypothesis, this paper proposes a method to generate supporting responses to users' opinions or challenges. The core ideas and contributions of the proposed method are: (1) multiple search engines cooperate, and (2) each engine random asks itself or ask another one to obtain more related information from the internet in multiple turns; then (3) final responses are abstracted from the answers. We call these three steps as Self-Talk. The comparisons between Self-Talk and several commercial open speech assistants show that the proposed method does generate suitable answers to users when they present their opinions or challenges in dialog. The hypothesis is positively evaluated that encouraging responses could improve users' chat feeling.",
"abstracts": [
{
"abstractType": "Regular",
"content": "People like to be, or partly, encouraged when their opinions or challenges are supported by listeners, even the listeners are robots. Encouraging responses from the robot which seem to get users' points potentially improve users' feeling in human computer dialog. According to this hypothesis, this paper proposes a method to generate supporting responses to users' opinions or challenges. The core ideas and contributions of the proposed method are: (1) multiple search engines cooperate, and (2) each engine random asks itself or ask another one to obtain more related information from the internet in multiple turns; then (3) final responses are abstracted from the answers. We call these three steps as Self-Talk. The comparisons between Self-Talk and several commercial open speech assistants show that the proposed method does generate suitable answers to users when they present their opinions or challenges in dialog. The hypothesis is positively evaluated that encouraging responses could improve users' chat feeling.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "People like to be, or partly, encouraged when their opinions or challenges are supported by listeners, even the listeners are robots. Encouraging responses from the robot which seem to get users' points potentially improve users' feeling in human computer dialog. According to this hypothesis, this paper proposes a method to generate supporting responses to users' opinions or challenges. The core ideas and contributions of the proposed method are: (1) multiple search engines cooperate, and (2) each engine random asks itself or ask another one to obtain more related information from the internet in multiple turns; then (3) final responses are abstracted from the answers. We call these three steps as Self-Talk. The comparisons between Self-Talk and several commercial open speech assistants show that the proposed method does generate suitable answers to users when they present their opinions or challenges in dialog. The hypothesis is positively evaluated that encouraging responses could improve users' chat feeling.",
"fno": "08545211",
"keywords": [
"History",
"Robots",
"Databases",
"Predictive Models",
"Pattern Recognition",
"Automation",
"Search Engines",
"Human Computer Dialog",
"Dialog Management DM",
"Viewpoint Expression",
"Abstract Extraction"
],
"authors": [
{
"affiliation": "National Laboratory of Pattern Recognition (NLPR), Institute of Automation, Chinese Academy of Sciences, 100190, China",
"fullName": "Minghao Yang",
"givenName": "Minghao",
"surname": "Yang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "National Laboratory of Pattern Recognition (NLPR), Institute of Automation, Chinese Academy of Sciences, 100190, China",
"fullName": "Na Sheng Ruo Yang",
"givenName": "Na Sheng Ruo",
"surname": "Yang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Guilin University of Electronic Technology, China",
"fullName": "Ke Zhang",
"givenName": "Ke",
"surname": "Zhang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "National Laboratory of Pattern Recognition (NLPR), Institute of Automation, Chinese Academy of Sciences, 100190, China",
"fullName": "Jianhua Tao",
"givenName": "Jianhua",
"surname": "Tao",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2018-08-01T00:00:00",
"pubType": "proceedings",
"pages": "2839-2844",
"year": "2018",
"issn": "1051-4651",
"isbn": "978-1-5386-3788-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08545441",
"articleId": "17D45WHONrb",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08546328",
"articleId": "17D45WnnFUq",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cvpr/2017/0457/0/0457b080",
"title": "Visual Dialog",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2017/0457b080/12OmNy4IF6s",
"parentPublication": {
"id": "proceedings/cvpr/2017/0457/0",
"title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ci/2014/02/06661381",
"title": "Designing user-character dialog in interactive narratives: an exploratory experiment",
"doi": null,
"abstractUrl": "/journal/ci/2014/02/06661381/13rRUB7a1ip",
"parentPublication": {
"id": "trans/ci",
"title": "IEEE Transactions on Computational Intelligence and AI in Games",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2019/05/08341743",
"title": "Visual Dialog",
"doi": null,
"abstractUrl": "/journal/tp/2019/05/08341743/13rRUxjQyqq",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2018/6420/0/642000g106",
"title": "Are You Talking to Me? Reasoned Visual Dialog Generation Through Adversarial Learning",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2018/642000g106/17D45Wc1IKc",
"parentPublication": {
"id": "proceedings/cvpr/2018/6420/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ts/2021/03/08643972",
"title": "Automatic Mining of Opinions Expressed About APIs in Stack Overflow",
"doi": null,
"abstractUrl": "/journal/ts/2021/03/08643972/17PYEjG6pmZ",
"parentPublication": {
"id": "trans/ts",
"title": "IEEE Transactions on Software Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200n3779",
"title": "Talk-to-Edit: Fine-Grained Facial Editing via Dialog",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200n3779/1BmI0wyVVrW",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iiai-aai/2022/9755/0/975500a039",
"title": "Using the Scatter of Opinions to Predict Responses to Tweets",
"doi": null,
"abstractUrl": "/proceedings-article/iiai-aai/2022/975500a039/1GU6XLk51sY",
"parentPublication": {
"id": "proceedings/iiai-aai/2022/9755/0",
"title": "2022 12th International Congress on Advanced Applied Informatics (IIAI-AAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2020/9360/0/09150718",
"title": "Interactive Video Retrieval with Dialog",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2020/09150718/1lPHncEP756",
"parentPublication": {
"id": "proceedings/cvprw/2020/9360/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800k0727",
"title": "Vision-Dialog Navigation by Exploring Cross-Modal Memory",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800k0727/1m3ooaBZ0k0",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icsc/2021/8899/0/889900a040",
"title": "Predictable and Adaptive Goal-oriented Dialog Policy Generation",
"doi": null,
"abstractUrl": "/proceedings-article/icsc/2021/889900a040/1rFzWLFGqxW",
"parentPublication": {
"id": "proceedings/icsc/2021/8899/0",
"title": "2021 IEEE 15th International Conference on Semantic Computing (ICSC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1JvaJJgvZ5K",
"title": "2022 IEEE Eighth International Conference on Multimedia Big Data (BigMM)",
"acronym": "bigmm",
"groupId": "9999062",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1JvaL44yMIE",
"doi": "10.1109/BigMM55396.2022.00027",
"title": "Behavioral Analysis for User Satisfaction",
"normalizedTitle": "Behavioral Analysis for User Satisfaction",
"abstract": "Today's Web aims to increase engagement with the user, his or her emotions and behaviors. Contemplating the emotional sphere is potentially very useful when it comes to Web sites: understanding how the user feels can help the service provider understand his or her needs and improve the service he or she offers. This project aims to devise a behavioral feature extraction system to create an interactive system that can provide a personalized experience and maximize user satisfaction. The system contemplates behavioral analysis techniques using Machine Learning, Recommender System and Emotion Recognition algorithms. In the experimental phase, users tested the system through Expressing Mixed Emotions, SUS and SUPR-Q tasks and questionnaires with the aim of evaluating the User Experience and The Usability of the system.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Today's Web aims to increase engagement with the user, his or her emotions and behaviors. Contemplating the emotional sphere is potentially very useful when it comes to Web sites: understanding how the user feels can help the service provider understand his or her needs and improve the service he or she offers. This project aims to devise a behavioral feature extraction system to create an interactive system that can provide a personalized experience and maximize user satisfaction. The system contemplates behavioral analysis techniques using Machine Learning, Recommender System and Emotion Recognition algorithms. In the experimental phase, users tested the system through Expressing Mixed Emotions, SUS and SUPR-Q tasks and questionnaires with the aim of evaluating the User Experience and The Usability of the system.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Today's Web aims to increase engagement with the user, his or her emotions and behaviors. Contemplating the emotional sphere is potentially very useful when it comes to Web sites: understanding how the user feels can help the service provider understand his or her needs and improve the service he or she offers. This project aims to devise a behavioral feature extraction system to create an interactive system that can provide a personalized experience and maximize user satisfaction. The system contemplates behavioral analysis techniques using Machine Learning, Recommender System and Emotion Recognition algorithms. In the experimental phase, users tested the system through Expressing Mixed Emotions, SUS and SUPR-Q tasks and questionnaires with the aim of evaluating the User Experience and The Usability of the system.",
"fno": "596300a113",
"keywords": [
"Behavioural Sciences Computing",
"Emotion Recognition",
"Feature Extraction",
"Learning Artificial Intelligence",
"Recommender Systems",
"Web Sites",
"Behavioral Analysis Techniques",
"Behavioral Feature Extraction System",
"Emotion Recognition Algorithms",
"Emotional Sphere",
"Expressing Mixed Emotions",
"Interactive System",
"Machine Learning",
"Personalized Experience",
"Recommender System",
"Service Provider",
"User Experience",
"User Satisfaction",
"Web Sites",
"Navigation",
"Web Pages",
"Motion Pictures",
"User Experience",
"Behavioral Sciences",
"Web Sites",
"Reliability",
"Behavioral Analysis",
"Recommender Systems",
"User Profiling",
"Emotion Recognition"
],
"authors": [
{
"affiliation": "Key4 srl,Monopoli,(BA),ITALY,70043",
"fullName": "Michela Chimienti",
"givenName": "Michela",
"surname": "Chimienti",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Operator srl,Villabella,ITALY,37047",
"fullName": "Ivan Danzi",
"givenName": "Ivan",
"surname": "Danzi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Digital Innovation srl,Bari,ITALY,70125",
"fullName": "Vincenzo Gattulli",
"givenName": "Vincenzo",
"surname": "Gattulli",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Università degli Studi di Bari,Dipartimento di Informatica,Bari,ITALY,70125",
"fullName": "Donato Impedovo",
"givenName": "Donato",
"surname": "Impedovo",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Università degli Studi di Bari,Dipartimento di Informatica,Bari,ITALY,70125",
"fullName": "Giuseppe Pirlo",
"givenName": "Giuseppe",
"surname": "Pirlo",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Digital Innovation srl,Bari,ITALY,70125",
"fullName": "Davide Veneto",
"givenName": "Davide",
"surname": "Veneto",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "bigmm",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-12-01T00:00:00",
"pubType": "proceedings",
"pages": "113-119",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-5963-1",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "596300a111",
"articleId": "1JvaN1gZJ8A",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "596300a120",
"articleId": "1JvaKIGoR1e",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/wse/2001/1399/0/00988785",
"title": "Improving Web accessibility for visually handicapped people using KAI",
"doi": null,
"abstractUrl": "/proceedings-article/wse/2001/00988785/12OmNAXxXgO",
"parentPublication": {
"id": "proceedings/wse/2001/1399/0",
"title": "Proceedings 3rd International Workshop on Web Site Evolution. WSE 2001",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wi/2016/4470/0/4470a399",
"title": "Modeling User Expectations & Satisfaction for SaaS Applications Using Multi-agent Negotiation",
"doi": null,
"abstractUrl": "/proceedings-article/wi/2016/4470a399/12OmNAkWvig",
"parentPublication": {
"id": "proceedings/wi/2016/4470/0",
"title": "2016 IEEE/WIC/ACM International Conference on Web Intelligence (WI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccicc/2014/6081/0/06921514",
"title": "The effects of menu design on users' emotions, search performance and mouse behaviour",
"doi": null,
"abstractUrl": "/proceedings-article/iccicc/2014/06921514/12OmNqEjhWP",
"parentPublication": {
"id": "proceedings/iccicc/2014/6081/0",
"title": "2014 IEEE 13th International Conference on Cognitive Informatics & Cognitive Computing (ICCI*CC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/skg/2013/3012/0/3012a030",
"title": "Semantic Analysis for Keywords Based User Segmentation from Internet Data",
"doi": null,
"abstractUrl": "/proceedings-article/skg/2013/3012a030/12OmNzVGcPf",
"parentPublication": {
"id": "proceedings/skg/2013/3012/0",
"title": "2013 Ninth International Conference on Semantics, Knowledge and Grids (SKG)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wisa/2015/9371/0/07396605",
"title": "Social Emotion Analysis System for Online News",
"doi": null,
"abstractUrl": "/proceedings-article/wisa/2015/07396605/12OmNzwZ6lN",
"parentPublication": {
"id": "proceedings/wisa/2015/9371/0",
"title": "2015 12th Web Information System and Application Conference (WISA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tk/2016/09/07464876",
"title": "Microblogging Content Propagation Modeling Using Topic-Specific Behavioral Factors",
"doi": null,
"abstractUrl": "/journal/tk/2016/09/07464876/13rRUxASuc0",
"parentPublication": {
"id": "trans/tk",
"title": "IEEE Transactions on Knowledge & Data Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/skg/2005/2534/0/04125887",
"title": "User XQuery Pattern Method based Personalization Recommender Service",
"doi": null,
"abstractUrl": "/proceedings-article/skg/2005/04125887/17D45WrVgaU",
"parentPublication": {
"id": "proceedings/skg/2005/2534/0",
"title": "Semantics, Knowledge and Grid, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iiai-aai/2022/9755/0/975500a649",
"title": "Dimension Reduction Method by Principal Component Analysis in the Prediction of Final User Satisfaction",
"doi": null,
"abstractUrl": "/proceedings-article/iiai-aai/2022/975500a649/1GU6WUiMu5O",
"parentPublication": {
"id": "proceedings/iiai-aai/2022/9755/0",
"title": "2022 12th International Congress on Advanced Applied Informatics (IIAI-AAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/11/09873971",
"title": "Effects of Small Talk With a Crowd of Virtual Humans on Users' Emotional and Behavioral Responses",
"doi": null,
"abstractUrl": "/journal/tg/2022/11/09873971/1GjwLJt4CaI",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cw/2021/4065/0/406500a246",
"title": "How Artificial Intelligence can be used for Behavioral Identification?",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2021/406500a246/1yBF7wECfSw",
"parentPublication": {
"id": "proceedings/cw/2021/4065/0",
"title": "2021 International Conference on Cyberworlds (CW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1KmF7rVz6Y8",
"title": "2022 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)",
"acronym": "aivr",
"groupId": "1830004",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1KmFfqArfGM",
"doi": "10.1109/AIVR56993.2022.00027",
"title": "The Design and Development of a Goal-Oriented Framework for Emotional Virtual Humans",
"normalizedTitle": "The Design and Development of a Goal-Oriented Framework for Emotional Virtual Humans",
"abstract": "Affective interaction with virtual humans can enhance the quality of user experience in virtual reality. It takes place through various considerations such as emotional representation in their behavioral patterns, facial expressions, head pose, body stance, and so on. Deciding on the emotional state of a virtual human at a moment, however, is still a challenge. Computational models of emotion, stemming from appraisal theories, are suggested for modeling emotion in virtual agents. Despite their competence in extracting emotion from appraisal values, they are poorly defined in describing how to assess appraisal values within a sense-think-act behavior model. Motivated by this lack of empirical knowledge on the appraisal stage, in this preliminary work, we propose a framework to bridge the gap between a computational model of emotion and a behavior model of virtual humans. To this end, we use a need-based, goal- oriented, autonomous behavior model to generate salient stimuli for eliciting emotions. Our simulation of a case study suggests that the proposed framework can produce sensible emotional states that conform to the essential principles of appraisal-based emotion theories.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Affective interaction with virtual humans can enhance the quality of user experience in virtual reality. It takes place through various considerations such as emotional representation in their behavioral patterns, facial expressions, head pose, body stance, and so on. Deciding on the emotional state of a virtual human at a moment, however, is still a challenge. Computational models of emotion, stemming from appraisal theories, are suggested for modeling emotion in virtual agents. Despite their competence in extracting emotion from appraisal values, they are poorly defined in describing how to assess appraisal values within a sense-think-act behavior model. Motivated by this lack of empirical knowledge on the appraisal stage, in this preliminary work, we propose a framework to bridge the gap between a computational model of emotion and a behavior model of virtual humans. To this end, we use a need-based, goal- oriented, autonomous behavior model to generate salient stimuli for eliciting emotions. Our simulation of a case study suggests that the proposed framework can produce sensible emotional states that conform to the essential principles of appraisal-based emotion theories.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Affective interaction with virtual humans can enhance the quality of user experience in virtual reality. It takes place through various considerations such as emotional representation in their behavioral patterns, facial expressions, head pose, body stance, and so on. Deciding on the emotional state of a virtual human at a moment, however, is still a challenge. Computational models of emotion, stemming from appraisal theories, are suggested for modeling emotion in virtual agents. Despite their competence in extracting emotion from appraisal values, they are poorly defined in describing how to assess appraisal values within a sense-think-act behavior model. Motivated by this lack of empirical knowledge on the appraisal stage, in this preliminary work, we propose a framework to bridge the gap between a computational model of emotion and a behavior model of virtual humans. To this end, we use a need-based, goal- oriented, autonomous behavior model to generate salient stimuli for eliciting emotions. Our simulation of a case study suggests that the proposed framework can produce sensible emotional states that conform to the essential principles of appraisal-based emotion theories.",
"fno": "572500a135",
"keywords": [
"Emotion Recognition",
"Human Computer Interaction",
"Psychology",
"Virtual Reality",
"Appraisal Stage",
"Appraisal Theories",
"Appraisal Values",
"Appraisal Based Emotion Theories",
"Autonomous Behavior Model",
"Behavioral Patterns",
"Eliciting Emotions",
"Emotional Representation",
"Emotional State",
"Emotional Virtual Humans",
"Extracting Emotion",
"Goal Oriented Framework",
"Modeling Emotion",
"Sense Think Act Behavior Model",
"Sensible Emotional States",
"Virtual Agents",
"Virtual Human",
"Virtual Reality",
"Solid Modeling",
"Emotion Recognition",
"Computational Modeling",
"Virtual Reality",
"User Experience",
"Appraisal",
"Behavioral Sciences",
"Virtual Humans",
"Emotion Generation",
"Appraisal Theory",
"Behavior Modeling"
],
"authors": [
{
"affiliation": "La Trobe University,Department of Computer Science and Information Technology,Melbourne,Australia",
"fullName": "Samad Roohi",
"givenName": "Samad",
"surname": "Roohi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "La Trobe University,Department of Computer Science and Information Technology,Melbourne,Australia",
"fullName": "Richard Skarbez",
"givenName": "Richard",
"surname": "Skarbez",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "aivr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-12-01T00:00:00",
"pubType": "proceedings",
"pages": "135-139",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-5725-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "572500a130",
"articleId": "1KmF9OL0xxe",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "572500a140",
"articleId": "1KmF8k8WXi8",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/acii/2009/4800/0/05349337",
"title": "Same or different? Recollection of or empathizing with an emotional event from the perspective of appraisal models",
"doi": null,
"abstractUrl": "/proceedings-article/acii/2009/05349337/12OmNBUAvZV",
"parentPublication": {
"id": "proceedings/acii/2009/4800/0",
"title": "2009 3rd International Conference on Affective Computing and Intelligent Interaction and Workshops (ACII 2009)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cisis/2012/4687/0/4687a899",
"title": "Spatial Presence in Virtual Worlds as a Perceptual Emotion: An Expansion on Cognitive Feeling?",
"doi": null,
"abstractUrl": "/proceedings-article/cisis/2012/4687a899/12OmNwudQS8",
"parentPublication": {
"id": "proceedings/cisis/2012/4687/0",
"title": "2012 Sixth International Conference on Complex, Intelligent, and Software Intensive Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/taai/2011/4601/0/4601a025",
"title": "Learning Human Emotion Patterns for Modeling Virtual Humans",
"doi": null,
"abstractUrl": "/proceedings-article/taai/2011/4601a025/12OmNzdoMjx",
"parentPublication": {
"id": "proceedings/taai/2011/4601/0",
"title": "2011 International Conference on Technologies and Applications of Artificial Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ta/2021/01/08432120",
"title": "Induction and Profiling of Strong Multi-Componential Emotions in Virtual Reality",
"doi": null,
"abstractUrl": "/journal/ta/2021/01/08432120/13rRUwI5TW2",
"parentPublication": {
"id": "trans/ta",
"title": "IEEE Transactions on Affective Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aciiw/2021/0021/0/09666329",
"title": "Visualization of social emotional appraisal process of an agent",
"doi": null,
"abstractUrl": "/proceedings-article/aciiw/2021/09666329/1A3hR8fiO0o",
"parentPublication": {
"id": "proceedings/aciiw/2021/0021/0",
"title": "2021 9th International Conference on Affective Computing and Intelligent Interaction Workshops and Demos (ACIIW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/11/09873971",
"title": "Effects of Small Talk With a Crowd of Virtual Humans on Users' Emotional and Behavioral Responses",
"doi": null,
"abstractUrl": "/journal/tg/2022/11/09873971/1GjwLJt4CaI",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/acii/2019/3888/0/08925491",
"title": "Towards Understanding Emotional Experience in a Componential Framework",
"doi": null,
"abstractUrl": "/proceedings-article/acii/2019/08925491/1fHGHEu1F7i",
"parentPublication": {
"id": "proceedings/acii/2019/3888/0",
"title": "2019 8th International Conference on Affective Computing and Intelligent Interaction (ACII)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ta/2022/03/09210819",
"title": "A Multi-Componential Approach to Emotion Recognition and the Effect of Personality",
"doi": null,
"abstractUrl": "/journal/ta/2022/03/09210819/1nzuXGYbAeA",
"parentPublication": {
"id": "trans/ta",
"title": "IEEE Transactions on Affective Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ta/5555/01/09423869",
"title": "Automatic Estimation of Action Unit Intensities and Inference of Emotional Appraisals",
"doi": null,
"abstractUrl": "/journal/ta/5555/01/09423869/1tmd1rxeT0A",
"parentPublication": {
"id": "trans/ta",
"title": "IEEE Transactions on Affective Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a183",
"title": "Understanding Emotional Expression with Haptic Feedback Vest Patterns and Immersive Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a183/1tnX9YpX3Nu",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1tuAeQeDJja",
"title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1tuAhmufDz2",
"doi": "10.1109/VR50410.2021.00040",
"title": "Effects of Language Familiarity in Simulated Natural Dialogue with a Virtual Crowd of Digital Humans on Emotion Contagion in Virtual Reality",
"normalizedTitle": "Effects of Language Familiarity in Simulated Natural Dialogue with a Virtual Crowd of Digital Humans on Emotion Contagion in Virtual Reality",
"abstract": "This investigation compared the emotional impact caused by a crowd of affective virtual humans (VHs) that communicated in the users' native or foreign language. We evaluated the users' affective reactions to a crowd of VHs that exhibited distinct emotional expressions. A total of four emotions were presented, which were Positive, Negative, Neutral, and Mixed. The VHs performed verbal and non-behaviors accordingly. Under the Mixed condition, the VHs were divided into three groups equally and each group was uniquely assigned one of the three emotions (i.e., positive, negative, and neutral). Users collected ten items from a virtual reality market. To complete the tasks, they interacted using natural speech with the emotional VHs. Three language conditions were investigated: one condition in USA and another two conditions in Taiwan. The group of participants in USA interacted with the VHs in English; and the two groups of participants in Taiwan interacted with the VHs using a foreign (English) language and a native (Mandarin) language respectively. We discovered that the medium of communication or language familiarity had a strong influence on participants' emotional reactions. When participants interacted in a foreign language with VHs with a positive emotional disposition, we found their positive emotional reactions were subdued and negative reactions were elevated. However, this was not the case when participants interacted with VHs in their native language, as their emotional reactions were contingent on the emotional disposition of the VHs.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This investigation compared the emotional impact caused by a crowd of affective virtual humans (VHs) that communicated in the users' native or foreign language. We evaluated the users' affective reactions to a crowd of VHs that exhibited distinct emotional expressions. A total of four emotions were presented, which were Positive, Negative, Neutral, and Mixed. The VHs performed verbal and non-behaviors accordingly. Under the Mixed condition, the VHs were divided into three groups equally and each group was uniquely assigned one of the three emotions (i.e., positive, negative, and neutral). Users collected ten items from a virtual reality market. To complete the tasks, they interacted using natural speech with the emotional VHs. Three language conditions were investigated: one condition in USA and another two conditions in Taiwan. The group of participants in USA interacted with the VHs in English; and the two groups of participants in Taiwan interacted with the VHs using a foreign (English) language and a native (Mandarin) language respectively. We discovered that the medium of communication or language familiarity had a strong influence on participants' emotional reactions. When participants interacted in a foreign language with VHs with a positive emotional disposition, we found their positive emotional reactions were subdued and negative reactions were elevated. However, this was not the case when participants interacted with VHs in their native language, as their emotional reactions were contingent on the emotional disposition of the VHs.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This investigation compared the emotional impact caused by a crowd of affective virtual humans (VHs) that communicated in the users' native or foreign language. We evaluated the users' affective reactions to a crowd of VHs that exhibited distinct emotional expressions. A total of four emotions were presented, which were Positive, Negative, Neutral, and Mixed. The VHs performed verbal and non-behaviors accordingly. Under the Mixed condition, the VHs were divided into three groups equally and each group was uniquely assigned one of the three emotions (i.e., positive, negative, and neutral). Users collected ten items from a virtual reality market. To complete the tasks, they interacted using natural speech with the emotional VHs. Three language conditions were investigated: one condition in USA and another two conditions in Taiwan. The group of participants in USA interacted with the VHs in English; and the two groups of participants in Taiwan interacted with the VHs using a foreign (English) language and a native (Mandarin) language respectively. We discovered that the medium of communication or language familiarity had a strong influence on participants' emotional reactions. When participants interacted in a foreign language with VHs with a positive emotional disposition, we found their positive emotional reactions were subdued and negative reactions were elevated. However, this was not the case when participants interacted with VHs in their native language, as their emotional reactions were contingent on the emotional disposition of the VHs.",
"fno": "255600a188",
"keywords": [
"Emotion Recognition",
"Linguistics",
"Natural Language Processing",
"Virtual Reality",
"Foreign Language",
"Native Language",
"Language Familiarity",
"Positive Emotional Disposition",
"Positive Emotional Reactions",
"Virtual Crowd",
"Emotion Contagion",
"Emotional Impact",
"Virtual Humans",
"Virtual Reality Market",
"Language Conditions",
"Learning Systems",
"Visualization",
"Solid Modeling",
"Three Dimensional Displays",
"Natural Languages",
"Virtual Reality",
"Writing",
"Virtual Humans",
"Virtual Reality",
"Virtual Crowds",
"Emotional Contagion"
],
"authors": [
{
"affiliation": "Clemson University",
"fullName": "Matias Volonte",
"givenName": "Matias",
"surname": "Volonte",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "National Chiao Tung University",
"fullName": "Chang-Chun Wang",
"givenName": "Chang-Chun",
"surname": "Wang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of North Carolina Wilmington",
"fullName": "Elham Ebrahimi",
"givenName": "Elham",
"surname": "Ebrahimi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "National Chiao Tung University",
"fullName": "Yu-Chun Hsu",
"givenName": "Yu-Chun",
"surname": "Hsu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "National Chiao Tung University",
"fullName": "Kuan-Yu Liu",
"givenName": "Kuan-Yu",
"surname": "Liu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "National Chiao Tung University, National Yang-Ming Chiao Tung University",
"fullName": "Sai-Keung Wong",
"givenName": "Sai-Keung",
"surname": "Wong",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Clemson University",
"fullName": "Sabarish V. Babu",
"givenName": "Sabarish V.",
"surname": "Babu",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-03-01T00:00:00",
"pubType": "proceedings",
"pages": "188-197",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-1838-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [
{
"id": "1tuAh3xnUc0",
"name": "pvr202118380-09417713s1-mm_255600a188.zip",
"size": "52.9 MB",
"location": "https://www.computer.org/csdl/api/v1/extra/pvr202118380-09417713s1-mm_255600a188.zip",
"__typename": "WebExtraType"
}
],
"adjacentArticles": {
"previous": {
"fno": "255600a179",
"articleId": "1tuB590ZFDO",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "255600a198",
"articleId": "1tuB40QFm92",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2015/1727/0/07223346",
"title": "An evaluation of virtual human appearance fidelity on user's positive and negative affect in human-virtual human interaction",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2015/07223346/12OmNvjyxwr",
"parentPublication": {
"id": "proceedings/vr/2015/1727/0",
"title": "2015 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2009/3943/0/04811019",
"title": "Virtual Humans That Touch Back: Enhancing Nonverbal Communication with Virtual Humans through Bidirectional Touch",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2009/04811019/12OmNwMXnsz",
"parentPublication": {
"id": "proceedings/vr/2009/3943/0",
"title": "2009 IEEE Virtual Reality Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2017/6647/0/07892289",
"title": "Coherence changes gaze behavior in virtual human interactions",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892289/12OmNzSQdoG",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/04/07383334",
"title": "Effects of Virtual Human Appearance Fidelity on Emotion Contagion in Affective Inter-Personal Simulations",
"doi": null,
"abstractUrl": "/journal/tg/2016/04/07383334/13rRUygBw7c",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2022/9617/0/961700a330",
"title": "Investigating the Effects of Leading and Following Behaviors of Virtual Humans in Collaborative Fine Motor Tasks in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2022/961700a330/1CJc2ZgMCFq",
"parentPublication": {
"id": "proceedings/vr/2022/9617/0",
"title": "2022 IEEE on Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2022/9617/0/961700a340",
"title": "An Evaluation of Native versus Foreign Communicative Interactions on Users’ Behavioral Reactions towards Affective Virtual Crowds",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2022/961700a340/1CJcm3krP2M",
"parentPublication": {
"id": "proceedings/vr/2022/9617/0",
"title": "2022 IEEE on Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/11/09873971",
"title": "Effects of Small Talk With a Crowd of Virtual Humans on Users' Emotional and Behavioral Responses",
"doi": null,
"abstractUrl": "/journal/tg/2022/11/09873971/1GjwLJt4CaI",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aivr/2022/5725/0/572500a135",
"title": "The Design and Development of a Goal-Oriented Framework for Emotional Virtual Humans",
"doi": null,
"abstractUrl": "/proceedings-article/aivr/2022/572500a135/1KmFfqArfGM",
"parentPublication": {
"id": "proceedings/aivr/2022/5725/0",
"title": "2022 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2020/5608/0/09089573",
"title": "Effects of Interacting with a Crowd of Emotional Virtual Humans on Users’ Affective and Non-Verbal Behaviors",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2020/09089573/1jIxfPwklig",
"parentPublication": {
"id": "proceedings/vr/2020/5608/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2021/1838/0/255600a316",
"title": "Virtual Morality: Using Virtual Reality to Study Moral Behavior in Extreme Accident Situations",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2021/255600a316/1tuB1QUgKJy",
"parentPublication": {
"id": "proceedings/vr/2021/1838/0",
"title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1zL1CunfrGM",
"title": "2021 International Conference on Intelligent Computing, Automation and Applications (ICAA)",
"acronym": "icaa",
"groupId": "1842748",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1zL1NFQqBP2",
"doi": "10.1109/ICAA53760.2021.00087",
"title": "Design and Implementation of a Chatbot System Integrated with Facial Expression Recognition",
"normalizedTitle": "Design and Implementation of a Chatbot System Integrated with Facial Expression Recognition",
"abstract": "Different from those who desires to talk, those “empty-nest youth” who do not take the initiative to talk about their emotions still want someone to be with them, and interacting with them, give them some care, although they cannot talk about their emotions. In order to pay attention to the mental health status of this group of people and meet their inner demands, this paper first uses facial expression recognition technology to analyze users' emotions in real time. Then employs chatbot technology to intervene the user's mood through automated responses. In the process of realizing the chatbot function, MongoDB is introduced to store the sayings of chatbots with different emotions, and seven chatbots with different emotions are divided. In addition, MongoDB is used to store the scores of users' emotions, which can achieve the function of recording users' emotions. Finally, Django framework is used to realize the page of the web side, which can call all the functions of the system and achieve the effect of different interventions according to the user's emotions.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Different from those who desires to talk, those “empty-nest youth” who do not take the initiative to talk about their emotions still want someone to be with them, and interacting with them, give them some care, although they cannot talk about their emotions. In order to pay attention to the mental health status of this group of people and meet their inner demands, this paper first uses facial expression recognition technology to analyze users' emotions in real time. Then employs chatbot technology to intervene the user's mood through automated responses. In the process of realizing the chatbot function, MongoDB is introduced to store the sayings of chatbots with different emotions, and seven chatbots with different emotions are divided. In addition, MongoDB is used to store the scores of users' emotions, which can achieve the function of recording users' emotions. Finally, Django framework is used to realize the page of the web side, which can call all the functions of the system and achieve the effect of different interventions according to the user's emotions.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Different from those who desires to talk, those “empty-nest youth” who do not take the initiative to talk about their emotions still want someone to be with them, and interacting with them, give them some care, although they cannot talk about their emotions. In order to pay attention to the mental health status of this group of people and meet their inner demands, this paper first uses facial expression recognition technology to analyze users' emotions in real time. Then employs chatbot technology to intervene the user's mood through automated responses. In the process of realizing the chatbot function, MongoDB is introduced to store the sayings of chatbots with different emotions, and seven chatbots with different emotions are divided. In addition, MongoDB is used to store the scores of users' emotions, which can achieve the function of recording users' emotions. Finally, Django framework is used to realize the page of the web side, which can call all the functions of the system and achieve the effect of different interventions according to the user's emotions.",
"fno": "373000a468",
"keywords": [
"Chatbots",
"Emotion Recognition",
"Face Recognition",
"Facial Expression Recognition",
"Chatbot Technology",
"Chatbot Function",
"Chatbot System",
"Empty Nest Youth",
"Mental Health Status",
"Django Framework",
"User Emotion",
"Mongo DB",
"Emotion Recognition",
"Automation",
"Mood",
"Face Recognition",
"Mental Health",
"Chatbots",
"Real Time Systems"
],
"authors": [
{
"affiliation": "Wuhan Business University,Hanyang District Wuhan,Hubei Province,China,430056",
"fullName": "Sijing Hu",
"givenName": "Sijing",
"surname": "Hu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Wuhan Business University,Hanyang District Wuhan,Hubei Province,China,430056",
"fullName": "Chengzhang Qu",
"givenName": "Chengzhang",
"surname": "Qu",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icaa",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-06-01T00:00:00",
"pubType": "proceedings",
"pages": "468-473",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-3730-1",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "373000a451",
"articleId": "1zL1SIZogxO",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "373000a474",
"articleId": "1zL1FYOFJ0Q",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icore/2021/0210/0/021000a157",
"title": "Framework for the mobile and web development of NU Guidance Service System (NUGSS)",
"doi": null,
"abstractUrl": "/proceedings-article/icore/2021/021000a157/1Aqytda4Mh2",
"parentPublication": {
"id": "proceedings/icore/2021/0210/0",
"title": "2021 1st International Conference in Information and Computing Research (iCORE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icim/2022/5174/0/517400a104",
"title": "Improving Consumers Continuous Usage of Chatbots: The Perspective of Multiple Reference Effects",
"doi": null,
"abstractUrl": "/proceedings-article/icim/2022/517400a104/1FHqDOXWsxO",
"parentPublication": {
"id": "proceedings/icim/2022/5174/0",
"title": "2022 8th International Conference on Information Management (ICIM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/compsac/2022/8810/0/881000a486",
"title": "Talk to Your Data: a Chatbot System for Multidimensional Datasets",
"doi": null,
"abstractUrl": "/proceedings-article/compsac/2022/881000a486/1FJ5o3IoUZW",
"parentPublication": {
"id": "proceedings/compsac/2022/8810/0",
"title": "2022 IEEE 46th Annual Computers, Software, and Applications Conference (COMPSAC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icfeict/2022/5476/0/547600a610",
"title": "Acceptance of Chatbot based on Emotional Intelligence through Machine Learning Algorithm",
"doi": null,
"abstractUrl": "/proceedings-article/icfeict/2022/547600a610/1IFK423OuFW",
"parentPublication": {
"id": "proceedings/icfeict/2022/5476/0",
"title": "2022 2nd International Conference on Frontiers of Electronics, Information and Computation Technologies (ICFEICT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ica/2022/6936/0/693600a030",
"title": "Intelligent Agents in Educational Institutions: NEdBOT - NLP-based Chatbot for Administrative Support Using DialogFlow",
"doi": null,
"abstractUrl": "/proceedings-article/ica/2022/693600a030/1JvaHDp1YzK",
"parentPublication": {
"id": "proceedings/ica/2022/6936/0",
"title": "2022 IEEE International Conference on Agents (ICA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/assic/2022/6109/0/10088307",
"title": "Psychological Advisor Chatbot",
"doi": null,
"abstractUrl": "/proceedings-article/assic/2022/10088307/1M4rIMYzOp2",
"parentPublication": {
"id": "proceedings/assic/2022/6109/0",
"title": "2022 International Conference on Advancements in Smart, Secure and Intelligent Computing (ASSIC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aciiw/2022/5490/0/10086016",
"title": "A chat with Dr. Jekyll and Mr. Hyde - Intent in chatbot communication",
"doi": null,
"abstractUrl": "/proceedings-article/aciiw/2022/10086016/1M664YswrCw",
"parentPublication": {
"id": "proceedings/aciiw/2022/5490/0",
"title": "2022 10th International Conference on Affective Computing and Intelligent Interaction Workshops and Demos (ACIIW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/nana/2020/8954/0/895400a174",
"title": "Study on Learner's Facial Expression Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/nana/2020/895400a174/1rlFbCTnXoc",
"parentPublication": {
"id": "proceedings/nana/2020/8954/0",
"title": "2020 International Conference on Networking and Network Applications (NaNA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vl-hcc/2021/4592/0/09576440",
"title": "ChatrEx: Designing Explainable Chatbot Interfaces for Enhancing Usefulness, Transparency, and Trust",
"doi": null,
"abstractUrl": "/proceedings-article/vl-hcc/2021/09576440/1y63t6uoDGo",
"parentPublication": {
"id": "proceedings/vl-hcc/2021/4592/0",
"title": "2021 IEEE Symposium on Visual Languages and Human-Centric Computing (VL/HCC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iscc/2021/2744/0/09631395",
"title": "Insights and lessons learned from trialling a mental health chatbot in the wild",
"doi": null,
"abstractUrl": "/proceedings-article/iscc/2021/09631395/1zmvM3neCpW",
"parentPublication": {
"id": "proceedings/iscc/2021/2744/0",
"title": "2021 IEEE Symposium on Computers and Communications (ISCC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNxV4itF",
"title": "2017 IEEE Virtual Reality (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2017",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNxG1ySA",
"doi": "10.1109/VR.2017.7892373",
"title": "Application of redirected walking in room-scale VR",
"normalizedTitle": "Application of redirected walking in room-scale VR",
"abstract": "Redirected walking (RDW) promises to allow near-natural walking in an infinitely large virtual environment (VE) by subtle manipulations of the virtual camera. Previous experiments showed that a physical radius of at least 22 meters is required for undetectable RDW. However, we found that it is possible to decrease this radius and to apply RDW to room-scale VR, i. e., up to approximately 5m × 5m. This is done by using curved paths in the Ve instead of straight paths, and by coupling them together in a way that enables continuous walking. Furthermore, the corresponding paths in the real world are laid out in a way that fits perfectly into room-scale VR. In this research demo, users can experience RDW in a room-scale head-mounted display VR setup and explore a VE of approximately 25m × 25m.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Redirected walking (RDW) promises to allow near-natural walking in an infinitely large virtual environment (VE) by subtle manipulations of the virtual camera. Previous experiments showed that a physical radius of at least 22 meters is required for undetectable RDW. However, we found that it is possible to decrease this radius and to apply RDW to room-scale VR, i. e., up to approximately 5m × 5m. This is done by using curved paths in the Ve instead of straight paths, and by coupling them together in a way that enables continuous walking. Furthermore, the corresponding paths in the real world are laid out in a way that fits perfectly into room-scale VR. In this research demo, users can experience RDW in a room-scale head-mounted display VR setup and explore a VE of approximately 25m × 25m.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Redirected walking (RDW) promises to allow near-natural walking in an infinitely large virtual environment (VE) by subtle manipulations of the virtual camera. Previous experiments showed that a physical radius of at least 22 meters is required for undetectable RDW. However, we found that it is possible to decrease this radius and to apply RDW to room-scale VR, i. e., up to approximately 5m × 5m. This is done by using curved paths in the Ve instead of straight paths, and by coupling them together in a way that enables continuous walking. Furthermore, the corresponding paths in the real world are laid out in a way that fits perfectly into room-scale VR. In this research demo, users can experience RDW in a room-scale head-mounted display VR setup and explore a VE of approximately 25m × 25m.",
"fno": "07892373",
"keywords": [
"Legged Locomotion",
"Visualization",
"Human Computer Interaction",
"Virtual Environments",
"Sensitivity",
"Virtual Reality",
"Redirected Walking",
"Room Scale"
],
"authors": [
{
"affiliation": "Human-Computer Interaction, University of Hamburg",
"fullName": "Eike Langbehn",
"givenName": "Eike",
"surname": "Langbehn",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Human-Computer Interaction, University of Hamburg",
"fullName": "Paul Lubos",
"givenName": "Paul",
"surname": "Lubos",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Institute for Simulation and Training, University of Central Florida",
"fullName": "Gerd Bruder",
"givenName": "Gerd",
"surname": "Bruder",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Human-Computer Interaction, University of Hamburg",
"fullName": "Frank Steinicke",
"givenName": "Frank",
"surname": "Steinicke",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2017-01-01T00:00:00",
"pubType": "proceedings",
"pages": "449-450",
"year": "2017",
"issn": "2375-5334",
"isbn": "978-1-5090-6647-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07892372",
"articleId": "12OmNz2C1zq",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07892374",
"articleId": "12OmNy49sP9",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2016/0836/0/07504742",
"title": "Simultaneous mapping and redirected walking for ad hoc free walking in virtual environments",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2016/07504742/12OmNyUFg0I",
"parentPublication": {
"id": "proceedings/vr/2016/0836/0",
"title": "2016 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446479",
"title": "Adopting the Roll Manipulation for Redirected Walking",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446479/13bd1eSlys4",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446563",
"title": "Redirected Walking in Irregularly Shaped Physical Environments with Dynamic Obstacles",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446563/13bd1eW2l9A",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446167",
"title": "Redirected Spaces: Going Beyond Borders",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446167/13bd1fph1xv",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446263",
"title": "Mobius Walker: Pitch and Roll Redirected Walking",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446263/13bd1gJ1v07",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446587",
"title": "Do Textures and Global Illumination Influence the Perception of Redirected Walking Based on Translational Gain?",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446587/13bd1gJ1v0m",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/04/07833190",
"title": "Bending the Curve: Sensitivity to Bending of Curved Paths and Application in Room-Scale VR",
"doi": null,
"abstractUrl": "/journal/tg/2017/04/07833190/13rRUIIVlcQ",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09733261",
"title": "One-step out-of-place resetting for redirected walking in VR",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09733261/1BENJyPkx5S",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dasc-picom-cbdcom-cyberscitech/2021/2174/0/217400a349",
"title": "A Redirected Walking Toolkit for Exploring Large-Scale Virtual Environments",
"doi": null,
"abstractUrl": "/proceedings-article/dasc-picom-cbdcom-cyberscitech/2021/217400a349/1BLnzoFxHHy",
"parentPublication": {
"id": "proceedings/dasc-picom-cbdcom-cyberscitech/2021/2174/0",
"title": "2021 IEEE Intl Conf on Dependable, Autonomic and Secure Computing, Intl Conf on Pervasive Intelligence and Computing, Intl Conf on Cloud and Big Data Computing, Intl Conf on Cyber Science and Technology Congress (DASC/PiCom/CBDCom/CyberSciTech)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2023/4815/0/481500a053",
"title": "Redirected Walking Based on Historical User Walking Data",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2023/481500a053/1MNgUnNG7Ju",
"parentPublication": {
"id": "proceedings/vr/2023/4815/0",
"title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "13bd1eJgoia",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2018",
"__typename": "ProceedingType"
},
"article": {
"id": "13bd1eSlysD",
"doi": "10.1109/VR.2018.8446442",
"title": "Path Prediction Using LSTM Network for Redirected Walking",
"normalizedTitle": "Path Prediction Using LSTM Network for Redirected Walking",
"abstract": "Redirected walking enables immersive walking experience in a limited-sized room. To apply redirected walking efficiently and minimize the number of resets, an accurate path prediction algorithm is required. We propose a data-driven path prediction model using Long Short-Term Memory(LSTM) network. User path data was collected via path exploration experiment on a maze-like environment and fed into LSTM network. Our algorithm can predict user's future path based on user's past position and facing direction data. We compare our path prediction result with actual user data and show that our model can accurately predict user's future path.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Redirected walking enables immersive walking experience in a limited-sized room. To apply redirected walking efficiently and minimize the number of resets, an accurate path prediction algorithm is required. We propose a data-driven path prediction model using Long Short-Term Memory(LSTM) network. User path data was collected via path exploration experiment on a maze-like environment and fed into LSTM network. Our algorithm can predict user's future path based on user's past position and facing direction data. We compare our path prediction result with actual user data and show that our model can accurately predict user's future path.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Redirected walking enables immersive walking experience in a limited-sized room. To apply redirected walking efficiently and minimize the number of resets, an accurate path prediction algorithm is required. We propose a data-driven path prediction model using Long Short-Term Memory(LSTM) network. User path data was collected via path exploration experiment on a maze-like environment and fed into LSTM network. Our algorithm can predict user's future path based on user's past position and facing direction data. We compare our path prediction result with actual user data and show that our model can accurately predict user's future path.",
"fno": "08446442",
"keywords": [
"Cognitive Systems",
"Virtual Reality",
"Accurate Path Prediction Algorithm",
"Data Driven Path Prediction Model",
"User Path Data",
"Path Exploration Experiment",
"LSTM Network",
"Path Prediction Result",
"Actual User Data",
"Redirected Walking",
"Immersive Walking Experience",
"Limited Sized Room",
"Long Short Term Memory Network",
"Legged Locomotion",
"Data Models",
"Predictive Models",
"Solid Modeling",
"Prediction Algorithms",
"Training",
"Three Dimensional Displays",
"Computing Methodologies Computer Graphics Graphics Systems And Interfaces Virtual Reality",
"Computing Methodologies Machine Learning Machine Learning Approaches Neural Networks"
],
"authors": [
{
"affiliation": "Yonsei University",
"fullName": "Yong-Hun Cho",
"givenName": "Yong-Hun",
"surname": "Cho",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Yonsei University",
"fullName": "Dong-Yong Lee",
"givenName": "Dong-Yong",
"surname": "Lee",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Yonsei University",
"fullName": "In-Kwon Lee",
"givenName": "In-Kwon",
"surname": "Lee",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2018-03-01T00:00:00",
"pubType": "proceedings",
"pages": "527-528",
"year": "2018",
"issn": null,
"isbn": "978-1-5386-3365-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08446494",
"articleId": "13bd1eSlytf",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08446132",
"articleId": "13bd1gFCjsb",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/3dui/2016/0842/0/07460032",
"title": "Automated path prediction for redirected walking using navigation meshes",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2016/07460032/12OmNBKEymO",
"parentPublication": {
"id": "proceedings/3dui/2016/0842/0",
"title": "2016 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2016/0842/0/07460030",
"title": "Eye tracking for locomotion prediction in redirected walking",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2016/07460030/12OmNz4SOsF",
"parentPublication": {
"id": "proceedings/3dui/2016/0842/0",
"title": "2016 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2015/04/07036075",
"title": "Cognitive Resource Demands of Redirected Walking",
"doi": null,
"abstractUrl": "/journal/tg/2015/04/07036075/13rRUxcKzVm",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/05/09715721",
"title": "Validating Simulation-Based Evaluation of Redirected Walking Systems",
"doi": null,
"abstractUrl": "/journal/tg/2022/05/09715721/1B4hxt06P9m",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/05/10049730",
"title": "Monte-Carlo Redirected Walking: Gain Selection Through Simulated Walks",
"doi": null,
"abstractUrl": "/journal/tg/2023/05/10049730/1KYowitu5OM",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798121",
"title": "Real-time Optimal Planning for Redirected Walking Using Deep Q-Learning",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798121/1cJ17Y60ruM",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797709",
"title": "Short-term Path Prediction for Virtual Open Spaces",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797709/1cJ192NspwI",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wevr/2016/0840/0/07859537",
"title": "The redirected walking toolkit: a unified development platform for exploring large virtual environments",
"doi": null,
"abstractUrl": "/proceedings-article/wevr/2016/07859537/1h0Jm3Gvypy",
"parentPublication": {
"id": "proceedings/wevr/2016/0840/0",
"title": "2016 IEEE 2nd Workshop on Everyday Virtual Reality (WEVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090595",
"title": "Reactive Alignment of Virtual and Physical Environments Using Redirected Walking",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090595/1jIxm1j8B2w",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ec/2022/02/09364750",
"title": "Multi-Technique Redirected Walking Method",
"doi": null,
"abstractUrl": "/journal/ec/2022/02/09364750/1rxdpzgvsxG",
"parentPublication": {
"id": "trans/ec",
"title": "IEEE Transactions on Emerging Topics in Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1jIx7fmpQ9a",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1jIx7m6wYKc",
"doi": "10.1109/VR46266.2020.00034",
"title": "Optimal Planning for Redirected Walking Based on Reinforcement Learning in Multi-user Environment with Irregularly Shaped Physical Space",
"normalizedTitle": "Optimal Planning for Redirected Walking Based on Reinforcement Learning in Multi-user Environment with Irregularly Shaped Physical Space",
"abstract": "Redirected Walking (RDW) enables users to walk in both virtual and physical tracking spaces simultaneously, which is an effective method to increase presence in Virtual Reality (VR). Recently, RDW technologies have been developed in a multi-user environment where multiple users share the same physical tracking space and simultaneously explore the same virtual space. Meanwhile, in the Steer-To-Optimal-Target (S2OT) method, user actions are planned in RDW by introducing machine learning models such as reinforcement learning. In this paper, we propose a new predictive RDW algorithm \"Multiuser-Steer-to-Optimal-Target (MS2OT)\" that extends the S2OT method into an environment with multiple users and various types of tracking space. In addition to the steering actions used in S2OT, MS2OT considers pre-reset actions and uses more steering targets and an improved reward function. The locations of multiple users and tracking space information are treated as visual information to be the state of the reinforcement learning model in MS2OT. Hence, the artificial neural network of a multilayer three-dimensional convolutional neural network with a dueling double deep network architecture is learned through Q-Learning. MS2OT significantly reduces the total number of resets compared to the conventional RDW algorithms such as S2C and APF-RDW in a multi-user environment and improves the total distance and average distance between resets during the same period. Experimental results show that MS2OT can process up to 32 users in real-time.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Redirected Walking (RDW) enables users to walk in both virtual and physical tracking spaces simultaneously, which is an effective method to increase presence in Virtual Reality (VR). Recently, RDW technologies have been developed in a multi-user environment where multiple users share the same physical tracking space and simultaneously explore the same virtual space. Meanwhile, in the Steer-To-Optimal-Target (S2OT) method, user actions are planned in RDW by introducing machine learning models such as reinforcement learning. In this paper, we propose a new predictive RDW algorithm \"Multiuser-Steer-to-Optimal-Target (MS2OT)\" that extends the S2OT method into an environment with multiple users and various types of tracking space. In addition to the steering actions used in S2OT, MS2OT considers pre-reset actions and uses more steering targets and an improved reward function. The locations of multiple users and tracking space information are treated as visual information to be the state of the reinforcement learning model in MS2OT. Hence, the artificial neural network of a multilayer three-dimensional convolutional neural network with a dueling double deep network architecture is learned through Q-Learning. MS2OT significantly reduces the total number of resets compared to the conventional RDW algorithms such as S2C and APF-RDW in a multi-user environment and improves the total distance and average distance between resets during the same period. Experimental results show that MS2OT can process up to 32 users in real-time.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Redirected Walking (RDW) enables users to walk in both virtual and physical tracking spaces simultaneously, which is an effective method to increase presence in Virtual Reality (VR). Recently, RDW technologies have been developed in a multi-user environment where multiple users share the same physical tracking space and simultaneously explore the same virtual space. Meanwhile, in the Steer-To-Optimal-Target (S2OT) method, user actions are planned in RDW by introducing machine learning models such as reinforcement learning. In this paper, we propose a new predictive RDW algorithm \"Multiuser-Steer-to-Optimal-Target (MS2OT)\" that extends the S2OT method into an environment with multiple users and various types of tracking space. In addition to the steering actions used in S2OT, MS2OT considers pre-reset actions and uses more steering targets and an improved reward function. The locations of multiple users and tracking space information are treated as visual information to be the state of the reinforcement learning model in MS2OT. Hence, the artificial neural network of a multilayer three-dimensional convolutional neural network with a dueling double deep network architecture is learned through Q-Learning. MS2OT significantly reduces the total number of resets compared to the conventional RDW algorithms such as S2C and APF-RDW in a multi-user environment and improves the total distance and average distance between resets during the same period. Experimental results show that MS2OT can process up to 32 users in real-time.",
"fno": "09089532",
"keywords": [
"Prediction Algorithms",
"Space Vehicles",
"Learning Artificial Intelligence",
"Legged Locomotion",
"Target Tracking",
"Shape",
"Virtual Reality",
"Redirected Walking",
"Resetting",
"Virtual Environments",
"Multi User",
"Collision Avoidance",
"Reinforcement Learning"
],
"authors": [
{
"affiliation": "Yonsei University,Dept. of Computer Science",
"fullName": "Dong-Yong Lee",
"givenName": "Dong-Yong",
"surname": "Lee",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Yonsei University,Dept. of Computer Science",
"fullName": "Yong-Hun Cho",
"givenName": "Yong-Hun",
"surname": "Cho",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Yonsei University,Dept. of Computer Science",
"fullName": "Dae-Hong Min",
"givenName": "Dae-Hong",
"surname": "Min",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Yonsei University,Dept. of Computer Science",
"fullName": "In-Kwon Lee",
"givenName": "In-Kwon",
"surname": "Lee",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-03-01T00:00:00",
"pubType": "proceedings",
"pages": "155-163",
"year": "2020",
"issn": null,
"isbn": "978-1-7281-5608-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "09089569",
"articleId": "1jIxfFs8qgo",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09089554",
"articleId": "1jIxaOIHjaw",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2018/3365/0/08446563",
"title": "Redirected Walking in Irregularly Shaped Physical Environments with Dynamic Obstacles",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446563/13bd1eW2l9A",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/04/ttg201404579",
"title": "Performance of Redirected Walking Algorithms in a Constrained Virtual World",
"doi": null,
"abstractUrl": "/journal/tg/2014/04/ttg201404579/13rRUwjoNx4",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/05/08645818",
"title": "Multi-User Redirected Walking and Resetting Using Artificial Potential Fields",
"doi": null,
"abstractUrl": "/journal/tg/2019/05/08645818/17PYEiVyc2v",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2022/9617/0/961700a655",
"title": "Optimal Pose Guided Redirected Walking with Pose Score Precomputation",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2022/961700a655/1CJbHdnVzd6",
"parentPublication": {
"id": "proceedings/vr/2022/9617/0",
"title": "2022 IEEE on Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/10049511",
"title": "Redirected Walking On Omnidirectional Treadmill",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/10049511/1KYoAYFd0m4",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/05/10049692",
"title": "FREE-RDW: A Multi-user Redirected Walking Method for Supporting Non-forward Steps",
"doi": null,
"abstractUrl": "/journal/tg/2023/05/10049692/1KYopXwY5Vu",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797818",
"title": "Effects of Tracking Area Shape and Size on Artificial Potential Field Redirected Walking",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797818/1cJ1htJ7ArK",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/05/08998570",
"title": "A Steering Algorithm for Redirected Walking Using Reinforcement Learning",
"doi": null,
"abstractUrl": "/journal/tg/2020/05/08998570/1hx2DxYanDy",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2020/7675/0/767500a201",
"title": "Evaluate Optimal Redirected Walking Planning Using Reinforcement Learning",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2020/767500a201/1pBMkbxS3F6",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2020/7675/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2021/0158/0/015800a184",
"title": "A Reinforcement Learning Approach to Redirected Walking with Passive Haptic Feedback",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2021/015800a184/1yeCXhKVTXy",
"parentPublication": {
"id": "proceedings/ismar/2021/0158/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "19F1LC52tjO",
"title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"acronym": "ismar-adjunct",
"groupId": "1810084",
"volume": "0",
"displayVolume": "0",
"year": "2018",
"__typename": "ProceedingType"
},
"article": {
"id": "19F1LS1YWuA",
"doi": "10.1109/ISMAR-Adjunct.2018.00071",
"title": "The Effect of AR Based Emotional Interaction Among Personified Physical Objects in Manual Operation",
"normalizedTitle": "The Effect of AR Based Emotional Interaction Among Personified Physical Objects in Manual Operation",
"abstract": "In this paper, we explore how Augmented Reality (AR) and anthropomorphism can be used to assign emotions to common physical objects based on their needs. We developed a novel emotional interaction model among personified physical objects so that they could react to other objects by changing virtual facial expressions. To explore the effect of such an emotional interface, we conducted a user study comparing three types of virtual cues shown on the real objects: (1) information only, (2) emotion only and (3) both information and emotional cues. A significant difference was found in task completion time and the quality of work when adding emotional cues to an informational AR-based guiding system. This implies that adding emotion feedback to informational cues may produce better task results than using informational cues alone.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this paper, we explore how Augmented Reality (AR) and anthropomorphism can be used to assign emotions to common physical objects based on their needs. We developed a novel emotional interaction model among personified physical objects so that they could react to other objects by changing virtual facial expressions. To explore the effect of such an emotional interface, we conducted a user study comparing three types of virtual cues shown on the real objects: (1) information only, (2) emotion only and (3) both information and emotional cues. A significant difference was found in task completion time and the quality of work when adding emotional cues to an informational AR-based guiding system. This implies that adding emotion feedback to informational cues may produce better task results than using informational cues alone.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this paper, we explore how Augmented Reality (AR) and anthropomorphism can be used to assign emotions to common physical objects based on their needs. We developed a novel emotional interaction model among personified physical objects so that they could react to other objects by changing virtual facial expressions. To explore the effect of such an emotional interface, we conducted a user study comparing three types of virtual cues shown on the real objects: (1) information only, (2) emotion only and (3) both information and emotional cues. A significant difference was found in task completion time and the quality of work when adding emotional cues to an informational AR-based guiding system. This implies that adding emotion feedback to informational cues may produce better task results than using informational cues alone.",
"fno": "08699229",
"keywords": [
"Augmented Reality",
"Human Computer Interaction",
"User Interfaces",
"Emotional Cues",
"Emotion Feedback",
"Informational Cues",
"Personified Physical Objects",
"Manual Operation",
"Anthropomorphism",
"Emotional Interaction Model",
"Virtual Facial Expressions",
"Emotional Interface",
"Virtual Cues",
"AR Based Emotional Interaction",
"Augmented Reality",
"Emotional Interaction",
"Physical Object Personification",
"Manual Operation",
"Augmented Reality",
"H 5 1 Information Interfaces And Presentation Multimedia Information Systems X 2014 Artificial Augmented And Virtual Realities"
],
"authors": [
{
"affiliation": "Northwestern Polytechnical University, Cyber-Physical Interaction Laboratory",
"fullName": "Li Zhang",
"givenName": "Li",
"surname": "Zhang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Northwestern Polytechnical University, Cyber-Physical Interaction Laboratory",
"fullName": "Weiping He",
"givenName": "Weiping",
"surname": "He",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Northwestern Polytechnical University, Cyber-Physical Interaction Laboratory",
"fullName": "Xiaoliang Bai",
"givenName": "Xiaoliang",
"surname": "Bai",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Northwestern Polytechnical University, Cyber-Physical Interaction Laboratory",
"fullName": "Yongxing Chen",
"givenName": "Yongxing",
"surname": "Chen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of South Australia, Empathic Computing Laboratory",
"fullName": "Mark Billinghurst",
"givenName": "Mark",
"surname": "Billinghurst",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar-adjunct",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2018-10-01T00:00:00",
"pubType": "proceedings",
"pages": "216-221",
"year": "2018",
"issn": null,
"isbn": "978-1-5386-7592-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08699334",
"articleId": "19F1OQnjwSk",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08699257",
"articleId": "19F1S5KRwg8",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icmi/2002/1834/0/18340141",
"title": "Integrating Emotional Cues into a Framework for Dialogue Management",
"doi": null,
"abstractUrl": "/proceedings-article/icmi/2002/18340141/12OmNC2fGzM",
"parentPublication": {
"id": "proceedings/icmi/2002/1834/0",
"title": "Proceedings Fourth IEEE International Conference on Multimodal Interfaces",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/acii/2009/4800/0/05349497",
"title": "A multiple perception model on emotional speech",
"doi": null,
"abstractUrl": "/proceedings-article/acii/2009/05349497/12OmNC3FGjO",
"parentPublication": {
"id": "proceedings/acii/2009/4800/0",
"title": "2009 3rd International Conference on Affective Computing and Intelligent Interaction and Workshops (ACII 2009)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fg/2015/6026/1/07163145",
"title": "Multi-level classification of emotional body expression",
"doi": null,
"abstractUrl": "/proceedings-article/fg/2015/07163145/12OmNrAv3Qv",
"parentPublication": {
"id": "proceedings/fg/2015/6026/5",
"title": "2015 11th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (FG)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/acii/2015/9953/0/07344582",
"title": "Relevant body cues for the classification of emotional body expression in daily actions",
"doi": null,
"abstractUrl": "/proceedings-article/acii/2015/07344582/12OmNwDACvD",
"parentPublication": {
"id": "proceedings/acii/2015/9953/0",
"title": "2015 International Conference on Affective Computing and Intelligent Interaction (ACII)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/rev/2009/4104/0/05460235",
"title": "Visualizing Emotional Requirements",
"doi": null,
"abstractUrl": "/proceedings-article/rev/2009/05460235/12OmNwFicVJ",
"parentPublication": {
"id": "proceedings/rev/2009/4104/0",
"title": "Requirements Engineering Visualization, First International Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/acii/2013/5048/0/5048a460",
"title": "Perception of Emotional Gaits Using Avatar Animation of Real and Artificially Synthesized Gaits",
"doi": null,
"abstractUrl": "/proceedings-article/acii/2013/5048a460/12OmNzWx07H",
"parentPublication": {
"id": "proceedings/acii/2013/5048/0",
"title": "2013 Humaine Association Conference on Affective Computing and Intelligent Interaction (ACII)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ta/2014/04/06849440",
"title": "CREMA-D: Crowd-Sourced Emotional Multimodal Actors Dataset",
"doi": null,
"abstractUrl": "/journal/ta/2014/04/06849440/13rRUzphDwg",
"parentPublication": {
"id": "trans/ta",
"title": "IEEE Transactions on Affective Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2018/7592/0/08699328",
"title": "Compact Object Representation of a Non-Rigid Object for Real-Time Tracking in AR Systems",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699328/19F1QGFHn8Y",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2018/7592/0",
"title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a960",
"title": "[DC] Exploration of Context and Physiological Cues for Personalized Emotion-Adaptive Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a960/1CJexFbyxUI",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a667",
"title": "Leveraging AR and Object Interactions for Emotional Support Interfaces",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a667/1tnXssxEIes",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "19F1LC52tjO",
"title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"acronym": "ismar-adjunct",
"groupId": "1810084",
"volume": "0",
"displayVolume": "0",
"year": "2018",
"__typename": "ProceedingType"
},
"article": {
"id": "19F1Oa8ukP6",
"doi": "10.1109/ISMAR-Adjunct.2018.00083",
"title": "Evaluation of Direct Manipulation Methods in Augmented Reality Environments Using Google Glass",
"normalizedTitle": "Evaluation of Direct Manipulation Methods in Augmented Reality Environments Using Google Glass",
"abstract": "This paper presents a study examining interaction methods for manipulating objects in augmented reality (AR) environments using Google Glass (Glass). We compared five interaction methods; three of them were implemented on Glass (virtual buttons, swipe pad of Glass, remote control via the touchscreen of a smartwatch) and two on a smartphone (virtual buttons and the touch interaction). 32 participants were asked to scale and rotate a virtual 3D object created from a physical sculpture of the Museum Günter Grass-Haus in Luebeck using the AR-App InfoGrid4Glass. We studied the interaction methods by measuring effectiveness, efficiency, and satisfaction of the users. The results of the study showed that smartphone interaction is superior to any Google Glass interaction methods. Of the interaction methods implemented for Glass, a combination of Glass with a smartwatch shows the highest usability. Our findings suggest that if users have a smartwatch available, it offers them a higher usability for interacting with virtual objects rather than using the touch pad of Glass or virtual buttons on Glass.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper presents a study examining interaction methods for manipulating objects in augmented reality (AR) environments using Google Glass (Glass). We compared five interaction methods; three of them were implemented on Glass (virtual buttons, swipe pad of Glass, remote control via the touchscreen of a smartwatch) and two on a smartphone (virtual buttons and the touch interaction). 32 participants were asked to scale and rotate a virtual 3D object created from a physical sculpture of the Museum Günter Grass-Haus in Luebeck using the AR-App InfoGrid4Glass. We studied the interaction methods by measuring effectiveness, efficiency, and satisfaction of the users. The results of the study showed that smartphone interaction is superior to any Google Glass interaction methods. Of the interaction methods implemented for Glass, a combination of Glass with a smartwatch shows the highest usability. Our findings suggest that if users have a smartwatch available, it offers them a higher usability for interacting with virtual objects rather than using the touch pad of Glass or virtual buttons on Glass.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper presents a study examining interaction methods for manipulating objects in augmented reality (AR) environments using Google Glass (Glass). We compared five interaction methods; three of them were implemented on Glass (virtual buttons, swipe pad of Glass, remote control via the touchscreen of a smartwatch) and two on a smartphone (virtual buttons and the touch interaction). 32 participants were asked to scale and rotate a virtual 3D object created from a physical sculpture of the Museum Günter Grass-Haus in Luebeck using the AR-App InfoGrid4Glass. We studied the interaction methods by measuring effectiveness, efficiency, and satisfaction of the users. The results of the study showed that smartphone interaction is superior to any Google Glass interaction methods. Of the interaction methods implemented for Glass, a combination of Glass with a smartwatch shows the highest usability. Our findings suggest that if users have a smartwatch available, it offers them a higher usability for interacting with virtual objects rather than using the touch pad of Glass or virtual buttons on Glass.",
"fno": "08699326",
"keywords": [
"Augmented Reality",
"Human Computer Interaction",
"Smart Phones",
"Touch Sensitive Screens",
"Direct Manipulation Methods",
"Augmented Reality Environments",
"Touch Interaction",
"Virtual 3 D Object",
"AR App Info Grid 4 Glass",
"Smartphone Interaction",
"Google Glass Interaction Methods",
"Museum Gu X 0308 Nter Grass Haus",
"Smartwatch",
"Augmented Reality",
"Cross Device Interaction",
"User Study",
"Google Glass",
"Smartwatch",
"X 2022 Human Centered Computing X 2192 Mixed Augmented Reality"
],
"authors": [
{
"affiliation": "University of Luebeck, Institute for Multimedia and Interactive Systems (IMIS)",
"fullName": "Alexander Ohlei",
"givenName": "Alexander",
"surname": "Ohlei",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Luebeck, Institute for Multimedia and Interactive Systems (IMIS)",
"fullName": "Thomas Winkler",
"givenName": "Thomas",
"surname": "Winkler",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Luebeck, Institute for Multimedia and Interactive Systems (IMIS)",
"fullName": "Daniel Wessel",
"givenName": "Daniel",
"surname": "Wessel",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Luebeck, Institute for Multimedia and Interactive Systems (IMIS)",
"fullName": "Michael Herczeg",
"givenName": "Michael",
"surname": "Herczeg",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar-adjunct",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2018-10-01T00:00:00",
"pubType": "proceedings",
"pages": "266-269",
"year": "2018",
"issn": null,
"isbn": "978-1-5386-7592-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08699190",
"articleId": "19F1MykMwmI",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08699230",
"articleId": "19F1TkkiEJa",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2015/1727/0/07223368",
"title": "Dynamic hierarchical virtual button-based hand interaction for wearable AR",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2015/07223368/12OmNAMbZFA",
"parentPublication": {
"id": "proceedings/vr/2015/1727/0",
"title": "2015 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2014/6184/0/06948463",
"title": "[Poster] Smartwatch-aided handheld augmented reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2014/06948463/12OmNAQrYBV",
"parentPublication": {
"id": "proceedings/ismar/2014/6184/0",
"title": "2014 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2014/6184/0/06948513",
"title": "Google glass, The META and Co. How to calibrate optical see-through head mounted displays",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2014/06948513/12OmNB8TUim",
"parentPublication": {
"id": "proceedings/ismar/2014/6184/0",
"title": "2014 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2014/6184/0/06948449",
"title": "[Poster] Device vs. user perspective rendering in google glass AR applications",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2014/06948449/12OmNCbU2XO",
"parentPublication": {
"id": "proceedings/ismar/2014/6184/0",
"title": "2014 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2017/6327/0/6327a162",
"title": "[POSTER] Consistency between Reflection on the Glass and Virtual Object in Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2017/6327a162/12OmNvk7K2Z",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2017/6327/0",
"title": "2017 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2016/0842/0/07460065",
"title": "Smartwatch-assisted robust 6-DOF hand tracker for object manipulation in HMD-based augmented reality",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2016/07460065/12OmNzlUKES",
"parentPublication": {
"id": "proceedings/3dui/2016/0842/0",
"title": "2016 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08448289",
"title": "Performance Envelopes of in-Air Direct and Smartwatch Indirect Control for Head-Mounted Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08448289/13bd1fZBGcE",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2010/01/ttg2010010004",
"title": "Opportunistic Tangible User Interfaces for Augmented Reality",
"doi": null,
"abstractUrl": "/journal/tg/2010/01/ttg2010010004/13rRUwvT9gn",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tm/2017/03/07469860",
"title": "Ubii: Physical World Interaction Through Augmented Reality",
"doi": null,
"abstractUrl": "/journal/tm/2017/03/07469860/13rRUxASuca",
"parentPublication": {
"id": "trans/tm",
"title": "IEEE Transactions on Mobile Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2020/5608/0/09089453",
"title": "A Tangible Spherical Proxy for Object Manipulation in Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2020/09089453/1jIxguSW9va",
"parentPublication": {
"id": "proceedings/vr/2020/5608/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1CJbEwHHqEg",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1CJc3FU1jUc",
"doi": "10.1109/VR51125.2022.00030",
"title": "The Influence of Environmental Lighting on Size Variations in Optical See-through Tangible Augmented Reality",
"normalizedTitle": "The Influence of Environmental Lighting on Size Variations in Optical See-through Tangible Augmented Reality",
"abstract": "Optical see-through head-mounted displays (OST HMDs) are becoming increasingly popular as they get better and smaller. One application area is interaction with virtual content, which is more intuitive when using physical objects as tangibles. Since it is not possible to use a matching replica for each virtual object, it is necessary to identify physical objects that can represent several different virtual objects. As a first step, we investigated to what extent a physical object can differ in size from its virtual counterpart.Since the perception of content in optical see-through Augmented Reality (OST AR) is strongly influenced by the ambient lighting, the illumination intensity was considered in our study. We investigated three indoor lighting conditions and their effects on the perception of seven different size variations between the physical object and its virtual overlay.The results of the study show that there is a decrease in usability and presence with increasing illuminance. However, this cannot be avoided when applications are run under realistic interior lighting conditions. Furthermore, the results demonstrate that the size ranges in which a physical object can deviate from its virtual counterpart without having a strong negative impact on usability, presence and performance increase with increasing environmental illumination. Therefore, it is possible to interact with even smaller and even larger physical props to manipulate the associated virtual content under brighter lighting conditions.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Optical see-through head-mounted displays (OST HMDs) are becoming increasingly popular as they get better and smaller. One application area is interaction with virtual content, which is more intuitive when using physical objects as tangibles. Since it is not possible to use a matching replica for each virtual object, it is necessary to identify physical objects that can represent several different virtual objects. As a first step, we investigated to what extent a physical object can differ in size from its virtual counterpart.Since the perception of content in optical see-through Augmented Reality (OST AR) is strongly influenced by the ambient lighting, the illumination intensity was considered in our study. We investigated three indoor lighting conditions and their effects on the perception of seven different size variations between the physical object and its virtual overlay.The results of the study show that there is a decrease in usability and presence with increasing illuminance. However, this cannot be avoided when applications are run under realistic interior lighting conditions. Furthermore, the results demonstrate that the size ranges in which a physical object can deviate from its virtual counterpart without having a strong negative impact on usability, presence and performance increase with increasing environmental illumination. Therefore, it is possible to interact with even smaller and even larger physical props to manipulate the associated virtual content under brighter lighting conditions.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Optical see-through head-mounted displays (OST HMDs) are becoming increasingly popular as they get better and smaller. One application area is interaction with virtual content, which is more intuitive when using physical objects as tangibles. Since it is not possible to use a matching replica for each virtual object, it is necessary to identify physical objects that can represent several different virtual objects. As a first step, we investigated to what extent a physical object can differ in size from its virtual counterpart.Since the perception of content in optical see-through Augmented Reality (OST AR) is strongly influenced by the ambient lighting, the illumination intensity was considered in our study. We investigated three indoor lighting conditions and their effects on the perception of seven different size variations between the physical object and its virtual overlay.The results of the study show that there is a decrease in usability and presence with increasing illuminance. However, this cannot be avoided when applications are run under realistic interior lighting conditions. Furthermore, the results demonstrate that the size ranges in which a physical object can deviate from its virtual counterpart without having a strong negative impact on usability, presence and performance increase with increasing environmental illumination. Therefore, it is possible to interact with even smaller and even larger physical props to manipulate the associated virtual content under brighter lighting conditions.",
"fno": "961700a121",
"keywords": [
"Augmented Reality",
"Helmet Mounted Displays",
"Lighting",
"Illumination Intensity",
"OST AR",
"Matching Replica",
"OST HMD",
"Optical See Through Head Mounted Displays",
"Realistic Interior Lighting Conditions",
"Virtual Overlay",
"Seven Different Size Variations",
"Virtual Counterpart",
"Virtual Object",
"Physical Object",
"Optical See Through Tangible Augmented Reality",
"Associated Virtual Content",
"Three Dimensional Displays",
"Head Mounted Displays",
"Shape",
"Conferences",
"Lighting",
"User Interfaces",
"Object Recognition",
"Tangible Augmented Reality",
"Optical See Through Augmented Reality",
"Tangible Interaction",
"Illumination"
],
"authors": [
{
"affiliation": "Saarland Informatics Campus,German Research Center for Artificial Intelligence (DFKI),Saarbrücken,Germany",
"fullName": "Denise Kahl",
"givenName": "Denise",
"surname": "Kahl",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Saarland Informatics Campus,German Research Center for Artificial Intelligence (DFKI),Saarbrücken,Germany",
"fullName": "Marc Ruble",
"givenName": "Marc",
"surname": "Ruble",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Saarland Informatics Campus,German Research Center for Artificial Intelligence (DFKI),Saarbrücken,Germany",
"fullName": "Antonio Krüger",
"givenName": "Antonio",
"surname": "Krüger",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-03-01T00:00:00",
"pubType": "proceedings",
"pages": "121-129",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-9617-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [
{
"id": "1CJc3CKa69G",
"name": "pvr202296170-09756830s1-mm_961700a121.zip",
"size": "261 kB",
"location": "https://www.computer.org/csdl/api/v1/extra/pvr202296170-09756830s1-mm_961700a121.zip",
"__typename": "WebExtraType"
}
],
"adjacentArticles": {
"previous": {
"fno": "961700a112",
"articleId": "1CJbGjLoRXi",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "961700a130",
"articleId": "1CJceaqF3Xi",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ismar/2012/4660/0/06402574",
"title": "Occlusion capable optical see-through head-mounted display using freeform optics",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2012/06402574/12OmNBEpnEt",
"parentPublication": {
"id": "proceedings/ismar/2012/4660/0",
"title": "2012 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a640",
"title": "Towards Eye-Perspective Rendering for Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a640/1CJewzlI3CM",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a796",
"title": "A Replication Study to Measure the Perceived Three-Dimensional Location of Virtual Objects in Optical See Through Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a796/1CJfrSkdYUE",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a352",
"title": "Effects of Optical See-Through Displays on Self-Avatar Appearance in Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a352/1J7WodvTPzy",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a389",
"title": "Objective Measurements of Background Color Shifts Caused by Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a389/1J7WuL68jAY",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090625",
"title": "Automatic Calibration of Commercial Optical See-Through Head-Mounted Displays for Medical Applications",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090625/1jIxwp2g0VO",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/07/09253561",
"title": "AR-Loupe: Magnified Augmented Reality by Combining an Optical See-Through Head-Mounted Display and a Loupe",
"doi": null,
"abstractUrl": "/journal/tg/2022/07/09253561/1oDXHeBJHNe",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iisa/2020/2346/0/09284416",
"title": "Effects of Lighting Variations in Virtual Learning Environments",
"doi": null,
"abstractUrl": "/proceedings-article/iisa/2020/09284416/1pttNAXcHbq",
"parentPublication": {
"id": "proceedings/iisa/2020/2346/0",
"title": "2020 11th International Conference on Information, Intelligence, Systems and Applications (IISA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/12/09429918",
"title": "The Impact of Focus and Context Visualization Techniques on Depth Perception in Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/journal/tg/2022/12/09429918/1txPs5wi56E",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2021/0158/0/015800a147",
"title": "Investigation of Size Variations in Optical See-through Tangible Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2021/015800a147/1yeCYy4wcZa",
"parentPublication": {
"id": "proceedings/ismar/2021/0158/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1CJcAaH6aYg",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"acronym": "vrw",
"groupId": "1836626",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1CJdZ8RwdnG",
"doi": "10.1109/VRW55335.2022.00289",
"title": "HoloCMDS: Investigating Around Field of View Glanceable Commands Selection in AR-HMDs",
"normalizedTitle": "HoloCMDS: Investigating Around Field of View Glanceable Commands Selection in AR-HMDs",
"abstract": "Augmented reality merges the real and virtual worlds seamlessly in real-time. However, we need contextual menus to manipulate virtual objects rendered in our physical space. Unfortunately, designing a menu for augmented reality head-mounted displays (AR-HMDs) is challenging because of their limited display field of view (FOV). In this paper, we propose HoloCMDS (see Section 2) to support quick access of contextual commands in AR-HMDs and conduct an initial experiment to get users' feedback about this technique.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Augmented reality merges the real and virtual worlds seamlessly in real-time. However, we need contextual menus to manipulate virtual objects rendered in our physical space. Unfortunately, designing a menu for augmented reality head-mounted displays (AR-HMDs) is challenging because of their limited display field of view (FOV). In this paper, we propose HoloCMDS (see Section 2) to support quick access of contextual commands in AR-HMDs and conduct an initial experiment to get users' feedback about this technique.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Augmented reality merges the real and virtual worlds seamlessly in real-time. However, we need contextual menus to manipulate virtual objects rendered in our physical space. Unfortunately, designing a menu for augmented reality head-mounted displays (AR-HMDs) is challenging because of their limited display field of view (FOV). In this paper, we propose HoloCMDS (see Section 2) to support quick access of contextual commands in AR-HMDs and conduct an initial experiment to get users' feedback about this technique.",
"fno": "840200a876",
"keywords": [
"Augmented Reality",
"Helmet Mounted Displays",
"Rendering Computer Graphics",
"Virtual Reality",
"Holo CMDS",
"Investigating Around Field",
"View Glanceable Commands Selection",
"AR HM Ds",
"Real Worlds",
"Virtual Worlds",
"Contextual Menus",
"Virtual Objects",
"Physical Space",
"Menu",
"Augmented Reality Head Mounted Displays",
"Display Field",
"Contextual Commands",
"Three Dimensional Displays",
"Head Mounted Displays",
"Conferences",
"User Interfaces",
"Real Time Systems",
"Augmented Reality",
"Human Centered Computing Mixed Reality",
"Human Centered Computing User Interface Design"
],
"authors": [
{
"affiliation": "Inria Bordeaux,France",
"fullName": "Rajkumar Darbar",
"givenName": "Rajkumar",
"surname": "Darbar",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Inria Bordeaux,France",
"fullName": "Arnaud Prouzeau",
"givenName": "Arnaud",
"surname": "Prouzeau",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Inria Bordeaux,France",
"fullName": "Martin Hachet",
"givenName": "Martin",
"surname": "Hachet",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vrw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-03-01T00:00:00",
"pubType": "proceedings",
"pages": "876-877",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-8402-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "840200a874",
"articleId": "1CJdT50CO3e",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "840200a878",
"articleId": "1CJf8pMNC2k",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2018/3365/0/08446247",
"title": "Concept for Rendering Optimizations for Full Human Field of View HMDs",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446247/13bd1eY1x3i",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a686",
"title": "Exploring Augmented Reality Notification Placement while Communicating with Virtual Avatar",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a686/1J7WgWfFoOs",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a812",
"title": "AR-HMD Multitask Viewing System Concept with a Supporting Handheld Viewport for Multiple Spatially-Anchored Workspaces",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a812/1J7WvwZew9O",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2022/5325/0/532500a074",
"title": "An Exploration of Hands-free Text Selection for Virtual Reality Head-Mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2022/532500a074/1JrRaeV82L6",
"parentPublication": {
"id": "proceedings/ismar/2022/5325/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2019/4765/0/476500a285",
"title": "Birds vs. Fish: Visualizing Out-of-View Objects in Augmented Reality using 3D Minimaps",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2019/476500a285/1gysmdpyM3C",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2019/4765/0",
"title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2020/5608/0/09089433",
"title": "Glanceable AR: Evaluating Information Access Methods for Head-Worn Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2020/09089433/1jIxf3ZEs0w",
"parentPublication": {
"id": "proceedings/vr/2020/5608/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a717",
"title": "[DC] Glanceable AR: Towards an Always-on Augmented Reality Future",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a717/1tnXrUsEHYc",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/12/09429918",
"title": "The Impact of Focus and Context Visualization Techniques on Depth Perception in Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/journal/tg/2022/12/09429918/1txPs5wi56E",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cw/2021/4065/0/406500a147",
"title": "Supporting Vine Vegetation Status Observation Using AR",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2021/406500a147/1yBF2DRa37y",
"parentPublication": {
"id": "proceedings/cw/2021/4065/0",
"title": "2021 International Conference on Cyberworlds (CW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2021/1298/0/129800a159",
"title": "Exploring the Effect of Visual Cues on Eye Gaze During AR-Guided Picking and Assembly Tasks",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2021/129800a159/1yeQM18rD7G",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2021/1298/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1CJcAaH6aYg",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"acronym": "vrw",
"groupId": "1836626",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1CJe1WgruTe",
"doi": "10.1109/VRW55335.2022.00029",
"title": "Investigating Lighting Quality in Office Workstations: A Combined Approach Utilizing Virtual Reality and Physical Workstations",
"normalizedTitle": "Investigating Lighting Quality in Office Workstations: A Combined Approach Utilizing Virtual Reality and Physical Workstations",
"abstract": "While recent studies suggest benefits of using immersive virtual reality (VR) technology to obtain user experience during the design process of spaces, it is still unclear whether VR can be used as a reliable tool to evaluate certain environmental features such as lighting quality. This study examines six daylight control options (electrochromic windows versus blinds with different degrees of daylight transmission and color) and their impacts on users' satisfaction and visual comfort in a physical office as well as its digital twin. The daylight control options were installed in a physical office setting located in Mississippi, U.S., where participants (N = 112) experienced each daylight control option and completed an online survey. Using Matterport, web-based 3D models of the office with the daylight control options were created. Participants (N = 35, study still in progress) explored the immersive virtual models using Oculus Quest 2 VR goggles on a university campus. A verbal survey was administered to obtain participants' feedback while they were experiencing the virtual office using VR. Semi-structured interviews were also conducted after participants' VR experience. Findings revealed high visual comfort ratings for electrochromic windows in both virtual and physical settings. However, results regarding perception of glare severity and overall satisfaction with workstations were inconsistent between the VR and physical setting. Findings from this study help the designers, architects, and stakeholders identify the optimal window design features to improve user experience and comfort for office workstations. Also, this study addresses the validity of VR as a tool to assess user experience of lighting quality in spaces.",
"abstracts": [
{
"abstractType": "Regular",
"content": "While recent studies suggest benefits of using immersive virtual reality (VR) technology to obtain user experience during the design process of spaces, it is still unclear whether VR can be used as a reliable tool to evaluate certain environmental features such as lighting quality. This study examines six daylight control options (electrochromic windows versus blinds with different degrees of daylight transmission and color) and their impacts on users' satisfaction and visual comfort in a physical office as well as its digital twin. The daylight control options were installed in a physical office setting located in Mississippi, U.S., where participants (N = 112) experienced each daylight control option and completed an online survey. Using Matterport, web-based 3D models of the office with the daylight control options were created. Participants (N = 35, study still in progress) explored the immersive virtual models using Oculus Quest 2 VR goggles on a university campus. A verbal survey was administered to obtain participants' feedback while they were experiencing the virtual office using VR. Semi-structured interviews were also conducted after participants' VR experience. Findings revealed high visual comfort ratings for electrochromic windows in both virtual and physical settings. However, results regarding perception of glare severity and overall satisfaction with workstations were inconsistent between the VR and physical setting. Findings from this study help the designers, architects, and stakeholders identify the optimal window design features to improve user experience and comfort for office workstations. Also, this study addresses the validity of VR as a tool to assess user experience of lighting quality in spaces.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "While recent studies suggest benefits of using immersive virtual reality (VR) technology to obtain user experience during the design process of spaces, it is still unclear whether VR can be used as a reliable tool to evaluate certain environmental features such as lighting quality. This study examines six daylight control options (electrochromic windows versus blinds with different degrees of daylight transmission and color) and their impacts on users' satisfaction and visual comfort in a physical office as well as its digital twin. The daylight control options were installed in a physical office setting located in Mississippi, U.S., where participants (N = 112) experienced each daylight control option and completed an online survey. Using Matterport, web-based 3D models of the office with the daylight control options were created. Participants (N = 35, study still in progress) explored the immersive virtual models using Oculus Quest 2 VR goggles on a university campus. A verbal survey was administered to obtain participants' feedback while they were experiencing the virtual office using VR. Semi-structured interviews were also conducted after participants' VR experience. Findings revealed high visual comfort ratings for electrochromic windows in both virtual and physical settings. However, results regarding perception of glare severity and overall satisfaction with workstations were inconsistent between the VR and physical setting. Findings from this study help the designers, architects, and stakeholders identify the optimal window design features to improve user experience and comfort for office workstations. Also, this study addresses the validity of VR as a tool to assess user experience of lighting quality in spaces.",
"fno": "840200a085",
"keywords": [
"Daylighting",
"Ergonomics",
"Human Factors",
"Lighting",
"User Interfaces",
"Virtual Reality",
"Lighting Quality",
"Office Workstations",
"Combined Approach Utilizing Virtual Reality",
"Physical Workstations",
"Immersive Virtual Reality",
"User Experience",
"Daylight Control Option",
"Electrochromic Windows",
"Daylight Transmission",
"Physical Office",
"Immersive Virtual Models",
"Oculus Quest 2 VR Goggles",
"Virtual Office",
"Participants",
"Virtual Settings",
"Physical Settings",
"Physical Setting",
"Solid Modeling",
"Visualization",
"Three Dimensional Displays",
"Conferences",
"Lighting",
"Virtual Reality",
"Aerospace Electronics",
"Daylight Control",
"Virtual Reality",
"Visual Comfort",
"Satisfaction"
],
"authors": [
{
"affiliation": "Lawrence Technological University",
"fullName": "Roxana Jafarifiroozabadi",
"givenName": "Roxana",
"surname": "Jafarifiroozabadi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "View Inc.",
"fullName": "Piers MacNaughton",
"givenName": "Piers",
"surname": "MacNaughton",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Lawrence Technological University",
"fullName": "Alina Osnaga",
"givenName": "Alina",
"surname": "Osnaga",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vrw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-03-01T00:00:00",
"pubType": "proceedings",
"pages": "85-87",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-8402-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [
{
"id": "1CJe1TdoEgg",
"name": "pvrw202284020-09757390s1-mm_840200a085.zip",
"size": "108 kB",
"location": "https://www.computer.org/csdl/api/v1/extra/pvrw202284020-09757390s1-mm_840200a085.zip",
"__typename": "WebExtraType"
}
],
"adjacentArticles": {
"previous": {
"fno": "840200a081",
"articleId": "1CJep6mwUKs",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "840200a088",
"articleId": "1CJd13g3dza",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/sustainit/2012/46/0/06388011",
"title": "Neural network-based forecasting of energy consumption due to electric lighting in office buildings",
"doi": null,
"abstractUrl": "/proceedings-article/sustainit/2012/06388011/12OmNAoUTuD",
"parentPublication": {
"id": "proceedings/sustainit/2012/46/0",
"title": "2012 Second IFIP Conference on Sustainable Internet and ICT for Sustainability (SustainIT 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/percomw/2015/8425/0/07134055",
"title": "An intervention study on automated lighting control to save energy in open space offices",
"doi": null,
"abstractUrl": "/proceedings-article/percomw/2015/07134055/12OmNxy4MZI",
"parentPublication": {
"id": "proceedings/percomw/2015/8425/0",
"title": "2015 IEEE International Conference on Pervasive Computing and Communication Workshops (PerCom Workshops)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdcsw/2012/4686/0/4686a245",
"title": "Intelligent Illumination Model-Based Lighting Control",
"doi": null,
"abstractUrl": "/proceedings-article/icdcsw/2012/4686a245/12OmNzzfTjo",
"parentPublication": {
"id": "proceedings/icdcsw/2012/4686/0",
"title": "2012 32nd International Conference on Distributed Computing Systems Workshops",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446510",
"title": "Comparing Interface Affordances for Controlling a Push Broom in VR",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446510/13bd1tMztXW",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iisa/2018/8161/0/08633628",
"title": "A Novel and Robust GreenSoul-ed Lighting Controller",
"doi": null,
"abstractUrl": "/proceedings-article/iisa/2018/08633628/17D45WrVgdE",
"parentPublication": {
"id": "proceedings/iisa/2018/8161/0",
"title": "2018 9th International Conference on Information, Intelligence, Systems and Applications (IISA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icbase/2021/2709/0/270900a062",
"title": "Evaluation Models for Luminous Environment Satisfaction in Green Office Buildings Integrating Environmental and Spatial Attributes Based on Massive Data Samples",
"doi": null,
"abstractUrl": "/proceedings-article/icbase/2021/270900a062/1AH87mD0l0Y",
"parentPublication": {
"id": "proceedings/icbase/2021/2709/0",
"title": "2021 2nd International Conference on Big Data & Artificial Intelligence & Software Engineering (ICBASE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2022/9617/0/961700a093",
"title": "Exploring the Design Space for Immersive Embodiment in Dance",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2022/961700a093/1CJc1vWLV6w",
"parentPublication": {
"id": "proceedings/vr/2022/9617/0",
"title": "2022 IEEE on Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/annsim/2022/5288/0/09859378",
"title": "Determining Critical Points To Control Electric Lighting To Meet Circadian Lighting Requirements And Minimize Energy Use",
"doi": null,
"abstractUrl": "/proceedings-article/annsim/2022/09859378/1G4EQoRKEDe",
"parentPublication": {
"id": "proceedings/annsim/2022/5288/0",
"title": "2022 Annual Modeling and Simulation Conference (ANNSIM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/11/09874390",
"title": "Investigating Search Among Physical and Virtual Objects Under Different Lighting Conditions",
"doi": null,
"abstractUrl": "/journal/tg/2022/11/09874390/1GjwKHZsfIc",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797972",
"title": "Evaluation of Maslows Hierarchy of Needs on Long-Term Use of HMDs – A Case Study of Office Environment",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797972/1cJ0V5mcpB6",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1JrQPhTSspy",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"acronym": "ismar",
"groupId": "1000465",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1JrReInK5H2",
"doi": "10.1109/ISMAR55827.2022.00078",
"title": "Investigating The Effect of Direction on The Limits of Haptic Retargeting",
"normalizedTitle": "Investigating The Effect of Direction on The Limits of Haptic Retargeting",
"abstract": "Haptic Retargeting enables spatially decoupled physical objects to provide haptic feedback for multiple virtual objects in Virtual Reality (VR). By decoupling the virtual hand from its real position, through Hand Redirection, multiple virtual objects can be mapped to a single physical proxy. However, redirection beyond a detectable level is disruptive to the user experience. The limits of haptic retargeting have mainly been explored in one primary direction—the user reaching forwards. We designed an experiment with participants performing reaching movements across 8 reaching directions in the horizontal plane, with a hand redirection of up to 30°. We identify an overall haptic retargeting limit and find that a physical proxy can be remapped to virtual objects of up to 16.14° away. We find a significant effect of reaching direction on the limit. In practice, however, these differences are small, measuring only a couple of degrees, translating to approximately 1cm across a 30cm reach. We argue that, while the psychology literature might suggest the need for specific directional limits and while we do find an effect of direction on retargeting limits, interaction designers can mitigate these requirements by applying slightly conservative global retargeting limits. Our contributions further the community’s knowledge of both how to deploy haptic retargeting in interaction without compromising the user’s experience and how visual and proprioceptive cues interact in peripersonal space in VR.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Haptic Retargeting enables spatially decoupled physical objects to provide haptic feedback for multiple virtual objects in Virtual Reality (VR). By decoupling the virtual hand from its real position, through Hand Redirection, multiple virtual objects can be mapped to a single physical proxy. However, redirection beyond a detectable level is disruptive to the user experience. The limits of haptic retargeting have mainly been explored in one primary direction—the user reaching forwards. We designed an experiment with participants performing reaching movements across 8 reaching directions in the horizontal plane, with a hand redirection of up to 30°. We identify an overall haptic retargeting limit and find that a physical proxy can be remapped to virtual objects of up to 16.14° away. We find a significant effect of reaching direction on the limit. In practice, however, these differences are small, measuring only a couple of degrees, translating to approximately 1cm across a 30cm reach. We argue that, while the psychology literature might suggest the need for specific directional limits and while we do find an effect of direction on retargeting limits, interaction designers can mitigate these requirements by applying slightly conservative global retargeting limits. Our contributions further the community’s knowledge of both how to deploy haptic retargeting in interaction without compromising the user’s experience and how visual and proprioceptive cues interact in peripersonal space in VR.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Haptic Retargeting enables spatially decoupled physical objects to provide haptic feedback for multiple virtual objects in Virtual Reality (VR). By decoupling the virtual hand from its real position, through Hand Redirection, multiple virtual objects can be mapped to a single physical proxy. However, redirection beyond a detectable level is disruptive to the user experience. The limits of haptic retargeting have mainly been explored in one primary direction—the user reaching forwards. We designed an experiment with participants performing reaching movements across 8 reaching directions in the horizontal plane, with a hand redirection of up to 30°. We identify an overall haptic retargeting limit and find that a physical proxy can be remapped to virtual objects of up to 16.14° away. We find a significant effect of reaching direction on the limit. In practice, however, these differences are small, measuring only a couple of degrees, translating to approximately 1cm across a 30cm reach. We argue that, while the psychology literature might suggest the need for specific directional limits and while we do find an effect of direction on retargeting limits, interaction designers can mitigate these requirements by applying slightly conservative global retargeting limits. Our contributions further the community’s knowledge of both how to deploy haptic retargeting in interaction without compromising the user’s experience and how visual and proprioceptive cues interact in peripersonal space in VR.",
"fno": "532500a612",
"keywords": [
"Haptic Interfaces",
"Human Computer Interaction",
"Virtual Reality",
"Haptic Feedback",
"Haptic Retargeting",
"HCI",
"Human Computer Interaction",
"Multiple Virtual Objects",
"Virtual Reality",
"VR",
"Visualization",
"Psychology",
"Propioception",
"User Experience",
"Haptic Interfaces",
"Object Recognition",
"Augmented Reality",
"Human Centered Computing",
"Human Computer Interaction HCI",
"Empirical Studies In HCI",
"Interaction Techniques",
"Interaction Paradigms",
"Virtual Reality"
],
"authors": [
{
"affiliation": "Monash University",
"fullName": "Aldrich Clarence",
"givenName": "Aldrich",
"surname": "Clarence",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Melbourne",
"fullName": "Jarrod Knibbe",
"givenName": "Jarrod",
"surname": "Knibbe",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "The University of Queensland",
"fullName": "Maxime Cordeil",
"givenName": "Maxime",
"surname": "Cordeil",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Monash University",
"fullName": "Michael Wybrow",
"givenName": "Michael",
"surname": "Wybrow",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-10-01T00:00:00",
"pubType": "proceedings",
"pages": "612-621",
"year": "2022",
"issn": "1554-7868",
"isbn": "978-1-6654-5325-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [
{
"id": "1JrReFiI39C",
"name": "pismar202253250-09995431s1-mm_532500a612.zip",
"size": "998 kB",
"location": "https://www.computer.org/csdl/api/v1/extra/pismar202253250-09995431s1-mm_532500a612.zip",
"__typename": "WebExtraType"
}
],
"adjacentArticles": {
"previous": {
"fno": "532500a603",
"articleId": "1JrRa6CvhW8",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "532500a622",
"articleId": "1JrR9HiNwDm",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2000/0478/0/04780233",
"title": "Visuo-Haptic Display Using Head-Mounted Projector",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2000/04780233/12OmNwHz00K",
"parentPublication": {
"id": "proceedings/vr/2000/0478/0",
"title": "Virtual Reality Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/th/2017/04/07892978",
"title": "Evaluation of Wearable Haptic Systems for the Fingers in Augmented Reality Applications",
"doi": null,
"abstractUrl": "/journal/th/2017/04/07892978/13rRUwInv4D",
"parentPublication": {
"id": "trans/th",
"title": "IEEE Transactions on Haptics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/th/2012/01/tth2012010077",
"title": "Rendering Virtual Tumors in Real Tissue Mock-Ups Using Haptic Augmented Reality",
"doi": null,
"abstractUrl": "/journal/th/2012/01/tth2012010077/13rRUwInvt1",
"parentPublication": {
"id": "trans/th",
"title": "IEEE Transactions on Haptics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2022/9617/0/961700a625",
"title": "Shape Aware Haptic Retargeting for Accurate Hand Interactions",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2022/961700a625/1CJcacl0uhq",
"parentPublication": {
"id": "proceedings/vr/2022/9617/0",
"title": "2022 IEEE on Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a618",
"title": "Retargeting Destinations of Passive Props for Enhancing Haptic Feedback in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a618/1CJeVmWfgWc",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798143",
"title": "Estimating Detection Thresholds for Desktop-Scale Hand Redirection in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798143/1cJ0GRxSQwM",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797974",
"title": "Remapped Physical-Virtual Interfaces with Bimanual Haptic Retargeting",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797974/1cJ0NcRFX5m",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/05/09382898",
"title": "Combining Dynamic Passive Haptics and Haptic Retargeting for Enhanced Haptic Feedback in Virtual Reality",
"doi": null,
"abstractUrl": "/journal/tg/2021/05/09382898/1saZv7Dd9Ty",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2021/1838/0/255600a150",
"title": "Unscripted Retargeting: Reach Prediction for Haptic Retargeting in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2021/255600a150/1tuAPeNHqog",
"parentPublication": {
"id": "proceedings/vr/2021/1838/0",
"title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/02/09576632",
"title": "Adaptive Reset Techniques for Haptic Retargeted Interaction",
"doi": null,
"abstractUrl": "/journal/tg/2023/02/09576632/1xIKunVGow0",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1pBMeBWXAZ2",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"acronym": "ismar-adjunct",
"groupId": "1810084",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1pBMhSuBHpe",
"doi": "10.1109/ISMAR-Adjunct51615.2020.00026",
"title": "Effects of Behavioral and Anthropomorphic Realism on Social Influence with Virtual Humans in AR",
"normalizedTitle": "Effects of Behavioral and Anthropomorphic Realism on Social Influence with Virtual Humans in AR",
"abstract": "While many applications in AR will display embodied agents in scenes, there is little research examining the social influence of these AR renderings. In this experiment, we manipulated the behavioral and anthropomorphic realism of an embodied agent. Participants wore an AR headset and walked a path specified by four virtual cubes, designed to bring them close to either humans or objects rendered in AR. In addition there was a control condition with no virtual objects in the room. Participants were then asked to choose between two physical chairs to sit on—one with a virtual human or object on it, or one without any. We examined the interpersonal distance between participants and rendered objects, physical seat choice, body rotation direction while choosing a seat, and social presence ratings. For interpersonal distance, there was an effect of anthropomorphic realism but not behavioral realism—participants left more space for human-shaped objects than for non-human objects, regardless of how real the human behaved. There were no significant differences in seat choice and rotation direction. Social presence ratings were higher for agents high in both behavioral and anthropomorphic realism than for other conditions. We discuss implications for the social influence theory [5] and for the design of AR systems.",
"abstracts": [
{
"abstractType": "Regular",
"content": "While many applications in AR will display embodied agents in scenes, there is little research examining the social influence of these AR renderings. In this experiment, we manipulated the behavioral and anthropomorphic realism of an embodied agent. Participants wore an AR headset and walked a path specified by four virtual cubes, designed to bring them close to either humans or objects rendered in AR. In addition there was a control condition with no virtual objects in the room. Participants were then asked to choose between two physical chairs to sit on—one with a virtual human or object on it, or one without any. We examined the interpersonal distance between participants and rendered objects, physical seat choice, body rotation direction while choosing a seat, and social presence ratings. For interpersonal distance, there was an effect of anthropomorphic realism but not behavioral realism—participants left more space for human-shaped objects than for non-human objects, regardless of how real the human behaved. There were no significant differences in seat choice and rotation direction. Social presence ratings were higher for agents high in both behavioral and anthropomorphic realism than for other conditions. We discuss implications for the social influence theory [5] and for the design of AR systems.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "While many applications in AR will display embodied agents in scenes, there is little research examining the social influence of these AR renderings. In this experiment, we manipulated the behavioral and anthropomorphic realism of an embodied agent. Participants wore an AR headset and walked a path specified by four virtual cubes, designed to bring them close to either humans or objects rendered in AR. In addition there was a control condition with no virtual objects in the room. Participants were then asked to choose between two physical chairs to sit on—one with a virtual human or object on it, or one without any. We examined the interpersonal distance between participants and rendered objects, physical seat choice, body rotation direction while choosing a seat, and social presence ratings. For interpersonal distance, there was an effect of anthropomorphic realism but not behavioral realism—participants left more space for human-shaped objects than for non-human objects, regardless of how real the human behaved. There were no significant differences in seat choice and rotation direction. Social presence ratings were higher for agents high in both behavioral and anthropomorphic realism than for other conditions. We discuss implications for the social influence theory [5] and for the design of AR systems.",
"fno": "767500a041",
"keywords": [
"Human Factors",
"Rendering Computer Graphics",
"Virtual Reality",
"Object Rendering",
"Physical Seat Choice",
"Social Presence Ratings",
"Interpersonal Distance",
"Anthropomorphic Realism",
"Human Shaped Objects",
"Nonhuman Objects",
"Rotation Direction",
"Social Influence Theory",
"Virtual Human",
"AR Renderings",
"Virtual Cubes",
"Virtual Objects",
"Behavioral Realism",
"AR Headset",
"Body Rotation Direction",
"Atmospheric Measurements",
"Rendering Computer Graphics",
"Particle Measurements",
"Software",
"Augmented Reality",
"Rotation Measurement",
"Recruitment",
"Applied Computing",
"Law",
"Social And Behavioral Sciences",
"Psychology",
"Human Centered Computing",
"Human Computer Interaction HCI",
"Interaction Paradigms",
"Mixed Augmented Reality"
],
"authors": [
{
"affiliation": "Stanford University",
"fullName": "Hanseul Jun",
"givenName": "Hanseul",
"surname": "Jun",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Stanford University",
"fullName": "Jeremy Bailenson",
"givenName": "Jeremy",
"surname": "Bailenson",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar-adjunct",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-11-01T00:00:00",
"pubType": "proceedings",
"pages": "41-44",
"year": "2020",
"issn": null,
"isbn": "978-1-7281-7675-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "767500a039",
"articleId": "1pBMfjaOy08",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "767500a045",
"articleId": "1pBMfcK08fu",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ismar/2013/2869/0/06671762",
"title": "Improving procedural task performance with Augmented Reality annotations",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2013/06671762/12OmNB7LvHJ",
"parentPublication": {
"id": "proceedings/ismar/2013/2869/0",
"title": "2013 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2015/1727/0/07223377",
"title": "Avatar embodiment realism and virtual fitness training",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2015/07223377/12OmNCcKQFn",
"parentPublication": {
"id": "proceedings/vr/2015/1727/0",
"title": "2015 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2010/9343/0/05643560",
"title": "Experiences with an AR evaluation test bed: Presence, performance, and physiological measurement",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2010/05643560/12OmNCmGNZi",
"parentPublication": {
"id": "proceedings/ismar/2010/9343/0",
"title": "2010 IEEE International Symposium on Mixed and Augmented Reality",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2017/6327/0/6327a251",
"title": "Workshop on enterprise AR adoption obstacles",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2017/6327a251/12OmNqI04Zv",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2017/6327/0",
"title": "2017 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismarw/2015/8471/0/8471a024",
"title": "Measuring Perception of Realism in Mixed and Augmented Reality Summary",
"doi": null,
"abstractUrl": "/proceedings-article/ismarw/2015/8471a024/12OmNwErpst",
"parentPublication": {
"id": "proceedings/ismarw/2015/8471/0",
"title": "2015 IEEE International Symposium on Mixed and Augmented Reality Workshops (ISMARW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2016/0836/0/07504731",
"title": "The effect of realism on the virtual hand illusion",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2016/07504731/12OmNxu6p9n",
"parentPublication": {
"id": "proceedings/vr/2016/0836/0",
"title": "2016 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a892",
"title": "Learning Environments in AR: Comparing Tablet and Head-mounted Augmented Reality Devices at Room and Table Scale",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a892/1CJfrHBJ1pC",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/05/10049649",
"title": "Comparing the Effects of Visual Realism on Size Perception in VR versus Real World Viewing through Physical and Verbal Judgments",
"doi": null,
"abstractUrl": "/journal/tg/2023/05/10049649/1KYolXflEWI",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797965",
"title": "Danger from the Deep: A Gap Affordance Study in Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797965/1cJ0G8LwhhK",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2019/4765/0/476500a098",
"title": "Food Talks: Visual and Interaction Principles for Representing Environmental and Nutritional Food Information in Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2019/476500a098/1gysj4CL9YI",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2019/4765/0",
"title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1pystLSz19C",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"acronym": "ismar",
"groupId": "1000465",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1pysvRpTvr2",
"doi": "10.1109/ISMAR50242.2020.00053",
"title": "Enhancing Visitor Experience or Hindering Docent Roles: Attentional Issues in Augmented Reality Supported Installations",
"normalizedTitle": "Enhancing Visitor Experience or Hindering Docent Roles: Attentional Issues in Augmented Reality Supported Installations",
"abstract": "Studies using augmented reality (AR) technology have suggested that users focus excessively on the virtual content in the AR environment at the expense of the physical world around them. This has implications related to the design of installations that aim to incorporate the user's physical environment as part of the AR experience. To better understand how user attention is managed in an AR environment, we present an observational study of Rewild Our Planet, a multi-modal installation that combined video, audio, a human docent and mobile AR to promote awareness about environmental issues. We found that, while AR was successful in engaging visitors, it drew attention away from other modalities within the installation. This impacts the work of the human docent and affects how visitors absorb information presented in the installation. Based on these observations, we present guidelines to inform the design of future AR-supported installations with the aim of minimizing or taking advantage of the observed attentional issues.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Studies using augmented reality (AR) technology have suggested that users focus excessively on the virtual content in the AR environment at the expense of the physical world around them. This has implications related to the design of installations that aim to incorporate the user's physical environment as part of the AR experience. To better understand how user attention is managed in an AR environment, we present an observational study of Rewild Our Planet, a multi-modal installation that combined video, audio, a human docent and mobile AR to promote awareness about environmental issues. We found that, while AR was successful in engaging visitors, it drew attention away from other modalities within the installation. This impacts the work of the human docent and affects how visitors absorb information presented in the installation. Based on these observations, we present guidelines to inform the design of future AR-supported installations with the aim of minimizing or taking advantage of the observed attentional issues.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Studies using augmented reality (AR) technology have suggested that users focus excessively on the virtual content in the AR environment at the expense of the physical world around them. This has implications related to the design of installations that aim to incorporate the user's physical environment as part of the AR experience. To better understand how user attention is managed in an AR environment, we present an observational study of Rewild Our Planet, a multi-modal installation that combined video, audio, a human docent and mobile AR to promote awareness about environmental issues. We found that, while AR was successful in engaging visitors, it drew attention away from other modalities within the installation. This impacts the work of the human docent and affects how visitors absorb information presented in the installation. Based on these observations, we present guidelines to inform the design of future AR-supported installations with the aim of minimizing or taking advantage of the observed attentional issues.",
"fno": "850800a279",
"keywords": [
"Augmented Reality",
"User Interfaces",
"Visitor Experience",
"Hindering Docent Roles",
"Augmented Reality Supported Installations",
"Virtual Content",
"AR Environment",
"Physical World",
"AR Experience",
"User Attention",
"Rewild Our Planet",
"Multimodal Installation",
"Human Docent",
"Environmental Issues",
"Observed Attentional Issues",
"AR Supported Installations",
"Augmented Reality Technology",
"User Physical Environment",
"Mobile AR",
"Planets",
"User Experience",
"Augmented Reality",
"Guidelines",
"Human Centered Computing",
"Mixed Augmented Reality",
"Human Centered Computing",
"Empirical Studies In HCI",
"Human Centered Computing",
"Field Studies"
],
"authors": [
{
"affiliation": "University of Melbourne,School of Computing and Information Systems",
"fullName": "Brandon Victor Syiem",
"givenName": "Brandon Victor",
"surname": "Syiem",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Melbourne,School of Computing and Information Systems",
"fullName": "Ryan M. Kelly",
"givenName": "Ryan M.",
"surname": "Kelly",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Melbourne,School of Computing and Information Systems",
"fullName": "Eduardo Velloso",
"givenName": "Eduardo",
"surname": "Velloso",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Melbourne,School of Computing and Information Systems",
"fullName": "Jorge Goncalves",
"givenName": "Jorge",
"surname": "Goncalves",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Melbourne,School of Computing and Information Systems",
"fullName": "Tilman Dingler",
"givenName": "Tilman",
"surname": "Dingler",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-11-01T00:00:00",
"pubType": "proceedings",
"pages": "279-288",
"year": "2020",
"issn": "1554-7868",
"isbn": "978-1-7281-8508-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "850800a269",
"articleId": "1pyswIF6obm",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "850800a289",
"articleId": "1pysuoUYBhK",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vs-games/2013/0965/0/06624246",
"title": "Transferring a Virtual Environment Client Session between Independent Opensimulator Installations",
"doi": null,
"abstractUrl": "/proceedings-article/vs-games/2013/06624246/12OmNB7cjhk",
"parentPublication": {
"id": "proceedings/vs-games/2013/0965/0",
"title": "2013 5th International Conference on Games and Virtual Worlds for Serious Applications (VS-GAMES)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2012/4660/0/06402561",
"title": "Using children's developmental psychology to guide augmented-reality design and usability",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2012/06402561/12OmNrIrPhx",
"parentPublication": {
"id": "proceedings/ismar/2012/4660/0",
"title": "2012 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sitis/2013/3211/0/3211a385",
"title": "Making a Hands-On Display with Augmented Reality Work at a Science Museum",
"doi": null,
"abstractUrl": "/proceedings-article/sitis/2013/3211a385/12OmNwpXRVO",
"parentPublication": {
"id": "proceedings/sitis/2013/3211/0",
"title": "2013 International Conference on Signal-Image Technology & Internet-Based Systems (SITIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cw/2002/1862/0/18620553",
"title": "Cybernarium Days 2002 - A Public Experience of Virtual and Augmented Worlds",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2002/18620553/12OmNyUWQXM",
"parentPublication": {
"id": "proceedings/cw/2002/1862/0",
"title": "First International Symposium on Cyber Worlds, 2002. Proceedings.",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cgi/2001/1007/0/10070005",
"title": "Multiple Conceptions of Character-Based Interactive Installations",
"doi": null,
"abstractUrl": "/proceedings-article/cgi/2001/10070005/12OmNyY4rmP",
"parentPublication": {
"id": "proceedings/cgi/2001/1007/0",
"title": "Proceedings. Computer Graphics International 2001",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-amh/2012/4663/0/06483990",
"title": "The augmented painting: Playful interaction with multi-spectral images",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-amh/2012/06483990/12OmNzahbVl",
"parentPublication": {
"id": "proceedings/ismar-amh/2012/4663/0",
"title": "2012 IEEE International Symposium on Mixed and Augmented Reality - Arts, Media, and Humanities (ISMAR-AMH)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/imet/2022/7016/0/09929344",
"title": "The Ledra Palace project: Using emerging technologies to communicate exhibition content-Evaluation of results",
"doi": null,
"abstractUrl": "/proceedings-article/imet/2022/09929344/1HYuVEEo1ck",
"parentPublication": {
"id": "proceedings/imet/2022/7016/0",
"title": "2022 International Conference on Interactive Media, Smart Systems and Emerging Technologies (IMET)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/compsac/2021/2463/0/246300b873",
"title": "Design of District-level Photovoltaic Installations for Optimal Power Production and Economic Benefit",
"doi": null,
"abstractUrl": "/proceedings-article/compsac/2021/246300b873/1wLcsG3nhTO",
"parentPublication": {
"id": "proceedings/compsac/2021/2463/0",
"title": "2021 IEEE 45th Annual Computers, Software, and Applications Conference (COMPSAC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2021/1298/0/129800a008",
"title": "Visitor Artwork Ambient and how Making New Functions of Cultural Heritage by Using Augmented Reality within an Ambient Intelligence",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2021/129800a008/1yeQP7ueE0g",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2021/1298/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccst/2021/4254/0/425400a034",
"title": "Current Status and Prospects of Mobile AR Applications",
"doi": null,
"abstractUrl": "/proceedings-article/iccst/2021/425400a034/1ziP9KHT7Vu",
"parentPublication": {
"id": "proceedings/iccst/2021/4254/0",
"title": "2021 International Conference on Culture-oriented Science & Technology (ICCST)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNBgQFM5",
"title": "Proceedings of 11th IEEE Workshop on Real-Time Operating Systems and Software",
"acronym": "rtoss",
"groupId": "1001975",
"volume": "0",
"displayVolume": "0",
"year": "1994",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNBubOVB",
"doi": "10.1109/RTOSS.1994.292568",
"title": "An end-to-end approach to schedule tasks with shared resources in multiprocessor systems",
"normalizedTitle": "An end-to-end approach to schedule tasks with shared resources in multiprocessor systems",
"abstract": "We propose an end-to-end approach to scheduling tasks that share resources in a multiprocessor or distributed systems. In our approach, each task is mapped into a chain of subtasks, depending on its resource accesses. After each subtask is assigned a proper priority, its worst-case response time can be bounded. Consequently the worst-case response time of each task can be obtained and the schedulability of each task can be verified by comparing the worst-case response time with its relative deadline.<>",
"abstracts": [
{
"abstractType": "Regular",
"content": "We propose an end-to-end approach to scheduling tasks that share resources in a multiprocessor or distributed systems. In our approach, each task is mapped into a chain of subtasks, depending on its resource accesses. After each subtask is assigned a proper priority, its worst-case response time can be bounded. Consequently the worst-case response time of each task can be obtained and the schedulability of each task can be verified by comparing the worst-case response time with its relative deadline.<>",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We propose an end-to-end approach to scheduling tasks that share resources in a multiprocessor or distributed systems. In our approach, each task is mapped into a chain of subtasks, depending on its resource accesses. After each subtask is assigned a proper priority, its worst-case response time can be bounded. Consequently the worst-case response time of each task can be obtained and the schedulability of each task can be verified by comparing the worst-case response time with its relative deadline.",
"fno": "00292568",
"keywords": [
"Scheduling",
"Synchronisation",
"Multiprocessing Systems",
"Real Time Systems",
"Concurrency Control",
"Operating Systems Computers",
"End To End Approach",
"Scheduling",
"Shared Resources",
"Multiprocessor Systems",
"Distributed Systems",
"Subtask Chain",
"Resource Accesses",
"Worst Case Response Time",
"Deadline",
"Real Time Systems",
"Multiprocessing Systems",
"Processor Scheduling",
"Access Protocols",
"Sun",
"Computer Science",
"Real Time Systems",
"Delay Effects"
],
"authors": [
{
"affiliation": "Dept. of Comput. Sci., Illinois Univ., Urbana, IL, USA",
"fullName": "Jun Sun",
"givenName": null,
"surname": "Jun Sun",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Dept. of Comput. Sci., Illinois Univ., Urbana, IL, USA",
"fullName": "R. Bettati",
"givenName": "R.",
"surname": "Bettati",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Dept. of Comput. Sci., Illinois Univ., Urbana, IL, USA",
"fullName": "J.W.-S. Liu",
"givenName": "J.W.-S.",
"surname": "Liu",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "rtoss",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "1994-01-01T00:00:00",
"pubType": "proceedings",
"pages": "18,19,20,21,22",
"year": "1994",
"issn": null,
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "00292567",
"articleId": "12OmNCmpcKV",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "00292569",
"articleId": "12OmNCeaPVZ",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/infcom/1999/5417/1/00749305",
"title": "Minimizing end-to-end delay in high-speed networks with a simple coordinated schedule",
"doi": null,
"abstractUrl": "/proceedings-article/infcom/1999/00749305/12OmNBhZ4i0",
"parentPublication": {
"id": "proceedings/infcom/1999/5417/2",
"title": "Proceedings of INFOCOM'99: Conference on Computer Communications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/rtcsa/2014/3953/0/06910543",
"title": "Improving the response time analysis of global fixed-priority multiprocessor scheduling",
"doi": null,
"abstractUrl": "/proceedings-article/rtcsa/2014/06910543/12OmNBmf3ak",
"parentPublication": {
"id": "proceedings/rtcsa/2014/3953/0",
"title": "2014 IEEE 20th International Conference on Embedded and Real-Time Computing Systems and Applications (RTCSA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/date/2009/3781/0/05090720",
"title": "Response-time analysis of arbitrarily activated tasks in multiprocessor systems with shared resources",
"doi": null,
"abstractUrl": "/proceedings-article/date/2009/05090720/12OmNBqv29K",
"parentPublication": {
"id": "proceedings/date/2009/3781/0",
"title": "2009 Design, Automation & Test in Europe Conference & Exhibition (DATE'09)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/rtas/2010/6690/0/05465963",
"title": "Scheduling Suspendable, Pipelined Tasks with Non-Preemptive Sections in Soft Real-Time Multiprocessor Systems",
"doi": null,
"abstractUrl": "/proceedings-article/rtas/2010/05465963/12OmNrkT7HP",
"parentPublication": {
"id": "proceedings/rtas/2010/6690/0",
"title": "2010 16th IEEE Real-Time and Embedded Technology and Applications Symposium",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/real/1989/2004/0/00063568",
"title": "Application of real-time monitoring to scheduling tasks with random execution times",
"doi": null,
"abstractUrl": "/proceedings-article/real/1989/00063568/12OmNwHQBaQ",
"parentPublication": {
"id": "proceedings/real/1989/2004/0",
"title": "1989 Real-Time Systems Symposium",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/spdp/1995/7195/0/71950312",
"title": "Wait-free consensus in \"in-phase\" multiprocessor systems",
"doi": null,
"abstractUrl": "/proceedings-article/spdp/1995/71950312/12OmNxj23bF",
"parentPublication": {
"id": "proceedings/spdp/1995/7195/0",
"title": "Parallel and Distributed Processing, IEEE Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/real/1993/4480/0/00393503",
"title": "MOCA: A multiprocessor on-line competitive algorithm for real-time system scheduling",
"doi": null,
"abstractUrl": "/proceedings-article/real/1993/00393503/12OmNy4r3Tm",
"parentPublication": {
"id": "proceedings/real/1993/4480/0",
"title": "1993 Proceedings Real-Time Systems Symposium",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wpdrts/1995/7099/0/70990091",
"title": "Bounding the end-to-end response time in multiprocessor real-time systems",
"doi": null,
"abstractUrl": "/proceedings-article/wpdrts/1995/70990091/12OmNzC5TmN",
"parentPublication": {
"id": "proceedings/wpdrts/1995/7099/0",
"title": "Parallel and Distributed Real-Time Systems, Workshop",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tc/1986/05/01676781",
"title": "Scheduling Multiprocessor Tasks to Minimize Schedule Length",
"doi": null,
"abstractUrl": "/journal/tc/1986/05/01676781/13rRUxAStZO",
"parentPublication": {
"id": "trans/tc",
"title": "IEEE Transactions on Computers",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/td/1993/04/l0382",
"title": "Resource Reclaiming in Multiprocessor Real-Time Systems",
"doi": null,
"abstractUrl": "/journal/td/1993/04/l0382/13rRUxASugX",
"parentPublication": {
"id": "trans/td",
"title": "IEEE Transactions on Parallel & Distributed Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "17D45VtKire",
"title": "2018 IEEE/ACM Symposium on Edge Computing (SEC)",
"acronym": "sec",
"groupId": "1816984",
"volume": "0",
"displayVolume": "0",
"year": "2018",
"__typename": "ProceedingType"
},
"article": {
"id": "17D45WZZ7BS",
"doi": "10.1109/SEC.2018.00043",
"title": "A Heuristic Algorithm Based on Resource Requirements Forecasting for Server Placement in Edge Computing",
"normalizedTitle": "A Heuristic Algorithm Based on Resource Requirements Forecasting for Server Placement in Edge Computing",
"abstract": "The placement of edge computing server is the key to the rapid development of edge computing. We propose prediction-mapping-optimization heuristic based on resource requirements forecasting for server placement in edge computing. Through this algorithm, we divide the task into multiple subtasks, and then realize the mapping of subtask-location of server, and finish the information interaction between the servers and the data source through the data naming mechanism proposed by us. With the goal of the lowest cost of service providers, we propose a cross-region resource optimization model and obtained the final server placement strategy.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The placement of edge computing server is the key to the rapid development of edge computing. We propose prediction-mapping-optimization heuristic based on resource requirements forecasting for server placement in edge computing. Through this algorithm, we divide the task into multiple subtasks, and then realize the mapping of subtask-location of server, and finish the information interaction between the servers and the data source through the data naming mechanism proposed by us. With the goal of the lowest cost of service providers, we propose a cross-region resource optimization model and obtained the final server placement strategy.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The placement of edge computing server is the key to the rapid development of edge computing. We propose prediction-mapping-optimization heuristic based on resource requirements forecasting for server placement in edge computing. Through this algorithm, we divide the task into multiple subtasks, and then realize the mapping of subtask-location of server, and finish the information interaction between the servers and the data source through the data naming mechanism proposed by us. With the goal of the lowest cost of service providers, we propose a cross-region resource optimization model and obtained the final server placement strategy.",
"fno": "944500a354",
"keywords": [
"Cloud Computing",
"File Servers",
"Optimisation",
"Resource Allocation",
"Heuristic Algorithm",
"Resource Requirements Forecasting",
"Edge Computing Server",
"Prediction Mapping Optimization",
"Cross Region Resource Optimization Model",
"Final Server Placement Strategy",
"Information Interaction",
"Data Naming Mechanism",
"Servers",
"Edge Computing",
"Optimization",
"Heuristic Algorithms",
"Forecasting",
"Prediction Algorithms",
"Task Analysis",
"Edge Computing Server Placement NDN Resource Optimization"
],
"authors": [
{
"affiliation": null,
"fullName": "Kaile Xiao",
"givenName": "Kaile",
"surname": "Xiao",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Zhipeng Gao",
"givenName": "Zhipeng",
"surname": "Gao",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Qian Wang",
"givenName": "Qian",
"surname": "Wang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Yang Yang",
"givenName": "Yang",
"surname": "Yang",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "sec",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2018-10-01T00:00:00",
"pubType": "proceedings",
"pages": "354-355",
"year": "2018",
"issn": null,
"isbn": "978-1-5386-9445-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "944500a351",
"articleId": "17D45Wuc35Z",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "944500a356",
"articleId": "17D45VsBU4d",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iscc/2016/0679/0/07543758",
"title": "Greedy heuristic for replica server placement in Cloud based Content Delivery Networks",
"doi": null,
"abstractUrl": "/proceedings-article/iscc/2016/07543758/12OmNC4eSGT",
"parentPublication": {
"id": "proceedings/iscc/2016/0679/0",
"title": "2016 IEEE Symposium on Computers and Communication (ISCC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/edge/2018/7238/0/723800a066",
"title": "An Energy-Aware Edge Server Placement Algorithm in Mobile Edge Computing",
"doi": null,
"abstractUrl": "/proceedings-article/edge/2018/723800a066/141AnpAbeCf",
"parentPublication": {
"id": "proceedings/edge/2018/7238/0",
"title": "2018 IEEE International Conference on Edge Computing (EDGE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ipccc/2021/4331/0/09679438",
"title": "Data Placement Strategies for Data-Intensive Computing over Edge Clouds",
"doi": null,
"abstractUrl": "/proceedings-article/ipccc/2021/09679438/1AjTtOdezeM",
"parentPublication": {
"id": "proceedings/ipccc/2021/4331/0",
"title": "2021 IEEE International Performance, Computing, and Communications Conference (IPCCC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpads/2021/0878/0/087800a923",
"title": "Joint Optimization of Auto-Scaling and Adaptive Service Placement in Edge Computing",
"doi": null,
"abstractUrl": "/proceedings-article/icpads/2021/087800a923/1D4LCurSzxS",
"parentPublication": {
"id": "proceedings/icpads/2021/0878/0",
"title": "2021 IEEE 27th International Conference on Parallel and Distributed Systems (ICPADS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icnc/2023/5719/0/10074304",
"title": "On the Placement of Edge Servers in Mobile Edge Computing",
"doi": null,
"abstractUrl": "/proceedings-article/icnc/2023/10074304/1LKwFY0QbE4",
"parentPublication": {
"id": "proceedings/icnc/2023/5719/0",
"title": "2023 International Conference on Computing, Networking and Communications (ICNC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdcs/2019/2519/0/251900b029",
"title": "Efficient Data Placement and Retrieval Services in Edge Computing",
"doi": null,
"abstractUrl": "/proceedings-article/icdcs/2019/251900b029/1ezROo6nJUk",
"parentPublication": {
"id": "proceedings/icdcs/2019/2519/0",
"title": "2019 IEEE 39th International Conference on Distributed Computing Systems (ICDCS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/cc/2022/02/09001206",
"title": "A Dynamic Deep-Learning-Based Virtual Edge Node Placement Scheme for Edge Cloud Systems in Mobile Environment",
"doi": null,
"abstractUrl": "/journal/cc/2022/02/09001206/1hwt0N6nRWo",
"parentPublication": {
"id": "trans/cc",
"title": "IEEE Transactions on Cloud Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ipdps/2020/6876/0/09139826",
"title": "Robust Server Placement for Edge Computing",
"doi": null,
"abstractUrl": "/proceedings-article/ipdps/2020/09139826/1lss5Cf5br2",
"parentPublication": {
"id": "proceedings/ipdps/2020/6876/0",
"title": "2020 IEEE International Parallel and Distributed Processing Symposium (IPDPS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ccgrid/2020/6095/0/09139690",
"title": "Robustness-oriented k Edge Server Placement",
"doi": null,
"abstractUrl": "/proceedings-article/ccgrid/2020/09139690/1lssszVmsxi",
"parentPublication": {
"id": "proceedings/ccgrid/2020/6095/0",
"title": "2020 20th IEEE/ACM International Symposium on Cluster, Cloud and Internet Computing (CCGRID)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/td/2021/06/09301260",
"title": "Distributed and Dynamic Service Placement in Pervasive Edge Computing Networks",
"doi": null,
"abstractUrl": "/journal/td/2021/06/09301260/1pK113ngIsU",
"parentPublication": {
"id": "trans/td",
"title": "IEEE Transactions on Parallel & Distributed Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1CJcAaH6aYg",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"acronym": "vrw",
"groupId": "1836626",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1CJe0tUBKU0",
"doi": "10.1109/VRW55335.2022.00121",
"title": "A Testbed for Exploring Multi-Level Precueing in Augmented Reality",
"normalizedTitle": "A Testbed for Exploring Multi-Level Precueing in Augmented Reality",
"abstract": "Precueing information about upcoming subtasks prior to performing them has the potential to make an entire task faster and easier to accomplish than cueing only the current subtask. Most AR and VR research on precueing has addressed path-following tasks requiring simple actions at a series of locations, such as pushing a button or just visiting that location. We present a testbed for exploring multi-level precueing in a richer task that requires the user to move their hand between specified locations, transporting an object between some of them, and rotating it to a designated orientation.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Precueing information about upcoming subtasks prior to performing them has the potential to make an entire task faster and easier to accomplish than cueing only the current subtask. Most AR and VR research on precueing has addressed path-following tasks requiring simple actions at a series of locations, such as pushing a button or just visiting that location. We present a testbed for exploring multi-level precueing in a richer task that requires the user to move their hand between specified locations, transporting an object between some of them, and rotating it to a designated orientation.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Precueing information about upcoming subtasks prior to performing them has the potential to make an entire task faster and easier to accomplish than cueing only the current subtask. Most AR and VR research on precueing has addressed path-following tasks requiring simple actions at a series of locations, such as pushing a button or just visiting that location. We present a testbed for exploring multi-level precueing in a richer task that requires the user to move their hand between specified locations, transporting an object between some of them, and rotating it to a designated orientation.",
"fno": "840200a540",
"keywords": [
"Augmented Reality",
"Multilevel Precueing",
"Augmented Reality",
"VR Research",
"Path Following Tasks",
"Three Dimensional Displays",
"Conferences",
"User Interfaces",
"Task Analysis",
"Augmented Reality",
"Human Centered Computing X 2014 Human Computer Interaction HCI X 2014 Interaction Paradigms X 2014 Mixed Augmented Reality",
"Computing Methodologies X 2014 Computer Graphics X 2014 Graphics Systems And Interfaces X 2014 Perception"
],
"authors": [
{
"affiliation": "Columbia University,Department of Computer Science",
"fullName": "Jen-Shuo Liu",
"givenName": "Jen-Shuo",
"surname": "Liu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Teachers College, Columbia University,Department of Human Development",
"fullName": "Barbara Tversky",
"givenName": "Barbara",
"surname": "Tversky",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Columbia University,Department of Computer Science",
"fullName": "Steven Feiner",
"givenName": "Steven",
"surname": "Feiner",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vrw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-03-01T00:00:00",
"pubType": "proceedings",
"pages": "540-541",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-8402-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [
{
"id": "1CJe05WVg6k",
"name": "pvrw202284020-09757378s1-mm_840200a540.zip",
"size": "41.6 MB",
"location": "https://www.computer.org/csdl/api/v1/extra/pvrw202284020-09757378s1-mm_840200a540.zip",
"__typename": "WebExtraType"
}
],
"adjacentArticles": {
"previous": {
"fno": "840200a538",
"articleId": "1CJf9GYjHMc",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "840200a542",
"articleId": "1CJcAVYrJew",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ismar/2012/4660/0/06402553",
"title": "Subtle cueing for visual search in augmented reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2012/06402553/12OmNqOffz7",
"parentPublication": {
"id": "proceedings/ismar/2012/4660/0",
"title": "2012 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2017/6647/0/07892383",
"title": "Gesture-based augmented reality annotation",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892383/12OmNwJPMYX",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismarw/2015/8471/0/8471a004",
"title": "Visual Subliminal Cues for Spatial Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismarw/2015/8471a004/12OmNyp9MgE",
"parentPublication": {
"id": "proceedings/ismarw/2015/8471/0",
"title": "2015 IEEE International Symposium on Mixed and Augmented Reality Workshops (ISMARW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446396",
"title": "Attention Guiding Using Augmented Reality in Complex Environments",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446396/13bd1fdV4l1",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/ic/2013/06/mic2013060066",
"title": "Augmented Reality Interfaces",
"doi": null,
"abstractUrl": "/magazine/ic/2013/06/mic2013060066/13rRUIJcWhZ",
"parentPublication": {
"id": "mags/ic",
"title": "IEEE Internet Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/06/07435333",
"title": "Towards Pervasive Augmented Reality: Context-Awareness in Augmented Reality",
"doi": null,
"abstractUrl": "/journal/tg/2017/06/07435333/13rRUwfZBVq",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2011/10/ttg2011101355",
"title": "Exploring the Benefits of Augmented Reality Documentation for Maintenance and Repair",
"doi": null,
"abstractUrl": "/journal/tg/2011/10/ttg2011101355/13rRUxly8XD",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/11/08493594",
"title": "A Comparison of Predictive Spatial Augmented Reality Cues for Procedural Tasks",
"doi": null,
"abstractUrl": "/journal/tg/2018/11/08493594/14M3DYV3qyA",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aivr/2018/9269/0/926900a187",
"title": "Exploring Cultural Heritage in Augmented Reality with GoFind!",
"doi": null,
"abstractUrl": "/proceedings-article/aivr/2018/926900a187/17D45Xq6dD0",
"parentPublication": {
"id": "proceedings/aivr/2018/9269/0",
"title": "2018 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/11/09874255",
"title": "Precueing Object Placement and Orientation for Manual Tasks in Augmented Reality",
"doi": null,
"abstractUrl": "/journal/tg/2022/11/09874255/1GjwLnkmt8I",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1J7W6LmbCw0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"acronym": "ismar-adjunct",
"groupId": "9973799",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1J7Wic8zpC0",
"doi": "10.1109/ISMAR-Adjunct57072.2022.00213",
"title": "Collaborative manual tasks in distributed virtual environments",
"normalizedTitle": "Collaborative manual tasks in distributed virtual environments",
"abstract": "We often encounter many situations where we manually interact with others collaboratively. This allows us to solve tasks that we would otherwise not be able to do on our own. Handover tasks are an everyday interaction that we perform with others without thinking about it. However, these tasks are complex coordinated actions between two actors based on sensory feedback that are per-formed in a very short time. In this work, an overview of handover tasks will be provided from a perspective of the interdisciplinary fields of human-robot interaction (HRI), neuroscience, cognitive science and human-computer interaction (HCI). For this purpose, the structure and process of a handover interaction will be examined and the current state of research will be reflected. The ability to represent realistic manual interactions in an immersive collaborative scenario enables the simulation and training of complex workflows in a realistic context. The study design presented in this paper is intended to improve the understanding and design of handover inter-action techniques in virtual environments. Furthermore possibilities to substitute missing haptic feedback with other sensory stimuli will be evaluated.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We often encounter many situations where we manually interact with others collaboratively. This allows us to solve tasks that we would otherwise not be able to do on our own. Handover tasks are an everyday interaction that we perform with others without thinking about it. However, these tasks are complex coordinated actions between two actors based on sensory feedback that are per-formed in a very short time. In this work, an overview of handover tasks will be provided from a perspective of the interdisciplinary fields of human-robot interaction (HRI), neuroscience, cognitive science and human-computer interaction (HCI). For this purpose, the structure and process of a handover interaction will be examined and the current state of research will be reflected. The ability to represent realistic manual interactions in an immersive collaborative scenario enables the simulation and training of complex workflows in a realistic context. The study design presented in this paper is intended to improve the understanding and design of handover inter-action techniques in virtual environments. Furthermore possibilities to substitute missing haptic feedback with other sensory stimuli will be evaluated.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We often encounter many situations where we manually interact with others collaboratively. This allows us to solve tasks that we would otherwise not be able to do on our own. Handover tasks are an everyday interaction that we perform with others without thinking about it. However, these tasks are complex coordinated actions between two actors based on sensory feedback that are per-formed in a very short time. In this work, an overview of handover tasks will be provided from a perspective of the interdisciplinary fields of human-robot interaction (HRI), neuroscience, cognitive science and human-computer interaction (HCI). For this purpose, the structure and process of a handover interaction will be examined and the current state of research will be reflected. The ability to represent realistic manual interactions in an immersive collaborative scenario enables the simulation and training of complex workflows in a realistic context. The study design presented in this paper is intended to improve the understanding and design of handover inter-action techniques in virtual environments. Furthermore possibilities to substitute missing haptic feedback with other sensory stimuli will be evaluated.",
"fno": "536500a950",
"keywords": [
"Force Feedback",
"Groupware",
"Haptic Interfaces",
"Human Computer Interaction",
"Human Robot Interaction",
"Virtual Reality",
"Cognitive Science",
"Collaborative Manual Tasks",
"Complex Coordinated Actions",
"Complex Workflows",
"Distributed Virtual Environments",
"Everyday Interaction",
"Handover Interaction",
"Handover Interaction Techniques",
"Handover Tasks",
"HCI",
"HRI",
"Human Computer Interaction",
"Human Robot Interaction",
"Immersive Collaborative Scenario",
"Missing Haptic Feedback",
"Neuroscience",
"Realistic Manual Interactions",
"Sensory Feedback",
"Sensory Stimuli",
"Human Computer Interaction",
"Training",
"Neuroscience",
"Virtual Environments",
"Human Robot Interaction",
"Collaboration",
"Manuals",
"Human Centered Computing",
"Visualization",
"Visualization Techniques",
"Visualization Design And Evaluation Methods"
],
"authors": [
{
"affiliation": "University of Applied Science Berlin (HTW)",
"fullName": "Sebastian Keppler",
"givenName": "Sebastian",
"surname": "Keppler",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Applied Science Berlin (HTW)",
"fullName": "Johann Habakuk Israel",
"givenName": "Johann",
"surname": "Habakuk Israel",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Technical University of Berlin (TU Berlin)",
"fullName": "Eva Wiese",
"givenName": "Eva",
"surname": "Wiese",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar-adjunct",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-10-01T00:00:00",
"pubType": "proceedings",
"pages": "950-953",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-5365-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "536500a946",
"articleId": "1J7Wc1Fe2cg",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "536500a954",
"articleId": "1J7W7ejQ5m8",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icalt/2018/6049/0/604901a395",
"title": "Manual Assembly Training in Virtual Environments",
"doi": null,
"abstractUrl": "/proceedings-article/icalt/2018/604901a395/12OmNBKEyqp",
"parentPublication": {
"id": "proceedings/icalt/2018/6049/0",
"title": "2018 IEEE 18th International Conference on Advanced Learning Technologies (ICALT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrais/1995/7084/0/70840148",
"title": "Interacting in distributed collaborative virtual environments",
"doi": null,
"abstractUrl": "/proceedings-article/vrais/1995/70840148/12OmNvAAto5",
"parentPublication": {
"id": "proceedings/vrais/1995/7084/0",
"title": "Virtual Reality Annual International Symposium",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cscwd/2005/0002/1/01504112",
"title": "Agent-based interaction model for collaborative virtual environments",
"doi": null,
"abstractUrl": "/proceedings-article/cscwd/2005/01504112/12OmNvT2oOz",
"parentPublication": {
"id": "proceedings/cscwd/2005/0002/1",
"title": "International Conference on Computer Supported Cooperative Work in Design",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2006/0225/0/02250103",
"title": "Towards a General Model for Selection in Virtual Environments",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2006/02250103/12OmNwF0BWC",
"parentPublication": {
"id": "proceedings/3dui/2006/0225/0",
"title": "3D User Interfaces (3DUI'06)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2015/1727/0/07223467",
"title": "Can living in virtual environments alter reality?",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2015/07223467/12OmNx0RIYM",
"parentPublication": {
"id": "proceedings/vr/2015/1727/0",
"title": "2015 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dcve/2014/5217/0/07160928",
"title": "A survey of communication and awareness in collaborative virtual environments",
"doi": null,
"abstractUrl": "/proceedings-article/3dcve/2014/07160928/12OmNxGj9Sa",
"parentPublication": {
"id": "proceedings/3dcve/2014/5217/0",
"title": "2014 International Workshop on Collaborative Virtual Environments (3DCVE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aina/2013/4953/0/4953a764",
"title": "Scheduling of Sporadic Tasks with Deadline Constrains in Cloud Environments",
"doi": null,
"abstractUrl": "/proceedings-article/aina/2013/4953a764/12OmNxVlTDp",
"parentPublication": {
"id": "proceedings/aina/2013/4953/0",
"title": "2013 IEEE 27th International Conference on Advanced Information Networking and Applications (AINA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2016/0836/0/07504709",
"title": "The effect of multi-sensory cues on performance and experience during walking in immersive virtual environments",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2016/07504709/12OmNyrqzC0",
"parentPublication": {
"id": "proceedings/vr/2016/0836/0",
"title": "2016 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2017/6647/0/07892311",
"title": "Hand gesture controls for image categorization in immersive virtual environments",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892311/12OmNyrqzs0",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2020/5608/0/09089442",
"title": "Effects of Virtual Hand Representation on Interaction and Embodiment in HMD-based Virtual Environments Using Controllers",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2020/09089442/1jIxe7ldiE0",
"parentPublication": {
"id": "proceedings/vr/2020/5608/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1J7W6LmbCw0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"acronym": "ismar-adjunct",
"groupId": "9973799",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1J7WoEKP7vq",
"doi": "10.1109/ISMAR-Adjunct57072.2022.00101",
"title": "Press the red button: A user study comparing notification placement with augmented and non-augmented tasks in AR",
"normalizedTitle": "Press the red button: A user study comparing notification placement with augmented and non-augmented tasks in AR",
"abstract": "Visual notifications are omnipresent in applications ranging from smart phones to Virtual Reality (VR) and Augmented Reality (AR) systems. However notifications can cause disruptive effects on task performance and different notification placements have been shown to have an influence on response times, as well as e.g. on user per-ceived intrusiveness and disruptiveness. We investigated the effects and impacts of four visual notification types in AR environments where a card game task was performed in AR or the real world. In a user study, we interrupted the execution of the main task with one of the AR notification types.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Visual notifications are omnipresent in applications ranging from smart phones to Virtual Reality (VR) and Augmented Reality (AR) systems. However notifications can cause disruptive effects on task performance and different notification placements have been shown to have an influence on response times, as well as e.g. on user per-ceived intrusiveness and disruptiveness. We investigated the effects and impacts of four visual notification types in AR environments where a card game task was performed in AR or the real world. In a user study, we interrupted the execution of the main task with one of the AR notification types.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Visual notifications are omnipresent in applications ranging from smart phones to Virtual Reality (VR) and Augmented Reality (AR) systems. However notifications can cause disruptive effects on task performance and different notification placements have been shown to have an influence on response times, as well as e.g. on user per-ceived intrusiveness and disruptiveness. We investigated the effects and impacts of four visual notification types in AR environments where a card game task was performed in AR or the real world. In a user study, we interrupted the execution of the main task with one of the AR notification types.",
"fno": "536500a483",
"keywords": [
"Augmented Reality",
"Computer Games",
"Human Computer Interaction",
"Interactive Systems",
"Smart Phones",
"Virtual Reality",
"AR Notification Types",
"Card Game Task",
"Different Notification Placements",
"Disruptive Effects",
"Disruptiveness",
"Intrusiveness",
"Nonaugmented Tasks",
"Red Button",
"Response Times",
"Smart Phones",
"Task Performance",
"User Study Comparing Notification Placement",
"Visual Notification Types",
"Visual Notifications",
"Visualization",
"Presses",
"Games",
"Distance Measurement",
"Time Factors",
"Task Analysis",
"Augmented Reality"
],
"authors": [
{
"affiliation": "University of Würzburg,HCI Group",
"fullName": "Lucas Plabst",
"givenName": "Lucas",
"surname": "Plabst",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Colorado State University,Computer Science NUILAB",
"fullName": "Sebastian Oberdörfe",
"givenName": "Sebastian",
"surname": "Oberdörfe",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Hochschule Fulda",
"fullName": "Francisco Ortega",
"givenName": "Francisco",
"surname": "Ortega",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Hochschule Fulda",
"fullName": "Florian Niebling",
"givenName": "Florian",
"surname": "Niebling",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar-adjunct",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-10-01T00:00:00",
"pubType": "proceedings",
"pages": "483-484",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-5365-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "536500a477",
"articleId": "1J7WkpqbbYA",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "536500a485",
"articleId": "1J7Wb7HpqbC",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ismar/2013/2869/0/06671762",
"title": "Improving procedural task performance with Augmented Reality annotations",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2013/06671762/12OmNB7LvHJ",
"parentPublication": {
"id": "proceedings/ismar/2013/2869/0",
"title": "2013 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wf-iot/2014/3459/0/06803144",
"title": "Short paper: Calory Battle AR: An extensible mobile augmented reality exergame platform",
"doi": null,
"abstractUrl": "/proceedings-article/wf-iot/2014/06803144/12OmNx9nGF5",
"parentPublication": {
"id": "proceedings/wf-iot/2014/3459/0",
"title": "2014 IEEE World Forum on Internet of Things (WF-IoT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2012/4660/0/06402590",
"title": "Why should my students use AR? A comparative review of the educational impacts of augmented-reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2012/06402590/12OmNxd4txi",
"parentPublication": {
"id": "proceedings/ismar/2012/4660/0",
"title": "2012 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismarw/2015/8471/0/8471a012",
"title": "Depth Perception and Action in Wearable Augmented Reality: A Pilot Study",
"doi": null,
"abstractUrl": "/proceedings-article/ismarw/2015/8471a012/12OmNzd7bXH",
"parentPublication": {
"id": "proceedings/ismarw/2015/8471/0",
"title": "2015 IEEE International Symposium on Mixed and Augmented Reality Workshops (ISMARW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2008/04/mcg2008040040",
"title": "Toward Next-Gen Mobile AR Games",
"doi": null,
"abstractUrl": "/magazine/cg/2008/04/mcg2008040040/13rRUxASujW",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a584",
"title": "Investigating Display Position of a Head-Fixed Augmented Reality Notification for Dual-task",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a584/1CJd297BiDu",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a686",
"title": "Exploring Augmented Reality Notification Placement while Communicating with Virtual Avatar",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a686/1J7WgWfFoOs",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2023/4815/0/481500a519",
"title": "Exploring the Effects of Augmented Reality Notification Type and Placement in AR HMD while Walking",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2023/481500a519/1MNgzTvjRsI",
"parentPublication": {
"id": "proceedings/vr/2023/4815/0",
"title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2019/4765/0/476500a034",
"title": "AR Tips: Augmented First-Person View Task Instruction Videos",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2019/476500a034/1gysm0mzZlK",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2019/4765/0",
"title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a717",
"title": "[DC] Glanceable AR: Towards an Always-on Augmented Reality Future",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a717/1tnXrUsEHYc",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1cJ7xyunODu",
"title": "2018 20th Symposium on Virtual and Augmented Reality (SVR)",
"acronym": "svr",
"groupId": "1800426",
"volume": "0",
"displayVolume": "0",
"year": "2018",
"__typename": "ProceedingType"
},
"article": {
"id": "1cJ7y1sjSBG",
"doi": "10.1109/SVR.2018.00031",
"title": "Augmented Reality Owner Manual Evaluation by NASA TLX Method",
"normalizedTitle": "Augmented Reality Owner Manual Evaluation by NASA TLX Method",
"abstract": "Information is indispensable for any products and, likewise, the users expect to use all the resources offered by an artifact. In this way, the instruction manual is necessary for every product to be supplied. In turn, the building use, operation, and maintenance manual, also known as the building owner manual (BOM) is governed by rules that guide its creation, such as, items and definitions that must be followed. The building owner manual has been updated over time, but its format has not been modified. Such manuals are in most cases presented in the textual format with technical terms. A potential remedy to this situation is to enhance the traditional building owner manual format using Augmented Reality (AR) that promotes new forms of interaction between the user and the BOM. In a nutshell, AR enables the displaying of virtual (computer generated) graphics overlaid on views of the real environment. In this way, this paper is part of a Ph.D. research that aims to optimize the performance of the building owner manual through AR. Thus, the objective of this research is to evaluate the incorporation of Augmented Reality features into the BOM in order to qualify its use. The methodology adopted in this work follows the Design Science Research approach. The contribution granted presents models of insertion of the Augmented Reality technology in the building owners manual and qualifies the incorporation of the AR, through experiments with measurement method following NASA TLX protocol. This innovative study brings to the building owners manual advances in its use in terms of visualization, instruction and assembly in a qualified way, explained by the workload measured by NASA TLX method. In short, it was proved that the insertion of AR technology, regardless of the format acts favorably integrated with the BOM, optimizing its orientation role and recommending the best use of the building.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Information is indispensable for any products and, likewise, the users expect to use all the resources offered by an artifact. In this way, the instruction manual is necessary for every product to be supplied. In turn, the building use, operation, and maintenance manual, also known as the building owner manual (BOM) is governed by rules that guide its creation, such as, items and definitions that must be followed. The building owner manual has been updated over time, but its format has not been modified. Such manuals are in most cases presented in the textual format with technical terms. A potential remedy to this situation is to enhance the traditional building owner manual format using Augmented Reality (AR) that promotes new forms of interaction between the user and the BOM. In a nutshell, AR enables the displaying of virtual (computer generated) graphics overlaid on views of the real environment. In this way, this paper is part of a Ph.D. research that aims to optimize the performance of the building owner manual through AR. Thus, the objective of this research is to evaluate the incorporation of Augmented Reality features into the BOM in order to qualify its use. The methodology adopted in this work follows the Design Science Research approach. The contribution granted presents models of insertion of the Augmented Reality technology in the building owners manual and qualifies the incorporation of the AR, through experiments with measurement method following NASA TLX protocol. This innovative study brings to the building owners manual advances in its use in terms of visualization, instruction and assembly in a qualified way, explained by the workload measured by NASA TLX method. In short, it was proved that the insertion of AR technology, regardless of the format acts favorably integrated with the BOM, optimizing its orientation role and recommending the best use of the building.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Information is indispensable for any products and, likewise, the users expect to use all the resources offered by an artifact. In this way, the instruction manual is necessary for every product to be supplied. In turn, the building use, operation, and maintenance manual, also known as the building owner manual (BOM) is governed by rules that guide its creation, such as, items and definitions that must be followed. The building owner manual has been updated over time, but its format has not been modified. Such manuals are in most cases presented in the textual format with technical terms. A potential remedy to this situation is to enhance the traditional building owner manual format using Augmented Reality (AR) that promotes new forms of interaction between the user and the BOM. In a nutshell, AR enables the displaying of virtual (computer generated) graphics overlaid on views of the real environment. In this way, this paper is part of a Ph.D. research that aims to optimize the performance of the building owner manual through AR. Thus, the objective of this research is to evaluate the incorporation of Augmented Reality features into the BOM in order to qualify its use. The methodology adopted in this work follows the Design Science Research approach. The contribution granted presents models of insertion of the Augmented Reality technology in the building owners manual and qualifies the incorporation of the AR, through experiments with measurement method following NASA TLX protocol. This innovative study brings to the building owners manual advances in its use in terms of visualization, instruction and assembly in a qualified way, explained by the workload measured by NASA TLX method. In short, it was proved that the insertion of AR technology, regardless of the format acts favorably integrated with the BOM, optimizing its orientation role and recommending the best use of the building.",
"fno": "060400a150",
"keywords": [
"Augmented Reality",
"User Manuals",
"NASA TLX Method",
"BOM",
"Instruction Manual",
"Maintenance Manual",
"Building Owner Manual",
"Augmented Reality Owner Manual Evaluation",
"AR",
"Manuals",
"Visualization",
"Augmented Reality",
"NASA",
"Buildings",
"Mars",
"Bills Of Materials",
"Augmented Reality",
"NASA TLX",
"Maintenance",
"Prototype",
"Building Owner Manual"
],
"authors": [
{
"affiliation": "Universidade Estadual de Campinas",
"fullName": "Lorena Moreira",
"givenName": "Lorena",
"surname": "Moreira",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Universidade Estadual de Campinas",
"fullName": "Regina Ruschel",
"givenName": "Regina",
"surname": "Ruschel",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "svr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2018-10-01T00:00:00",
"pubType": "proceedings",
"pages": "150-156",
"year": "2018",
"issn": null,
"isbn": "978-1-7281-0604-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "060400a089",
"articleId": "1cJ7xKWb9iU",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "060400a099",
"articleId": "1cJ7zIwsoEg",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2008/1971/0/04480755",
"title": "Augmented Reality for Industrial Building Acceptance",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2008/04480755/12OmNwc3wyn",
"parentPublication": {
"id": "proceedings/vr/2008/1971/0",
"title": "IEEE Virtual Reality 2008",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2014/6184/0/06948517",
"title": "Collaboration in mediated and augmented reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2014/06948517/12OmNy6HQPU",
"parentPublication": {
"id": "proceedings/ismar/2014/6184/0",
"title": "2014 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismarw/2015/8471/0/8471a001",
"title": "Collaboration in Mediated and Augmented Reality (CiMAR) Summary",
"doi": null,
"abstractUrl": "/proceedings-article/ismarw/2015/8471a001/12OmNybfqVO",
"parentPublication": {
"id": "proceedings/ismarw/2015/8471/0",
"title": "2015 IEEE International Symposium on Mixed and Augmented Reality Workshops (ISMARW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/07/07935524",
"title": "Handheld Guides in Inspection Tasks: Augmented Reality versus Picture",
"doi": null,
"abstractUrl": "/journal/tg/2018/07/07935524/13rRUwIF6lc",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a872",
"title": "Towards Retargetable Animations for Industrial Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a872/1CJddfDEu1W",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/11/09874255",
"title": "Precueing Object Placement and Orientation for Manual Tasks in Augmented Reality",
"doi": null,
"abstractUrl": "/journal/tg/2022/11/09874255/1GjwLnkmt8I",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2022/5325/0/532500a558",
"title": "ATOFIS, an AR Training System for Manual Assembly: A Full Comparative Evaluation against Guides",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2022/532500a558/1JrRgTi23y8",
"parentPublication": {
"id": "proceedings/ismar/2022/5325/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2019/4765/0/476500a004",
"title": "A Scalable and Long-Term Wearable Augmented Reality System for Order Picking",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2019/476500a004/1gysmqM7SJW",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2019/4765/0",
"title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/svr/2020/9231/0/923100a179",
"title": "Augmented Reality for Manual Assembly in Industry 4.0: Gathering Guidelines",
"doi": null,
"abstractUrl": "/proceedings-article/svr/2020/923100a179/1oZBDyQMM6c",
"parentPublication": {
"id": "proceedings/svr/2020/9231/0",
"title": "2020 22nd Symposium on Virtual and Augmented Reality (SVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/svr/2020/9231/0/923100a189",
"title": "Manual PCB assembly using Augmented Reality towards Total Quality",
"doi": null,
"abstractUrl": "/proceedings-article/svr/2020/923100a189/1oZBzP5SgGk",
"parentPublication": {
"id": "proceedings/svr/2020/9231/0",
"title": "2020 22nd Symposium on Virtual and Augmented Reality (SVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1oZBzHKi4UM",
"title": "2020 22nd Symposium on Virtual and Augmented Reality (SVR)",
"acronym": "svr",
"groupId": "1800426",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1oZBDyQMM6c",
"doi": "10.1109/SVR51698.2020.00039",
"title": "Augmented Reality for Manual Assembly in Industry 4.0: Gathering Guidelines",
"normalizedTitle": "Augmented Reality for Manual Assembly in Industry 4.0: Gathering Guidelines",
"abstract": "The manufacturing industry always strugle to maintain competitiveness and lowering its production costs. With the significant improvement in the reliability and stability of facilities, human error has become one of the most critical factors for quality assurance. About 70% to 90% of quality defects in assembly production systems are directly or indirectly caused by human errors. In the last decades, the interest in using Augmented Reality (AR) in many application areas, including the industry, has increased. Although AR is recognized as an important technology, there is an evident gap in design guidelines and methodologies in this field. The objective of this paper is to find guidelines to help in designing AR-based manual assembly systems. We did a systematic literature mapping over scientific review papers, from 2015 to 2020, and found 14 review papers. From 14 papers, 11 of them (78,6%) presented guidelines topics. The guidelines found were classified into four groups: usability, cognitive, ergonomics, and corporate-related. The suggested group of general guidelines based on previous work may be useful as a starting point when designing AR-based manual assembly systems. We conclude that a comprehensive set, that encompasses general and specific guidelines, depends on the characteristics of the assembly line being implemented. Limitations of this work are the low number of review papers found and the probability of having new guideline categories not found in this paper.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The manufacturing industry always strugle to maintain competitiveness and lowering its production costs. With the significant improvement in the reliability and stability of facilities, human error has become one of the most critical factors for quality assurance. About 70% to 90% of quality defects in assembly production systems are directly or indirectly caused by human errors. In the last decades, the interest in using Augmented Reality (AR) in many application areas, including the industry, has increased. Although AR is recognized as an important technology, there is an evident gap in design guidelines and methodologies in this field. The objective of this paper is to find guidelines to help in designing AR-based manual assembly systems. We did a systematic literature mapping over scientific review papers, from 2015 to 2020, and found 14 review papers. From 14 papers, 11 of them (78,6%) presented guidelines topics. The guidelines found were classified into four groups: usability, cognitive, ergonomics, and corporate-related. The suggested group of general guidelines based on previous work may be useful as a starting point when designing AR-based manual assembly systems. We conclude that a comprehensive set, that encompasses general and specific guidelines, depends on the characteristics of the assembly line being implemented. Limitations of this work are the low number of review papers found and the probability of having new guideline categories not found in this paper.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The manufacturing industry always strugle to maintain competitiveness and lowering its production costs. With the significant improvement in the reliability and stability of facilities, human error has become one of the most critical factors for quality assurance. About 70% to 90% of quality defects in assembly production systems are directly or indirectly caused by human errors. In the last decades, the interest in using Augmented Reality (AR) in many application areas, including the industry, has increased. Although AR is recognized as an important technology, there is an evident gap in design guidelines and methodologies in this field. The objective of this paper is to find guidelines to help in designing AR-based manual assembly systems. We did a systematic literature mapping over scientific review papers, from 2015 to 2020, and found 14 review papers. From 14 papers, 11 of them (78,6%) presented guidelines topics. The guidelines found were classified into four groups: usability, cognitive, ergonomics, and corporate-related. The suggested group of general guidelines based on previous work may be useful as a starting point when designing AR-based manual assembly systems. We conclude that a comprehensive set, that encompasses general and specific guidelines, depends on the characteristics of the assembly line being implemented. Limitations of this work are the low number of review papers found and the probability of having new guideline categories not found in this paper.",
"fno": "923100a179",
"keywords": [
"Assembling",
"Augmented Reality",
"Ergonomics",
"Factory Automation",
"Probability",
"Production Engineering Computing",
"Quality Control",
"User Interfaces",
"Probability",
"Ergonomics",
"Augmented Reality",
"Assembly Production Systems",
"Quality Assurance",
"Reliability",
"Manufacturing Industry 4 0",
"Assembly Line",
"Designing AR Based Manual Assembly Systems",
"Guidelines",
"Augmented Reality",
"Manuals",
"Systematics",
"Task Analysis",
"Maintenance Engineering",
"Training",
"Augmented Reality",
"Design Guidelines",
"Production System",
"Industry 4 0",
"Manual Assembly"
],
"authors": [
{
"affiliation": "Santa Catarina St. University,Electrical Eng. Department,Joinville,Brazil",
"fullName": "Salvador S. Agati",
"givenName": "Salvador S.",
"surname": "Agati",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Santa Catarina St. University,Electrical Eng. Department,Joinville,Brazil",
"fullName": "Rudieri D. Bauer",
"givenName": "Rudieri D.",
"surname": "Bauer",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Santa Catarina St. University,Computer Sc. Department,Joinville,Brazil",
"fullName": "Marcelo da S. Hounsell",
"givenName": "Marcelo da S.",
"surname": "Hounsell",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Santa Catarina St. University,Electrical Eng. Department,Joinville,Brazil",
"fullName": "Aleksander S. Paterno",
"givenName": "Aleksander S.",
"surname": "Paterno",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "svr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-11-01T00:00:00",
"pubType": "proceedings",
"pages": "179-188",
"year": "2020",
"issn": null,
"isbn": "978-1-7281-9231-4",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "923100a174",
"articleId": "1oZBCuhzaQ8",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "923100a189",
"articleId": "1oZBzP5SgGk",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iv/1999/0210/0/02100032",
"title": "Virtual Reality and Augmented Reality as a Training Tool for Assembly Tasks",
"doi": null,
"abstractUrl": "/proceedings-article/iv/1999/02100032/12OmNAObbyR",
"parentPublication": {
"id": "proceedings/iv/1999/0210/0",
"title": "1999 IEEE International Conference on Information Visualization (Cat. No. PR00210)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2015/7660/0/7660a120",
"title": "[POSTER] Design Guidelines for Generating Augmented Reality Instructions",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2015/7660a120/12OmNAle6zC",
"parentPublication": {
"id": "proceedings/ismar/2015/7660/0",
"title": "2015 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccms/2010/3941/1/3941a133",
"title": "Key Technique of Assembly System in an Augmented Reality Environment",
"doi": null,
"abstractUrl": "/proceedings-article/iccms/2010/3941a133/12OmNqC2uYI",
"parentPublication": {
"id": "proceedings/iccms/2010/3941/3",
"title": "Computer Modeling and Simulation, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/kelvar/2016/2344/0/07563677",
"title": "Towards the development of guidelines for educational evaluation of augmented reality tools",
"doi": null,
"abstractUrl": "/proceedings-article/kelvar/2016/07563677/12OmNzUxOdN",
"parentPublication": {
"id": "proceedings/kelvar/2016/2344/0",
"title": "2016 IEEE Virtual Reality Workshop on K-12 Embodied Learning through Virtual & Augmented Reality (KELVAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2022/5325/0/532500a558",
"title": "ATOFIS, an AR Training System for Manual Assembly: A Full Comparative Evaluation against Guides",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2022/532500a558/1JrRgTi23y8",
"parentPublication": {
"id": "proceedings/ismar/2022/5325/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/svr/2018/0604/0/060400a150",
"title": "Augmented Reality Owner Manual Evaluation by NASA TLX Method",
"doi": null,
"abstractUrl": "/proceedings-article/svr/2018/060400a150/1cJ7y1sjSBG",
"parentPublication": {
"id": "proceedings/svr/2018/0604/0",
"title": "2018 20th Symposium on Virtual and Augmented Reality (SVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/svr/2020/9231/0/923100a189",
"title": "Manual PCB assembly using Augmented Reality towards Total Quality",
"doi": null,
"abstractUrl": "/proceedings-article/svr/2020/923100a189/1oZBzP5SgGk",
"parentPublication": {
"id": "proceedings/svr/2020/9231/0",
"title": "2020 22nd Symposium on Virtual and Augmented Reality (SVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2020/8508/0/850800a279",
"title": "Enhancing Visitor Experience or Hindering Docent Roles: Attentional Issues in Augmented Reality Supported Installations",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2020/850800a279/1pysvRpTvr2",
"parentPublication": {
"id": "proceedings/ismar/2020/8508/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2020/8508/0/850800a486",
"title": "Guideline and Tool for Designing an Assembly Task Support System Using Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2020/850800a486/1pysyhDXiw0",
"parentPublication": {
"id": "proceedings/ismar/2020/8508/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aivr/2020/7463/0/746300a298",
"title": "Problems with Physical Simulation in a Virtual Lego-based Assembly Task using Unity3D Engine",
"doi": null,
"abstractUrl": "/proceedings-article/aivr/2020/746300a298/1qpzCOjJ5e0",
"parentPublication": {
"id": "proceedings/aivr/2020/7463/0",
"title": "2020 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1oZBzHKi4UM",
"title": "2020 22nd Symposium on Virtual and Augmented Reality (SVR)",
"acronym": "svr",
"groupId": "1800426",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1oZBzP5SgGk",
"doi": "10.1109/SVR51698.2020.00040",
"title": "Manual PCB assembly using Augmented Reality towards Total Quality",
"normalizedTitle": "Manual PCB assembly using Augmented Reality towards Total Quality",
"abstract": "Despite the fourth wave of industry automation, the so-called Industry 4.0, manual assembly still remains as an important step to many products and is responsible for employing a lot of workers. But manual assembly is prone to errors, is time consuming and boring. One of the enabling technologies of Industry 4.0, Augmented Reality (AR) plays a major role on helping manual assembly by providing on-demand guidance. The technology can also support monitoring features that are compliant to Total Quality Management (TQM) requirements which seeks to minimize errors and maximize quality. This paper reviews the research on using AR to aid manual assembly and presents requirements and the architecture design for an AR system applied to the context of manual assembly of Printed Circuit Boards (PCB). Four device configurations and SDKs were evaluated to be applied in industrial processes of manual assembly of PCB. The paper explores particulars of the task, product and workers towards a novel AR system to fulfill high quality product expectations. This study shows that an AR-Based assembly need not to be expensive in order to be effective, it is shown that every assembly configuration has to be carefully assessed in order to achieve a proper system configuration.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Despite the fourth wave of industry automation, the so-called Industry 4.0, manual assembly still remains as an important step to many products and is responsible for employing a lot of workers. But manual assembly is prone to errors, is time consuming and boring. One of the enabling technologies of Industry 4.0, Augmented Reality (AR) plays a major role on helping manual assembly by providing on-demand guidance. The technology can also support monitoring features that are compliant to Total Quality Management (TQM) requirements which seeks to minimize errors and maximize quality. This paper reviews the research on using AR to aid manual assembly and presents requirements and the architecture design for an AR system applied to the context of manual assembly of Printed Circuit Boards (PCB). Four device configurations and SDKs were evaluated to be applied in industrial processes of manual assembly of PCB. The paper explores particulars of the task, product and workers towards a novel AR system to fulfill high quality product expectations. This study shows that an AR-Based assembly need not to be expensive in order to be effective, it is shown that every assembly configuration has to be carefully assessed in order to achieve a proper system configuration.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Despite the fourth wave of industry automation, the so-called Industry 4.0, manual assembly still remains as an important step to many products and is responsible for employing a lot of workers. But manual assembly is prone to errors, is time consuming and boring. One of the enabling technologies of Industry 4.0, Augmented Reality (AR) plays a major role on helping manual assembly by providing on-demand guidance. The technology can also support monitoring features that are compliant to Total Quality Management (TQM) requirements which seeks to minimize errors and maximize quality. This paper reviews the research on using AR to aid manual assembly and presents requirements and the architecture design for an AR system applied to the context of manual assembly of Printed Circuit Boards (PCB). Four device configurations and SDKs were evaluated to be applied in industrial processes of manual assembly of PCB. The paper explores particulars of the task, product and workers towards a novel AR system to fulfill high quality product expectations. This study shows that an AR-Based assembly need not to be expensive in order to be effective, it is shown that every assembly configuration has to be carefully assessed in order to achieve a proper system configuration.",
"fno": "923100a189",
"keywords": [
"Assembling",
"Augmented Reality",
"Factory Automation",
"Printed Circuits",
"Production Engineering Computing",
"Total Quality Management",
"Augmented Reality",
"Manual PCB Assembly",
"Total Quality Management Requirements",
"Industry Automation",
"Industry 4 0",
"On Demand Guidance",
"Architecture Design",
"Printed Circuit Boards Manual Assembly",
"Industrial Process",
"Total Quality Management",
"Manuals",
"Production",
"Task Analysis",
"Augmented Reality",
"Training",
"Tools",
"Computer Graphics",
"Production Systems",
"System Design",
"Total Quality Management"
],
"authors": [
{
"affiliation": "Universidade do Estado de Santa Catarina - UDESC,Laboratory for Research on Visual Applications - LARVA,Joinville,Brazil",
"fullName": "Rudieri Dietrich Bauer",
"givenName": "Rudieri Dietrich",
"surname": "Bauer",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Universidade do Estado de Santa Catarina - UDESC,Laboratory for Research on Visual Applications - LARVA,Joinville,Brazil",
"fullName": "Salvador Sergi Agati",
"givenName": "Salvador Sergi",
"surname": "Agati",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Universidade do Estado de Santa Catarina - UDESC,Laboratory for Research on Visual Applications - LARVA,Joinville,Brazil",
"fullName": "Marcelo dá Silva Hounsell",
"givenName": "Marcelo",
"surname": "dá Silva Hounsell",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Universidade do Estado de Santa Catarina - UDESC,Laboratory for Research on Visual Applications - LARVA,Joinville,Brazil",
"fullName": "André Tavares da Silva",
"givenName": "André Tavares",
"surname": "da Silva",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "svr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-11-01T00:00:00",
"pubType": "proceedings",
"pages": "189-198",
"year": "2020",
"issn": null,
"isbn": "978-1-7281-9231-4",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "923100a179",
"articleId": "1oZBDyQMM6c",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "923100a199",
"articleId": "1oZBDgufHnq",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iv/1999/0210/0/02100032",
"title": "Virtual Reality and Augmented Reality as a Training Tool for Assembly Tasks",
"doi": null,
"abstractUrl": "/proceedings-article/iv/1999/02100032/12OmNAObbyR",
"parentPublication": {
"id": "proceedings/iv/1999/0210/0",
"title": "1999 IEEE International Conference on Information Visualization (Cat. No. PR00210)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cw/2009/3791/0/3791a021",
"title": "Assembly Design and Evaluation Based on Bare-Hand Interaction in an Augmented Reality Environment",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2009/3791a021/12OmNC2OSHr",
"parentPublication": {
"id": "proceedings/cw/2009/3791/0",
"title": "2009 International Conference on CyberWorlds",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccms/2010/3941/1/3941a133",
"title": "Key Technique of Assembly System in an Augmented Reality Environment",
"doi": null,
"abstractUrl": "/proceedings-article/iccms/2010/3941a133/12OmNqC2uYI",
"parentPublication": {
"id": "proceedings/iccms/2010/3941/3",
"title": "Computer Modeling and Simulation, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2022/5325/0/532500a558",
"title": "ATOFIS, an AR Training System for Manual Assembly: A Full Comparative Evaluation against Guides",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2022/532500a558/1JrRgTi23y8",
"parentPublication": {
"id": "proceedings/ismar/2022/5325/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/svr/2018/0604/0/060400a150",
"title": "Augmented Reality Owner Manual Evaluation by NASA TLX Method",
"doi": null,
"abstractUrl": "/proceedings-article/svr/2018/060400a150/1cJ7y1sjSBG",
"parentPublication": {
"id": "proceedings/svr/2018/0604/0",
"title": "2018 20th Symposium on Virtual and Augmented Reality (SVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2019/4765/0/476500a004",
"title": "A Scalable and Long-Term Wearable Augmented Reality System for Order Picking",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2019/476500a004/1gysmqM7SJW",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2019/4765/0",
"title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090576",
"title": "Augmented Reality for the Manufacturing Industry: The Case of an Assembly Assistant",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090576/1jIxw4eZW8M",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090631",
"title": "Framing the Scene: An Examination of Augmented Reality Head Worn Displays in Construction Assembly Tasks",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090631/1jIxyGx0KXK",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/svr/2020/9231/0/923100a179",
"title": "Augmented Reality for Manual Assembly in Industry 4.0: Gathering Guidelines",
"doi": null,
"abstractUrl": "/proceedings-article/svr/2020/923100a179/1oZBDyQMM6c",
"parentPublication": {
"id": "proceedings/svr/2020/9231/0",
"title": "2020 22nd Symposium on Virtual and Augmented Reality (SVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2020/7675/0/767500a135",
"title": "A User Study on AR-assisted Industrial Assembly",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2020/767500a135/1pBMl1Z7xw4",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2020/7675/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNAsTgXa",
"title": "2017 IEEE 17th International Conference on Advanced Learning Technologies (ICALT)",
"acronym": "icalt",
"groupId": "1000009",
"volume": "0",
"displayVolume": "0",
"year": "2017",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNroijg2",
"doi": "10.1109/ICALT.2017.145",
"title": "A Comparison between Oculus Rift and a Low-Cost Smartphone VR Headset: Immersive User Experience and Learning",
"normalizedTitle": "A Comparison between Oculus Rift and a Low-Cost Smartphone VR Headset: Immersive User Experience and Learning",
"abstract": "Differences in technological characteristics between expensive head-mounted displays like Oculus Rift and low-cost mobile-based Virtual Reality (VR) devices may affect the experience of the user and learning in virtual environments with an educational content and therefore are important to be studied. This paper describes a study that aims at finding differences in levels of spatial presence, usability, simulator sickness, satisfaction, workload and learning outcome between Oculus Rift and a low cost smartphone VR Headset, when users interact with an educational virtual environment. Our results do not show differences in the variables studied. It seems that mobile-based VR systems could provide acceptable levels of immersive user experience and contribute to the pedagogical use of VR.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Differences in technological characteristics between expensive head-mounted displays like Oculus Rift and low-cost mobile-based Virtual Reality (VR) devices may affect the experience of the user and learning in virtual environments with an educational content and therefore are important to be studied. This paper describes a study that aims at finding differences in levels of spatial presence, usability, simulator sickness, satisfaction, workload and learning outcome between Oculus Rift and a low cost smartphone VR Headset, when users interact with an educational virtual environment. Our results do not show differences in the variables studied. It seems that mobile-based VR systems could provide acceptable levels of immersive user experience and contribute to the pedagogical use of VR.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Differences in technological characteristics between expensive head-mounted displays like Oculus Rift and low-cost mobile-based Virtual Reality (VR) devices may affect the experience of the user and learning in virtual environments with an educational content and therefore are important to be studied. This paper describes a study that aims at finding differences in levels of spatial presence, usability, simulator sickness, satisfaction, workload and learning outcome between Oculus Rift and a low cost smartphone VR Headset, when users interact with an educational virtual environment. Our results do not show differences in the variables studied. It seems that mobile-based VR systems could provide acceptable levels of immersive user experience and contribute to the pedagogical use of VR.",
"fno": "3870a477",
"keywords": [
"Headphones",
"Usability",
"Mobile Communication",
"Planets",
"Extraterrestrial Measurements",
"Virtual Environments",
"Virtual Reality",
"Mobile VR Headset",
"Oculus Rift",
"User Experience",
"Learning"
],
"authors": [
{
"affiliation": null,
"fullName": "Nikiforos M. Papachristos",
"givenName": "Nikiforos M.",
"surname": "Papachristos",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Ioannis Vrellis",
"givenName": "Ioannis",
"surname": "Vrellis",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Tassos A. Mikropoulos",
"givenName": "Tassos A.",
"surname": "Mikropoulos",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icalt",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2017-07-01T00:00:00",
"pubType": "proceedings",
"pages": "477-481",
"year": "2017",
"issn": "2161-377X",
"isbn": "978-1-5386-3870-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "3870a472",
"articleId": "12OmNyL0TxL",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "3870a482",
"articleId": "12OmNAq3hJR",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2015/1727/0/07223356",
"title": "An immersive labyrinth",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2015/07223356/12OmNA0vnNl",
"parentPublication": {
"id": "proceedings/vr/2015/1727/0",
"title": "2015 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/big-data/2015/9926/0/07364040",
"title": "Immersive visualization for materials science data analysis using the Oculus Rift",
"doi": null,
"abstractUrl": "/proceedings-article/big-data/2015/07364040/12OmNApcu7e",
"parentPublication": {
"id": "proceedings/big-data/2015/9926/0",
"title": "2015 IEEE International Conference on Big Data (Big Data)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2017/0457/0/0457c671",
"title": "Position Tracking for Virtual Reality Using Commodity WiFi",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2017/0457c671/12OmNC8MsMz",
"parentPublication": {
"id": "proceedings/cvpr/2017/0457/0",
"title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2017/6647/0/07892261",
"title": "The AR-Rift 2 prototype",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892261/12OmNCcKQmq",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2014/2871/0/06802040",
"title": "Keynote speaker: Digital fear and pain control and the Oculus Rift: SnowWorld, SpiderWorld, and world trade center world",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2014/06802040/12OmNCfjeuj",
"parentPublication": {
"id": "proceedings/vr/2014/2871/0",
"title": "2014 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2017/6647/0/07892331",
"title": "Advertising perception with immersive virtual reality devices",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892331/12OmNvk7JO0",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ichi/2016/6117/0/6117a216",
"title": "Going Outside While Staying Inside — Exercise Motivation with Immersive vs. Non–immersive Recreational Virtual Environment Augmentation for Older Adult Nursing Home Residents",
"doi": null,
"abstractUrl": "/proceedings-article/ichi/2016/6117a216/12OmNxwncza",
"parentPublication": {
"id": "proceedings/ichi/2016/6117/0",
"title": "2016 IEEE International Conference on Healthcare Informatics (ICHI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cts/2016/2300/0/07871013",
"title": "Immersive Telerobotics Using the Oculus Rift and the 5DT Ultra Data Glove",
"doi": null,
"abstractUrl": "/proceedings-article/cts/2016/07871013/12OmNzA6GIG",
"parentPublication": {
"id": "proceedings/cts/2016/2300/0",
"title": "2016 International Conference on Collaboration Technologies and Systems (CTS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446076",
"title": "Comparing VR Display with Conventional Displays for User Evaluation Experiences",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446076/13bd1gCd7Tr",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sive/2018/5713/0/08577177",
"title": "Influence of hearing your steps and environmental sounds in VR while walking",
"doi": null,
"abstractUrl": "/proceedings-article/sive/2018/08577177/17D45XoXP3w",
"parentPublication": {
"id": "proceedings/sive/2018/5713/0",
"title": "2018 IEEE 4th VR Workshop on Sonic Interactions for Virtual Environments (SIVE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNxV4itF",
"title": "2017 IEEE Virtual Reality (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2017",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNxETane",
"doi": "10.1109/VR.2017.7892253",
"title": "Lean into it: Exploring leaning-based motion cueing interfaces for virtual reality movement",
"normalizedTitle": "Lean into it: Exploring leaning-based motion cueing interfaces for virtual reality movement",
"abstract": "We describe here a pilot user study comparing five different locomotion interfaces for virtual reality (VR) locomotion. We compared a standard non-motion cueing interface, Joystick, with four leaning-based seated motion-cueing interfaces: NaviChair, MuvMan, Head-Directed and Swivel Chair. The aim of this mixed methods study was to investigate the usability and user experience of each interface, in order to better understand relevant factors and guide the design of future ground-based VR locomotion interfaces. We asked participants to give talk-aloud feedback and simultaneously recorded their responses while they were performing a search task in VR. Afterwards, participants completed an online questionnaire. Although the Joystick was rated as more comfortable and precise than the other interfaces, the leaning-based interfaces showed a trend to provide more enjoyment and a greater sense of self-motion. There were also potential issues of using velocity-control for rotations in leaning-based interfaces when using HMDs instead of stationary displays. Developers need to focus on improving the controllability and perceived safety of these seated motion cueing interfaces.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We describe here a pilot user study comparing five different locomotion interfaces for virtual reality (VR) locomotion. We compared a standard non-motion cueing interface, Joystick, with four leaning-based seated motion-cueing interfaces: NaviChair, MuvMan, Head-Directed and Swivel Chair. The aim of this mixed methods study was to investigate the usability and user experience of each interface, in order to better understand relevant factors and guide the design of future ground-based VR locomotion interfaces. We asked participants to give talk-aloud feedback and simultaneously recorded their responses while they were performing a search task in VR. Afterwards, participants completed an online questionnaire. Although the Joystick was rated as more comfortable and precise than the other interfaces, the leaning-based interfaces showed a trend to provide more enjoyment and a greater sense of self-motion. There were also potential issues of using velocity-control for rotations in leaning-based interfaces when using HMDs instead of stationary displays. Developers need to focus on improving the controllability and perceived safety of these seated motion cueing interfaces.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We describe here a pilot user study comparing five different locomotion interfaces for virtual reality (VR) locomotion. We compared a standard non-motion cueing interface, Joystick, with four leaning-based seated motion-cueing interfaces: NaviChair, MuvMan, Head-Directed and Swivel Chair. The aim of this mixed methods study was to investigate the usability and user experience of each interface, in order to better understand relevant factors and guide the design of future ground-based VR locomotion interfaces. We asked participants to give talk-aloud feedback and simultaneously recorded their responses while they were performing a search task in VR. Afterwards, participants completed an online questionnaire. Although the Joystick was rated as more comfortable and precise than the other interfaces, the leaning-based interfaces showed a trend to provide more enjoyment and a greater sense of self-motion. There were also potential issues of using velocity-control for rotations in leaning-based interfaces when using HMDs instead of stationary displays. Developers need to focus on improving the controllability and perceived safety of these seated motion cueing interfaces.",
"fno": "07892253",
"keywords": [
"Tracking",
"Controllability",
"Navigation",
"Usability",
"Electronic Mail",
"Virtual Environments",
"Active Locomotion",
"Motion Cueing",
"Natural User Interface",
"Virtual Reality",
"Virtual Locomotion"
],
"authors": [
{
"affiliation": "Simon Fraser University, Surrey, BC, Canada",
"fullName": "Alexandra Kitson",
"givenName": "Alexandra",
"surname": "Kitson",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Simon Fraser University, Surrey, BC, Canada",
"fullName": "Abraham M. Hashemian",
"givenName": "Abraham M.",
"surname": "Hashemian",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Simon Fraser University, Surrey, BC, Canada",
"fullName": "Ekaterina R. Stepanova",
"givenName": "Ekaterina R.",
"surname": "Stepanova",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Bonn-Rhein-Sieg University of Applied Sciences, Germany",
"fullName": "Ernst Kruijff",
"givenName": "Ernst",
"surname": "Kruijff",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Simon Fraser University, Surrey, BC, Canada",
"fullName": "Bernhard E. Riecke",
"givenName": "Bernhard E.",
"surname": "Riecke",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2017-01-01T00:00:00",
"pubType": "proceedings",
"pages": "215-216",
"year": "2017",
"issn": "2375-5334",
"isbn": "978-1-5090-6647-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07892252",
"articleId": "12OmNz5apMT",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07892254",
"articleId": "12OmNqI04Fn",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/3dui/2017/6716/0/07893320",
"title": "Comparing leaning-based motion cueing interfaces for virtual reality locomotion",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2017/07893320/12OmNqIhFMx",
"parentPublication": {
"id": "proceedings/3dui/2017/6716/0",
"title": "2017 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2017/6647/0/07892282",
"title": "Development and evaluation of a hands-free motion cueing interface for ground-based navigation",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892282/12OmNwoxSdn",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2011/0039/0/05759437",
"title": "An evaluation of navigational ability comparing Redirected Free Exploration with Distractors to Walking-in-Place and joystick locomotio interfaces",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2011/05759437/12OmNx8OuyK",
"parentPublication": {
"id": "proceedings/vr/2011/0039/0",
"title": "2011 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09737429",
"title": "Intentional Head-Motion Assisted Locomotion for Reducing Cybersickness",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09737429/1BQidPzNjBS",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2022/9617/0/961700a728",
"title": "How to Take a Brake from Embodied Locomotion – Seamless Status Control Methods for Seated Leaning Interfaces",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2022/961700a728/1CJc8kd55YY",
"parentPublication": {
"id": "proceedings/vr/2022/9617/0",
"title": "2022 IEEE on Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09894041",
"title": "Integrating Continuous and Teleporting VR Locomotion Into a Seamless ‘HyperJump’ Paradigm",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09894041/1GIqrCx8RCE",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2022/5325/0/532500a576",
"title": "Leaning-Based Control of an Immersive-Telepresence Robot",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2022/532500a576/1JrR64XrANW",
"parentPublication": {
"id": "proceedings/ismar/2022/5325/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/01/08762207",
"title": "Locomotion in Place in Virtual Reality: A Comparative Evaluation of Joystick, Teleport, and Leaning",
"doi": null,
"abstractUrl": "/journal/tg/2021/01/08762207/1bIeI0S82Aw",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/04/09200691",
"title": "HeadJoystick: Improving Flying in VR Using a Novel Leaning-Based Interface",
"doi": null,
"abstractUrl": "/journal/tg/2022/04/09200691/1ndVrxsmSkw",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/03/09629264",
"title": "Leaning-Based Interfaces Improve Ground-Based VR Locomotion in Reach-the-Target, Follow-the-Path, and Racing Tasks",
"doi": null,
"abstractUrl": "/journal/tg/2023/03/09629264/1yXvJdO9qaQ",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1IHnB5FtCQ8",
"title": "2022 IEEE Frontiers in Education Conference (FIE)",
"acronym": "fie",
"groupId": "1000297",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1IHocrego2A",
"doi": "10.1109/FIE56618.2022.9962641",
"title": "An Exploratory Study of Social Presence in a Collaborative Desktop Virtual Reality (VR) Land Surveying Task",
"normalizedTitle": "An Exploratory Study of Social Presence in a Collaborative Desktop Virtual Reality (VR) Land Surveying Task",
"abstract": "This work-in-progress research paper investigates the role of virtual reality (VR) in engineering education lab courses. While there are ongoing debates about the feasibility and practicality of deploying VR on a large scale for engineering instruction, there has been growing evidence in support of the efficacy of VR to promote certain types of engineering instruction[1]. Most importantly, it is essential that we understand students’ social interactions in VR-based learning environments. This work-in-progress explores patterns of social presence indicators during a collaborative desktop virtual reality (VR) Land surveying task. Participants were asked to think-aloud and video record their interactions and conversations while completing learning tasks using a desktop VR environment. The desktop program was a computer program that simulates land surveying. The qualitative data analysis was based on the framework of social presence by Rourke et al. [2]. The study reported on indicators, frequencies and patterns or themes of social presence observed during student’s interactions with one another in the learning environment. This study also highlighted potential implications of this observation for future research on students’ social experiences in VR-based engineering education.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This work-in-progress research paper investigates the role of virtual reality (VR) in engineering education lab courses. While there are ongoing debates about the feasibility and practicality of deploying VR on a large scale for engineering instruction, there has been growing evidence in support of the efficacy of VR to promote certain types of engineering instruction[1]. Most importantly, it is essential that we understand students’ social interactions in VR-based learning environments. This work-in-progress explores patterns of social presence indicators during a collaborative desktop virtual reality (VR) Land surveying task. Participants were asked to think-aloud and video record their interactions and conversations while completing learning tasks using a desktop VR environment. The desktop program was a computer program that simulates land surveying. The qualitative data analysis was based on the framework of social presence by Rourke et al. [2]. The study reported on indicators, frequencies and patterns or themes of social presence observed during student’s interactions with one another in the learning environment. This study also highlighted potential implications of this observation for future research on students’ social experiences in VR-based engineering education.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This work-in-progress research paper investigates the role of virtual reality (VR) in engineering education lab courses. While there are ongoing debates about the feasibility and practicality of deploying VR on a large scale for engineering instruction, there has been growing evidence in support of the efficacy of VR to promote certain types of engineering instruction[1]. Most importantly, it is essential that we understand students’ social interactions in VR-based learning environments. This work-in-progress explores patterns of social presence indicators during a collaborative desktop virtual reality (VR) Land surveying task. Participants were asked to think-aloud and video record their interactions and conversations while completing learning tasks using a desktop VR environment. The desktop program was a computer program that simulates land surveying. The qualitative data analysis was based on the framework of social presence by Rourke et al. [2]. The study reported on indicators, frequencies and patterns or themes of social presence observed during student’s interactions with one another in the learning environment. This study also highlighted potential implications of this observation for future research on students’ social experiences in VR-based engineering education.",
"fno": "09962641",
"keywords": [
"Computer Aided Instruction",
"Data Analysis",
"Educational Courses",
"Engineering Computing",
"Engineering Education",
"Groupware",
"Human Computer Interaction",
"Virtual Reality",
"Collaborative Desktop Virtual Reality Land Surveying Task",
"Desktop Program",
"Desktop VR Environment",
"Engineering Education Lab Courses",
"Engineering Instruction",
"Land Surveying Simulation",
"Learning Tasks",
"Qualitative Data Analysis",
"Social Presence Indicators",
"Student Social Interactions",
"VR Based Engineering Education",
"VR Based Learning Environments",
"Work In Progress Research Paper",
"Solid Modeling",
"Data Analysis",
"Federated Learning",
"Collaboration",
"Virtual Reality",
"Oral Communication",
"Task Analysis",
"Virtual Reality VR",
"Social Presence",
"Virtual Reality Learning Environment VRLE",
"Interaction",
"Community Of Inquiry Co I"
],
"authors": [
{
"affiliation": "University of Georgia,Engineering Education and Transformative Practices College of Engineering,USA",
"fullName": "Isaac Dunmoye",
"givenName": "Isaac",
"surname": "Dunmoye",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Georgia,Engineering Education and Transformative Practices College of Engineering,USA",
"fullName": "Dominik May",
"givenName": "Dominik",
"surname": "May",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Georgia,Engineering Education and Transformative Practices College of Engineering,USA",
"fullName": "Nathaniel Hunsu",
"givenName": "Nathaniel",
"surname": "Hunsu",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "fie",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-10-01T00:00:00",
"pubType": "proceedings",
"pages": "1-5",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-6244-0",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "09962625",
"articleId": "1IHojzVpkqY",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09962474",
"articleId": "1IHo4v9cpyg",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/wevr/2017/3881/0/07957707",
"title": "Remain seated: towards fully-immersive desktop VR",
"doi": null,
"abstractUrl": "/proceedings-article/wevr/2017/07957707/12OmNBTs7ve",
"parentPublication": {
"id": "proceedings/wevr/2017/3881/0",
"title": "2017 IEEE 3rd Workshop on Everyday Virtual Reality (WEVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/svr/2012/4725/0/4725a010",
"title": "A VR Framework for Desktop Applications",
"doi": null,
"abstractUrl": "/proceedings-article/svr/2012/4725a010/12OmNBrlPwO",
"parentPublication": {
"id": "proceedings/svr/2012/4725/0",
"title": "2012 14th Symposium on Virtual and Augmented Reality",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fie/2010/6261/0/05673624",
"title": "Age and technology: Adult learning performance in desktop virtual reality environments",
"doi": null,
"abstractUrl": "/proceedings-article/fie/2010/05673624/12OmNybfqTY",
"parentPublication": {
"id": "proceedings/fie/2010/6261/0",
"title": "2010 IEEE Frontiers in Education Conference (FIE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2022/9617/0/961700a514",
"title": "Evaluating Perceptional Tasks for Medicine: A Comparative User Study Between a Virtual Reality and a Desktop Application",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2022/961700a514/1CJcdgjLhMA",
"parentPublication": {
"id": "proceedings/vr/2022/9617/0",
"title": "2022 IEEE on Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a267",
"title": "Improving Language Learning by an Interact-to-Learn Desktop VR Application: A Case Study with Peinture",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a267/1CJeH98Mvg4",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/waie/2021/0068/0/006800a038",
"title": "Modeling and Application of Precision Surveying Instrument and Equipment",
"doi": null,
"abstractUrl": "/proceedings-article/waie/2021/006800a038/1CalladRSLu",
"parentPublication": {
"id": "proceedings/waie/2021/0068/0",
"title": "2021 3rd International Workshop on Artificial Intelligence and Education (WAIE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cw/2019/2297/0/229700a061",
"title": "Visual Saliency Prediction in Dynamic Virtual Reality Environments Experienced with Head-Mounted Displays: An Exploratory Study",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2019/229700a061/1fHkoP8izEQ",
"parentPublication": {
"id": "proceedings/cw/2019/2297/0",
"title": "2019 International Conference on Cyberworlds (CW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icuems/2020/8832/0/09151754",
"title": "Application of Virtual Simulation Technology in Teaching of Surveying and Mapping Engineering",
"doi": null,
"abstractUrl": "/proceedings-article/icuems/2020/09151754/1lRlPdSgeu4",
"parentPublication": {
"id": "proceedings/icuems/2020/8832/0",
"title": "2020 International Conference on Urban Engineering and Management Science (ICUEMS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2020/7675/0/767500a078",
"title": "Modeling Emotions for Training in Immersive Simulations (METIS): A Cross-Platform Virtual Classroom Study",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2020/767500a078/1pBMeXqNvhK",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2020/7675/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieit/2021/2563/0/256300a477",
"title": "Applied Research of VR Technology in Civil Engineering Teaching",
"doi": null,
"abstractUrl": "/proceedings-article/ieit/2021/256300a477/1wHKq2RrP20",
"parentPublication": {
"id": "proceedings/ieit/2021/2563/0",
"title": "2021 International Conference on Internet, Education and Information Technology (IEIT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1jIxhEnA8IE",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"acronym": "vrw",
"groupId": "1836626",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1jIxA2Yom1G",
"doi": "10.1109/VRW50115.2020.00258",
"title": "Developing a VR tool for studying pedestrian movement and choice behavior",
"normalizedTitle": "Developing a VR tool for studying pedestrian movement and choice behavior",
"abstract": "This paper presents a new VR research tool to systemically study pedestrian movement and choice behavior. This new VR tool, called CivilEvac, features a complex multi-level building that is an exact copy of an existing building. CivilEvac allows participants to freely navigate through the building, records their movements and vision fields at 10 fps, which assist the analysis of pedestrian movement and choice behavior. By showcasing CivilEvac, this paper contributes an example of using VR experiments specifically developed to study pedestrian movement and choice behavior. Thereby adding to the discussion surrounding the usage of VR technologies for studying pedestrian behavior.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper presents a new VR research tool to systemically study pedestrian movement and choice behavior. This new VR tool, called CivilEvac, features a complex multi-level building that is an exact copy of an existing building. CivilEvac allows participants to freely navigate through the building, records their movements and vision fields at 10 fps, which assist the analysis of pedestrian movement and choice behavior. By showcasing CivilEvac, this paper contributes an example of using VR experiments specifically developed to study pedestrian movement and choice behavior. Thereby adding to the discussion surrounding the usage of VR technologies for studying pedestrian behavior.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper presents a new VR research tool to systemically study pedestrian movement and choice behavior. This new VR tool, called CivilEvac, features a complex multi-level building that is an exact copy of an existing building. CivilEvac allows participants to freely navigate through the building, records their movements and vision fields at 10 fps, which assist the analysis of pedestrian movement and choice behavior. By showcasing CivilEvac, this paper contributes an example of using VR experiments specifically developed to study pedestrian movement and choice behavior. Thereby adding to the discussion surrounding the usage of VR technologies for studying pedestrian behavior.",
"fno": "09090616",
"keywords": [
"Buildings",
"Tools",
"Fires",
"Trajectory",
"Three Dimensional Displays",
"Virtual Environments",
"Virtual Reality",
"Virtual Environment",
"Pedestrian Behavior",
"Wayfinding Behavior",
"Multi Story Building"
],
"authors": [
{
"affiliation": "Delft University of Technology",
"fullName": "Yan Feng",
"givenName": "Yan",
"surname": "Feng",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Delft University of Technology",
"fullName": "Dorine C. Duives",
"givenName": "Dorine C.",
"surname": "Duives",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Delft University of Technology",
"fullName": "Serge P. Hoogendoorn",
"givenName": "Serge P.",
"surname": "Hoogendoorn",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vrw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-03-01T00:00:00",
"pubType": "proceedings",
"pages": "814-815",
"year": "2020",
"issn": null,
"isbn": "978-1-7281-6532-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "09090465",
"articleId": "1jIxup5e9l6",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09090422",
"articleId": "1jIxpjMzqlW",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ams/2009/3648/0/3648a549",
"title": "A Low Cost Approach to Pediatric Pedestrian Safety in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ams/2009/3648a549/12OmNroijan",
"parentPublication": {
"id": "proceedings/ams/2009/3648/0",
"title": "Asia International Conference on Modelling & Simulation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/irc/2019/9245/0/924500a230",
"title": "Pedestrian Occupancy Prediction for Autonomous Vehicles",
"doi": null,
"abstractUrl": "/proceedings-article/irc/2019/924500a230/18M7jsfPgUo",
"parentPublication": {
"id": "proceedings/irc/2019/9245/0",
"title": "2019 Third IEEE International Conference on Robotic Computing (IRC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/04/09677963",
"title": "Survey of Movement Reproduction in Immersive Virtual Rehabilitation",
"doi": null,
"abstractUrl": "/journal/tg/2023/04/09677963/1A4SqmEsrhm",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/big-data/2021/3902/0/09671813",
"title": "Modelling of Destinations for Data-driven Pedestrian Trajectory Prediction in Public Buildings",
"doi": null,
"abstractUrl": "/proceedings-article/big-data/2021/09671813/1A8hldZKjKg",
"parentPublication": {
"id": "proceedings/big-data/2021/3902/0",
"title": "2021 IEEE International Conference on Big Data (Big Data)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/itei/2021/8050/0/805000a232",
"title": "VR technology applied to traditional dance",
"doi": null,
"abstractUrl": "/proceedings-article/itei/2021/805000a232/1CzeG2lZvEI",
"parentPublication": {
"id": "proceedings/itei/2021/8050/0",
"title": "2021 3rd International Conference on Internet Technology and Educational Informization (ITEI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/mu/2022/02/09779506",
"title": "Why VR Games Sickness? An Empirical Study of Capturing and Analyzing VR Games Head Movement Dataset",
"doi": null,
"abstractUrl": "/magazine/mu/2022/02/09779506/1DwUBBXPkVG",
"parentPublication": {
"id": "mags/mu",
"title": "IEEE MultiMedia",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2022/5325/0/532500a317",
"title": "WriArm: Leveraging Wrist Movement to Design Wrist+Arm Based Teleportation in VR",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2022/532500a317/1JrRkBbpP1K",
"parentPublication": {
"id": "proceedings/ismar/2022/5325/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2020/5608/0/09089506",
"title": "Analyzing Pedestrian Behavior in Augmented Reality — Proof of Concept",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2020/09089506/1jIxfZ6InFm",
"parentPublication": {
"id": "proceedings/vr/2020/5608/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2020/5608/0/09089574",
"title": "VR behavioral data tracking: With great power comes great responsibility",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2020/09089574/1jIxgmmGEr6",
"parentPublication": {
"id": "proceedings/vr/2020/5608/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/07/09273221",
"title": "Crowd Navigation in VR: Exploring Haptic Rendering of Collisions",
"doi": null,
"abstractUrl": "/journal/tg/2022/07/09273221/1pb9BhAe16o",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1tuAeQeDJja",
"title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1tuAyAuRkHe",
"doi": "10.1109/VR50410.2021.00020",
"title": "Design and Evaluation of a Free-Hand VR-based Authoring Environment for Automated Vehicle Testing",
"normalizedTitle": "Design and Evaluation of a Free-Hand VR-based Authoring Environment for Automated Vehicle Testing",
"abstract": "Virtual Reality is increasingly used for safe evaluation and validation of autonomous vehicles by automotive engineers. However, the design and creation of virtual testing environments is a cumbersome process. Engineers are bound to utilize desktop-based authoring tools, and a high level of expertise is necessary. By performing scene authoring entirely inside VR, faster design iterations become possible. To this end, we propose a VR authoring environment that enables engineers to design road networks and traffic scenarios for automated vehicle testing based on free-hand interaction. We present a 3D interaction technique for the efficient placement and selection of virtual objects that is employed on a 2D panel. We conducted a comparative user study in which our interaction technique outperformed existing approaches regarding precision and task completion time. Furthermore, we demonstrate the effectiveness of the system by a qualitative user study with domain experts.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Virtual Reality is increasingly used for safe evaluation and validation of autonomous vehicles by automotive engineers. However, the design and creation of virtual testing environments is a cumbersome process. Engineers are bound to utilize desktop-based authoring tools, and a high level of expertise is necessary. By performing scene authoring entirely inside VR, faster design iterations become possible. To this end, we propose a VR authoring environment that enables engineers to design road networks and traffic scenarios for automated vehicle testing based on free-hand interaction. We present a 3D interaction technique for the efficient placement and selection of virtual objects that is employed on a 2D panel. We conducted a comparative user study in which our interaction technique outperformed existing approaches regarding precision and task completion time. Furthermore, we demonstrate the effectiveness of the system by a qualitative user study with domain experts.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Virtual Reality is increasingly used for safe evaluation and validation of autonomous vehicles by automotive engineers. However, the design and creation of virtual testing environments is a cumbersome process. Engineers are bound to utilize desktop-based authoring tools, and a high level of expertise is necessary. By performing scene authoring entirely inside VR, faster design iterations become possible. To this end, we propose a VR authoring environment that enables engineers to design road networks and traffic scenarios for automated vehicle testing based on free-hand interaction. We present a 3D interaction technique for the efficient placement and selection of virtual objects that is employed on a 2D panel. We conducted a comparative user study in which our interaction technique outperformed existing approaches regarding precision and task completion time. Furthermore, we demonstrate the effectiveness of the system by a qualitative user study with domain experts.",
"fno": "255600a001",
"keywords": [
"Authoring Systems",
"Automotive Engineering",
"Traffic Engineering Computing",
"Virtual Reality",
"Automated Vehicle Testing",
"Virtual Reality",
"Safe Evaluation",
"Autonomous Vehicles",
"Automotive Engineers",
"Virtual Testing Environments",
"Desktop Based Authoring Tools",
"VR Authoring Environment",
"Road Networks",
"Traffic Scenarios",
"Free Hand Interaction",
"Virtual Objects",
"Free Hand VR Based Authoring Environment",
"Three Dimensional Displays",
"Roads",
"Design Methodology",
"Fingers",
"Virtual Environments",
"User Interfaces",
"Usability",
"Human Centered Computing Human Computer Interaction HCI Virtual Reality",
"Human Centered Computing Interaction Design And Evaluation Methods User Interface Design User Studies"
],
"authors": [
{
"affiliation": "Visual Computing Institute, RWTH Aachen University",
"fullName": "Sevinc Eroglu",
"givenName": "Sevinc",
"surname": "Eroglu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Ford Motor Company,Aachen,Germany",
"fullName": "Frederic Stefan",
"givenName": "Frederic",
"surname": "Stefan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Ford Motor Company,Aachen,Germany",
"fullName": "Alain Chevalier",
"givenName": "Alain",
"surname": "Chevalier",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Ford Motor Company,Aachen,Germany",
"fullName": "Daniel Roettger",
"givenName": "Daniel",
"surname": "Roettger",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Human-Computer Interaction, University of Trier",
"fullName": "Daniel Zielasko",
"givenName": "Daniel",
"surname": "Zielasko",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Visual Computing Institute, RWTH Aachen University",
"fullName": "Torsten W. Kuhlen",
"givenName": "Torsten W.",
"surname": "Kuhlen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Human-Computer Interaction, University of Trier",
"fullName": "Benjamin Weyers",
"givenName": "Benjamin",
"surname": "Weyers",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-03-01T00:00:00",
"pubType": "proceedings",
"pages": "1-10",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-1838-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [
{
"id": "1tuAxQBeDIs",
"name": "pvr202118380-09417677s1-mm_255600a001.zip",
"size": "141 MB",
"location": "https://www.computer.org/csdl/api/v1/extra/pvr202118380-09417677s1-mm_255600a001.zip",
"__typename": "WebExtraType"
}
],
"adjacentArticles": {
"previous": {
"fno": "255600z034",
"articleId": "1tuAQ5eWUfe",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "255600a011",
"articleId": "1tuB2XnhSYo",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icsc/2016/0662/0/0662a358",
"title": "Mobile Augmented Reality Authoring Tool",
"doi": null,
"abstractUrl": "/proceedings-article/icsc/2016/0662a358/12OmNAXglVC",
"parentPublication": {
"id": "proceedings/icsc/2016/0662/0",
"title": "2016 IEEE Tenth International Conference on Semantic Computing (ICSC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2002/1492/0/14920093",
"title": "A Combined Immersive and Desktop Authoring Tool for Virtual Environments",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2002/14920093/12OmNAi6vVS",
"parentPublication": {
"id": "proceedings/vr/2002/1492/0",
"title": "Proceedings IEEE Virtual Reality 2002",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2003/1882/0/18820301",
"title": "Scalable VR Application Authoring",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2003/18820301/12OmNrJAe5d",
"parentPublication": {
"id": "proceedings/vr/2003/1882/0",
"title": "Proceedings IEEE Virtual Reality 2003",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/searis/2012/1249/0/06231166",
"title": "VR JuggLua: A framework for VR applications combining Lua, OpenSceneGraph, and VR Juggler",
"doi": null,
"abstractUrl": "/proceedings-article/searis/2012/06231166/12OmNrkT7GZ",
"parentPublication": {
"id": "proceedings/searis/2012/1249/0",
"title": "2012 5th Workshop on Software Engineering and Architectures for Realtime Interactive Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/05/09714052",
"title": "PoVRPoint: Authoring Presentations in Mobile Virtual Reality",
"doi": null,
"abstractUrl": "/journal/tg/2022/05/09714052/1B0Y1Tyx2PC",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2022/9617/0/961700a683",
"title": "Answering With Bow and Arrow: Questionnaires and VR Blend Without Distorting the Outcome",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2022/961700a683/1CJbQ0Iu1zO",
"parentPublication": {
"id": "proceedings/vr/2022/9617/0",
"title": "2022 IEEE on Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a748",
"title": "Supervised Machine Learning Hand Gesture Classification in VR for Immersive Training",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a748/1CJenlXsOSQ",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a935",
"title": "Immersive Animation Authoring in Industrial VR Applications",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a935/1J7Wg05jJeM",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2022/5325/0/532500a326",
"title": "EditAR: A Digital Twin Authoring Environment for Creation of AR/VR and Video Instructions from a Single Demonstration",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2022/532500a326/1JrQQVVOVdm",
"parentPublication": {
"id": "proceedings/ismar/2022/5325/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2021/1298/0/129800a139",
"title": "VR Collaboration in Large Companies: An Interview Study on the Role of Avatars",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2021/129800a139/1yeQK6CDe3C",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2021/1298/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNrAdsuf",
"title": "2015 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"acronym": "ismar",
"groupId": "1000465",
"volume": "0",
"displayVolume": "0",
"year": "2015",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNAOsMIC",
"doi": "10.1109/ISMAR.2015.46",
"title": "[POSTER] ARPML: The Augmented Reality Process Modeling Language",
"normalizedTitle": "[POSTER] ARPML: The Augmented Reality Process Modeling Language",
"abstract": "The successful application of augmented reality as a guidance tool for procedural tasks like maintenance or repair requires an easily usable way of modeling support processes. Even though some suggestions have already been made to address this problem, they still have shortcomings and don't provide all the required features. Thus in a first step the requirements a possible solution has to meet are collected and presented. Based on these, the augmented reality process modeling language (ARPML) is developed, which consists of the four building blocks (i) templates, (ii) sensors, (iii) work steps and (iv) tasks. In contrast to existing approaches it facilitates the creation of multiple views on a single process. This makes it possible to specifically select instructions and information needed in targeted work contexts. It also allows to combine multiple variants of one process into one model with only a minimum of redundancy. The application of ARPML is shown with a practical example.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The successful application of augmented reality as a guidance tool for procedural tasks like maintenance or repair requires an easily usable way of modeling support processes. Even though some suggestions have already been made to address this problem, they still have shortcomings and don't provide all the required features. Thus in a first step the requirements a possible solution has to meet are collected and presented. Based on these, the augmented reality process modeling language (ARPML) is developed, which consists of the four building blocks (i) templates, (ii) sensors, (iii) work steps and (iv) tasks. In contrast to existing approaches it facilitates the creation of multiple views on a single process. This makes it possible to specifically select instructions and information needed in targeted work contexts. It also allows to combine multiple variants of one process into one model with only a minimum of redundancy. The application of ARPML is shown with a practical example.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The successful application of augmented reality as a guidance tool for procedural tasks like maintenance or repair requires an easily usable way of modeling support processes. Even though some suggestions have already been made to address this problem, they still have shortcomings and don't provide all the required features. Thus in a first step the requirements a possible solution has to meet are collected and presented. Based on these, the augmented reality process modeling language (ARPML) is developed, which consists of the four building blocks (i) templates, (ii) sensors, (iii) work steps and (iv) tasks. In contrast to existing approaches it facilitates the creation of multiple views on a single process. This makes it possible to specifically select instructions and information needed in targeted work contexts. It also allows to combine multiple variants of one process into one model with only a minimum of redundancy. The application of ARPML is shown with a practical example.",
"fno": "7660a160",
"keywords": [
"Sensors",
"Solid Modeling",
"Augmented Reality",
"Maintenance Engineering",
"Adaptation Models",
"Plugs",
"Sockets",
"Authoring",
"Augmented Reality"
],
"authors": [
{
"affiliation": null,
"fullName": "Tobias Muller",
"givenName": "Tobias",
"surname": "Muller",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Tim Rieger",
"givenName": "Tim",
"surname": "Rieger",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2015-09-01T00:00:00",
"pubType": "proceedings",
"pages": "160-163",
"year": "2015",
"issn": null,
"isbn": "978-1-4673-7660-0",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "7660a156",
"articleId": "12OmNCcKQqU",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "7660a164",
"articleId": "12OmNBLdKJC",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ismar/2014/6184/0/06948463",
"title": "[Poster] Smartwatch-aided handheld augmented reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2014/06948463/12OmNAQrYBV",
"parentPublication": {
"id": "proceedings/ismar/2014/6184/0",
"title": "2014 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2015/7660/0/7660a140",
"title": "[POSTER] AR4AR: Using Augmented Reality for guidance in Augmented Reality Systems Setup",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2015/7660a140/12OmNCd2rIf",
"parentPublication": {
"id": "proceedings/ismar/2015/7660/0",
"title": "2015 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2014/6184/0/06948456",
"title": "[Poster] QR code alteration for augmented reality interactions",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2014/06948456/12OmNCga1QG",
"parentPublication": {
"id": "proceedings/ismar/2014/6184/0",
"title": "2014 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2013/6097/0/06550237",
"title": "Poster: 3D referencing for remote task assistance in augmented reality",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2013/06550237/12OmNqC2uWf",
"parentPublication": {
"id": "proceedings/3dui/2013/6097/0",
"title": "2013 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2015/7660/0/7660a108",
"title": "[POSTER] Transforming Your Website to an Augmented Reality View",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2015/7660a108/12OmNrIJqv9",
"parentPublication": {
"id": "proceedings/ismar/2015/7660/0",
"title": "2015 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cw/2012/4814/0/4814a077",
"title": "Preliminary Evaluation of an Augmented Reality Collaborative Process Modelling System",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2012/4814a077/12OmNvlg8kc",
"parentPublication": {
"id": "proceedings/cw/2012/4814/0",
"title": "2012 International Conference on Cyberworlds",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2013/6097/0/06550225",
"title": "Poster: Spatial Augmented Reality user interface techniques for room size modeling tasks",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2013/06550225/12OmNvlxJtP",
"parentPublication": {
"id": "proceedings/3dui/2013/6097/0",
"title": "2013 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2015/1727/0/07223429",
"title": "Augmented reality maintenance demonstrator and associated modelling",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2015/07223429/12OmNylKAXJ",
"parentPublication": {
"id": "proceedings/vr/2015/1727/0",
"title": "2015 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/resacs/2018/8410/0/841000a044",
"title": "Towards Context-Aware Process Guidance in Cyber-Physical Systems with Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/resacs/2018/841000a044/17D45XzbnJN",
"parentPublication": {
"id": "proceedings/resacs/2018/8410/0",
"title": "2018 4th International Workshop on Requirements Engineering for Self-Adaptive, Collaborative, and Cyber Physical Systems (RESACS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/12/09199568",
"title": "Fine-Grained Visual Recognition in Mobile Augmented Reality for Technical Support",
"doi": null,
"abstractUrl": "/journal/tg/2020/12/09199568/1ncglEW2yUo",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNrAdsuf",
"title": "2015 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"acronym": "ismar",
"groupId": "1000465",
"volume": "0",
"displayVolume": "0",
"year": "2015",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNAle6zC",
"doi": "10.1109/ISMAR.2015.36",
"title": "[POSTER] Design Guidelines for Generating Augmented Reality Instructions",
"normalizedTitle": "[POSTER] Design Guidelines for Generating Augmented Reality Instructions",
"abstract": "Most work about instructions in Augmented Reality (AR) does not follow established patterns or design rules -- each approach defines its own method on how to convey instructions. This work describes our initial results and experiences towards defining design guidelines for AR instructions. The guidelines were derived from a survey of the most common visualization techniques and instruction types applied in AR. We studied about how 2D and 3D instructions can be applied in the AR context.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Most work about instructions in Augmented Reality (AR) does not follow established patterns or design rules -- each approach defines its own method on how to convey instructions. This work describes our initial results and experiences towards defining design guidelines for AR instructions. The guidelines were derived from a survey of the most common visualization techniques and instruction types applied in AR. We studied about how 2D and 3D instructions can be applied in the AR context.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Most work about instructions in Augmented Reality (AR) does not follow established patterns or design rules -- each approach defines its own method on how to convey instructions. This work describes our initial results and experiences towards defining design guidelines for AR instructions. The guidelines were derived from a survey of the most common visualization techniques and instruction types applied in AR. We studied about how 2D and 3D instructions can be applied in the AR context.",
"fno": "7660a120",
"keywords": [
"Augmented Reality",
"Visualization",
"Three Dimensional Displays",
"Real Time Systems",
"Assembly",
"Guidelines",
"Instructions",
"Visualization",
"Mixed Reality"
],
"authors": [
{
"affiliation": null,
"fullName": "Cledja Rolim",
"givenName": "Cledja",
"surname": "Rolim",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Dieter Schmalstieg",
"givenName": "Dieter",
"surname": "Schmalstieg",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Denis Kalkofen",
"givenName": "Denis",
"surname": "Kalkofen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Veronica Teichrieb",
"givenName": "Veronica",
"surname": "Teichrieb",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2015-09-01T00:00:00",
"pubType": "proceedings",
"pages": "120-123",
"year": "2015",
"issn": null,
"isbn": "978-1-4673-7660-0",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "7660a116",
"articleId": "12OmNzhELgx",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "7660a124",
"articleId": "12OmNrIae9B",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ismar/2013/2869/0/06671762",
"title": "Improving procedural task performance with Augmented Reality annotations",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2013/06671762/12OmNB7LvHJ",
"parentPublication": {
"id": "proceedings/ismar/2013/2869/0",
"title": "2013 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismarw/2016/3740/0/07836460",
"title": "An Augmented Reality Guide for Assisting Forklift Operation",
"doi": null,
"abstractUrl": "/proceedings-article/ismarw/2016/07836460/12OmNvwTGFS",
"parentPublication": {
"id": "proceedings/ismarw/2016/3740/0",
"title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismarw/2016/3740/0/07836522",
"title": "Exploring Immersive AR Instructions for Procedural Tasks: The Role of Depth, Motion, and Volumetric Representations",
"doi": null,
"abstractUrl": "/proceedings-article/ismarw/2016/07836522/12OmNxETajV",
"parentPublication": {
"id": "proceedings/ismarw/2016/3740/0",
"title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2013/6097/0/06550228",
"title": "Poster: A wearable augmented reality system with haptic feedback and its performance in virtual assembly tasks",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2013/06550228/12OmNxiKrWG",
"parentPublication": {
"id": "proceedings/3dui/2013/6097/0",
"title": "2013 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2013/4795/0/06549411",
"title": "Early steps towards understanding text legibility in handheld augmented reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2013/06549411/12OmNy6HQV1",
"parentPublication": {
"id": "proceedings/vr/2013/4795/0",
"title": "2013 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/kelvar/2016/2344/0/07563677",
"title": "Towards the development of guidelines for educational evaluation of augmented reality tools",
"doi": null,
"abstractUrl": "/proceedings-article/kelvar/2016/07563677/12OmNzUxOdN",
"parentPublication": {
"id": "proceedings/kelvar/2016/2344/0",
"title": "2016 IEEE Virtual Reality Workshop on K-12 Embodied Learning through Virtual & Augmented Reality (KELVAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2019/4765/0/476500a001",
"title": "Low-Cost Real-Time Mental Load Adaptation for Augmented Reality Instructions - A Feasibility Study",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2019/476500a001/1gysjlovPr2",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2019/4765/0",
"title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/svr/2020/9231/0/923100a179",
"title": "Augmented Reality for Manual Assembly in Industry 4.0: Gathering Guidelines",
"doi": null,
"abstractUrl": "/proceedings-article/svr/2020/923100a179/1oZBDyQMM6c",
"parentPublication": {
"id": "proceedings/svr/2020/9231/0",
"title": "2020 22nd Symposium on Virtual and Augmented Reality (SVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2020/7675/0/767500a172",
"title": "Design preferences on Industrial Augmented Reality: a survey with potential technical writers",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2020/767500a172/1pBMjARVuEg",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2020/7675/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2020/8508/0/850800a279",
"title": "Enhancing Visitor Experience or Hindering Docent Roles: Attentional Issues in Augmented Reality Supported Installations",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2020/850800a279/1pysvRpTvr2",
"parentPublication": {
"id": "proceedings/ismar/2020/8508/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNyjLoRw",
"title": "2014 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"acronym": "ismar",
"groupId": "1000465",
"volume": "0",
"displayVolume": "0",
"year": "2014",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNC8Mswo",
"doi": "10.1109/ISMAR.2014.6948462",
"title": "[Poster] A Mobile Augmented reality system to assist auto mechanics",
"normalizedTitle": "[Poster] A Mobile Augmented reality system to assist auto mechanics",
"abstract": "Ground-breaking technologies and innovative design of upcoming vehicles introduce complex maintenance procedures for auto mechanics. In order to present these procedures in an intuitive manner, the Mobile Augmented Reality Technical Assistance (MARTA) project was initiated. The goal was to create an Augmented Reality-aided application running on a tablet computer, which shows maintenance instructions superimposed on a live video feed of the car. Robust image-based tracking of specular surfaces using both edge and texture features as well as the software framework are the most important aspects of the project, which are presented here. The resulting application is deployed and used productively to support maintenance of the Volkswagen XL1 vehicle across the world.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Ground-breaking technologies and innovative design of upcoming vehicles introduce complex maintenance procedures for auto mechanics. In order to present these procedures in an intuitive manner, the Mobile Augmented Reality Technical Assistance (MARTA) project was initiated. The goal was to create an Augmented Reality-aided application running on a tablet computer, which shows maintenance instructions superimposed on a live video feed of the car. Robust image-based tracking of specular surfaces using both edge and texture features as well as the software framework are the most important aspects of the project, which are presented here. The resulting application is deployed and used productively to support maintenance of the Volkswagen XL1 vehicle across the world.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Ground-breaking technologies and innovative design of upcoming vehicles introduce complex maintenance procedures for auto mechanics. In order to present these procedures in an intuitive manner, the Mobile Augmented Reality Technical Assistance (MARTA) project was initiated. The goal was to create an Augmented Reality-aided application running on a tablet computer, which shows maintenance instructions superimposed on a live video feed of the car. Robust image-based tracking of specular surfaces using both edge and texture features as well as the software framework are the most important aspects of the project, which are presented here. The resulting application is deployed and used productively to support maintenance of the Volkswagen XL1 vehicle across the world.",
"fno": "06948462",
"keywords": [
"Maintenance Engineering",
"Solid Modeling",
"Three Dimensional Displays",
"Tablet Computers",
"Image Edge Detection",
"Cameras",
"Computational Modeling"
],
"authors": [
{
"affiliation": "Metaio GmbH",
"fullName": "Darko Stanimirovic",
"givenName": "Darko",
"surname": "Stanimirovic",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Metaio GmbH",
"fullName": "Nina Damasky",
"givenName": "Nina",
"surname": "Damasky",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Metaio GmbH",
"fullName": "Sabine Webel",
"givenName": "Sabine",
"surname": "Webel",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Volkswagen AG",
"fullName": "Dirk Koriath",
"givenName": "Dirk",
"surname": "Koriath",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Volkswagen AG",
"fullName": "Andrea Spillner",
"givenName": "Andrea",
"surname": "Spillner",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Metaio GmbH",
"fullName": "Daniel Kurz",
"givenName": "Daniel",
"surname": "Kurz",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2014-09-01T00:00:00",
"pubType": "proceedings",
"pages": "305-306",
"year": "2014",
"issn": null,
"isbn": "978-1-4799-6184-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "06948461",
"articleId": "12OmNB0nWbG",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "06948463",
"articleId": "12OmNAQrYBV",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ismar/2015/7660/0/7660a160",
"title": "[POSTER] ARPML: The Augmented Reality Process Modeling Language",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2015/7660a160/12OmNAOsMIC",
"parentPublication": {
"id": "proceedings/ismar/2015/7660/0",
"title": "2015 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2017/6327/0/6327a103",
"title": "[POSTER] Augmented Things: Enhancing AR Applications leveraging the Internet of Things and Universal 3D Object Tracking",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2017/6327a103/12OmNBlofU9",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2017/6327/0",
"title": "2017 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2014/6184/0/06948456",
"title": "[Poster] QR code alteration for augmented reality interactions",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2014/06948456/12OmNCga1QG",
"parentPublication": {
"id": "proceedings/ismar/2014/6184/0",
"title": "2014 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2013/6097/0/06550225",
"title": "Poster: Spatial Augmented Reality user interface techniques for room size modeling tasks",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2013/06550225/12OmNvlxJtP",
"parentPublication": {
"id": "proceedings/3dui/2013/6097/0",
"title": "2013 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2017/6327/0/6327a166",
"title": "[POSTER] Deformed Reality: Proof of Concept and Preliminary Results",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2017/6327a166/12OmNwDAC5n",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2017/6327/0",
"title": "2017 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2015/1727/0/07223429",
"title": "Augmented reality maintenance demonstrator and associated modelling",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2015/07223429/12OmNylKAXJ",
"parentPublication": {
"id": "proceedings/vr/2015/1727/0",
"title": "2015 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ec/2014/04/07004838",
"title": "Challenges, Opportunities, and Future Trends of Emerging Techniques for Augmented Reality-Based Maintenance",
"doi": null,
"abstractUrl": "/journal/ec/2014/04/07004838/13rRUwbs27k",
"parentPublication": {
"id": "trans/ec",
"title": "IEEE Transactions on Emerging Topics in Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2018/7592/0/08699183",
"title": "Industrial Augmented Reality: Requirements for an Augmented Reality Maintenance Worker Support System",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699183/19F1MWRWSqs",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2018/7592/0",
"title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/compsac/2022/8810/0/881000a545",
"title": "A Systematic Literature Review of Virtual and Augmented Reality Applications for Maintenance in Manufacturing",
"doi": null,
"abstractUrl": "/proceedings-article/compsac/2022/881000a545/1FJ5OxsS4Ba",
"parentPublication": {
"id": "proceedings/compsac/2022/8810/0",
"title": "2022 IEEE 46th Annual Computers, Software, and Applications Conference (COMPSAC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a074",
"title": "Human-centered Augmented Reality Guidance for Industrial Maintenance with Digital Twins: A Use-Case Driven Pilot Study",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a074/1J7Wfujnyx2",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNCdk2YF",
"title": "2013 IEEE 13th International Conference on Advanced Learning Technologies (ICALT)",
"acronym": "icalt",
"groupId": "1000009",
"volume": "0",
"displayVolume": "0",
"year": "2013",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNqGA584",
"doi": "10.1109/ICALT.2013.165",
"title": "Authoring Augmented Reality Learning Experiences as Learning Objects",
"normalizedTitle": "Authoring Augmented Reality Learning Experiences as Learning Objects",
"abstract": "Engineers and educators alike have prototyped a variety of augmented reality learning experiences (ARLEs). However, adapting ARLEs in educational practice would require an interdisciplinary approach that considers learning theory, pedagogy and instructional design. To address this requirement, we model ARLEs as learning objects by outlining the necessary components, and we propose a participatory design to demonstrate the authoring process of an augmented reality learning object (ARLO). ARLOs can be made useful in many scenarios if teachers are empowered to edit its context elements, content and instructional activity. Lastly, we point to the research questions entailed in modeling ARLEs as ARLOs.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Engineers and educators alike have prototyped a variety of augmented reality learning experiences (ARLEs). However, adapting ARLEs in educational practice would require an interdisciplinary approach that considers learning theory, pedagogy and instructional design. To address this requirement, we model ARLEs as learning objects by outlining the necessary components, and we propose a participatory design to demonstrate the authoring process of an augmented reality learning object (ARLO). ARLOs can be made useful in many scenarios if teachers are empowered to edit its context elements, content and instructional activity. Lastly, we point to the research questions entailed in modeling ARLEs as ARLOs.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Engineers and educators alike have prototyped a variety of augmented reality learning experiences (ARLEs). However, adapting ARLEs in educational practice would require an interdisciplinary approach that considers learning theory, pedagogy and instructional design. To address this requirement, we model ARLEs as learning objects by outlining the necessary components, and we propose a participatory design to demonstrate the authoring process of an augmented reality learning object (ARLO). ARLOs can be made useful in many scenarios if teachers are empowered to edit its context elements, content and instructional activity. Lastly, we point to the research questions entailed in modeling ARLEs as ARLOs.",
"fno": "5009a506",
"keywords": [
"Augmented Reality",
"Context",
"Context Modeling",
"Solid Modeling",
"Visualization",
"Three Dimensional Displays",
"Computational Modeling",
"Participatory Design",
"Augmented Reality",
"Augmented Reality Learning Experience",
"Authoring",
"Learning Object"
],
"authors": [
{
"affiliation": null,
"fullName": "Marc Ericson C. Santos",
"givenName": "Marc Ericson C.",
"surname": "Santos",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Goshiro Yamamoto",
"givenName": "Goshiro",
"surname": "Yamamoto",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Takafumi Taketomi",
"givenName": "Takafumi",
"surname": "Taketomi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Jun Miyazaki",
"givenName": "Jun",
"surname": "Miyazaki",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Hirokazu Kato",
"givenName": "Hirokazu",
"surname": "Kato",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icalt",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2013-07-01T00:00:00",
"pubType": "proceedings",
"pages": "506-507",
"year": "2013",
"issn": null,
"isbn": "978-0-7695-5009-1",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "5009a504",
"articleId": "12OmNBKmXn0",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "5009a508",
"articleId": "12OmNx9FhRE",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ismarw/2016/3740/0/07836510",
"title": "Integrating Building Information Modeling with Augmented Reality for Interdisciplinary Learning",
"doi": null,
"abstractUrl": "/proceedings-article/ismarw/2016/07836510/12OmNCgrD16",
"parentPublication": {
"id": "proceedings/ismarw/2016/3740/0",
"title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/Ismar-mashd/2015/9628/0/9628a009",
"title": "CI-Spy: Designing A Mobile Augmented Reality System for Scaffolding Historical Inquiry Learning",
"doi": null,
"abstractUrl": "/proceedings-article/Ismar-mashd/2015/9628a009/12OmNvkYxa6",
"parentPublication": {
"id": "proceedings/Ismar-mashd/2015/9628/0",
"title": "2015 IEEE International Symposium on Mixed and Augmented Reality - Media, Art, Social Science, Humanities and Design (ISMAR-MASH'D)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iciss/2015/8611/0/07370990",
"title": "Construction of a Synchronized Multi-Display Augmented Reality Simulation Module for Learning Tidal Effects",
"doi": null,
"abstractUrl": "/proceedings-article/iciss/2015/07370990/12OmNwE9Ol3",
"parentPublication": {
"id": "proceedings/iciss/2015/8611/0",
"title": "2015 2nd International Conference on Information Science and Security (ICISS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/digitel/2008/3409/0/3409a215",
"title": "Pedagogy Play: Virtual Instructors for Wearable Augmented Reality during Hands-On Learning and Play",
"doi": null,
"abstractUrl": "/proceedings-article/digitel/2008/3409a215/12OmNxXCGKF",
"parentPublication": {
"id": "proceedings/digitel/2008/3409/0",
"title": "Digital Game and Intelligent Toy Enhanced Learning, IEEE International Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/eitt/2017/0629/0/0629a311",
"title": "Utilizing Augmented Reality to Support Students' Learning in Popular Science Courses",
"doi": null,
"abstractUrl": "/proceedings-article/eitt/2017/0629a311/12OmNywxlPq",
"parentPublication": {
"id": "proceedings/eitt/2017/0629/0",
"title": "2017 International Conference of Educational Innovation through Technology (EITT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/lt/2017/04/07891552",
"title": "Where Does My Augmented Reality Learning Experience (ARLE) Belong? A Student and Teacher Perspective to Positioning ARLEs",
"doi": null,
"abstractUrl": "/journal/lt/2017/04/07891552/13rRUwvBy5n",
"parentPublication": {
"id": "trans/lt",
"title": "IEEE Transactions on Learning Technologies",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/lt/2014/01/06681863",
"title": "Augmented Reality Learning Experiences: Survey of Prototype Design and Evaluation",
"doi": null,
"abstractUrl": "/journal/lt/2014/01/06681863/13rRUxcbnEk",
"parentPublication": {
"id": "trans/lt",
"title": "IEEE Transactions on Learning Technologies",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797973",
"title": "A Comparison of Desktop and Augmented Reality Scenario Based Training Authoring Tools",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797973/1cJ0S2MS49O",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/laclo/2018/0382/0/038200a476",
"title": "Augmented Reality Learning Resources in Anatomy",
"doi": null,
"abstractUrl": "/proceedings-article/laclo/2018/038200a476/1cdOkT11fDG",
"parentPublication": {
"id": "proceedings/laclo/2018/0382/0",
"title": "2018 XIII Latin American Conference on Learning Technologies (LACLO)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/svr/2020/9231/0/923100a413",
"title": "Authoring and Visualization Tool for Augmented Scenic Performances Prototyping and Experience",
"doi": null,
"abstractUrl": "/proceedings-article/svr/2020/923100a413/1oZBBSo7je8",
"parentPublication": {
"id": "proceedings/svr/2020/9231/0",
"title": "2020 22nd Symposium on Virtual and Augmented Reality (SVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNywfKyu",
"title": "2010 IEEE International Symposium on Mixed and Augmented Reality",
"acronym": "ismar",
"groupId": "1000465",
"volume": "0",
"displayVolume": "0",
"year": "2010",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNvDqsBr",
"doi": "10.1109/ISMAR.2010.5643564",
"title": "The City of Sights: Design, construction, and measurement of an Augmented Reality stage set",
"normalizedTitle": "The City of Sights: Design, construction, and measurement of an Augmented Reality stage set",
"abstract": "We describe the design and implementation of a physical and virtual model of an imaginary urban scene-the “City of Sights”- that can serve as a backdrop or “stage” for a variety of Augmented Reality (AR) research. We argue that the AR research community would benefit from such a standard model dataset which can be used for evaluation of such AR topics as tracking systems, modeling, spatial AR, rendering tests, collaborative AR and user interface design. By openly sharing the digital blueprints and assembly instructions for our models, we allow the proposed set to be physically replicable by anyone and permit customization and experimental changes to the stage design which enable comprehensive exploration of algorithms and methods. Furthermore we provide an accompanying rich dataset consisting of video sequences under varying conditions with ground truth camera pose. We employed three different ground truth acquisition methods to support a broad range of use cases. The goal of our design is to enable and improve the replicability and evaluation of future augmented reality research.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We describe the design and implementation of a physical and virtual model of an imaginary urban scene-the “City of Sights”- that can serve as a backdrop or “stage” for a variety of Augmented Reality (AR) research. We argue that the AR research community would benefit from such a standard model dataset which can be used for evaluation of such AR topics as tracking systems, modeling, spatial AR, rendering tests, collaborative AR and user interface design. By openly sharing the digital blueprints and assembly instructions for our models, we allow the proposed set to be physically replicable by anyone and permit customization and experimental changes to the stage design which enable comprehensive exploration of algorithms and methods. Furthermore we provide an accompanying rich dataset consisting of video sequences under varying conditions with ground truth camera pose. We employed three different ground truth acquisition methods to support a broad range of use cases. The goal of our design is to enable and improve the replicability and evaluation of future augmented reality research.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We describe the design and implementation of a physical and virtual model of an imaginary urban scene-the “City of Sights”- that can serve as a backdrop or “stage” for a variety of Augmented Reality (AR) research. We argue that the AR research community would benefit from such a standard model dataset which can be used for evaluation of such AR topics as tracking systems, modeling, spatial AR, rendering tests, collaborative AR and user interface design. By openly sharing the digital blueprints and assembly instructions for our models, we allow the proposed set to be physically replicable by anyone and permit customization and experimental changes to the stage design which enable comprehensive exploration of algorithms and methods. Furthermore we provide an accompanying rich dataset consisting of video sequences under varying conditions with ground truth camera pose. We employed three different ground truth acquisition methods to support a broad range of use cases. The goal of our design is to enable and improve the replicability and evaluation of future augmented reality research.",
"fno": "05643564",
"keywords": [
"Augmented Reality",
"Image Sequences",
"Rendering Computer Graphics",
"Solid Modelling",
"User Interfaces",
"City Of Sights",
"Augmented Reality Stage Set",
"Imaginary Urban Scene",
"Rendering Test",
"Collaborative AR Design",
"User Interface Design",
"Digital Blueprints Sharing",
"Video Sequence",
"Ground Truth Camera Pose",
"Ground Truth Acquisition Methods",
"Solid Modeling",
"Cameras",
"Three Dimensional Displays",
"Calibration",
"Subspace Constraints",
"Accuracy",
"Computational Modeling"
],
"authors": [
{
"affiliation": "Graz University of Technology, Austria",
"fullName": "Lukas Gruber",
"givenName": "Lukas",
"surname": "Gruber",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of California, Santa Barbara, USA",
"fullName": "Steffen Gauglitz",
"givenName": "Steffen",
"surname": "Gauglitz",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of California, Santa Barbara, USA",
"fullName": "Jonathan Ventura",
"givenName": "Jonathan",
"surname": "Ventura",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Graz University of Technology, Austria",
"fullName": "Stefanie Zollmann",
"givenName": "Stefanie",
"surname": "Zollmann",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Technische Universität München, Germany",
"fullName": "Manuel Huber",
"givenName": "Manuel",
"surname": "Huber",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Technische Universität München, Germany",
"fullName": "Michael Schlegel",
"givenName": "Michael",
"surname": "Schlegel",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Technische Universität München, Germany",
"fullName": "Gudrun Klinker",
"givenName": "Gudrun",
"surname": "Klinker",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Graz University of Technology, Austria",
"fullName": "Dieter Schmalstieg",
"givenName": "Dieter",
"surname": "Schmalstieg",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of California, Santa Barbara, USA",
"fullName": "Tobias Höllerer",
"givenName": "Tobias",
"surname": "Höllerer",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2010-10-01T00:00:00",
"pubType": "proceedings",
"pages": "157-163",
"year": "2010",
"issn": null,
"isbn": "978-1-4244-9343-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "05643563",
"articleId": "12OmNAJDBux",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "05643566",
"articleId": "12OmNzd7byj",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ismar/2015/7660/0/7660a140",
"title": "[POSTER] AR4AR: Using Augmented Reality for guidance in Augmented Reality Systems Setup",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2015/7660a140/12OmNCd2rIf",
"parentPublication": {
"id": "proceedings/ismar/2015/7660/0",
"title": "2015 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iswc/2007/1452/0/04373785",
"title": "Handy AR: Markerless Inspection of Augmented Reality Objects Using Fingertip Tracking",
"doi": null,
"abstractUrl": "/proceedings-article/iswc/2007/04373785/12OmNzxPTMb",
"parentPublication": {
"id": "proceedings/iswc/2007/1452/0",
"title": "2007 11th IEEE International Symposium on Wearable Computers",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cic/2018/9502/0/950200a453",
"title": "Train and Equip Firefighters with Cognitive Virtual and Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/cic/2018/950200a453/17D45WrVgah",
"parentPublication": {
"id": "proceedings/cic/2018/9502/0",
"title": "2018 IEEE 4th International Conference on Collaboration and Internet Computing (CIC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2018/7592/0/08699263",
"title": "Design and Calibration of an Augmented Reality Haploscope",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699263/19F1OYkEmWs",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2018/7592/0",
"title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ism/2021/3734/0/373400a058",
"title": "RealityCheck: A Tool to Evaluate Spatial Inconsistency in Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ism/2021/373400a058/1A3j5dgNXXO",
"parentPublication": {
"id": "proceedings/ism/2021/3734/0",
"title": "2021 IEEE International Symposium on Multimedia (ISM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09866555",
"title": "Systematic Review of Augmented Reality Training Systems",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09866555/1G6ALnVS50I",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cost/2022/6248/0/624800a242",
"title": "Modern Stage Performance Simulation Based on Augmented Reality Style",
"doi": null,
"abstractUrl": "/proceedings-article/cost/2022/624800a242/1H2pqdrjzgs",
"parentPublication": {
"id": "proceedings/cost/2022/6248/0",
"title": "2022 International Conference on Culture-Oriented Science and Technology (CoST)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798095",
"title": "Distance Judgments to On- and Off-Ground Objects in Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798095/1cJ0Yxz6rrG",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798335",
"title": "Design, Assembly, Calibration, and Measurement of an Augmented Reality Haploscope",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798335/1cJ122q4Cty",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2019/4765/0/476500a366",
"title": "NEAR: The NetEase AR Oriented Visual Inertial Dataset",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2019/476500a366/1gysjSArEsM",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2019/4765/0",
"title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNyqRn7h",
"title": "Proceedings. International Symposium on Mixed and Augmented Reality",
"acronym": "ismar",
"groupId": "1000465",
"volume": "0",
"displayVolume": "0",
"year": "2002",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNx4yvFQ",
"doi": "10.1109/ISMAR.2002.1115105",
"title": "Experimental Evaluation of Augmented Reality in Object Assembly Task",
"normalizedTitle": "Experimental Evaluation of Augmented Reality in Object Assembly Task",
"abstract": "This study evaluated the effectiveness of spatially overlaid instructions using augmented reality (AR) in an assembly task comparing with other traditional media. Results indicate that overlaying 3D instructions on the workspace reduce error rate by 82%, particularly cumulative errors. Measurement of mental effort also suggests some of the mental workload is offloaded to the computer.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This study evaluated the effectiveness of spatially overlaid instructions using augmented reality (AR) in an assembly task comparing with other traditional media. Results indicate that overlaying 3D instructions on the workspace reduce error rate by 82%, particularly cumulative errors. Measurement of mental effort also suggests some of the mental workload is offloaded to the computer.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This study evaluated the effectiveness of spatially overlaid instructions using augmented reality (AR) in an assembly task comparing with other traditional media. Results indicate that overlaying 3D instructions on the workspace reduce error rate by 82%, particularly cumulative errors. Measurement of mental effort also suggests some of the mental workload is offloaded to the computer.",
"fno": "17810265",
"keywords": [],
"authors": [
{
"affiliation": "Michigan State University",
"fullName": "Arthur Tang",
"givenName": "Arthur",
"surname": "Tang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Michigan State University",
"fullName": "Charles Owen",
"givenName": "Charles",
"surname": "Owen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Michigan State University",
"fullName": "Frank Biocca",
"givenName": "Frank",
"surname": "Biocca",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Michigan State University",
"fullName": "Weimin Mou",
"givenName": "Weimin",
"surname": "Mou",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2002-09-01T00:00:00",
"pubType": "proceedings",
"pages": "265",
"year": "2002",
"issn": null,
"isbn": "0-7695-1781-1",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "17810263",
"articleId": "12OmNxEBzaL",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "17810267",
"articleId": "12OmNBzAcli",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ismar/2013/2869/0/06671762",
"title": "Improving procedural task performance with Augmented Reality annotations",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2013/06671762/12OmNB7LvHJ",
"parentPublication": {
"id": "proceedings/ismar/2013/2869/0",
"title": "2013 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isar/2001/1375/0/13750185",
"title": "Augmented Reality (AR) for Assembly Processes ??An Experimental Evaluation",
"doi": null,
"abstractUrl": "/proceedings-article/isar/2001/13750185/12OmNqGRG6Z",
"parentPublication": {
"id": "proceedings/isar/2001/1375/0",
"title": "Proceedings IEEE and ACM International Symposium on Augmented Reality",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2011/2183/0/06092386",
"title": "Augmented reality in the psychomotor phase of a procedural task",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2011/06092386/12OmNvjgWlg",
"parentPublication": {
"id": "proceedings/ismar/2011/2183/0",
"title": "2011 10th IEEE International Symposium on Mixed and Augmented Reality",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2008/1971/0/04480755",
"title": "Augmented Reality for Industrial Building Acceptance",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2008/04480755/12OmNwc3wyn",
"parentPublication": {
"id": "proceedings/vr/2008/1971/0",
"title": "IEEE Virtual Reality 2008",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2014/6184/0/06948512",
"title": "Diminished reality as challenging extension of mixed and augmented reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2014/06948512/12OmNy68EMC",
"parentPublication": {
"id": "proceedings/ismar/2014/6184/0",
"title": "2014 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismarw/2015/8471/0/8471a025",
"title": "Diminished Reality as Challenging Issue in Mixed and Augmented Reality (IWDR2015) Summary",
"doi": null,
"abstractUrl": "/proceedings-article/ismarw/2015/8471a025/12OmNy6ZrYB",
"parentPublication": {
"id": "proceedings/ismarw/2015/8471/0",
"title": "2015 IEEE International Symposium on Mixed and Augmented Reality Workshops (ISMARW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/11/09874259",
"title": "Model-Free Authoring by Demonstration of Assembly Instructions in Augmented Reality",
"doi": null,
"abstractUrl": "/journal/tg/2022/11/09874259/1GjwP2mCxs4",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2019/4765/0/476500a001",
"title": "Low-Cost Real-Time Mental Load Adaptation for Augmented Reality Instructions - A Feasibility Study",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2019/476500a001/1gysjlovPr2",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2019/4765/0",
"title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2020/8508/0/850800a498",
"title": "Enhancing First-Person View Task Instruction Videos with Augmented Reality Cues",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2020/850800a498/1pyswTqrkZ2",
"parentPublication": {
"id": "proceedings/ismar/2020/8508/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2020/8508/0/850800a486",
"title": "Guideline and Tool for Designing an Assembly Task Support System Using Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2020/850800a486/1pysyhDXiw0",
"parentPublication": {
"id": "proceedings/ismar/2020/8508/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNxV4itF",
"title": "2017 IEEE Virtual Reality (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2017",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNx57HS4",
"doi": "10.1109/VR.2017.7892358",
"title": "Augmented reality: Principles and practice",
"normalizedTitle": "Augmented reality: Principles and practice",
"abstract": "This tutorial will provide a detailed introduction to Augmented Reality (AR). AR is a key user-interface technology for personalized, situated information delivery, navigation, on-demand instruction and games. The widespread availability and rapid evolution of smartphones and new devices such as Hololens enables software-only solutions for AR, where it was previously necessary to assemble custom hardware solutions. However, ergonomic and technical limitations of existing devices make this a challenging endeavor. In particular, it is necessary to design novel efficient real-time computer vision and computer graphics algorithms, and create new lightweight forms of interaction with the environment through small form-factor devices. This tutorial will present selected technical achievements in this field and highlight some examples of successful application prototypes.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This tutorial will provide a detailed introduction to Augmented Reality (AR). AR is a key user-interface technology for personalized, situated information delivery, navigation, on-demand instruction and games. The widespread availability and rapid evolution of smartphones and new devices such as Hololens enables software-only solutions for AR, where it was previously necessary to assemble custom hardware solutions. However, ergonomic and technical limitations of existing devices make this a challenging endeavor. In particular, it is necessary to design novel efficient real-time computer vision and computer graphics algorithms, and create new lightweight forms of interaction with the environment through small form-factor devices. This tutorial will present selected technical achievements in this field and highlight some examples of successful application prototypes.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This tutorial will provide a detailed introduction to Augmented Reality (AR). AR is a key user-interface technology for personalized, situated information delivery, navigation, on-demand instruction and games. The widespread availability and rapid evolution of smartphones and new devices such as Hololens enables software-only solutions for AR, where it was previously necessary to assemble custom hardware solutions. However, ergonomic and technical limitations of existing devices make this a challenging endeavor. In particular, it is necessary to design novel efficient real-time computer vision and computer graphics algorithms, and create new lightweight forms of interaction with the environment through small form-factor devices. This tutorial will present selected technical achievements in this field and highlight some examples of successful application prototypes.",
"fno": "07892358",
"keywords": [
"Tutorials",
"Augmented Reality",
"Solid Modeling",
"Optical Sensors",
"Smart Phones",
"Computer Vision",
"Augmented Reality",
"Mixed Reality"
],
"authors": [
{
"affiliation": "Graz University of Technology, Austria",
"fullName": "Dieter Schmalstieg",
"givenName": "Dieter",
"surname": "Schmalstieg",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of California at Santa Barbara, CA, USA",
"fullName": "Tobias Höllerer",
"givenName": "Tobias",
"surname": "Höllerer",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2017-01-01T00:00:00",
"pubType": "proceedings",
"pages": "425-426",
"year": "2017",
"issn": "2375-5334",
"isbn": "978-1-5090-6647-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07892357",
"articleId": "12OmNqJZgGI",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07892359",
"articleId": "12OmNqzcvM1",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ismar-adjunct/2017/6327/0/6327z032",
"title": "Tutorial 3: SOFA, an Open-Source Framework for Physics Simulation in Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2017/6327z032/12OmNBpEeZr",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2017/6327/0",
"title": "2017 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2014/6184/0/06948510",
"title": "A ‘Look Into’ Medical augmented reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2014/06948510/12OmNx2zjyh",
"parentPublication": {
"id": "proceedings/ismar/2014/6184/0",
"title": "2014 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2014/2871/0/06802118",
"title": "Quantitative and qualitative methods for human-subject experiments in Virtual and Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2014/06802118/12OmNxRWI8b",
"parentPublication": {
"id": "proceedings/vr/2014/2871/0",
"title": "2014 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismarw/2016/3740/0/07836436",
"title": "Augmented Reality – Principles and Practice Tutorial",
"doi": null,
"abstractUrl": "/proceedings-article/ismarw/2016/07836436/12OmNxVV5Xg",
"parentPublication": {
"id": "proceedings/ismarw/2016/3740/0",
"title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2014/6184/0/06948512",
"title": "Diminished reality as challenging extension of mixed and augmented reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2014/06948512/12OmNy68EMC",
"parentPublication": {
"id": "proceedings/ismar/2014/6184/0",
"title": "2014 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a517",
"title": "Nurse Perceptions of the Usability of Augmented Reality to Support Clinical Decision Making: Results of a Pilot Study",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a517/1CJdc0rTv0c",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vs-games/2019/4540/0/08864548",
"title": "The Usability of the Microsoft HoloLens for an Augmented Reality Game to Teach Elementary School Children",
"doi": null,
"abstractUrl": "/proceedings-article/vs-games/2019/08864548/1e5ZpUVkjVS",
"parentPublication": {
"id": "proceedings/vs-games/2019/4540/0",
"title": "2019 11th International Conference on Virtual Worlds and Games for Serious Applications (VS-Games)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvrv/2019/4752/0/09212860",
"title": "User Engagement for Collaborative Learning on a Mobile and Desktop Augmented Reality Application",
"doi": null,
"abstractUrl": "/proceedings-article/icvrv/2019/09212860/1nHRTRhZdRK",
"parentPublication": {
"id": "proceedings/icvrv/2019/4752/0",
"title": "2019 International Conference on Virtual Reality and Visualization (ICVRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2021/0158/0/015800z016",
"title": "Keynote Speaker: User Experience Considerations for Everyday Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2021/015800z016/1yeCV2T6UAE",
"parentPublication": {
"id": "proceedings/ismar/2021/0158/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2021/1298/0/129800a082",
"title": "Comparing Distance Judgments in Real and Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2021/129800a082/1yfxMk2JFHW",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2021/1298/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNwwMf3H",
"title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"acronym": "ismarw",
"groupId": "1810084",
"volume": "0",
"displayVolume": "0",
"year": "2016",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNxETajV",
"doi": "10.1109/ISMAR-Adjunct.2016.0101",
"title": "Exploring Immersive AR Instructions for Procedural Tasks: The Role of Depth, Motion, and Volumetric Representations",
"normalizedTitle": "Exploring Immersive AR Instructions for Procedural Tasks: The Role of Depth, Motion, and Volumetric Representations",
"abstract": "Wearable Augmented Reality (W-AR) is based on getting a computer as intimate as possible with the wearers' bodies and senses. We need to understand the cognitive and perceptual mechanisms leveraged by this technology and use them for designing AR applications. In this study we explored the potential benefit of W-AR to guide a procedural task of assembling a LEGO™ compared to traditional paper instructions. We measured the time used to complete each step and the subjective perception of helpfulness and effectiveness of the instructions along with the perceived time spent doing the task. The results show that adding motion cues to an AR stereo visualization of the instructions (Dynamic 3D) improved performance compared to both the paper instructions and an AR version with stereo only but no motion (Static 3D). Interestingly, performance for the Static 3D condition was the slowest of the three. Subjective reports did not show any difference across different instruction types, suggesting that advantage of Dynamic 3D instructions are not accessible by covert awareness of the participants. The results provide support to the idea that principles of neurosciences may have direct implications for the product development in Wearable Augmented Reality.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Wearable Augmented Reality (W-AR) is based on getting a computer as intimate as possible with the wearers' bodies and senses. We need to understand the cognitive and perceptual mechanisms leveraged by this technology and use them for designing AR applications. In this study we explored the potential benefit of W-AR to guide a procedural task of assembling a LEGO™ compared to traditional paper instructions. We measured the time used to complete each step and the subjective perception of helpfulness and effectiveness of the instructions along with the perceived time spent doing the task. The results show that adding motion cues to an AR stereo visualization of the instructions (Dynamic 3D) improved performance compared to both the paper instructions and an AR version with stereo only but no motion (Static 3D). Interestingly, performance for the Static 3D condition was the slowest of the three. Subjective reports did not show any difference across different instruction types, suggesting that advantage of Dynamic 3D instructions are not accessible by covert awareness of the participants. The results provide support to the idea that principles of neurosciences may have direct implications for the product development in Wearable Augmented Reality.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Wearable Augmented Reality (W-AR) is based on getting a computer as intimate as possible with the wearers' bodies and senses. We need to understand the cognitive and perceptual mechanisms leveraged by this technology and use them for designing AR applications. In this study we explored the potential benefit of W-AR to guide a procedural task of assembling a LEGO™ compared to traditional paper instructions. We measured the time used to complete each step and the subjective perception of helpfulness and effectiveness of the instructions along with the perceived time spent doing the task. The results show that adding motion cues to an AR stereo visualization of the instructions (Dynamic 3D) improved performance compared to both the paper instructions and an AR version with stereo only but no motion (Static 3D). Interestingly, performance for the Static 3D condition was the slowest of the three. Subjective reports did not show any difference across different instruction types, suggesting that advantage of Dynamic 3D instructions are not accessible by covert awareness of the participants. The results provide support to the idea that principles of neurosciences may have direct implications for the product development in Wearable Augmented Reality.",
"fno": "07836522",
"keywords": [
"Augmented Reality",
"Data Visualisation",
"Image Motion Analysis",
"Image Representation",
"Product Development",
"Stereo Image Processing",
"Wearable Computers",
"Immersive AR Instructions",
"Procedural Tasks",
"Depth Representation",
"Motion Representation",
"Volumetric Representation",
"Wearable Augmented Reality",
"W AR",
"LEGO",
"AR Stereo Visualization",
"Static 3 D Condition",
"Dynamic 3 D Instructions",
"Product Development",
"Three Dimensional Displays",
"Two Dimensional Displays",
"Augmented Reality",
"Time Measurement",
"Solid Modeling",
"Training",
"Companies",
"Mixed Reality",
"Augmented Reality",
"Depth Perception",
"Structure From Motion",
"Task Guidance",
"Procedural Tasks"
],
"authors": [
{
"affiliation": null,
"fullName": "Stefano Baldassi",
"givenName": "Stefano",
"surname": "Baldassi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Grace T. Cheng",
"givenName": "Grace T.",
"surname": "Cheng",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Jonathan Chan",
"givenName": "Jonathan",
"surname": "Chan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Moqian Tian",
"givenName": "Moqian",
"surname": "Tian",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Tim Christie",
"givenName": "Tim",
"surname": "Christie",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Matthew T. Short",
"givenName": "Matthew T.",
"surname": "Short",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismarw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2016-09-01T00:00:00",
"pubType": "proceedings",
"pages": "300-305",
"year": "2016",
"issn": null,
"isbn": "978-1-5090-3740-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07836521",
"articleId": "12OmNyo1nR0",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07836523",
"articleId": "12OmNwFidbp",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ismar/2015/7660/0/7660a120",
"title": "[POSTER] Design Guidelines for Generating Augmented Reality Instructions",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2015/7660a120/12OmNAle6zC",
"parentPublication": {
"id": "proceedings/ismar/2015/7660/0",
"title": "2015 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2007/1749/0/04538826",
"title": "AR-Jig: A Handheld Tangible User Interface for Modification of 3D Digital Form via 2D Physical Curve",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2007/04538826/12OmNAqCtOi",
"parentPublication": {
"id": "proceedings/ismar/2007/1749/0",
"title": "2007 6th IEEE and ACM International Symposium on Mixed and Augmented Reality",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2017/6647/0/07892247",
"title": "MagicToon: A 2D-to-3D creative cartoon modeling system with mobile AR",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892247/12OmNxjjEhC",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2015/7660/0/7660a172",
"title": "[POSTER] Hands-Free AR Work Support System Monitoring Work Progress with Point-cloud Data Processing",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2015/7660a172/12OmNyO8tSH",
"parentPublication": {
"id": "proceedings/ismar/2015/7660/0",
"title": "2015 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2014/2871/0/06802077",
"title": "An AR edutainment system supporting bone anatomy learning",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2014/06802077/12OmNylKAKS",
"parentPublication": {
"id": "proceedings/vr/2014/2871/0",
"title": "2014 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2018/7592/0/08699278",
"title": "Hybrid UIs for Music Exploration in AR and VR",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699278/19F1NJTrBfi",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2018/7592/0",
"title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2019/4765/0/476500a034",
"title": "AR Tips: Augmented First-Person View Task Instruction Videos",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2019/476500a034/1gysm0mzZlK",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2019/4765/0",
"title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090420",
"title": "A Usability Assessment Of Augmented Situated Visualization",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090420/1jIxvndUVYA",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2020/7675/0/767500a203",
"title": "Industrial Augmented Reality: 3D-Content Editor for Augmented Reality Maintenance Worker Support System",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2020/767500a203/1pBMigKK7F6",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2020/7675/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ase/2020/6768/0/676800b267",
"title": "Towards Immersive Comprehension of Software Systems Using Augmented Reality - An Empirical Evaluation",
"doi": null,
"abstractUrl": "/proceedings-article/ase/2020/676800b267/1pP3IvL3Z6w",
"parentPublication": {
"id": "proceedings/ase/2020/6768/0",
"title": "2020 35th IEEE/ACM International Conference on Automated Software Engineering (ASE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNscfI2Y",
"title": "2014 International Conference of Educational Innovation through Technology (EITT)",
"acronym": "eitt",
"groupId": "1804904",
"volume": "0",
"displayVolume": "0",
"year": "2014",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNxwENQg",
"doi": "10.1109/EITT.2014.11",
"title": "Design Research and Practice of Augmented Reality Textbook",
"normalizedTitle": "Design Research and Practice of Augmented Reality Textbook",
"abstract": "The research of augmented reality textbook is an important way to make the teaching content vivid and three-dimensional, but its development is lagging behind. This research analyzes on the basis of the principle of the augmented reality technology and its advantages for textbook development, through the case study of children's English textbook design based on augmented reality technology solutions, so as to provide implementation pattern and technical reference for development of similar textbooks.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The research of augmented reality textbook is an important way to make the teaching content vivid and three-dimensional, but its development is lagging behind. This research analyzes on the basis of the principle of the augmented reality technology and its advantages for textbook development, through the case study of children's English textbook design based on augmented reality technology solutions, so as to provide implementation pattern and technical reference for development of similar textbooks.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The research of augmented reality textbook is an important way to make the teaching content vivid and three-dimensional, but its development is lagging behind. This research analyzes on the basis of the principle of the augmented reality technology and its advantages for textbook development, through the case study of children's English textbook design based on augmented reality technology solutions, so as to provide implementation pattern and technical reference for development of similar textbooks.",
"fno": "06982557",
"keywords": [
"Augmented Reality",
"Computer Aided Instruction",
"Design Research",
"Augmented Reality Textbook",
"Teaching Content",
"Textbook Development",
"English Textbook Design",
"Augmented Reality",
"Three Dimensional Displays",
"Solid Modeling",
"Cameras",
"Animation",
"Educational Institutions",
"Augmented Reality",
"Textbook",
"Design Research"
],
"authors": [
{
"affiliation": "Sch. of Educ. Sci., Nanjing Normal Univ., Nanjing, China",
"fullName": "Ling Zhou",
"givenName": "Ling",
"surname": "Zhou",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Sch. of Educ. Sci., Nanjing Normal Univ., Nanjing, China",
"fullName": "Shuyu Zhang",
"givenName": "Shuyu",
"surname": "Zhang",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "eitt",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2014-10-01T00:00:00",
"pubType": "proceedings",
"pages": "16-20",
"year": "2014",
"issn": null,
"isbn": "978-1-4799-4231-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "06982556",
"articleId": "12OmNx5pj4y",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "06982558",
"articleId": "12OmNAXxX20",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icebe/2015/8002/0/8002a281",
"title": "Applying Augmented Reality Technology to Book Publication Business",
"doi": null,
"abstractUrl": "/proceedings-article/icebe/2015/8002a281/12OmNAfy7JF",
"parentPublication": {
"id": "proceedings/icebe/2015/8002/0",
"title": "2015 IEEE 12th International Conference on e-Business Engineering (ICEBE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icalt/2015/7334/0/7334a132",
"title": "Augmented Reality Laboratory for High School Electrochemistry Course",
"doi": null,
"abstractUrl": "/proceedings-article/icalt/2015/7334a132/12OmNqBbHAA",
"parentPublication": {
"id": "proceedings/icalt/2015/7334/0",
"title": "2015 IEEE 15th International Conference on Advanced Learning Technologies (ICALT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icisa/2013/0602/0/06579374",
"title": "Textbook Information Sharing Method Based on ISBN with Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/icisa/2013/06579374/12OmNrY3LBy",
"parentPublication": {
"id": "proceedings/icisa/2013/0602/0",
"title": "2013 International Conference on Information Science and Applications (ICISA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2017/6647/0/07892358",
"title": "Augmented reality: Principles and practice",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892358/12OmNx57HS4",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2013/4795/0/06549362",
"title": "Exploration of spatial augmented reality on person",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2013/06549362/12OmNxQOjAC",
"parentPublication": {
"id": "proceedings/vr/2013/4795/0",
"title": "2013 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2012/4660/0/06402558",
"title": "3D referencing techniques for physical objects in shared augmented reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2012/06402558/12OmNxj239f",
"parentPublication": {
"id": "proceedings/ismar/2012/4660/0",
"title": "2012 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2015/1727/0/07223429",
"title": "Augmented reality maintenance demonstrator and associated modelling",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2015/07223429/12OmNylKAXJ",
"parentPublication": {
"id": "proceedings/vr/2015/1727/0",
"title": "2015 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iceee/2019/3910/0/391000a079",
"title": "Desktop Artillery Simulation Using Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/iceee/2019/391000a079/1cpqGEpXo5O",
"parentPublication": {
"id": "proceedings/iceee/2019/3910/0",
"title": "2019 6th International Conference on Electrical and Electronics Engineering (ICEEE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/searis/2017/6274/0/09183487",
"title": "Props Alive: A Framework for Augmented Reality Stop Motion Animation",
"doi": null,
"abstractUrl": "/proceedings-article/searis/2017/09183487/1mLMmFjHXVe",
"parentPublication": {
"id": "proceedings/searis/2017/6274/0",
"title": "2017 IEEE 10th Workshop on Software Engineering and Architectures for Realtime Interactive Systems (SEARIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icitbs/2021/4854/0/485400a266",
"title": "Design and Implementation of Auxiliary Application of Middle School Geography Textbook Based on Augmented Reality Technology",
"doi": null,
"abstractUrl": "/proceedings-article/icitbs/2021/485400a266/1wB6LS7lc8o",
"parentPublication": {
"id": "proceedings/icitbs/2021/4854/0",
"title": "2021 International Conference on Intelligent Transportation, Big Data & Smart City (ICITBS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNBtl1Au",
"title": "Proceedings. 5th IEEE International Conference on Advanced Learning Technologies",
"acronym": "icalt",
"groupId": "1000009",
"volume": "0",
"displayVolume": "0",
"year": "2005",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNyfdOZd",
"doi": "10.1109/ICALT.2005.71",
"title": "Augmented Instructions — A Fusion of Augmented Reality and Printed Learning Materials",
"normalizedTitle": "Augmented Instructions — A Fusion of Augmented Reality and Printed Learning Materials",
"abstract": "Augmented Reality (AR), which overlays virtual objects onto real scenes, has large potential to provide learners with a new type of learning material. Although many AR systems have been developed for demonstration, there is a gap between their ideal and practical use. We discuss a concept for Augmented Instructions that mix AR and traditional printed materials. Improvement of human-computer interface is considered to serve as a bridge for the gap. To investigate on characteristics of Augmented Instructions and its appropriate interface, we conducted subjective evaluation, comparing 3D presentation systems; a handheld PC and a head-mounted display. The result suggested that a handheld PC was more suitable for a presentation tool of Augmented Instructions.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Augmented Reality (AR), which overlays virtual objects onto real scenes, has large potential to provide learners with a new type of learning material. Although many AR systems have been developed for demonstration, there is a gap between their ideal and practical use. We discuss a concept for Augmented Instructions that mix AR and traditional printed materials. Improvement of human-computer interface is considered to serve as a bridge for the gap. To investigate on characteristics of Augmented Instructions and its appropriate interface, we conducted subjective evaluation, comparing 3D presentation systems; a handheld PC and a head-mounted display. The result suggested that a handheld PC was more suitable for a presentation tool of Augmented Instructions.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Augmented Reality (AR), which overlays virtual objects onto real scenes, has large potential to provide learners with a new type of learning material. Although many AR systems have been developed for demonstration, there is a gap between their ideal and practical use. We discuss a concept for Augmented Instructions that mix AR and traditional printed materials. Improvement of human-computer interface is considered to serve as a bridge for the gap. To investigate on characteristics of Augmented Instructions and its appropriate interface, we conducted subjective evaluation, comparing 3D presentation systems; a handheld PC and a head-mounted display. The result suggested that a handheld PC was more suitable for a presentation tool of Augmented Instructions.",
"fno": "23380213",
"keywords": [],
"authors": [
{
"affiliation": "National Institute of Multimedia Education and Graduate University for Advanced Studies",
"fullName": "Kikuo Asai",
"givenName": "Kikuo",
"surname": "Asai",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Graduate University for Advanced Studies",
"fullName": "Hideaki Kobayashi",
"givenName": "Hideaki",
"surname": "Kobayashi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "National Institute of Multimedia Education and Graduate University for Advanced Studies",
"fullName": "Tomotsugu Kondo",
"givenName": "Tomotsugu",
"surname": "Kondo",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icalt",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2005-07-01T00:00:00",
"pubType": "proceedings",
"pages": "213-215",
"year": "2005",
"issn": null,
"isbn": "0-7695-2338-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "23380210",
"articleId": "12OmNBp52xv",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "23380216",
"articleId": "12OmNyuPLnS",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ismar/2013/2869/0/06671841",
"title": "Markerless 3D gesture-based interaction for handheld Augmented Reality interfaces",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2013/06671841/12OmNAIvcZU",
"parentPublication": {
"id": "proceedings/ismar/2013/2869/0",
"title": "2013 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2015/7660/0/7660a120",
"title": "[POSTER] Design Guidelines for Generating Augmented Reality Instructions",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2015/7660a120/12OmNAle6zC",
"parentPublication": {
"id": "proceedings/ismar/2015/7660/0",
"title": "2015 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2012/4660/0/06402556",
"title": "Tablet versus phone: Depth perception in handheld augmented reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2012/06402556/12OmNBQ2VVh",
"parentPublication": {
"id": "proceedings/ismar/2012/4660/0",
"title": "2012 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2011/2183/0/06092390",
"title": "User experiences with augmented reality aided navigation on phones",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2011/06092390/12OmNC1Gudh",
"parentPublication": {
"id": "proceedings/ismar/2011/2183/0",
"title": "2011 10th IEEE International Symposium on Mixed and Augmented Reality",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2014/6184/0/06948447",
"title": "[Poster] Towards mobile augmented reality for the elderly",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2014/06948447/12OmNxE2n1D",
"parentPublication": {
"id": "proceedings/ismar/2014/6184/0",
"title": "2014 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismarw/2016/3740/0/07836522",
"title": "Exploring Immersive AR Instructions for Procedural Tasks: The Role of Depth, Motion, and Volumetric Representations",
"doi": null,
"abstractUrl": "/proceedings-article/ismarw/2016/07836522/12OmNxETajV",
"parentPublication": {
"id": "proceedings/ismarw/2016/3740/0",
"title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2013/4795/0/06549411",
"title": "Early steps towards understanding text legibility in handheld augmented reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2013/06549411/12OmNy6HQV1",
"parentPublication": {
"id": "proceedings/vr/2013/4795/0",
"title": "2013 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iiai-aai/2016/8985/0/8985b180",
"title": "Teachers' and Students' Perceptions toward Augmented Reality Materials",
"doi": null,
"abstractUrl": "/proceedings-article/iiai-aai/2016/8985b180/12OmNyKa66B",
"parentPublication": {
"id": "proceedings/iiai-aai/2016/8985/0",
"title": "2016 5th IIAI International Congress on Advanced Applied Informatics (IIAI-AAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/co/2012/07/mco2012070026",
"title": "Anywhere Interfaces Using Handheld Augmented Reality",
"doi": null,
"abstractUrl": "/magazine/co/2012/07/mco2012070026/13rRUxYrbPM",
"parentPublication": {
"id": "mags/co",
"title": "Computer",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2019/4765/0/476500a001",
"title": "Low-Cost Real-Time Mental Load Adaptation for Augmented Reality Instructions - A Feasibility Study",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2019/476500a001/1gysjlovPr2",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2019/4765/0",
"title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNCcbEdf",
"title": "2017 Seventh International Conference on Affective Computing and Intelligent Interaction (ACII)",
"acronym": "acii",
"groupId": "1002992",
"volume": "0",
"displayVolume": "0",
"year": "2017",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNqAU6qm",
"doi": "10.1109/ACII.2017.8273664",
"title": "Dynamic emotion transitions based on emotion hysteresis",
"normalizedTitle": "Dynamic emotion transitions based on emotion hysteresis",
"abstract": "This research attempts to extract and quantify dynamic emotion transition (DET) psychophysiological features. The DET features are defined and explored based on neurophysiological dynamics underlying emotion hysteresis theory, which could potentially explain individual differences in emotion processing of environmental changing stimuli. Directional changing stimuli probes are used for attentive and preattentive stimuli-induced emotional processing. Three progressive paradigms examine hysteresis with multimodal emotion detection techniques including central and peripheral physiological measurements. The DET, an individualized feature, representing abrupt changes in emotion responses, could lead to an enhanced Brain-Computer Interface system for monitoring stress and detecting changes in emotion. This could then provide timely restorative feedback stimuli for emotion regulation training. Moreover, this approach is promising for mental disorder diagnostic and prognostic tools.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This research attempts to extract and quantify dynamic emotion transition (DET) psychophysiological features. The DET features are defined and explored based on neurophysiological dynamics underlying emotion hysteresis theory, which could potentially explain individual differences in emotion processing of environmental changing stimuli. Directional changing stimuli probes are used for attentive and preattentive stimuli-induced emotional processing. Three progressive paradigms examine hysteresis with multimodal emotion detection techniques including central and peripheral physiological measurements. The DET, an individualized feature, representing abrupt changes in emotion responses, could lead to an enhanced Brain-Computer Interface system for monitoring stress and detecting changes in emotion. This could then provide timely restorative feedback stimuli for emotion regulation training. Moreover, this approach is promising for mental disorder diagnostic and prognostic tools.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This research attempts to extract and quantify dynamic emotion transition (DET) psychophysiological features. The DET features are defined and explored based on neurophysiological dynamics underlying emotion hysteresis theory, which could potentially explain individual differences in emotion processing of environmental changing stimuli. Directional changing stimuli probes are used for attentive and preattentive stimuli-induced emotional processing. Three progressive paradigms examine hysteresis with multimodal emotion detection techniques including central and peripheral physiological measurements. The DET, an individualized feature, representing abrupt changes in emotion responses, could lead to an enhanced Brain-Computer Interface system for monitoring stress and detecting changes in emotion. This could then provide timely restorative feedback stimuli for emotion regulation training. Moreover, this approach is promising for mental disorder diagnostic and prognostic tools.",
"fno": "08273664",
"keywords": [
"Behavioural Sciences Computing",
"Emotion Recognition",
"Learning Artificial Intelligence",
"Neurophysiology",
"Psychology",
"DET Psychophysiological Features",
"Restorative Feedback Stimuli",
"Mental Disorder Diagnostic Tools",
"Mental Disorder Prognostic Tools",
"Emotion Regulation Training",
"Emotion Responses",
"Peripheral Physiological Measurements",
"Multimodal Emotion Detection Techniques",
"Stimuli Induced Emotional Processing",
"Stimuli Probes",
"Environmental Changing Stimuli",
"Emotion Processing",
"Emotion Hysteresis Theory",
"Neurophysiological Dynamics",
"Dynamic Emotion Transition Psychophysiological Features",
"Electroencephalography",
"Training",
"Stress",
"Hysteresis",
"Psychology",
"Feature Extraction",
"Magnetic Hysteresis"
],
"authors": [
{
"affiliation": "Department of Design and Environmental Analysis, Cornell University, Ithaca, NY, USA",
"fullName": "Yu Hao",
"givenName": "Yu",
"surname": "Hao",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "acii",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2017-10-01T00:00:00",
"pubType": "proceedings",
"pages": "606-610",
"year": "2017",
"issn": "2156-8111",
"isbn": "978-1-5386-0563-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08273663",
"articleId": "12OmNxIRxUN",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08273665",
"articleId": "12OmNyQ7FNb",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iwcse/2009/3881/1/3881a047",
"title": "Research of 3-Phase Reversible Rectifier Based on Current Hysteresis Regulator",
"doi": null,
"abstractUrl": "/proceedings-article/iwcse/2009/3881a047/12OmNBTJIOt",
"parentPublication": {
"id": "proceedings/iwcse/2009/3881/1",
"title": "Computer Science and Engineering, International Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icca/2003/7777/0/01595000",
"title": "Adaptive Robust Control of Dynamic Systems with Unknown Input Hysteresis",
"doi": null,
"abstractUrl": "/proceedings-article/icca/2003/01595000/12OmNCeaPTP",
"parentPublication": {
"id": "proceedings/icca/2003/7777/0",
"title": "4th International Conference on Control and Automation. Final Program and Book of Abstracts",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icicta/2011/289/1/05750636",
"title": "Identification of Dynamic Hysteresis Based on Duhem Model",
"doi": null,
"abstractUrl": "/proceedings-article/icicta/2011/05750636/12OmNvF83rh",
"parentPublication": {
"id": null,
"title": null,
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cis/2012/4896/0/4896a200",
"title": "Neuron-MOS Based Schmitt Trigger with Controllable Hysteresis",
"doi": null,
"abstractUrl": "/proceedings-article/cis/2012/4896a200/12OmNyjccy5",
"parentPublication": {
"id": "proceedings/cis/2012/4896/0",
"title": "2012 Eighth International Conference on Computational Intelligence and Security",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/date/2006/1/1/01656966",
"title": "HDL Models of Ferromagnetic Core Hysteresis Using Timeless Discretisation of the Magnetic Slope",
"doi": null,
"abstractUrl": "/proceedings-article/date/2006/01656966/12OmNyuy9Rs",
"parentPublication": {
"id": "proceedings/date/2006/1/1",
"title": "2006 Design, Automation and Test in Europe",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/itme/2015/8302/0/8302a037",
"title": "An ERP Study of Implicit Emotion Processing in Depressed Suicide Attempters",
"doi": null,
"abstractUrl": "/proceedings-article/itme/2015/8302a037/12OmNyv7m3G",
"parentPublication": {
"id": "proceedings/itme/2015/8302/0",
"title": "2015 7th International Conference on Information Technology in Medicine and Education (ITME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ecmsm/2017/5582/0/07945864",
"title": "Measurement of magnetic hysteresis swelling-up with frequency: Impact on iron losses in electric machine sheets",
"doi": null,
"abstractUrl": "/proceedings-article/ecmsm/2017/07945864/12OmNzsJ7wj",
"parentPublication": {
"id": "proceedings/ecmsm/2017/5582/0",
"title": "2017 IEEE International Workshop of Electronics, Control, Measurement, Signals and their Application to Mechatronics (ECMSM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ta/5555/01/09748967",
"title": "Contrastive Learning of Subject-Invariant EEG Representations for Cross-Subject Emotion Recognition",
"doi": null,
"abstractUrl": "/journal/ta/5555/01/09748967/1CiyVjtBkK4",
"parentPublication": {
"id": "trans/ta",
"title": "IEEE Transactions on Affective Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ta/2022/03/09200685",
"title": "Enhancement of Movement Intention Detection Using EEG Signals Responsive to Emotional Music Stimulus",
"doi": null,
"abstractUrl": "/journal/ta/2022/03/09200685/1ndVcP6texO",
"parentPublication": {
"id": "trans/ta",
"title": "IEEE Transactions on Affective Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icisce/2020/6406/0/640600a977",
"title": "Parameter identification and verification of Preisach hysteresis model based on Levenberg–Marquart algorithm",
"doi": null,
"abstractUrl": "/proceedings-article/icisce/2020/640600a977/1x3l4CCfHQk",
"parentPublication": {
"id": "proceedings/icisce/2020/6406/0",
"title": "2020 7th International Conference on Information Science and Control Engineering (ICISCE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNAXxWQv",
"title": "2016 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)",
"acronym": "bibm",
"groupId": "1001586",
"volume": "0",
"displayVolume": "0",
"year": "2016",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNvAAtv0",
"doi": "10.1109/BIBM.2016.7822545",
"title": "Emotion recognition from multi-channel EEG data through Convolutional Recurrent Neural Network",
"normalizedTitle": "Emotion recognition from multi-channel EEG data through Convolutional Recurrent Neural Network",
"abstract": "Automatic emotion recognition based on multi-channel neurophysiological signals, as a challenging pattern recognition task, is becoming an important computer-aided method for emotional disorder diagnoses in neurology and psychiatry. Traditional approaches require designing and extracting a range of features from single or multiple channel signals based on extensive domain knowledge. This may be an obstacle for non-domain experts. Moreover, traditional feature fusion method can not fully utilize correlation information between different channels. In this paper, we propose a preprocessing method that encapsulates the multi-channel neurophysiological signals into grid-like frames through wavelet and scalogram transform. We further design a hybrid deep learning model that combines the `Convolutional Neural Network (CNN)' and `Recurrent Neural Network (RNN)', for extracting task-related features, mining inter-channel correlation and incorporating contextual information from those frames. Experiments are carried out, in a trial-level emotion recognition task, on the DEAP benchmarking dataset. Our results demonstrate the effectiveness of the proposed methods, with respect to the emotional dimensions of Valence and Arousal.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Automatic emotion recognition based on multi-channel neurophysiological signals, as a challenging pattern recognition task, is becoming an important computer-aided method for emotional disorder diagnoses in neurology and psychiatry. Traditional approaches require designing and extracting a range of features from single or multiple channel signals based on extensive domain knowledge. This may be an obstacle for non-domain experts. Moreover, traditional feature fusion method can not fully utilize correlation information between different channels. In this paper, we propose a preprocessing method that encapsulates the multi-channel neurophysiological signals into grid-like frames through wavelet and scalogram transform. We further design a hybrid deep learning model that combines the `Convolutional Neural Network (CNN)' and `Recurrent Neural Network (RNN)', for extracting task-related features, mining inter-channel correlation and incorporating contextual information from those frames. Experiments are carried out, in a trial-level emotion recognition task, on the DEAP benchmarking dataset. Our results demonstrate the effectiveness of the proposed methods, with respect to the emotional dimensions of Valence and Arousal.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Automatic emotion recognition based on multi-channel neurophysiological signals, as a challenging pattern recognition task, is becoming an important computer-aided method for emotional disorder diagnoses in neurology and psychiatry. Traditional approaches require designing and extracting a range of features from single or multiple channel signals based on extensive domain knowledge. This may be an obstacle for non-domain experts. Moreover, traditional feature fusion method can not fully utilize correlation information between different channels. In this paper, we propose a preprocessing method that encapsulates the multi-channel neurophysiological signals into grid-like frames through wavelet and scalogram transform. We further design a hybrid deep learning model that combines the 'Convolutional Neural Network (CNN)' and 'Recurrent Neural Network (RNN)', for extracting task-related features, mining inter-channel correlation and incorporating contextual information from those frames. Experiments are carried out, in a trial-level emotion recognition task, on the DEAP benchmarking dataset. Our results demonstrate the effectiveness of the proposed methods, with respect to the emotional dimensions of Valence and Arousal.",
"fno": "07822545",
"keywords": [
"Data Mining",
"Electroencephalography",
"Emotion Recognition",
"Feature Extraction",
"Medical Disorders",
"Medical Signal Processing",
"Neurophysiology",
"Wavelet Transforms",
"Multichannel EEG Data",
"Convolutional Recurrent Neural Network",
"Automatic Emotion Recognition",
"Multichannel Neurophysiological Signals",
"Pattern Recognition Task",
"Computer Aided Method",
"Emotional Disorder Diagnoses",
"Neurology",
"Psychiatry",
"Single Channel Signals",
"Multiple Channel Signals",
"Extensive Domain Knowledge",
"Nondomain Experts",
"Scalogram Transform",
"Wavelet Transform",
"Grid Like Frames",
"Hybrid Deep Learning Model",
"Convolutional Neural Network",
"Recurrent Neural Network",
"CNN",
"RNN",
"Task Related Feature Extraction",
"Interchannel Correlation Mining",
"Contextual Information",
"Trial Level Emotion Recognition Task",
"DEAP Benchmarking Dataset",
"Valence And Arousal",
"Emotional Dimensions",
"Emotion Recognition",
"Electroencephalography",
"Feature Extraction",
"Continuous Wavelet Transforms",
"Correlation",
"CNN",
"EEG",
"Emotion Recognition",
"LSTM",
"Physiological Signal"
],
"authors": [
{
"affiliation": "Tianjin Key Laboratory of Cognitive Computing and Application, Tianjin University, China",
"fullName": "Xiang Li",
"givenName": "Xiang",
"surname": "Li",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Tianjin Key Laboratory of Cognitive Computing and Application, Tianjin University, China",
"fullName": "Dawei Song",
"givenName": "Dawei",
"surname": "Song",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Tianjin Key Laboratory of Cognitive Computing and Application, Tianjin University, China",
"fullName": "Peng Zhang",
"givenName": "Peng",
"surname": "Zhang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Tianjin Key Lab. of Cognitive Comput. & Applic., Tianjin Univ., Tianjin, China",
"fullName": "Guangliang Yu",
"givenName": "Guangliang",
"surname": "Yu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Tianjin Key Laboratory of Cognitive Computing and Application, Tianjin University, China",
"fullName": "Yuexian Hou",
"givenName": "Yuexian",
"surname": "Hou",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "School of Information Science and Engineering, Lanzhou University, China",
"fullName": "Bin Hu",
"givenName": "Bin",
"surname": "Hu",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "bibm",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2016-12-01T00:00:00",
"pubType": "proceedings",
"pages": "352-359",
"year": "2016",
"issn": null,
"isbn": "978-1-5090-1611-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07822544",
"articleId": "12OmNzZEAHd",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07822546",
"articleId": "12OmNzwpUcf",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ism/2014/4311/0/4311a277",
"title": "Personalized Music Emotion Recognition Using Electroencephalography (EEG)",
"doi": null,
"abstractUrl": "/proceedings-article/ism/2014/4311a277/12OmNwDj199",
"parentPublication": {
"id": "proceedings/ism/2014/4311/0",
"title": "2014 IEEE International Symposium on Multimedia (ISM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ta/2019/03/07946165",
"title": "Emotions Recognition Using EEG Signals: A Survey",
"doi": null,
"abstractUrl": "/journal/ta/2019/03/07946165/13rRUwd9CJZ",
"parentPublication": {
"id": "trans/ta",
"title": "IEEE Transactions on Affective Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ta/2014/03/06858031",
"title": "Feature Extraction and Selection for Emotion Recognition from EEG",
"doi": null,
"abstractUrl": "/journal/ta/2014/03/06858031/13rRUy0qnK7",
"parentPublication": {
"id": "trans/ta",
"title": "IEEE Transactions on Affective Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibm/2021/0126/0/09669646",
"title": "Intelligent Feature Selection for EEG Emotion Classification",
"doi": null,
"abstractUrl": "/proceedings-article/bibm/2021/09669646/1A9VH7ypmJa",
"parentPublication": {
"id": "proceedings/bibm/2021/0126/0",
"title": "2021 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/chase/2021/3965/0/396500a121",
"title": "LSTM vs Plot-based CNN for EEG Emotion Detection Tasks",
"doi": null,
"abstractUrl": "/proceedings-article/chase/2021/396500a121/1AIMENHSIyQ",
"parentPublication": {
"id": "proceedings/chase/2021/3965/0",
"title": "2021 IEEE/ACM Conference on Connected Health: Applications, Systems and Engineering Technologies (CHASE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cmbs/2022/6770/0/677000a366",
"title": "TcT: Temporal and channel Transformer for EEG-based Emotion Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/cmbs/2022/677000a366/1GhW2Gv4v60",
"parentPublication": {
"id": "proceedings/cmbs/2022/6770/0",
"title": "2022 IEEE 35th International Symposium on Computer-Based Medical Systems (CBMS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ta/5555/01/09946368",
"title": "MMPosE: Movie-induced Multi-label Positive Emotion Classification Through EEG Signals",
"doi": null,
"abstractUrl": "/journal/ta/5555/01/09946368/1IdqYG8gCvC",
"parentPublication": {
"id": "trans/ta",
"title": "IEEE Transactions on Affective Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icftic/2022/2195/0/10075315",
"title": "Graph Convolutional Neural Network for EEG Emotion Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/icftic/2022/10075315/1LRl2AN4aqs",
"parentPublication": {
"id": "proceedings/icftic/2022/2195/0",
"title": "2022 4th International Conference on Frontiers Technology of Information and Computer (ICFTIC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ta/2023/01/09204431",
"title": "EEG-Based Emotion Recognition via Channel-Wise Attention and Self Attention",
"doi": null,
"abstractUrl": "/journal/ta/2023/01/09204431/1nkyOYiLAAg",
"parentPublication": {
"id": "trans/ta",
"title": "IEEE Transactions on Affective Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ec/2022/03/09448460",
"title": "Graph-Embedded Convolutional Neural Network for Image-Based EEG Emotion Recognition",
"doi": null,
"abstractUrl": "/journal/ec/2022/03/09448460/1ugE9joZsl2",
"parentPublication": {
"id": "trans/ec",
"title": "IEEE Transactions on Emerging Topics in Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "13bd1eJgoia",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2018",
"__typename": "ProceedingType"
},
"article": {
"id": "13bd1gCd7Sy",
"doi": "10.1109/VR.2018.8446275",
"title": "Using EEG to Decode Subjective Levels of Emotional Arousal During an Immersive VR Roller Coaster Ride",
"normalizedTitle": "Using EEG to Decode Subjective Levels of Emotional Arousal During an Immersive VR Roller Coaster Ride",
"abstract": "Emotional arousal is a key component of a user's experience in immersive virtual reality (VR). Subjective and highly dynamic in nature, emotional arousal involves the whole body and particularly the brain. However, it has been difficult to relate subjective emotional arousal to an objective, neurophysiological marker-especially in naturalistic settings. We tested the association between continuously changing states of emotional arousal and oscillatory power in the brain during a VR roller coaster experience. We used novel spatial filtering approaches to predict self-reported emotional arousal from the electroencephalogram (EEG) signal of 38 participants. Periods of high vs. low emotional arousal could be classified with accuracies significantly above chance level. Our results are consistent with prior findings regarding emotional arousal in less naturalistic settings. We demonstrate a new approach to decode states of subjective emotional arousal from continuous EEG data in an immersive VR experience.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Emotional arousal is a key component of a user's experience in immersive virtual reality (VR). Subjective and highly dynamic in nature, emotional arousal involves the whole body and particularly the brain. However, it has been difficult to relate subjective emotional arousal to an objective, neurophysiological marker-especially in naturalistic settings. We tested the association between continuously changing states of emotional arousal and oscillatory power in the brain during a VR roller coaster experience. We used novel spatial filtering approaches to predict self-reported emotional arousal from the electroencephalogram (EEG) signal of 38 participants. Periods of high vs. low emotional arousal could be classified with accuracies significantly above chance level. Our results are consistent with prior findings regarding emotional arousal in less naturalistic settings. We demonstrate a new approach to decode states of subjective emotional arousal from continuous EEG data in an immersive VR experience.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Emotional arousal is a key component of a user's experience in immersive virtual reality (VR). Subjective and highly dynamic in nature, emotional arousal involves the whole body and particularly the brain. However, it has been difficult to relate subjective emotional arousal to an objective, neurophysiological marker-especially in naturalistic settings. We tested the association between continuously changing states of emotional arousal and oscillatory power in the brain during a VR roller coaster experience. We used novel spatial filtering approaches to predict self-reported emotional arousal from the electroencephalogram (EEG) signal of 38 participants. Periods of high vs. low emotional arousal could be classified with accuracies significantly above chance level. Our results are consistent with prior findings regarding emotional arousal in less naturalistic settings. We demonstrate a new approach to decode states of subjective emotional arousal from continuous EEG data in an immersive VR experience.",
"fno": "08446275",
"keywords": [
"Electroencephalography",
"Feature Extraction",
"Brain Modeling",
"Virtual Reality",
"Oscillators",
"Resists",
"Neuroscience",
"Human Centered Computing Human Computer Interaction HCI HCI Design And Evaluation Methods Laboratory Experiments",
"Applied Computing Life And Medical Sciences Consumer Health"
],
"authors": [
{
"affiliation": "Berlin School of Mind and Brain, MPI Hum. Cog. & Brain Sci., Leipzig, Germany",
"fullName": "F. Klotzsche",
"givenName": "F.",
"surname": "Klotzsche",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Berlin School of Mind and Brain, Sussex Neuroscience, UK, Germany",
"fullName": "A. Mariola",
"givenName": "A.",
"surname": "Mariola",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "MPI Hum. Cog. & Brain Sci., Leipzig, Amsterdam Brain & Cogn, Netherlands, Germany",
"fullName": "S. Hofmann",
"givenName": "S.",
"surname": "Hofmann",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Berlin School of Mind and Brain, MPI Hum. Cog. & Brain Sci., Leipzig, Germany",
"fullName": "V. V. Nikulin",
"givenName": "V. V.",
"surname": "Nikulin",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Berlin School of Mind and Brain, MPI Hum. Cog. & Brain Sci., Leipzig, Germany",
"fullName": "A. Villringer",
"givenName": "A.",
"surname": "Villringer",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Berlin School of Mind and Brain, MPI Hum. Cog. & Brain Sci., Leipzig, Germany",
"fullName": "M. Gaebler",
"givenName": "M.",
"surname": "Gaebler",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2018-03-01T00:00:00",
"pubType": "proceedings",
"pages": "605-606",
"year": "2018",
"issn": null,
"isbn": "978-1-5386-3365-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08446175",
"articleId": "13bd1eTtWYo",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08446614",
"articleId": "13bd1h03qOq",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/wi-iat/2015/9618/2/9618b376",
"title": "Automatic Sleep Arousal Detection Based on C-ELM",
"doi": null,
"abstractUrl": "/proceedings-article/wi-iat/2015/9618b376/12OmNBKW9D4",
"parentPublication": {
"id": "proceedings/wi-iat/2015/9618/2",
"title": "2015 IEEE / WIC / ACM International Conference on Web Intelligence and Intelligent Agent Technology (WI-IAT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/acit-csi/2015/9642/0/9642a213",
"title": "Evaluating Optimal Arousal Level during the Task Based on Performance and Positive Mood: Extracting Indices Reflecting the Relationship among Arousal, Performance, and Mood",
"doi": null,
"abstractUrl": "/proceedings-article/acit-csi/2015/9642a213/12OmNyo1nRN",
"parentPublication": {
"id": "proceedings/acit-csi/2015/9642/0",
"title": "2015 3rd International Conference on Applied Computing and Information Technology/2nd International Conference on Computational Science and Intelligence (ACIT-CSI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446046",
"title": "The Effect of Immersion on Emotional Responses to Film Viewing in a Virtual Environment",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446046/13bd1gCd7Th",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aivr/2018/9269/0/926900a128",
"title": "Decoding Subjective Emotional Arousal during a Naturalistic VR Experience from EEG Using LSTMs",
"doi": null,
"abstractUrl": "/proceedings-article/aivr/2018/926900a128/17D45VObpOo",
"parentPublication": {
"id": "proceedings/aivr/2018/9269/0",
"title": "2018 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ta/2018/04/07835688",
"title": "Real-Time Movie-Induced Discrete Emotion Recognition from EEG Signals",
"doi": null,
"abstractUrl": "/journal/ta/2018/04/07835688/17D45XvMceV",
"parentPublication": {
"id": "trans/ta",
"title": "IEEE Transactions on Affective Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/11/09873987",
"title": "Neurophysiological and Subjective Analysis of VR Emotion Induction Paradigm",
"doi": null,
"abstractUrl": "/journal/tg/2022/11/09873987/1GjwIFarTz2",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090495",
"title": "Automatic Detection of Cybersickness from Physiological Signal in a Virtual Roller Coaster Simulation",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090495/1jIximIpClq",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ta/2023/01/09229513",
"title": "Estimating Affective Taste Experience Using Combined Implicit Behavioral and Neurophysiological Measures",
"doi": null,
"abstractUrl": "/journal/ta/2023/01/09229513/1o3nfbzpzhe",
"parentPublication": {
"id": "trans/ta",
"title": "IEEE Transactions on Affective Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icectt/2020/9928/0/992800a213",
"title": "Emotional Response Increments Induced by Equivalent Enhancement of Different Valence Films",
"doi": null,
"abstractUrl": "/proceedings-article/icectt/2020/992800a213/1oa5ey8lRaE",
"parentPublication": {
"id": "proceedings/icectt/2020/9928/0",
"title": "2020 5th International Conference on Electromechanical Control Technology and Transportation (ICECTT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a625",
"title": "Investigating motor skill training and user arousal levels in VR : Pilot Study and Observations",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a625/1tnXrJx6cuc",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "17D45VtKisA",
"title": "2018 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)",
"acronym": "aivr",
"groupId": "1830004",
"volume": "0",
"displayVolume": "0",
"year": "2018",
"__typename": "ProceedingType"
},
"article": {
"id": "17D45VObpOo",
"doi": "10.1109/AIVR.2018.00026",
"title": "Decoding Subjective Emotional Arousal during a Naturalistic VR Experience from EEG Using LSTMs",
"normalizedTitle": "Decoding Subjective Emotional Arousal during a Naturalistic VR Experience from EEG Using LSTMs",
"abstract": "Emotional arousal (EA) denotes a heightened state of activation that has both subjective and physiological aspects. The neurophysiology of subjective EA, among other mind-brain-body phenomena, can best be tested when subjects are stimulated in a natural fashion. Immersive virtual reality (VR) enables naturalistic experimental stimulation and thus promises to increase the ecological validity of research findings i.e., how well they generalize to real-life settings. In this study, 45 participants experienced virtual rollercoaster rides while their brain activity was recorded using electroencephalography (EEG). A Long Short-Term Memory (LSTM) recurrent neural network (RNN) was then trained on the alpha-frequency (8-12 Hz) component of the EEG signal (input) and the retrospectively acquired continuous reports of subjective EA (target). With the LSTM-based model, subjective EA could be predicted significantly above chance level. This demonstrates a novel EEG-based decoding approach for subjective states of experience in naturalistic research designs using VR.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Emotional arousal (EA) denotes a heightened state of activation that has both subjective and physiological aspects. The neurophysiology of subjective EA, among other mind-brain-body phenomena, can best be tested when subjects are stimulated in a natural fashion. Immersive virtual reality (VR) enables naturalistic experimental stimulation and thus promises to increase the ecological validity of research findings i.e., how well they generalize to real-life settings. In this study, 45 participants experienced virtual rollercoaster rides while their brain activity was recorded using electroencephalography (EEG). A Long Short-Term Memory (LSTM) recurrent neural network (RNN) was then trained on the alpha-frequency (8-12 Hz) component of the EEG signal (input) and the retrospectively acquired continuous reports of subjective EA (target). With the LSTM-based model, subjective EA could be predicted significantly above chance level. This demonstrates a novel EEG-based decoding approach for subjective states of experience in naturalistic research designs using VR.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Emotional arousal (EA) denotes a heightened state of activation that has both subjective and physiological aspects. The neurophysiology of subjective EA, among other mind-brain-body phenomena, can best be tested when subjects are stimulated in a natural fashion. Immersive virtual reality (VR) enables naturalistic experimental stimulation and thus promises to increase the ecological validity of research findings i.e., how well they generalize to real-life settings. In this study, 45 participants experienced virtual rollercoaster rides while their brain activity was recorded using electroencephalography (EEG). A Long Short-Term Memory (LSTM) recurrent neural network (RNN) was then trained on the alpha-frequency (8-12 Hz) component of the EEG signal (input) and the retrospectively acquired continuous reports of subjective EA (target). With the LSTM-based model, subjective EA could be predicted significantly above chance level. This demonstrates a novel EEG-based decoding approach for subjective states of experience in naturalistic research designs using VR.",
"fno": "926900a128",
"keywords": [
"Electroencephalography",
"Medical Signal Processing",
"Neurophysiology",
"Recurrent Neural Nets",
"Virtual Reality",
"Naturalistic VR Experience",
"Subjective Aspects",
"Physiological Aspects",
"LSTM",
"Neurophysiology",
"Electroencephalography",
"Alpha Frequency Component",
"EEG Signal",
"EEG Based Decoding Approach",
"Subjective Emotional Arousal",
"Naturalistic Research Designs",
"Long Short Term Memory Recurrent Neural Network",
"Brain Activity",
"Virtual Rollercoaster Rides",
"Ecological Validity",
"Naturalistic Experimental Stimulation",
"Immersive Virtual Reality",
"Natural Fashion",
"Mind Brain Body Phenomena",
"Subjective EA",
"Frequency 8 0 Hz To 12 0 Hz",
"Brain Modeling",
"Electroencephalography",
"Feature Extraction",
"Decoding",
"Biological System Modeling",
"Predictive Models",
"Time Series Analysis",
"Subjective Experience",
"Neural Decoding",
"Emotional Arousal",
"Continuous Time Series",
"Naturalistic Research Designs"
],
"authors": [
{
"affiliation": null,
"fullName": "Simon M. Hofmann",
"givenName": "Simon M.",
"surname": "Hofmann",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Felix Klotzsche",
"givenName": "Felix",
"surname": "Klotzsche",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Alberto Mariola",
"givenName": "Alberto",
"surname": "Mariola",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Vadim V. Nikulin",
"givenName": "Vadim V.",
"surname": "Nikulin",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Arno Villringer",
"givenName": "Arno",
"surname": "Villringer",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Michael Gaebler",
"givenName": "Michael",
"surname": "Gaebler",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "aivr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2018-12-01T00:00:00",
"pubType": "proceedings",
"pages": "128-131",
"year": "2018",
"issn": null,
"isbn": "978-1-5386-9269-1",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "926900a124",
"articleId": "17D45WrVg3D",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "926900a132",
"articleId": "17D45VObpMD",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/bibm/2017/3050/0/08217786",
"title": "Emotion classification using deep neural networks and emotional patches",
"doi": null,
"abstractUrl": "/proceedings-article/bibm/2017/08217786/12OmNwtWfTI",
"parentPublication": {
"id": "proceedings/bibm/2017/3050/0",
"title": "2017 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibm/2017/3050/0/08217975",
"title": "Does tang poetry affect human emotional state? A pilot study by EEG",
"doi": null,
"abstractUrl": "/proceedings-article/bibm/2017/08217975/12OmNxwWoEb",
"parentPublication": {
"id": "proceedings/bibm/2017/3050/0",
"title": "2017 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446275",
"title": "Using EEG to Decode Subjective Levels of Emotional Arousal During an Immersive VR Roller Coaster Ride",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446275/13bd1gCd7Sy",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibm/2022/6819/0/09995012",
"title": "Prediction Models for Epilepsy Detection on the EEG Signal",
"doi": null,
"abstractUrl": "/proceedings-article/bibm/2022/09995012/1JC3aN16KiI",
"parentPublication": {
"id": "proceedings/bibm/2022/6819/0",
"title": "2022 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cse/2022/9633/0/963300a032",
"title": "Electroencephalogram Emotion Recognition Based on Three-Dimensional Feature Matrix and Multivariate Neural Network",
"doi": null,
"abstractUrl": "/proceedings-article/cse/2022/963300a032/1Lz249wyDeM",
"parentPublication": {
"id": "proceedings/cse/2022/9633/0",
"title": "2022 IEEE 25th International Conference on Computational Science and Engineering (CSE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ic3/2019/3591/0/08844893",
"title": "Temporal measures for analysis of emotional states from human electroencephalography signals",
"doi": null,
"abstractUrl": "/proceedings-article/ic3/2019/08844893/1dx8p9c2acw",
"parentPublication": {
"id": "proceedings/ic3/2019/3591/0",
"title": "2019 Twelfth International Conference on Contemporary Computing (IC3)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pdp/2020/6582/0/09092368",
"title": "Multi-level Binarized LSTM in EEG Classification for Wearable Devices",
"doi": null,
"abstractUrl": "/proceedings-article/pdp/2020/09092368/1jPb1FStNO8",
"parentPublication": {
"id": "proceedings/pdp/2020/6582/0",
"title": "2020 28th Euromicro International Conference on Parallel, Distributed and Network-Based Processing (PDP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ta/2022/03/09154557",
"title": "An Efficient LSTM Network for Emotion Recognition From Multichannel EEG Signals",
"doi": null,
"abstractUrl": "/journal/ta/2022/03/09154557/1lZzEqvllxC",
"parentPublication": {
"id": "trans/ta",
"title": "IEEE Transactions on Affective Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ta/2022/03/09200685",
"title": "Enhancement of Movement Intention Detection Using EEG Signals Responsive to Emotional Music Stimulus",
"doi": null,
"abstractUrl": "/journal/ta/2022/03/09200685/1ndVcP6texO",
"parentPublication": {
"id": "trans/ta",
"title": "IEEE Transactions on Affective Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ta/5555/01/09609641",
"title": "EEG-based Emotional Video Classification via Learning Connectivity Structure",
"doi": null,
"abstractUrl": "/journal/ta/5555/01/09609641/1yoxw3nxqcE",
"parentPublication": {
"id": "trans/ta",
"title": "IEEE Transactions on Affective Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1Df7OJsGc48",
"title": "2022 IEEE International Conference on Pervasive Computing and Communications Workshops and other Affiliated Events (PerCom Workshops)",
"acronym": "percom-workshops",
"groupId": "1000552",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1Df82pGW23e",
"doi": "10.1109/PerComWorkshops53856.2022.9767281",
"title": "An Exploratory Analysis of Interactive VR-Based Framework for Multi-Componential Analysis of Emotion",
"normalizedTitle": "An Exploratory Analysis of Interactive VR-Based Framework for Multi-Componential Analysis of Emotion",
"abstract": "In this work, we present a preliminary analysis on emotion formation using a full Component Process Model (CPM), which considers emotional experience as multi-processes with synchronized changes across five main components: Appraisal, Motivation, Physiology, Expression and Feeling. We propose an empirical data-driven setup for generating an affective dataset using interactive Virtual Reality (VR) games with objective and subjective measures. We conducted a pilot study with several participants who played 28 VR games. The result confirms the suitability of the proposed experimental setup for investigating emotional experience. VR games were able to trigger diverse emotions, as evidenced by the collected self-reports. Using a CPM-based survey, we found that VR games could activate emotional experience based on cognitive appraisal, motivation, physiology, and facial expressions. We showed that more than two dimensions (expressions, physiology, novelty, valence, motivation, agency, arousal) are required to define the emotional experience. We also analyzed the relationship between discrete emotions and CPM components. These preliminary results suggest that CPM components may have different levels of involvement depending on the experienced emotion.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this work, we present a preliminary analysis on emotion formation using a full Component Process Model (CPM), which considers emotional experience as multi-processes with synchronized changes across five main components: Appraisal, Motivation, Physiology, Expression and Feeling. We propose an empirical data-driven setup for generating an affective dataset using interactive Virtual Reality (VR) games with objective and subjective measures. We conducted a pilot study with several participants who played 28 VR games. The result confirms the suitability of the proposed experimental setup for investigating emotional experience. VR games were able to trigger diverse emotions, as evidenced by the collected self-reports. Using a CPM-based survey, we found that VR games could activate emotional experience based on cognitive appraisal, motivation, physiology, and facial expressions. We showed that more than two dimensions (expressions, physiology, novelty, valence, motivation, agency, arousal) are required to define the emotional experience. We also analyzed the relationship between discrete emotions and CPM components. These preliminary results suggest that CPM components may have different levels of involvement depending on the experienced emotion.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this work, we present a preliminary analysis on emotion formation using a full Component Process Model (CPM), which considers emotional experience as multi-processes with synchronized changes across five main components: Appraisal, Motivation, Physiology, Expression and Feeling. We propose an empirical data-driven setup for generating an affective dataset using interactive Virtual Reality (VR) games with objective and subjective measures. We conducted a pilot study with several participants who played 28 VR games. The result confirms the suitability of the proposed experimental setup for investigating emotional experience. VR games were able to trigger diverse emotions, as evidenced by the collected self-reports. Using a CPM-based survey, we found that VR games could activate emotional experience based on cognitive appraisal, motivation, physiology, and facial expressions. We showed that more than two dimensions (expressions, physiology, novelty, valence, motivation, agency, arousal) are required to define the emotional experience. We also analyzed the relationship between discrete emotions and CPM components. These preliminary results suggest that CPM components may have different levels of involvement depending on the experienced emotion.",
"fno": "09767281",
"keywords": [
"Cognition",
"Computer Games",
"Interactive Systems",
"Psychology",
"Virtual Reality",
"Exploratory Analysis",
"Interactive VR Based Framework",
"Multicomponential Analysis",
"Emotion Formation",
"Component Process Model",
"Emotional Experience",
"Physiology",
"Interactive Virtual Reality Games",
"VR Games",
"CPM",
"Appraisal",
"Motivation",
"Expression",
"Feeling",
"Atmospheric Measurements",
"Conferences",
"Games",
"Virtual Reality",
"Particle Measurements",
"Solids",
"Physiology",
"Virtual Reality",
"Component Process Model",
"Emotion Recognition"
],
"authors": [
{
"affiliation": "University of New South Wales",
"fullName": "Rukshani Somarathna",
"givenName": "Rukshani",
"surname": "Somarathna",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of New South Wales",
"fullName": "Tomasz Bednarz",
"givenName": "Tomasz",
"surname": "Bednarz",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of New South Wales",
"fullName": "Gelareh Mohammadi",
"givenName": "Gelareh",
"surname": "Mohammadi",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "percom-workshops",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-03-01T00:00:00",
"pubType": "proceedings",
"pages": "353-358",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-1647-4",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "09767306",
"articleId": "1Df8fpbZB4c",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09767230",
"articleId": "1Df8cpASV2M",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2017/6647/0/07892315",
"title": "The effect of geometric realism on presence in a virtual reality game",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892315/12OmNBTawwY",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2017/6647/0/07892286",
"title": "Comparing VR and non-VR driving simulations: An experimental user study",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892286/12OmNxymobo",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vs-games/2018/7123/0/08493419",
"title": "Effects of Graphical Styles on Emotional States for VR-Supported Psychotherapy",
"doi": null,
"abstractUrl": "/proceedings-article/vs-games/2018/08493419/14tNJnLIk5b",
"parentPublication": {
"id": "proceedings/vs-games/2018/7123/0",
"title": "2018 10th International Conference on Virtual Worlds and Games for Serious Applications (VS-Games)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/var4good/2018/5977/0/08576881",
"title": "Transformative Experiences Become More Accessible Through Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/var4good/2018/08576881/17D45VTRoxP",
"parentPublication": {
"id": "proceedings/var4good/2018/5977/0",
"title": "2018 IEEE Workshop on Augmented and Virtual Realities for Good (VAR4Good)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/11/09873987",
"title": "Neurophysiological and Subjective Analysis of VR Emotion Induction Paradigm",
"doi": null,
"abstractUrl": "/journal/tg/2022/11/09873987/1GjwIFarTz2",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aivr/2022/5725/0/572500a193",
"title": "Comparing Meditation and Immersive Virtual Environment for Relaxation",
"doi": null,
"abstractUrl": "/proceedings-article/aivr/2022/572500a193/1KmFfgROQxO",
"parentPublication": {
"id": "proceedings/aivr/2022/5725/0",
"title": "2022 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090457",
"title": "Affective Embodiment: The effect of avatar appearance and posture representation on emotions in VR",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090457/1jIxjXwO4HS",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2020/7675/0/767500a147",
"title": "An Exploratory Study for Designing Social Experience of Watching VR Movies Based on Audience’s Voice Comments",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2020/767500a147/1pBMiVCpEGY",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2020/7675/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/05/09386008",
"title": "Floor-vibration VR: Mitigating Cybersickness Using Whole-body Tactile Stimuli in Highly Realistic Vehicle Driving Experiences",
"doi": null,
"abstractUrl": "/journal/tg/2021/05/09386008/1seiz94oUco",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2021/1298/0/129800a437",
"title": "Focus Group on Social Virtual Reality in Social Virtual Reality: Effects on Emotion and Self-Awareness",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2021/129800a437/1yeQD8KNChO",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2021/1298/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1M661e6I5ig",
"title": "2022 10th International Conference on Affective Computing and Intelligent Interaction Workshops and Demos (ACIIW)",
"acronym": "aciiw",
"groupId": "10085961",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1M665PIA7L2",
"doi": "10.1109/ACIIW57231.2022.10086000",
"title": "Preliminary Study on the Transition of Bio-emotion using Aroma Stimuli",
"normalizedTitle": "Preliminary Study on the Transition of Bio-emotion using Aroma Stimuli",
"abstract": "Kansei engineering has a wide range of applications in design and industry. By considering factors related to Kansei and applying them to the product development and design process, user experience can be improved. To evaluate Kansei quantitatively, researchers used questionnaires such as self-assessment manikin (SAM) in the past years. Recently, researchers have measured physiological signals associated with a person's emotional state, such as electroencephalography (EEG) and heart rate variability (HRV), to evaluate Kansei more objectively. Furthermore, previous studies have proposed a method to estimate emotions using EEG and HRV. They combined these two signals to estimate emotions based on a psychological model, Russell's circumplex model. In order to distinguish inferred emotions based on physiological signals from cognitive emotions, they named their method the bio-emotion estimation method. However, the accuracy of bio-emotion estimation method has not been fully validated. Meanwhile, the traditional SAM questionnaire can obtain users' cognitive emotions with arousal and valence value. The arousal and valence also correspond to the two factors of the bio-emotion estimation method. Therefore, we conducted a SAM questionnaire to verify the accuracy of the bio-emotion estimation method. The results showed that the measured EEG indexes were highly consistent with human arousal perception. Then, we applied the bio-emotion estimation method to present the emotional transitions induced by the stimuli.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Kansei engineering has a wide range of applications in design and industry. By considering factors related to Kansei and applying them to the product development and design process, user experience can be improved. To evaluate Kansei quantitatively, researchers used questionnaires such as self-assessment manikin (SAM) in the past years. Recently, researchers have measured physiological signals associated with a person's emotional state, such as electroencephalography (EEG) and heart rate variability (HRV), to evaluate Kansei more objectively. Furthermore, previous studies have proposed a method to estimate emotions using EEG and HRV. They combined these two signals to estimate emotions based on a psychological model, Russell's circumplex model. In order to distinguish inferred emotions based on physiological signals from cognitive emotions, they named their method the bio-emotion estimation method. However, the accuracy of bio-emotion estimation method has not been fully validated. Meanwhile, the traditional SAM questionnaire can obtain users' cognitive emotions with arousal and valence value. The arousal and valence also correspond to the two factors of the bio-emotion estimation method. Therefore, we conducted a SAM questionnaire to verify the accuracy of the bio-emotion estimation method. The results showed that the measured EEG indexes were highly consistent with human arousal perception. Then, we applied the bio-emotion estimation method to present the emotional transitions induced by the stimuli.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Kansei engineering has a wide range of applications in design and industry. By considering factors related to Kansei and applying them to the product development and design process, user experience can be improved. To evaluate Kansei quantitatively, researchers used questionnaires such as self-assessment manikin (SAM) in the past years. Recently, researchers have measured physiological signals associated with a person's emotional state, such as electroencephalography (EEG) and heart rate variability (HRV), to evaluate Kansei more objectively. Furthermore, previous studies have proposed a method to estimate emotions using EEG and HRV. They combined these two signals to estimate emotions based on a psychological model, Russell's circumplex model. In order to distinguish inferred emotions based on physiological signals from cognitive emotions, they named their method the bio-emotion estimation method. However, the accuracy of bio-emotion estimation method has not been fully validated. Meanwhile, the traditional SAM questionnaire can obtain users' cognitive emotions with arousal and valence value. The arousal and valence also correspond to the two factors of the bio-emotion estimation method. Therefore, we conducted a SAM questionnaire to verify the accuracy of the bio-emotion estimation method. The results showed that the measured EEG indexes were highly consistent with human arousal perception. Then, we applied the bio-emotion estimation method to present the emotional transitions induced by the stimuli.",
"fno": "10086000",
"keywords": [
"Cognition",
"Electroencephalography",
"Emotion Recognition",
"Human Computer Interaction",
"Medical Signal Processing",
"Physiology",
"Product Development",
"Psychology",
"Bio Emotion Estimation Method",
"Cognitive Emotions",
"Emotional Transitions",
"Kansei",
"Person",
"Users",
"Visualization",
"Biological System Modeling",
"Estimation",
"Psychology",
"Brain Modeling",
"Electroencephalography",
"Physiology",
"EEG",
"HRV",
"Emotion Transition",
"Kansei Evaluation",
"Aroma"
],
"authors": [
{
"affiliation": "Shibaura Institute of Technology,Department of Computer Science and Engineering,Tokyo,Japan",
"fullName": "Chen Feng",
"givenName": "Chen",
"surname": "Feng",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Innovative Global Program, Shibaura Institute of Technology,Tokyo,Japan",
"fullName": "Peeraya Sripian",
"givenName": "Peeraya",
"surname": "Sripian",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Innovative Global Program, Shibaura Institute of Technology,Tokyo,Japan",
"fullName": "Tipporn Laohakangvalvit",
"givenName": "Tipporn",
"surname": "Laohakangvalvit",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Research & Development Division, S.T.CORPORATION,Tokyo,Japan",
"fullName": "Toshiaki Tazawa",
"givenName": "Toshiaki",
"surname": "Tazawa",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Research & Development Division, S.T.CORPORATION,Tokyo,Japan",
"fullName": "Saaya Sakai",
"givenName": "Saaya",
"surname": "Sakai",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Shibaura Institute of Technology,Department of Computer Science and Engineering,Tokyo,Japan",
"fullName": "Midori Sugaya",
"givenName": "Midori",
"surname": "Sugaya",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "aciiw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-10-01T00:00:00",
"pubType": "proceedings",
"pages": "1-6",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-5490-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "10086007",
"articleId": "1M665m2mDuM",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "10086033",
"articleId": "1M669B19s0E",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "trans/ta/2018/03/08241761",
"title": "Emotion Analysis for Personality Inference from EEG Signals",
"doi": null,
"abstractUrl": "/journal/ta/2018/03/08241761/13rRUytF47R",
"parentPublication": {
"id": "trans/ta",
"title": "IEEE Transactions on Affective Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ta/5555/01/09698041",
"title": "EEG-based Emotion Recognition with Emotion Localization via Hierarchical Self-Attention",
"doi": null,
"abstractUrl": "/journal/ta/5555/01/09698041/1AC4g4XC8ww",
"parentPublication": {
"id": "trans/ta",
"title": "IEEE Transactions on Affective Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ta/5555/01/09745388",
"title": "Emotion Distribution Learning Based on Peripheral Physiological Signals",
"doi": null,
"abstractUrl": "/journal/ta/5555/01/09745388/1Cagp8869u8",
"parentPublication": {
"id": "trans/ta",
"title": "IEEE Transactions on Affective Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/11/09873987",
"title": "Neurophysiological and Subjective Analysis of VR Emotion Induction Paradigm",
"doi": null,
"abstractUrl": "/journal/tg/2022/11/09873987/1GjwIFarTz2",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ta/5555/01/09946368",
"title": "MMPosE: Movie-induced Multi-label Positive Emotion Classification Through EEG Signals",
"doi": null,
"abstractUrl": "/journal/ta/5555/01/09946368/1IdqYG8gCvC",
"parentPublication": {
"id": "trans/ta",
"title": "IEEE Transactions on Affective Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ta/2022/01/08847462",
"title": "Emotion Recognition and EEG Analysis Using ADMM-Based Sparse Group Lasso",
"doi": null,
"abstractUrl": "/journal/ta/2022/01/08847462/1dApNQW2KPu",
"parentPublication": {
"id": "trans/ta",
"title": "IEEE Transactions on Affective Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aciiw/2019/3891/0/08925097",
"title": "Feedback of Physiological-Based Emotion before Publishing Emotional Expression on Social Media",
"doi": null,
"abstractUrl": "/proceedings-article/aciiw/2019/08925097/1fHFaNacVsQ",
"parentPublication": {
"id": "proceedings/aciiw/2019/3891/0",
"title": "2019 8th International Conference on Affective Computing and Intelligent Interaction Workshops and Demos (ACIIW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iotdi/2020/6602/0/660200a027",
"title": "SPIDERS: Low-Cost Wireless Glasses for Continuous In-Situ Bio-Signal Acquisition and Emotion Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/iotdi/2020/660200a027/1k0P4qswoYo",
"parentPublication": {
"id": "proceedings/iotdi/2020/6602/0",
"title": "2020 IEEE/ACM Fifth International Conference on Internet-of-Things Design and Implementation (IoTDI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibm/2020/6215/0/09313522",
"title": "Emotion Classification Based on Brain Functional Connectivity Network",
"doi": null,
"abstractUrl": "/proceedings-article/bibm/2020/09313522/1qmfKAYXX6U",
"parentPublication": {
"id": "proceedings/bibm/2020/6215/0",
"title": "2020 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ta/5555/01/09453134",
"title": "CPED: A Chinese Positive Emotion Database for Emotion Elicitation and Analysis",
"doi": null,
"abstractUrl": "/journal/ta/5555/01/09453134/1ulCsdie4og",
"parentPublication": {
"id": "trans/ta",
"title": "IEEE Transactions on Affective Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1oa5cMBK2re",
"title": "2020 5th International Conference on Electromechanical Control Technology and Transportation (ICECTT)",
"acronym": "icectt",
"groupId": "1832064",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1oa5ey8lRaE",
"doi": "10.1109/ICECTT50890.2020.00055",
"title": "Emotional Response Increments Induced by Equivalent Enhancement of Different Valence Films",
"normalizedTitle": "Emotional Response Increments Induced by Equivalent Enhancement of Different Valence Films",
"abstract": "The arrangements of emotional intensity in film which related to the plot and rhythm are important to the audience's feeling of the film. The arousal degree brought by different emotional intensity is the core concern of the film creators. Whether the equal enhancement of positive and negative emotion stimulus will bring equal emotion arousal to audiences has not been determined in current researches. Previous studies usually just simple compare positive and negative films, but the emotional intensity of positive and negative films is already different, so it is difficult to compare them accurately. In this paper, participants were asked to watch one of the formats of Virtual Reality (VR) and 2D (Monoscopic) emotional films, and to compare the arousal increments of participants under the condition of equivalent enhancement of positive and negative valence films. The experiment results show that the same incremental negative films bring more incremental responses than the positive ones. Combined with subjective scales, skin conductivity response and pulse rate can better reflect the physiological indicators of emotional arousal. In addition, in both positive and negative films, skin conductance level, skin conductance response, blood volume pulse and subjective evaluation showed significant differences between VR and 2D, and the valence judgment of negative films was not affected by the display modes.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The arrangements of emotional intensity in film which related to the plot and rhythm are important to the audience's feeling of the film. The arousal degree brought by different emotional intensity is the core concern of the film creators. Whether the equal enhancement of positive and negative emotion stimulus will bring equal emotion arousal to audiences has not been determined in current researches. Previous studies usually just simple compare positive and negative films, but the emotional intensity of positive and negative films is already different, so it is difficult to compare them accurately. In this paper, participants were asked to watch one of the formats of Virtual Reality (VR) and 2D (Monoscopic) emotional films, and to compare the arousal increments of participants under the condition of equivalent enhancement of positive and negative valence films. The experiment results show that the same incremental negative films bring more incremental responses than the positive ones. Combined with subjective scales, skin conductivity response and pulse rate can better reflect the physiological indicators of emotional arousal. In addition, in both positive and negative films, skin conductance level, skin conductance response, blood volume pulse and subjective evaluation showed significant differences between VR and 2D, and the valence judgment of negative films was not affected by the display modes.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The arrangements of emotional intensity in film which related to the plot and rhythm are important to the audience's feeling of the film. The arousal degree brought by different emotional intensity is the core concern of the film creators. Whether the equal enhancement of positive and negative emotion stimulus will bring equal emotion arousal to audiences has not been determined in current researches. Previous studies usually just simple compare positive and negative films, but the emotional intensity of positive and negative films is already different, so it is difficult to compare them accurately. In this paper, participants were asked to watch one of the formats of Virtual Reality (VR) and 2D (Monoscopic) emotional films, and to compare the arousal increments of participants under the condition of equivalent enhancement of positive and negative valence films. The experiment results show that the same incremental negative films bring more incremental responses than the positive ones. Combined with subjective scales, skin conductivity response and pulse rate can better reflect the physiological indicators of emotional arousal. In addition, in both positive and negative films, skin conductance level, skin conductance response, blood volume pulse and subjective evaluation showed significant differences between VR and 2D, and the valence judgment of negative films was not affected by the display modes.",
"fno": "992800a213",
"keywords": [
"Blood",
"Emotion Recognition",
"Physiology",
"Skin",
"Virtual Reality",
"Equal Emotion",
"Positive Films",
"2 D Emotional Films",
"Arousal Increments",
"Equivalent Enhancement",
"Positive Valence Films",
"Negative Valence Films",
"Incremental Negative Films",
"Incremental Responses",
"Skin Conductivity Response",
"Emotional Arousal",
"Emotional Response",
"Arousal Degree",
"Different Emotional Intensity",
"Film Creators",
"Equal Enhancement",
"Positive Emotion Stimulus",
"Negative Emotion Stimulus",
"Valence Films",
"Films",
"Physiology",
"Motion Pictures",
"Two Dimensional Displays",
"Heart Rate",
"Temperature Measurement",
"Temperature Sensors",
"Virtual Reality",
"Emotion Arousal",
"Affective Computing",
"Physiological Signals"
],
"authors": [
{
"affiliation": "Shanghai University",
"fullName": "Feng Tian",
"givenName": "Feng",
"surname": "Tian",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Shanghai University",
"fullName": "Xiaofei Hou",
"givenName": "Xiaofei",
"surname": "Hou",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Shanghai University",
"fullName": "Minlei Hua",
"givenName": "Minlei",
"surname": "Hua",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icectt",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-05-01T00:00:00",
"pubType": "proceedings",
"pages": "213-218",
"year": "2020",
"issn": null,
"isbn": "978-1-7281-9928-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "992800a207",
"articleId": "1oa5gYRDwyI",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "992800a219",
"articleId": "1oa5fTUi1fa",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/acii/2013/5048/0/5048a582",
"title": "User-centric Affective Video Tagging from MEG and Peripheral Physiological Responses",
"doi": null,
"abstractUrl": "/proceedings-article/acii/2013/5048a582/12OmNrkjVpA",
"parentPublication": {
"id": "proceedings/acii/2013/5048/0",
"title": "2013 Humaine Association Conference on Affective Computing and Intelligent Interaction (ACII)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iseim/2005/6063/1/01496046",
"title": "Space charge phenomena in polyimide films and effects of absorbed water",
"doi": null,
"abstractUrl": "/proceedings-article/iseim/2005/01496046/12OmNro0HXr",
"parentPublication": {
"id": "proceedings/iseim/2005/6063/1",
"title": "International Symposium on Electrical Insulating Materials",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/acii/2013/5048/0/5048a362",
"title": "Measuring Emotional Arousal for Online Applications: Evaluation of Ultra-short Term Heart Rate Variability Measures",
"doi": null,
"abstractUrl": "/proceedings-article/acii/2013/5048a362/12OmNwCaCyL",
"parentPublication": {
"id": "proceedings/acii/2013/5048/0",
"title": "2013 Humaine Association Conference on Affective Computing and Intelligent Interaction (ACII)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mtdt/1994/6245/0/00397202",
"title": "Mechanical stress induced void and hillock formations in thin films",
"doi": null,
"abstractUrl": "/proceedings-article/mtdt/1994/00397202/12OmNwErpAJ",
"parentPublication": {
"id": "proceedings/mtdt/1994/6245/0",
"title": "Proceedings of IEEE International Workshop on Memory Technology, Design, and Test",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iciip/2015/0148/0/07414739",
"title": "Emotion recognition based on physiological signals using valence-arousal model",
"doi": null,
"abstractUrl": "/proceedings-article/iciip/2015/07414739/12OmNyQYte9",
"parentPublication": {
"id": "proceedings/iciip/2015/0148/0",
"title": "2015 Third International Conference on Image Information Processing (ICIIP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ta/2020/04/08326711",
"title": "Physiological Detection of Affective States in Children with Autism Spectrum Disorder",
"doi": null,
"abstractUrl": "/journal/ta/2020/04/08326711/13rRUyYBlfe",
"parentPublication": {
"id": "trans/ta",
"title": "IEEE Transactions on Affective Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ta/2018/04/07835688",
"title": "Real-Time Movie-Induced Discrete Emotion Recognition from EEG Signals",
"doi": null,
"abstractUrl": "/journal/ta/2018/04/07835688/17D45XvMceV",
"parentPublication": {
"id": "trans/ta",
"title": "IEEE Transactions on Affective Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/acii/2019/3888/0/08925462",
"title": "MEMOA: Introducing the Multi-Modal Emotional Memories of Older Adults Database",
"doi": null,
"abstractUrl": "/proceedings-article/acii/2019/08925462/1fHGFhfT8eA",
"parentPublication": {
"id": "proceedings/acii/2019/3888/0",
"title": "2019 8th International Conference on Affective Computing and Intelligent Interaction (ACII)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibe/2020/9574/0/957400a269",
"title": "Automated Emotional Valence Prediction in Mental Health Text via Deep Transfer Learning",
"doi": null,
"abstractUrl": "/proceedings-article/bibe/2020/957400a269/1pBMmMWfuZq",
"parentPublication": {
"id": "proceedings/bibe/2020/9574/0",
"title": "2020 IEEE 20th International Conference on Bioinformatics and Bioengineering (BIBE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibe/2021/4261/0/09635346",
"title": "A machine learning approach to predict emotional arousal and valence from gaze extracted features",
"doi": null,
"abstractUrl": "/proceedings-article/bibe/2021/09635346/1zmvn0ear7i",
"parentPublication": {
"id": "proceedings/bibe/2021/4261/0",
"title": "2021 IEEE 21st International Conference on Bioinformatics and Bioengineering (BIBE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1qmfHK8AjMQ",
"title": "2020 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)",
"acronym": "bibm",
"groupId": "1001586",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1qmfKAYXX6U",
"doi": "10.1109/BIBM49941.2020.9313522",
"title": "Emotion Classification Based on Brain Functional Connectivity Network",
"normalizedTitle": "Emotion Classification Based on Brain Functional Connectivity Network",
"abstract": "Although more and more researchers pay attention to the emotion classification, traditional emotion classification methods can not embrace changes in the global and local areas of the human brain after being stimulated. We propose an emotion classification method based on SVM combining brain functional connectivity. Firstly, the nonlinear phase-locked value (PLV) is used to calculate the multiband brain functional connectivity network, which is then converted into a binary brain network, and seven features of binary brain network are calculated. Secondly, support vector machines (SVM) are used to classify positive and negative emotions at the valence dimension and arousal dimension in the multiband. Experimental results on DEAP show that the best emotion classification accuracy of the proposed method is 86.67% in the arousal dimension, and 84.44% in the valence dimension. The results demonstrate that the classification accuracy of the arousal dimension is better than the valence dimension and the Beta2 frequency band is more suitable for emotion classification. Finally, several findings on brain functional connectivity network is discussed. The left and right areas of brain functional connectivity network are unbalanced in the low frequency band, and the feature values of clustering coefficient, average shortest path length, global efficiency, local efficiency, node degree are positively correlated with the arousal degree in the arousal dimension. Humans emotions are suppressed in the low frequency band, and the brain functional connectivity network after emotional stimulation is strengthened in the high frequency band. Our findings on emotion classification are valuable and consistent with the study of neural mechanisms.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Although more and more researchers pay attention to the emotion classification, traditional emotion classification methods can not embrace changes in the global and local areas of the human brain after being stimulated. We propose an emotion classification method based on SVM combining brain functional connectivity. Firstly, the nonlinear phase-locked value (PLV) is used to calculate the multiband brain functional connectivity network, which is then converted into a binary brain network, and seven features of binary brain network are calculated. Secondly, support vector machines (SVM) are used to classify positive and negative emotions at the valence dimension and arousal dimension in the multiband. Experimental results on DEAP show that the best emotion classification accuracy of the proposed method is 86.67% in the arousal dimension, and 84.44% in the valence dimension. The results demonstrate that the classification accuracy of the arousal dimension is better than the valence dimension and the Beta2 frequency band is more suitable for emotion classification. Finally, several findings on brain functional connectivity network is discussed. The left and right areas of brain functional connectivity network are unbalanced in the low frequency band, and the feature values of clustering coefficient, average shortest path length, global efficiency, local efficiency, node degree are positively correlated with the arousal degree in the arousal dimension. Humans emotions are suppressed in the low frequency band, and the brain functional connectivity network after emotional stimulation is strengthened in the high frequency band. Our findings on emotion classification are valuable and consistent with the study of neural mechanisms.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Although more and more researchers pay attention to the emotion classification, traditional emotion classification methods can not embrace changes in the global and local areas of the human brain after being stimulated. We propose an emotion classification method based on SVM combining brain functional connectivity. Firstly, the nonlinear phase-locked value (PLV) is used to calculate the multiband brain functional connectivity network, which is then converted into a binary brain network, and seven features of binary brain network are calculated. Secondly, support vector machines (SVM) are used to classify positive and negative emotions at the valence dimension and arousal dimension in the multiband. Experimental results on DEAP show that the best emotion classification accuracy of the proposed method is 86.67% in the arousal dimension, and 84.44% in the valence dimension. The results demonstrate that the classification accuracy of the arousal dimension is better than the valence dimension and the Beta2 frequency band is more suitable for emotion classification. Finally, several findings on brain functional connectivity network is discussed. The left and right areas of brain functional connectivity network are unbalanced in the low frequency band, and the feature values of clustering coefficient, average shortest path length, global efficiency, local efficiency, node degree are positively correlated with the arousal degree in the arousal dimension. Humans emotions are suppressed in the low frequency band, and the brain functional connectivity network after emotional stimulation is strengthened in the high frequency band. Our findings on emotion classification are valuable and consistent with the study of neural mechanisms.",
"fno": "09313522",
"keywords": [
"Electroencephalography",
"Emotion Recognition",
"Feature Extraction",
"Medical Signal Processing",
"Neurophysiology",
"Signal Classification",
"Support Vector Machines",
"Emotional Stimulation",
"Human Brain",
"Multiband Brain Functional Connectivity Network",
"Binary Brain Network",
"Positive Emotions",
"Negative Emotions",
"Emotion Classification",
"Support Vector Machines",
"SVM",
"Clustering Coefficient",
"Neural Mechanism",
"Beta 2 Frequency Band",
"Nonlinear Phase Locked Value",
"Electroencephalography",
"Support Vector Machines",
"Brain",
"Frequency Measurement",
"Manganese",
"Information Science",
"Physiology",
"EEG",
"Brain Functional Connectivity",
"Brain Network",
"Emotion Classification"
],
"authors": [
{
"affiliation": "Shandong Normal University,School of Information Science and Engineering,Jinan,China",
"fullName": "Xiaofang Sun",
"givenName": "Xiaofang",
"surname": "Sun",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Shandong Normal University,School of Information Science and Engineering,Jinan,China",
"fullName": "Bin Hu",
"givenName": "Bin",
"surname": "Hu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Shandong Normal University,School of Information Science and Engineering,Jinan,China",
"fullName": "Xiangwei Zheng",
"givenName": "Xiangwei",
"surname": "Zheng",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Shandong Normal University,School of Information Science and Engineering,Jinan,China",
"fullName": "Yongqiang Yin",
"givenName": "Yongqiang",
"surname": "Yin",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Shandong Normal University,School of Information Science and Engineering,Jinan,China",
"fullName": "Cun Ji",
"givenName": "Cun",
"surname": "Ji",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "bibm",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-12-01T00:00:00",
"pubType": "proceedings",
"pages": "2082-2089",
"year": "2020",
"issn": null,
"isbn": "978-1-7281-6215-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "09313270",
"articleId": "1qmg9EBgOOc",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09313581",
"articleId": "1qmfI2SXHr2",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icci*cc/2017/0771/0/08109759",
"title": "Functional connectivity assessment for episodic memory",
"doi": null,
"abstractUrl": "/proceedings-article/icci*cc/2017/08109759/12OmNvpNIns",
"parentPublication": {
"id": "proceedings/icci*cc/2017/0771/0",
"title": "2017 IEEE 16th International Conference on Cognitive Informatics & Cognitive Computing (ICCI*CC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icics/2018/6483/0/648300a315",
"title": "Emotion Recognition Using Brain Signals",
"doi": null,
"abstractUrl": "/proceedings-article/icics/2018/648300a315/146z4FDodIl",
"parentPublication": {
"id": "proceedings/icics/2018/6483/0",
"title": "2018 International Conference on Intelligent Circuits and Systems (ICICS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ta/2021/02/08520789",
"title": "Brain Dynamics During Arousal-Dependent Pleasant/Unpleasant Visual Elicitation: An Electroencephalographic Study on the Circumplex Model of Affect",
"doi": null,
"abstractUrl": "/journal/ta/2021/02/08520789/17D45VsBU7b",
"parentPublication": {
"id": "trans/ta",
"title": "IEEE Transactions on Affective Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ta/2018/04/07835688",
"title": "Real-Time Movie-Induced Discrete Emotion Recognition from EEG Signals",
"doi": null,
"abstractUrl": "/journal/ta/2018/04/07835688/17D45XvMceV",
"parentPublication": {
"id": "trans/ta",
"title": "IEEE Transactions on Affective Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmtma/2022/9978/0/997800a610",
"title": "EEG-Based Emotion Recognition Using Partial Directed Coherence Dense Graph Propagation",
"doi": null,
"abstractUrl": "/proceedings-article/icmtma/2022/997800a610/1ByetDUemly",
"parentPublication": {
"id": "proceedings/icmtma/2022/9978/0",
"title": "2022 14th International Conference on Measuring Technology and Mechatronics Automation (ICMTMA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icceai/2022/6803/0/680300a684",
"title": "Improved Graph Convolutional Neural Networks based on Granger Causality Analysis for EEG Emotion Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/icceai/2022/680300a684/1FUVDYsfCus",
"parentPublication": {
"id": "proceedings/icceai/2022/6803/0",
"title": "2022 International Conference on Computer Engineering and Artificial Intelligence (ICCEAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icftic/2022/2195/0/10075315",
"title": "Graph Convolutional Neural Network for EEG Emotion Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/icftic/2022/10075315/1LRl2AN4aqs",
"parentPublication": {
"id": "proceedings/icftic/2022/2195/0",
"title": "2022 4th International Conference on Frontiers Technology of Information and Computer (ICFTIC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aciiw/2022/5490/0/10086000",
"title": "Preliminary Study on the Transition of Bio-emotion using Aroma Stimuli",
"doi": null,
"abstractUrl": "/proceedings-article/aciiw/2022/10086000/1M665PIA7L2",
"parentPublication": {
"id": "proceedings/aciiw/2022/5490/0",
"title": "2022 10th International Conference on Affective Computing and Intelligent Interaction Workshops and Demos (ACIIW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ta/2022/03/09139334",
"title": "Identifying Cortical Brain Directed Connectivity Networks From High-Density EEG for Emotion Recognition",
"doi": null,
"abstractUrl": "/journal/ta/2022/03/09139334/1ls8eA1XF0A",
"parentPublication": {
"id": "trans/ta",
"title": "IEEE Transactions on Affective Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ta/5555/01/09543539",
"title": "AT2GRU: A Human Emotion Recognition Model with Mitigated Device Heterogeneity",
"doi": null,
"abstractUrl": "/journal/ta/5555/01/09543539/1x4UFatfrC8",
"parentPublication": {
"id": "trans/ta",
"title": "IEEE Transactions on Affective Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "1qpzz6dhLLq",
"title": "2020 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)",
"acronym": "aivr",
"groupId": "1830004",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1qpzCZXhpS0",
"doi": "10.1109/AIVR50618.2020.00076",
"title": "Annotation Tool for Precise Emotion Ground Truth Label Acquisition while Watching 360° VR Videos",
"normalizedTitle": "Annotation Tool for Precise Emotion Ground Truth Label Acquisition while Watching 360° VR Videos",
"abstract": "We demonstrate an HMD-based annotation tool for collecting precise emotion ground truth labels while users are watching 360° videos in Virtual Reality (VR). Our tool uses an HTC VIVE Pro Eye HMD for displaying 360° videos, a Joy-Con controller for inputting emotion annotations, and an Empatica E4 wristband for capturing physiological signals. Timestamps of these devices are synchronized via an NTP server. Following dimensional emotion models, users can report their emotion in terms of valence and arousal as they watch a video in VR. Annotation feedback is provided through two peripheral visualization techniques: HaloLight and DotSize. Our annotation tool provides a starting point for researchers to design momentary and continuous self-reports in virtual environments to enable fine-grained emotion recognition.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We demonstrate an HMD-based annotation tool for collecting precise emotion ground truth labels while users are watching 360° videos in Virtual Reality (VR). Our tool uses an HTC VIVE Pro Eye HMD for displaying 360° videos, a Joy-Con controller for inputting emotion annotations, and an Empatica E4 wristband for capturing physiological signals. Timestamps of these devices are synchronized via an NTP server. Following dimensional emotion models, users can report their emotion in terms of valence and arousal as they watch a video in VR. Annotation feedback is provided through two peripheral visualization techniques: HaloLight and DotSize. Our annotation tool provides a starting point for researchers to design momentary and continuous self-reports in virtual environments to enable fine-grained emotion recognition.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We demonstrate an HMD-based annotation tool for collecting precise emotion ground truth labels while users are watching 360° videos in Virtual Reality (VR). Our tool uses an HTC VIVE Pro Eye HMD for displaying 360° videos, a Joy-Con controller for inputting emotion annotations, and an Empatica E4 wristband for capturing physiological signals. Timestamps of these devices are synchronized via an NTP server. Following dimensional emotion models, users can report their emotion in terms of valence and arousal as they watch a video in VR. Annotation feedback is provided through two peripheral visualization techniques: HaloLight and DotSize. Our annotation tool provides a starting point for researchers to design momentary and continuous self-reports in virtual environments to enable fine-grained emotion recognition.",
"fno": "746300a371",
"keywords": [
"Data Acquisition",
"Emotion Recognition",
"Helmet Mounted Displays",
"Video Signal Processing",
"Virtual Reality",
"Annotation Feedback",
"Emotion Recognition",
"Precise Emotion Ground Truth Label Acquisition",
"VR Videos",
"HMD Based Annotation Tool",
"Virtual Reality",
"HTC VIVE Pro Eye HMD",
"Joy Con Controller",
"Emotion Annotations",
"Empatica E 4 Wristband",
"Dimensional Emotion Models",
"Annotations",
"Videos",
"Tools",
"Resists",
"Physiology",
"Virtual Environments",
"Human Factors",
"360 X 00 B 0 Video",
"Emotion Annotation",
"Continuous",
"Ground Truth Labels"
],
"authors": [
{
"affiliation": "Beijing Institute of Technology Centrum Wiskunde & Informatica (CWI),Beijing,China",
"fullName": "Tong Xue",
"givenName": "Tong",
"surname": "Xue",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Centrum Wiskunde & Informatica (CWI),Amsterdam,The Netherlands",
"fullName": "Abdallah El Ali",
"givenName": "Abdallah El",
"surname": "Ali",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Beijing Institute of Technology,Beijing,China",
"fullName": "Gangyi Ding",
"givenName": "Gangyi",
"surname": "Ding",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Delft University of Technology,Centrum Wiskunde & Informatica (CWI),Amsterdam,The Netherlands",
"fullName": "Pablo Cesar",
"givenName": "Pablo",
"surname": "Cesar",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "aivr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-12-01T00:00:00",
"pubType": "proceedings",
"pages": "371-372",
"year": "2020",
"issn": null,
"isbn": "978-1-7281-7463-1",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "746300a366",
"articleId": "1qpzDoHMV2M",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "746300a373",
"articleId": "1qpzBgQPoWI",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icme/2018/1737/0/08486537",
"title": "A Subjective Study of Viewer Navigation Behaviors When Watching 360-Degree Videos on Computers",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2018/08486537/14jQfTvagGm",
"parentPublication": {
"id": "proceedings/icme/2018/1737/0",
"title": "2018 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2022/9617/0/961700a001",
"title": "Bullet Comments for 360°Video",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2022/961700a001/1CJcgerbwNa",
"parentPublication": {
"id": "proceedings/vr/2022/9617/0",
"title": "2022 IEEE on Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wowmom/2022/0876/0/087600a281",
"title": "Head Movement-aware MPEG-DASH SRD-based 360° Video VR Streaming System over Wireless Network",
"doi": null,
"abstractUrl": "/proceedings-article/wowmom/2022/087600a281/1FHqcfLbws0",
"parentPublication": {
"id": "proceedings/wowmom/2022/0876/0",
"title": "2022 IEEE 23rd International Symposium on a World of Wireless, Mobile and Multimedia Networks (WoWMoM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a491",
"title": "Implementation of Attention-Based Spatial Audio for 360° Environments",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a491/1J7Wlf9IrNC",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797968",
"title": "Did You See What I Saw?: Comparing User Synchrony When Watching 360° Video In HMD Vs Flat Screen",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797968/1cJ0X6CH9wk",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2020/6553/0/09093262",
"title": "360-Indoor: Towards Learning Real-World Objects in 360° Indoor Equirectangular Images",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2020/09093262/1jPbAWPyE8g",
"parentPublication": {
"id": "proceedings/wacv/2020/6553/0",
"title": "2020 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wowmom/2020/7374/0/737400a191",
"title": "A QoE and Visual Attention Evaluation on the Influence of Audio in 360° Videos",
"doi": null,
"abstractUrl": "/proceedings-article/wowmom/2020/737400a191/1nMQCKTCoeY",
"parentPublication": {
"id": "proceedings/wowmom/2020/7374/0",
"title": "2020 IEEE 21st International Symposium on \"A World of Wireless, Mobile and Multimedia Networks\" (WoWMoM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ism/2020/8697/0/869700a065",
"title": "Between the Frames - Evaluation of Various Motion Interpolation Algorithms to Improve 360° Video Quality",
"doi": null,
"abstractUrl": "/proceedings-article/ism/2020/869700a065/1qBbIgvfx6g",
"parentPublication": {
"id": "proceedings/ism/2020/8697/0",
"title": "2020 IEEE International Symposium on Multimedia (ISM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aivr/2020/7463/0/746300a345",
"title": "A QoE and Visual Attention Evaluation on the Influence of Spatial Audio in 360 Videos",
"doi": null,
"abstractUrl": "/proceedings-article/aivr/2020/746300a345/1qpzDaHLzhu",
"parentPublication": {
"id": "proceedings/aivr/2020/7463/0",
"title": "2020 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2021/1298/0/129800a183",
"title": "Enabling Collaborative Interaction with 360° Panoramas between Large-scale Displays and Immersive Headsets",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2021/129800a183/1yeQBWUxple",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2021/1298/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
{
"proceeding": {
"id": "12OmNxisQYM",
"title": "2011 International Conference on Information Science and Applications",
"acronym": "icisa",
"groupId": "1800053",
"volume": "0",
"displayVolume": "0",
"year": "2011",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNAWpyrv",
"doi": "10.1109/ICISA.2011.5772406",
"title": "Design and Implementation of an Augmented Reality System Using Gaze Interaction",
"normalizedTitle": "Design and Implementation of an Augmented Reality System Using Gaze Interaction",
"abstract": "An interactive optical see-through HMD (head-mounted device) which makes use of a user's gaze information for the interaction in the AR (augmented reality) environment. In particular, we propose a method to employ a user's half-blink information for more efficient interaction. As the interaction is achieved by using a user's eye gaze and half-blink information, the proposed system can provide more efficient computing environment. In addition, the proposed system can be quite helpful to those who have difficulties in using conventional interaction methods which use hands or feet. The experimental results present the robustness and efficiency of the proposed system.",
"abstracts": [
{
"abstractType": "Regular",
"content": "An interactive optical see-through HMD (head-mounted device) which makes use of a user's gaze information for the interaction in the AR (augmented reality) environment. In particular, we propose a method to employ a user's half-blink information for more efficient interaction. As the interaction is achieved by using a user's eye gaze and half-blink information, the proposed system can provide more efficient computing environment. In addition, the proposed system can be quite helpful to those who have difficulties in using conventional interaction methods which use hands or feet. The experimental results present the robustness and efficiency of the proposed system.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "An interactive optical see-through HMD (head-mounted device) which makes use of a user's gaze information for the interaction in the AR (augmented reality) environment. In particular, we propose a method to employ a user's half-blink information for more efficient interaction. As the interaction is achieved by using a user's eye gaze and half-blink information, the proposed system can provide more efficient computing environment. In addition, the proposed system can be quite helpful to those who have difficulties in using conventional interaction methods which use hands or feet. The experimental results present the robustness and efficiency of the proposed system.",
"fno": "05772406",
"keywords": [
"Augmented Reality",
"Eye",
"User Interfaces",
"Augmented Reality",
"Gaze Interaction",
"HMD",
"Head Mounted Device",
"Eye",
"Eyelids",
"Tracking",
"Augmented Reality",
"Cameras",
"Calibration",
"Laser Mode Locking",
"Brightness"
],
"authors": [
{
"affiliation": null,
"fullName": "Jae-Young Lee",
"givenName": "Jae-Young",
"surname": "Lee",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Hyung-Min Park",
"givenName": "Hyung-Min",
"surname": "Park",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Seok-Han Lee",
"givenName": "Seok-Han",
"surname": "Lee",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Tae-Eun Kim",
"givenName": "Tae-Eun",
"surname": "Kim",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Jong-Soo Choi",
"givenName": "Jong-Soo",
"surname": "Choi",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icisa",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2011-04-01T00:00:00",
"pubType": "proceedings",
"pages": "1-8",
"year": "2011",
"issn": "2162-9048",
"isbn": "978-1-4244-9222-0",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "05772325",
"articleId": "12OmNyUnEBc",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "05772355",
"articleId": "12OmNzmclZL",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/3dui/2017/6716/0/07893315",
"title": "Exploring natural eye-gaze-based interaction for immersive virtual reality",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2017/07893315/12OmNApcuh1",
"parentPublication": {
"id": "proceedings/3dui/2017/6716/0",
"title": "2017 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cgiv/2009/3789/0/3789a153",
"title": "An Implementation Review of Occlusion-Based Interaction in Augmented Reality Environment",
"doi": null,
"abstractUrl": "/proceedings-article/cgiv/2009/3789a153/12OmNB7cjly",
"parentPublication": {
"id": "proceedings/cgiv/2009/3789/0",
"title": "2009 Sixth International Conference on Computer Graphics, Imaging and Visualization",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icat/2007/3056/0/30560280",
"title": "Interaction Without Gesture or Speech -- A Gaze Controlled AR System",
"doi": null,
"abstractUrl": "/proceedings-article/icat/2007/30560280/12OmNCcKQtv",
"parentPublication": {
"id": "proceedings/icat/2007/3056/0",
"title": "17th International Conference on Artificial Reality and Telexistence (ICAT 2007)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2008/2840/0/04637353",
"title": "Wearable augmented reality system using gaze interaction",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2008/04637353/12OmNxETakw",
"parentPublication": {
"id": "proceedings/ismar/2008/2840/0",
"title": "2008 7th IEEE/ACM International Symposium on Mixed and Augmented Reality",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wkdd/2009/3543/0/3543a594",
"title": "Research on Eye-gaze Tracking Network Generated by Augmented Reality Application",
"doi": null,
"abstractUrl": "/proceedings-article/wkdd/2009/3543a594/12OmNzl3WVn",
"parentPublication": {
"id": "proceedings/wkdd/2009/3543/0",
"title": "2009 Second International Workshop on Knowledge Discovery and Data Mining. WKDD 2009",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/11/09872121",
"title": ": From real infrared eye-images to synthetic sequences of gaze behavior<italic/>",
"doi": null,
"abstractUrl": "/journal/tg/2022/11/09872121/1GhRV18KGvC",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/11/09874254",
"title": "Gaze-Vergence-Controlled See-Through Vision in Augmented Reality",
"doi": null,
"abstractUrl": "/journal/tg/2022/11/09874254/1GjwOCjuXkY",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a315",
"title": "Glance-Box: Multi-LOD Glanceable Interfaces for Machine Shop Guidance in Augmented Reality using Blink and Hand Interaction",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a315/1J7Wpsgpk76",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2023/4815/0/481500a022",
"title": "Exploring 3D Interaction with Gaze Guidance in Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2023/481500a022/1MNgYOBne5W",
"parentPublication": {
"id": "proceedings/vr/2023/4815/0",
"title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2020/7675/0/767500a165",
"title": "Comparing Single-modal and Multimodal Interaction in an Augmented Reality System",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2020/767500a165/1pBMk3pKsEw",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2020/7675/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.