data
dict |
|---|
{
"proceeding": {
"id": "17D45VtKirt",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"acronym": "cvpr",
"groupId": "1000147",
"volume": "0",
"displayVolume": "0",
"year": "2018",
"__typename": "ProceedingType"
},
"article": {
"id": "17D45WaTklB",
"doi": "10.1109/CVPR.2018.00317",
"title": "SurfConv: Bridging 3D and 2D Convolution for RGBD Images",
"normalizedTitle": "SurfConv: Bridging 3D and 2D Convolution for RGBD Images",
"abstract": "The last few years have seen approaches trying to combine the increasing popularity of depth sensors and the success of the convolutional neural networks. Using depth as additional channel alongside the RGB input has the scale variance problem present in image convolution based approaches. On the other hand, 3D convolution wastes a large amount of memory on mostly unoccupied 3D space, which consists of only the surface visible to the sensor. Instead, we propose SurfConv, which \"slides\" compact 2D filters along the visible 3D surface. SurfConv is formulated as a simple depth-aware multi-scale 2D convolution, through a new Data-Driven Depth Discretization (D4) scheme. We demonstrate the effectiveness of our method on indoor and outdoor 3D semantic segmentation datasets. Our method achieves state-of-the-art performance while using less than 30% parameters used by the 3D convolution based approaches.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The last few years have seen approaches trying to combine the increasing popularity of depth sensors and the success of the convolutional neural networks. Using depth as additional channel alongside the RGB input has the scale variance problem present in image convolution based approaches. On the other hand, 3D convolution wastes a large amount of memory on mostly unoccupied 3D space, which consists of only the surface visible to the sensor. Instead, we propose SurfConv, which \"slides\" compact 2D filters along the visible 3D surface. SurfConv is formulated as a simple depth-aware multi-scale 2D convolution, through a new Data-Driven Depth Discretization (D4) scheme. We demonstrate the effectiveness of our method on indoor and outdoor 3D semantic segmentation datasets. Our method achieves state-of-the-art performance while using less than 30% parameters used by the 3D convolution based approaches.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The last few years have seen approaches trying to combine the increasing popularity of depth sensors and the success of the convolutional neural networks. Using depth as additional channel alongside the RGB input has the scale variance problem present in image convolution based approaches. On the other hand, 3D convolution wastes a large amount of memory on mostly unoccupied 3D space, which consists of only the surface visible to the sensor. Instead, we propose SurfConv, which \"slides\" compact 2D filters along the visible 3D surface. SurfConv is formulated as a simple depth-aware multi-scale 2D convolution, through a new Data-Driven Depth Discretization (D4) scheme. We demonstrate the effectiveness of our method on indoor and outdoor 3D semantic segmentation datasets. Our method achieves state-of-the-art performance while using less than 30% parameters used by the 3D convolution based approaches.",
"fno": "642000d002",
"keywords": [
"Convolutional Neural Nets",
"Image Colour Analysis",
"Image Filtering",
"Image Segmentation",
"Stereo Image Processing",
"Convolutional Neural Networks",
"RGB Input",
"Scale Variance Problem",
"Image Convolution Based Approaches",
"Surf Conv",
"Data Driven Depth Discretization Scheme",
"3 D Convolution Based Approaches",
"Depth Aware Multiscale 2 D Convolution",
"3 D Semantic Segmentation Datasets",
"2 D Filters",
"Three Dimensional Displays",
"Convolution",
"Two Dimensional Displays",
"Sensors",
"Surface Treatment",
"Semantics",
"Image Sensors"
],
"authors": [
{
"affiliation": null,
"fullName": "Hang Chu",
"givenName": "Hang",
"surname": "Chu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Wei-Chiu Ma",
"givenName": "Wei-Chiu",
"surname": "Ma",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Kaustav Kundu",
"givenName": "Kaustav",
"surname": "Kundu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Raquel Urtasun",
"givenName": "Raquel",
"surname": "Urtasun",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Sanja Fidler",
"givenName": "Sanja",
"surname": "Fidler",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2018-06-01T00:00:00",
"pubType": "proceedings",
"pages": "3002-3011",
"year": "2018",
"issn": null,
"isbn": "978-1-5386-6420-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "642000c993",
"articleId": "17D45XERmmv",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "642000d012",
"articleId": "17D45WODaoD",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cvpr/2017/0457/0/0457a398",
"title": "Amodal Detection of 3D Objects: Inferring 3D Bounding Boxes from 2D Ones in RGB-Depth Images",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2017/0457a398/12OmNvAiSEn",
"parentPublication": {
"id": "proceedings/cvpr/2017/0457/0",
"title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2016/5407/0/5407a441",
"title": "Robust Real-Time 3D Face Tracking from RGBD Videos under Extreme Pose, Depth, and Expression Variation",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2016/5407a441/12OmNzC5TfM",
"parentPublication": {
"id": "proceedings/3dv/2016/5407/0",
"title": "2016 Fourth International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2018/06/07934426",
"title": "Robust 3D Object Tracking from Monocular Images Using Stable Parts",
"doi": null,
"abstractUrl": "/journal/tp/2018/06/07934426/13rRUxASuOq",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2018/6100/0/610000b642",
"title": "Forgery Detection in 3D-Sensor Images",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2018/610000b642/17D45XeKgxC",
"parentPublication": {
"id": "proceedings/cvprw/2018/6100/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2019/3131/0/313100a173",
"title": "3D Neighborhood Convolution: Learning Depth-Aware Features for RGB-D and RGB Semantic Segmentation",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2019/313100a173/1ezRDmQtEY0",
"parentPublication": {
"id": "proceedings/3dv/2019/3131/0",
"title": "2019 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2019/4803/0/480300k0022",
"title": "Learning Joint 2D-3D Representations for Depth Completion",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2019/480300k0022/1hVlU98LNbG",
"parentPublication": {
"id": "proceedings/iccv/2019/4803/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2020/6553/0/09093505",
"title": "Blended Convolution and Synthesis for Efficient Discrimination of 3D Shapes",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2020/09093505/1jPbfCoY1IQ",
"parentPublication": {
"id": "proceedings/wacv/2020/6553/0",
"title": "2020 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/7.168E297",
"title": "FPConv: Learning Local Flattening for Point Convolution",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/7.168E297/1m3nafl5rq0",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800i333",
"title": "RGBD-Dog: Predicting Canine Pose from RGBD Sensors",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800i333/1m3npw1SCKA",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800e533",
"title": "Fusion-Aware Point Convolution for Online Semantic 3D Scene Segmentation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800e533/1m3o50gS9u8",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "19F1LC52tjO",
"title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"acronym": "ismar-adjunct",
"groupId": "1810084",
"volume": "0",
"displayVolume": "0",
"year": "2018",
"__typename": "ProceedingType"
},
"article": {
"id": "19F1PQOMxWg",
"doi": "10.1109/ISMAR-Adjunct.2018.00131",
"title": "TutAR: Semi-Automatic Generation of Augmented Reality Tutorials for Medical Education",
"normalizedTitle": "TutAR: Semi-Automatic Generation of Augmented Reality Tutorials for Medical Education",
"abstract": "With Augmented Reality (AR) on Optical-See-Through-Head-Mounted Displays (OST-HMD), users can observe the real world and computer graphics at the same time. In this work, we present TutAR, a pipeline that semi-automatically creates AR tutorials out of 2D RGB videos. TutAR extracts relevant 3D hand motion from the input video. The derived motion will be displayed as an animated 3D hand relative to the human body and plays synchronously with the motion in the video on an OST-HMD.",
"abstracts": [
{
"abstractType": "Regular",
"content": "With Augmented Reality (AR) on Optical-See-Through-Head-Mounted Displays (OST-HMD), users can observe the real world and computer graphics at the same time. In this work, we present TutAR, a pipeline that semi-automatically creates AR tutorials out of 2D RGB videos. TutAR extracts relevant 3D hand motion from the input video. The derived motion will be displayed as an animated 3D hand relative to the human body and plays synchronously with the motion in the video on an OST-HMD.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "With Augmented Reality (AR) on Optical-See-Through-Head-Mounted Displays (OST-HMD), users can observe the real world and computer graphics at the same time. In this work, we present TutAR, a pipeline that semi-automatically creates AR tutorials out of 2D RGB videos. TutAR extracts relevant 3D hand motion from the input video. The derived motion will be displayed as an animated 3D hand relative to the human body and plays synchronously with the motion in the video on an OST-HMD.",
"fno": "08699222",
"keywords": [
"Augmented Reality",
"Biomedical Education",
"Computer Animation",
"Medical Image Processing",
"Video Signal Processing",
"Tut AR",
"Semiautomatic Generation",
"Augmented Reality Tutorials",
"Medical Education",
"Optical See Through Head Mounted Displays",
"OST HMD",
"Computer Graphics",
"2 D RGB Videos",
"Animated 3 D Hand",
"3 D Hand Motion",
"Three Dimensional Displays",
"Tutorials",
"Two Dimensional Displays",
"Trajectory",
"Augmented Reality",
"Image Reconstruction",
"Animation",
"Augmented Reality",
"Video Tutorials",
"Motion Extraction"
],
"authors": [
{
"affiliation": "Nara Institute of Science and Technology, Japan",
"fullName": "Daniel Eckhoff",
"givenName": "Daniel",
"surname": "Eckhoff",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Nara Institute of Science and Technology, Japan",
"fullName": "Christian Sandor",
"givenName": "Christian",
"surname": "Sandor",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Graz University of Technology, Austria",
"fullName": "Denis Kalkoten",
"givenName": "Denis",
"surname": "Kalkoten",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Technical University of Munich, Germany",
"fullName": "Ulrich Eck",
"givenName": "Ulrich",
"surname": "Eck",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "OFFIS - Institute for Information Technology, Oldenburg, Germany",
"fullName": "Christian Lins",
"givenName": "Christian",
"surname": "Lins",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Carl von Ossietzky University of Oldenburg, Germany",
"fullName": "Andreas Hein",
"givenName": "Andreas",
"surname": "Hein",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar-adjunct",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2018-10-01T00:00:00",
"pubType": "proceedings",
"pages": "430-431",
"year": "2018",
"issn": null,
"isbn": "978-1-5386-7592-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08699201",
"articleId": "19F1VvOVhew",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08699228",
"articleId": "19F1MQyNDby",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ismar/2015/7660/0/7660a043",
"title": "Simultaneous Direct and Augmented View Distortion Calibration of Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2015/7660a043/12OmNC1oT64",
"parentPublication": {
"id": "proceedings/ismar/2015/7660/0",
"title": "2015 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismarw/2016/3740/0/07836529",
"title": "EyeAR: Refocusable Augmented Reality Content through Eye Measurements",
"doi": null,
"abstractUrl": "/proceedings-article/ismarw/2016/07836529/12OmNxdm4L9",
"parentPublication": {
"id": "proceedings/ismarw/2016/3740/0",
"title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2016/0836/0/07504717",
"title": "OST Rift: Temporally consistent augmented reality with a consumer optical see-through head-mounted display",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2016/07504717/12OmNzXFoKD",
"parentPublication": {
"id": "proceedings/vr/2016/0836/0",
"title": "2016 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2016/0842/0/07460065",
"title": "Smartwatch-assisted robust 6-DOF hand tracker for object manipulation in HMD-based augmented reality",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2016/07460065/12OmNzlUKES",
"parentPublication": {
"id": "proceedings/3dui/2016/0842/0",
"title": "2016 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2018/7592/0/08699267",
"title": "Perception and Action in Peripersonal Space: A Comparison Between Video and Optical See-Through Augmented Reality Devices",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699267/19F1NuzXn9u",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2018/7592/0",
"title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a796",
"title": "A Replication Study to Measure the Perceived Three-Dimensional Location of Virtual Objects in Optical See Through Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a796/1CJfrSkdYUE",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/10077744",
"title": "A Comparative Evaluation of Optical See-through Augmented Reality in Surgical Guidance",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/10077744/1LH8EZ3NEGI",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/07/09253561",
"title": "AR-Loupe: Magnified Augmented Reality by Combining an Optical See-Through Head-Mounted Display and a Loupe",
"doi": null,
"abstractUrl": "/journal/tg/2022/07/09253561/1oDXHeBJHNe",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2020/8508/0/850800a665",
"title": "Supporting Medical Auxiliary Work: The Central Sterile Services Department as a Challenging Environment for Augmented Reality Applications",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2020/850800a665/1pysyCXzE8o",
"parentPublication": {
"id": "proceedings/ismar/2020/8508/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a569",
"title": "Investigation of Microcirculatory Effects of Experiencing Burning Hands in Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a569/1tnXxLHfCOQ",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1gyshXRzHpK",
"title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"acronym": "ismar-adjunct",
"groupId": "1810084",
"volume": "0",
"displayVolume": "0",
"year": "2019",
"__typename": "ProceedingType"
},
"article": {
"id": "1gysoyOrm2A",
"doi": "10.1109/ISMAR-Adjunct.2019.00-60",
"title": "Hand ControlAR: An Augmented Reality Application for Learning 3D Geometry",
"normalizedTitle": "Hand ControlAR: An Augmented Reality Application for Learning 3D Geometry",
"abstract": "The traditional way of learning geometry cannot provide a great support for novice students since the geometric figures are 2D on the blackboard or the book. In consideration that Augmented Reality(AR) provides an intuitive way to learn geometry, an interactive AR system that enables students to naturally and directly manipulating 3D objects through hand gesture-based interactions and intuitively explore the spatial relationship between spheres and polyhedrons is proposed in this paper. The proposed gesture-based interaction enables the user manipulate AR objects in the real 3D space instead of 2D space. We design three levels of study to enable students to learn the geometric concepts as well as an experiment to evaluate the effectiveness of the AR system. Analysis of experimental results showed that the proposed system is easy to use, attractive, and helpful for students.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The traditional way of learning geometry cannot provide a great support for novice students since the geometric figures are 2D on the blackboard or the book. In consideration that Augmented Reality(AR) provides an intuitive way to learn geometry, an interactive AR system that enables students to naturally and directly manipulating 3D objects through hand gesture-based interactions and intuitively explore the spatial relationship between spheres and polyhedrons is proposed in this paper. The proposed gesture-based interaction enables the user manipulate AR objects in the real 3D space instead of 2D space. We design three levels of study to enable students to learn the geometric concepts as well as an experiment to evaluate the effectiveness of the AR system. Analysis of experimental results showed that the proposed system is easy to use, attractive, and helpful for students.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The traditional way of learning geometry cannot provide a great support for novice students since the geometric figures are 2D on the blackboard or the book. In consideration that Augmented Reality(AR) provides an intuitive way to learn geometry, an interactive AR system that enables students to naturally and directly manipulating 3D objects through hand gesture-based interactions and intuitively explore the spatial relationship between spheres and polyhedrons is proposed in this paper. The proposed gesture-based interaction enables the user manipulate AR objects in the real 3D space instead of 2D space. We design three levels of study to enable students to learn the geometric concepts as well as an experiment to evaluate the effectiveness of the AR system. Analysis of experimental results showed that the proposed system is easy to use, attractive, and helpful for students.",
"fno": "476500a144",
"keywords": [
"Augmented Reality",
"Computer Aided Instruction",
"Geometry",
"Gesture Recognition",
"Mathematics Computing",
"Geometric Figures",
"3 D Objects Manipulation",
"Hand Gesture Based Interactions",
"Geometric Concepts",
"Interactive AR System",
"Augmented Reality Application",
"Novice Students",
"Hand Control AR",
"3 D Geometry Learning",
"AR Objects Manipulation",
"Three Dimensional Displays",
"Geometry",
"Education",
"Solid Modeling",
"Two Dimensional Displays",
"Shape",
"Tracking",
"Augmented Reality",
"Hand Gesture Interaction",
"User Defined Targets",
"3 D Objects Manipulation",
"Geometry Education"
],
"authors": [
{
"affiliation": "Beijing Institute of Technology",
"fullName": "Rui Cao",
"givenName": "Rui",
"surname": "Cao",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Beijing Institute of Technology",
"fullName": "Yue Liu",
"givenName": "Yue",
"surname": "Liu",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar-adjunct",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2019-10-01T00:00:00",
"pubType": "proceedings",
"pages": "144-149",
"year": "2019",
"issn": null,
"isbn": "978-1-7281-4765-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "476500a138",
"articleId": "1gysl9uVOoM",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "476500a150",
"articleId": "1gysnPldm9O",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ismar/2013/2869/0/06671841",
"title": "Markerless 3D gesture-based interaction for handheld Augmented Reality interfaces",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2013/06671841/12OmNAIvcZU",
"parentPublication": {
"id": "proceedings/ismar/2013/2869/0",
"title": "2013 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2013/6097/0/06550212",
"title": "Poster: Markerless fingertip-based 3D interaction for handheld augmented reality in a small workspace",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2013/06550212/12OmNBsue2b",
"parentPublication": {
"id": "proceedings/3dui/2013/6097/0",
"title": "2013 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icalt/2018/6049/0/604901a372",
"title": "GeoSolvAR: Augmented Reality Based Solution for Visualizing 3D Solids",
"doi": null,
"abstractUrl": "/proceedings-article/icalt/2018/604901a372/12OmNwMobbg",
"parentPublication": {
"id": "proceedings/icalt/2018/6049/0",
"title": "2018 IEEE 18th International Conference on Advanced Learning Technologies (ICALT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/t4e/2016/6115/0/6115a026",
"title": "Geometry via Gestures: Learning 3D Geometry Using Gestures",
"doi": null,
"abstractUrl": "/proceedings-article/t4e/2016/6115a026/12OmNx5GTY6",
"parentPublication": {
"id": "proceedings/t4e/2016/6115/0",
"title": "2016 IEEE Eighth International Conference on Technology for Education (T4E)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fie/2014/3922/0/07044193",
"title": "From 2D to 3D: Teaching terrain representation in engineering studies through Augmented reality: Comparative versus 3D pdf",
"doi": null,
"abstractUrl": "/proceedings-article/fie/2014/07044193/12OmNxwENpC",
"parentPublication": {
"id": "proceedings/fie/2014/3922/0",
"title": "2014 IEEE Frontiers in Education Conference (FIE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/latice/2015/9967/0/9967a221",
"title": "Learning Geometry with Augmented Reality to Enhance Spatial Ability",
"doi": null,
"abstractUrl": "/proceedings-article/latice/2015/9967a221/12OmNy5zsoL",
"parentPublication": {
"id": "proceedings/latice/2015/9967/0",
"title": "2015 International Conference on Learning and Teaching in Computing and Engineering (LaTiCE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2018/7592/0/08699236",
"title": "The Trouble with Augmented Reality/Virtual Reality Authoring Tools",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699236/19F1TNjWjtK",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2018/7592/0",
"title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/t4e/2019/4227/0/422700a193",
"title": "Collaborative Approaches to Problem-Solving on Lines and Angles Using Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/t4e/2019/422700a193/1hgtGoEU0OA",
"parentPublication": {
"id": "proceedings/t4e/2019/4227/0",
"title": "2019 IEEE Tenth International Conference on Technology for Education (T4E)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/05/08998140",
"title": "Live Semantic 3D Perception for Immersive Augmented Reality",
"doi": null,
"abstractUrl": "/journal/tg/2020/05/08998140/1hpPDSYGijK",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2020/7675/0/767500a203",
"title": "Industrial Augmented Reality: 3D-Content Editor for Augmented Reality Maintenance Worker Support System",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2020/767500a203/1pBMigKK7F6",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2020/7675/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1m3n9N02qgE",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"acronym": "cvpr",
"groupId": "1000147",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1m3o50gS9u8",
"doi": "10.1109/CVPR42600.2020.00459",
"title": "Fusion-Aware Point Convolution for Online Semantic 3D Scene Segmentation",
"normalizedTitle": "Fusion-Aware Point Convolution for Online Semantic 3D Scene Segmentation",
"abstract": "Online semantic 3D segmentation in company with real-time RGB-D reconstruction poses special challenges such as how to perform 3D convolution directly over the progressively fused 3D geometric data, and how to smartly fuse information from frame to frame. We propose a novel fusion-aware 3D point convolution which operates directly on the geometric surface being reconstructed and exploits effectively the inter-frame correlation for high-quality 3D feature learning. This is enabled by a dedicated dynamic data structure that organizes the online acquired point cloud with local-global trees. Globally, we compile the online reconstructed 3D points into an incrementally growing coordinate interval tree, enabling fast point insertion and neighborhood query. Locally, we maintain the neighborhood information for each point using an octree whose construction benefits from the fast query of the global tree. The local octrees facilitate efficient surface-aware point convolution. Both levels of trees update dynamically and help the 3D convolution effectively exploits the temporal coherence for effective information fusion across RGB-D frames.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Online semantic 3D segmentation in company with real-time RGB-D reconstruction poses special challenges such as how to perform 3D convolution directly over the progressively fused 3D geometric data, and how to smartly fuse information from frame to frame. We propose a novel fusion-aware 3D point convolution which operates directly on the geometric surface being reconstructed and exploits effectively the inter-frame correlation for high-quality 3D feature learning. This is enabled by a dedicated dynamic data structure that organizes the online acquired point cloud with local-global trees. Globally, we compile the online reconstructed 3D points into an incrementally growing coordinate interval tree, enabling fast point insertion and neighborhood query. Locally, we maintain the neighborhood information for each point using an octree whose construction benefits from the fast query of the global tree. The local octrees facilitate efficient surface-aware point convolution. Both levels of trees update dynamically and help the 3D convolution effectively exploits the temporal coherence for effective information fusion across RGB-D frames.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Online semantic 3D segmentation in company with real-time RGB-D reconstruction poses special challenges such as how to perform 3D convolution directly over the progressively fused 3D geometric data, and how to smartly fuse information from frame to frame. We propose a novel fusion-aware 3D point convolution which operates directly on the geometric surface being reconstructed and exploits effectively the inter-frame correlation for high-quality 3D feature learning. This is enabled by a dedicated dynamic data structure that organizes the online acquired point cloud with local-global trees. Globally, we compile the online reconstructed 3D points into an incrementally growing coordinate interval tree, enabling fast point insertion and neighborhood query. Locally, we maintain the neighborhood information for each point using an octree whose construction benefits from the fast query of the global tree. The local octrees facilitate efficient surface-aware point convolution. Both levels of trees update dynamically and help the 3D convolution effectively exploits the temporal coherence for effective information fusion across RGB-D frames.",
"fno": "716800e533",
"keywords": [
"Convolution",
"Data Structures",
"Image Colour Analysis",
"Image Fusion",
"Image Reconstruction",
"Image Segmentation",
"Learning Artificial Intelligence",
"Octrees",
"Solid Modelling",
"Fusion Aware Point Convolution",
"Online Semantic 3 D Scene Segmentation",
"Online Semantic 3 D Segmentation",
"Real Time RGB D Reconstruction",
"3 D Geometric Data",
"Geometric Surface",
"Inter Frame Correlation",
"High Quality 3 D Feature Learning",
"Dynamic Data Structure",
"Local Global Trees",
"Point Insertion",
"Neighborhood Query",
"Octrees",
"Information Fusion",
"RGB D Frames",
"Surface Aware Point Convolution",
"Coordinate Interval Tree",
"Point Cloud",
"3 D Convolution",
"Fusion Aware 3 D Point Convolution",
"Online Reconstructed 3 D Points",
"Three Dimensional Displays",
"Convolution",
"Two Dimensional Displays",
"Semantics",
"Octrees",
"Geometry"
],
"authors": [
{
"affiliation": "National University of Defense Technology",
"fullName": "Jiazhao Zhang",
"givenName": "Jiazhao",
"surname": "Zhang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "National University of Defense Technology",
"fullName": "Chenyang Zhu",
"givenName": "Chenyang",
"surname": "Zhu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "National University of Defense Technology",
"fullName": "Lintao Zheng",
"givenName": "Lintao",
"surname": "Zheng",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "National University of Defense Technology; SpeedBot Robotics Ltd.",
"fullName": "Kai Xu",
"givenName": "Kai",
"surname": "Xu",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-06-01T00:00:00",
"pubType": "proceedings",
"pages": "4533-4542",
"year": "2020",
"issn": null,
"isbn": "978-1-7281-7168-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "716800e523",
"articleId": "1m3nF0i0RCo",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "716800e543",
"articleId": "1m3nuyjc5Ta",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iccv/2021/2812/0/281200e945",
"title": "Adaptive Graph Convolution for Point Cloud Analysis",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200e945/1BmFzgP28iA",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/5555/01/10024001",
"title": "AGConv: Adaptive Graph Convolution on 3D Point Clouds",
"doi": null,
"abstractUrl": "/journal/tp/5555/01/10024001/1K9spf0w0Ug",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2019/3131/0/313100a173",
"title": "3D Neighborhood Convolution: Learning Depth-Aware Features for RGB-D and RGB Semantic Segmentation",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2019/313100a173/1ezRDmQtEY0",
"parentPublication": {
"id": "proceedings/3dv/2019/3131/0",
"title": "2019 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2019/4803/0/480300g410",
"title": "KPConv: Flexible and Deformable Convolution for Point Clouds",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2019/480300g410/1hVluNPWu1a",
"parentPublication": {
"id": "proceedings/iccv/2019/4803/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2020/6553/0/09093505",
"title": "Blended Convolution and Synthesis for Efficient Discrimination of 3D Shapes",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2020/09093505/1jPbfCoY1IQ",
"parentPublication": {
"id": "proceedings/wacv/2020/6553/0",
"title": "2020 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800e573",
"title": "SpSequenceNet: Semantic Segmentation Network on 4D Point Clouds",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800e573/1m3nZX73Hna",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/7.168E297",
"title": "FPConv: Learning Local Flattening for Point Convolution",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/7.168E297/1m3nafl5rq0",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800d348",
"title": "Anisotropic Convolutional Networks for 3D Semantic Scene Completion",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800d348/1m3nnvKHeVi",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800b797",
"title": "Convolution in the Cloud: Learning Deformable Kernels in 3D Graph Convolution Networks for Point Cloud Analysis",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800b797/1m3onRq4x3y",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2022/08/09355025",
"title": "Learning of 3D Graph Convolution Networks for Point Cloud Analysis",
"doi": null,
"abstractUrl": "/journal/tp/2022/08/09355025/1rgCbgC4Z8s",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1pP3EDJNvUc",
"title": "2020 35th IEEE/ACM International Conference on Automated Software Engineering (ASE)",
"acronym": "ase",
"groupId": "1000064",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1pP3IvL3Z6w",
"doi": null,
"title": "Towards Immersive Comprehension of Software Systems Using Augmented Reality - An Empirical Evaluation",
"normalizedTitle": "Towards Immersive Comprehension of Software Systems Using Augmented Reality - An Empirical Evaluation",
"abstract": "While traditionally, software comprehension relies on approaches like reading through the code or looking at charts on screens, which are 2D mediums, there have been some recent approaches that advocate exploring 3D approaches like Augmented or Virtual Reality (AR/VR) to have a richer experience towards understanding software and its internal relationships. However, there is a dearth of objective studies that compare such 3D representations with their traditional 2D counterparts in the context of software comprehension. In this paper, we present an evaluation study to quantitatively and qualitatively compare 2D and 3D software representations with respect to typical comprehension tasks. For the 3D medium, we utilize an AR-based approach for 3D visualizations of a software system (XRaSE), while the 2D medium comprises of textual IDEs and 2D graph representations. The study, which has been conducted using 20 professional developers, shows that for most comprehension tasks, the developers perform much better using the 3D representation, especially in terms of velocity and recollection, while also displaying reduced cognitive load and better engagement.",
"abstracts": [
{
"abstractType": "Regular",
"content": "While traditionally, software comprehension relies on approaches like reading through the code or looking at charts on screens, which are 2D mediums, there have been some recent approaches that advocate exploring 3D approaches like Augmented or Virtual Reality (AR/VR) to have a richer experience towards understanding software and its internal relationships. However, there is a dearth of objective studies that compare such 3D representations with their traditional 2D counterparts in the context of software comprehension. In this paper, we present an evaluation study to quantitatively and qualitatively compare 2D and 3D software representations with respect to typical comprehension tasks. For the 3D medium, we utilize an AR-based approach for 3D visualizations of a software system (XRaSE), while the 2D medium comprises of textual IDEs and 2D graph representations. The study, which has been conducted using 20 professional developers, shows that for most comprehension tasks, the developers perform much better using the 3D representation, especially in terms of velocity and recollection, while also displaying reduced cognitive load and better engagement.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "While traditionally, software comprehension relies on approaches like reading through the code or looking at charts on screens, which are 2D mediums, there have been some recent approaches that advocate exploring 3D approaches like Augmented or Virtual Reality (AR/VR) to have a richer experience towards understanding software and its internal relationships. However, there is a dearth of objective studies that compare such 3D representations with their traditional 2D counterparts in the context of software comprehension. In this paper, we present an evaluation study to quantitatively and qualitatively compare 2D and 3D software representations with respect to typical comprehension tasks. For the 3D medium, we utilize an AR-based approach for 3D visualizations of a software system (XRaSE), while the 2D medium comprises of textual IDEs and 2D graph representations. The study, which has been conducted using 20 professional developers, shows that for most comprehension tasks, the developers perform much better using the 3D representation, especially in terms of velocity and recollection, while also displaying reduced cognitive load and better engagement.",
"fno": "676800b267",
"keywords": [
"Augmented Reality",
"Cognition",
"Data Visualisation",
"Graph Theory",
"Software Maintenance",
"User Interfaces",
"Virtual Reality",
"Augmented Reality",
"Empirical Evaluation",
"Software Comprehension",
"Virtual Reality",
"Richer Experience",
"Understanding Software",
"Objective Studies",
"Traditional 2 D Counterparts",
"Evaluation Study",
"3 D Software Representations",
"Typical Comprehension Tasks",
"Software System",
"2 D Graph Representations",
"Towards Immersive Comprehension",
"Visualization",
"Three Dimensional Displays",
"Two Dimensional Displays",
"Software Systems",
"Task Analysis",
"Augmented Reality",
"Software Engineering",
"Software Visualization",
"Augmented Reality",
"3 D Software",
"User Study"
],
"authors": [
{
"affiliation": "Accenture Labs,India",
"fullName": "Rohit Mehra",
"givenName": "Rohit",
"surname": "Mehra",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Accenture Labs,India",
"fullName": "Vibhu Saujanya Sharma",
"givenName": "Vibhu Saujanya",
"surname": "Sharma",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Accenture Labs,India",
"fullName": "Vikrant Kaulgud",
"givenName": "Vikrant",
"surname": "Kaulgud",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Accenture Labs,India",
"fullName": "Sanjay Podder",
"givenName": "Sanjay",
"surname": "Podder",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Accenture,Singapore",
"fullName": "Adam P. Burden",
"givenName": "Adam P.",
"surname": "Burden",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ase",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-09-01T00:00:00",
"pubType": "proceedings",
"pages": "1267-1269",
"year": "2020",
"issn": null,
"isbn": "978-1-4503-6768-4",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "676800b264",
"articleId": "1pP3Ngd3rJS",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "676800b270",
"articleId": "1pP3FGF77t6",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/3dui/2015/6886/0/07131755",
"title": "Mapping 2D input to 3D immersive spatial augmented reality",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2015/07131755/12OmNwAKCNT",
"parentPublication": {
"id": "proceedings/3dui/2015/6886/0",
"title": "2015 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icalt/2018/6049/0/604901a372",
"title": "GeoSolvAR: Augmented Reality Based Solution for Visualizing 3D Solids",
"doi": null,
"abstractUrl": "/proceedings-article/icalt/2018/604901a372/12OmNwMobbg",
"parentPublication": {
"id": "proceedings/icalt/2018/6049/0",
"title": "2018 IEEE 18th International Conference on Advanced Learning Technologies (ICALT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2016/3641/0/3641a077",
"title": "PPV: Pixel-Point-Volume Segmentation for Object Referencing in Collaborative Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2016/3641a077/12OmNxy4N6P",
"parentPublication": {
"id": "proceedings/ismar/2016/3641/0",
"title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/01/08019876",
"title": "The Hologram in My Hand: How Effective is Interactive Exploration of 3D Visualizations in Immersive Tangible Augmented Reality?",
"doi": null,
"abstractUrl": "/journal/tg/2018/01/08019876/13rRUzp02oy",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2018/7592/0/08699222",
"title": "TutAR: Semi-Automatic Generation of Augmented Reality Tutorials for Medical Education",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699222/19F1PQOMxWg",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2018/7592/0",
"title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ase/2019/2508/0/250800b194",
"title": "XRaSE: Towards Virtually Tangible Software using Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ase/2019/250800b194/1gysShIy9Es",
"parentPublication": {
"id": "proceedings/ase/2019/2508/0",
"title": "2019 34th IEEE/ACM International Conference on Automated Software Engineering (ASE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/05/08998140",
"title": "Live Semantic 3D Perception for Immersive Augmented Reality",
"doi": null,
"abstractUrl": "/journal/tg/2020/05/08998140/1hpPDSYGijK",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/inciscos/2019/5581/0/558100a116",
"title": "ARTOUR: Augmented Reality for Tourism - A Case Study in Riobamba, Ecuador",
"doi": null,
"abstractUrl": "/proceedings-article/inciscos/2019/558100a116/1iHUGrjAlXO",
"parentPublication": {
"id": "proceedings/inciscos/2019/5581/0",
"title": "2019 International Conference on Information Systems and Computer Science (INCISCOS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090401",
"title": "Learning to Match 2D Images and 3D LiDAR Point Clouds for Outdoor Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090401/1jIxmhXvH7a",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2020/7675/0/767500a203",
"title": "Industrial Augmented Reality: 3D-Content Editor for Augmented Reality Maintenance Worker Support System",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2020/767500a203/1pBMigKK7F6",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2020/7675/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNx0A7K1",
"title": "Face and Gesture 2011",
"acronym": "fg",
"groupId": "1000065",
"volume": "0",
"displayVolume": "0",
"year": "2011",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNAi6vUx",
"doi": "10.1109/FG.2011.5771364",
"title": "Facial expression recognition using emotion avatar image",
"normalizedTitle": "Facial expression recognition using emotion avatar image",
"abstract": "Existing facial expression recognition techniques analyze the spatial and temporal information for every single frame in a human emotion video. On the contrary, we create the Emotion Avatar Image (EAI) as a single good representation for each video or image sequence for emotion recognition. In this paper, we adopt the recently introduced SIFT flow algorithm to register every frame with respect to an Avatar reference face model. Then, an iterative algorithm is used not only to super-resolve the EAI representation for each video and the Avatar reference, but also to improve the recognition performance. Subsequently, we extract the features from EAIs using both Local Binary Pattern (LBP) and Local Phase Quantization (LPQ). Then the results from both texture descriptors are tested on the Facial Expression Recognition and Analysis Challenge (FERA2011) data, GEMEP-FERA dataset. To evaluate this simple yet powerful idea, we train our algorithm only using the given 155 videos of training data from GEMEP-FERA dataset. The result shows that our algorithm eliminates the person-specific information for emotion and performs well on unseen data.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Existing facial expression recognition techniques analyze the spatial and temporal information for every single frame in a human emotion video. On the contrary, we create the Emotion Avatar Image (EAI) as a single good representation for each video or image sequence for emotion recognition. In this paper, we adopt the recently introduced SIFT flow algorithm to register every frame with respect to an Avatar reference face model. Then, an iterative algorithm is used not only to super-resolve the EAI representation for each video and the Avatar reference, but also to improve the recognition performance. Subsequently, we extract the features from EAIs using both Local Binary Pattern (LBP) and Local Phase Quantization (LPQ). Then the results from both texture descriptors are tested on the Facial Expression Recognition and Analysis Challenge (FERA2011) data, GEMEP-FERA dataset. To evaluate this simple yet powerful idea, we train our algorithm only using the given 155 videos of training data from GEMEP-FERA dataset. The result shows that our algorithm eliminates the person-specific information for emotion and performs well on unseen data.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Existing facial expression recognition techniques analyze the spatial and temporal information for every single frame in a human emotion video. On the contrary, we create the Emotion Avatar Image (EAI) as a single good representation for each video or image sequence for emotion recognition. In this paper, we adopt the recently introduced SIFT flow algorithm to register every frame with respect to an Avatar reference face model. Then, an iterative algorithm is used not only to super-resolve the EAI representation for each video and the Avatar reference, but also to improve the recognition performance. Subsequently, we extract the features from EAIs using both Local Binary Pattern (LBP) and Local Phase Quantization (LPQ). Then the results from both texture descriptors are tested on the Facial Expression Recognition and Analysis Challenge (FERA2011) data, GEMEP-FERA dataset. To evaluate this simple yet powerful idea, we train our algorithm only using the given 155 videos of training data from GEMEP-FERA dataset. The result shows that our algorithm eliminates the person-specific information for emotion and performs well on unseen data.",
"fno": "05771364",
"keywords": [
"Avatars",
"Emotion Recognition",
"Face Recognition",
"Feature Extraction",
"Image Representation",
"Image Sequences",
"Iterative Methods",
"Transforms",
"Video Signal Processing",
"Facial Expression Recognition Techniques",
"Emotion Avatar Image",
"Human Emotion Video",
"Video Sequence",
"Image Sequence",
"Emotion Recognition",
"SIFT Flow Algorithm",
"Avatar Reference Face Model",
"Iterative Algorithm",
"Local Binary Pattern",
"Local Phase Quantization",
"Feature Extraction",
"GEMEP FERA Dataset",
"Face",
"Avatars",
"Feature Extraction",
"Face Recognition",
"Image Recognition",
"Pixel",
"Facial Features",
"Face Registration",
"Level Of Emotion Avatar Image",
"Person Independent Emotion Recognition",
"SIFT Flow"
],
"authors": [
{
"affiliation": "Center for Research in Intelligent Systems, University of California, Riverside n, Riverside, CA 92521, US",
"fullName": "Songfan Yang",
"givenName": "Songfan",
"surname": "Yang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Center for Research in Intelligent Systems, University of California, Riverside n, Riverside, CA 92521, US",
"fullName": "Bir Bhanu",
"givenName": "Bir",
"surname": "Bhanu",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "fg",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2011-03-01T00:00:00",
"pubType": "proceedings",
"pages": "866-871",
"year": "2011",
"issn": null,
"isbn": "978-1-4244-9140-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "05771363",
"articleId": "12OmNrEL2Cd",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "05771365",
"articleId": "12OmNAXxXdS",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/fg/2011/9140/0/05771371",
"title": "Accumulated motion images for facial expression recognition in videos",
"doi": null,
"abstractUrl": "/proceedings-article/fg/2011/05771371/12OmNCxbXCN",
"parentPublication": {
"id": "proceedings/fg/2011/9140/0",
"title": "Face and Gesture 2011",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2016/0641/0/07477679",
"title": "Multimodal emotion recognition using deep learning architectures",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2016/07477679/12OmNviHKjy",
"parentPublication": {
"id": "proceedings/wacv/2016/0641/0",
"title": "2016 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icgciot/2015/7910/0/07380558",
"title": "Facial components extraction and expression recognition in static images",
"doi": null,
"abstractUrl": "/proceedings-article/icgciot/2015/07380558/12OmNxvO0a6",
"parentPublication": {
"id": "proceedings/icgciot/2015/7910/0",
"title": "2015 International Conference on Green Computing and Internet of Things (ICGCIoT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2011/0063/0/06130506",
"title": "Facial action unit detection using kernel partial least squares",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2011/06130506/12OmNzCF4Vd",
"parentPublication": {
"id": "proceedings/iccvw/2011/0063/0",
"title": "2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/acii/2013/5048/0/5048a479",
"title": "Reading Personality: Avatar vs. Human Faces",
"doi": null,
"abstractUrl": "/proceedings-article/acii/2013/5048a479/12OmNzUgdfd",
"parentPublication": {
"id": "proceedings/acii/2013/5048/0",
"title": "2013 Humaine Association Conference on Affective Computing and Intelligent Interaction (ACII)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fg/2011/9140/0/05771366",
"title": "Emotion recognition using PHOG and LPQ features",
"doi": null,
"abstractUrl": "/proceedings-article/fg/2011/05771366/12OmNzZWbMh",
"parentPublication": {
"id": "proceedings/fg/2011/9140/0",
"title": "Face and Gesture 2011",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fg/2011/9140/0/05771362",
"title": "Emotion recognition by two view SVM_2K classifier on dynamic facial expression features",
"doi": null,
"abstractUrl": "/proceedings-article/fg/2011/05771362/12OmNzcxZkT",
"parentPublication": {
"id": "proceedings/fg/2011/9140/0",
"title": "Face and Gesture 2011",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ta/2016/04/07296631",
"title": "Facial Expression Recognition in the Presence of Speech Using Blind Lexical Compensation",
"doi": null,
"abstractUrl": "/journal/ta/2016/04/07296631/13rRUEgarro",
"parentPublication": {
"id": "trans/ta",
"title": "IEEE Transactions on Affective Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iucc-cit-dsci-smartcns/2021/6667/0/666700a375",
"title": "Facial Expression Study Based on 3D Facial Emotion Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/iucc-cit-dsci-smartcns/2021/666700a375/1BrAKBj8Tug",
"parentPublication": {
"id": "proceedings/iucc-cit-dsci-smartcns/2021/6667/0",
"title": "2021 20th International Conference on Ubiquitous Computing and Communications (IUCC/CIT/DSCI/SmartCNS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/05/08998352",
"title": "Using Facial Animation to Increase the Enfacement Illusion and Avatar Self-Identification",
"doi": null,
"abstractUrl": "/journal/tg/2020/05/08998352/1hpPCCB7Bte",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNBaT60w",
"title": "2015 11th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (FG)",
"acronym": "fg",
"groupId": "1000065",
"volume": "1",
"displayVolume": "1",
"year": "2015",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNApcuBK",
"doi": "10.1109/FG.2015.7163173",
"title": "Real-time facial character animation",
"normalizedTitle": "Real-time facial character animation",
"abstract": "This demonstration paper presents a real-time facial character animation application where the facial expressions of a person are simultaneously synthesized on a virtual avatar. The proposed method does not require any training or calibration for the person interacting with the system. An Active Appearance Model based technique is used to track more than 500 points on the face to create the animated expression of the virtual avatar. The sex, age or ethnicity of the subject in front of the camera can also be automatically analyzed and hence the visualization of the avatar could be adapted accordingly. This application requires a standard web cam and is intended for gaming, entertainment or video conference purposes and will be presented in a real-time setup during the demo session.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This demonstration paper presents a real-time facial character animation application where the facial expressions of a person are simultaneously synthesized on a virtual avatar. The proposed method does not require any training or calibration for the person interacting with the system. An Active Appearance Model based technique is used to track more than 500 points on the face to create the animated expression of the virtual avatar. The sex, age or ethnicity of the subject in front of the camera can also be automatically analyzed and hence the visualization of the avatar could be adapted accordingly. This application requires a standard web cam and is intended for gaming, entertainment or video conference purposes and will be presented in a real-time setup during the demo session.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This demonstration paper presents a real-time facial character animation application where the facial expressions of a person are simultaneously synthesized on a virtual avatar. The proposed method does not require any training or calibration for the person interacting with the system. An Active Appearance Model based technique is used to track more than 500 points on the face to create the animated expression of the virtual avatar. The sex, age or ethnicity of the subject in front of the camera can also be automatically analyzed and hence the visualization of the avatar could be adapted accordingly. This application requires a standard web cam and is intended for gaming, entertainment or video conference purposes and will be presented in a real-time setup during the demo session.",
"fno": "07163173",
"keywords": [
"Avatars",
"Face",
"Real Time Systems",
"Cameras",
"Games",
"Three Dimensional Displays",
"Animation"
],
"authors": [
{
"affiliation": "Vicarious Perception Technol., Amsterdam, Netherlands",
"fullName": "H. Emrah Tasli",
"givenName": "H. Emrah",
"surname": "Tasli",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Vicarious Perception Technol., Amsterdam, Netherlands",
"fullName": "Tim M. den Uyl",
"givenName": "Tim M.",
"surname": "den Uyl",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Inst. Mines-Telecom, SudParis, Paris, France",
"fullName": "Hugo Boujut",
"givenName": "Hugo",
"surname": "Boujut",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Inst. Mines-Telecom, SudParis, Paris, France",
"fullName": "Titus Zaharia",
"givenName": "Titus",
"surname": "Zaharia",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "fg",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2015-05-01T00:00:00",
"pubType": "proceedings",
"pages": "1-1",
"year": "2015",
"issn": null,
"isbn": "978-1-4799-6026-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07163172",
"articleId": "12OmNrnJ6YV",
"__typename": "AdjacentArticleType"
},
"next": null,
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/fg/2011/9140/0/05771364",
"title": "Facial expression recognition using emotion avatar image",
"doi": null,
"abstractUrl": "/proceedings-article/fg/2011/05771364/12OmNAi6vUx",
"parentPublication": {
"id": "proceedings/fg/2011/9140/0",
"title": "Face and Gesture 2011",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fg/2011/9140/0/05771400",
"title": "Real-time avatar animation from a single image",
"doi": null,
"abstractUrl": "/proceedings-article/fg/2011/05771400/12OmNButpYT",
"parentPublication": {
"id": "proceedings/fg/2011/9140/0",
"title": "Face and Gesture 2011",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fg/2011/9140/0/05771383",
"title": "Real-time avatar animation from a single image",
"doi": null,
"abstractUrl": "/proceedings-article/fg/2011/05771383/12OmNqNXEmD",
"parentPublication": {
"id": "proceedings/fg/2011/9140/0",
"title": "Face and Gesture 2011",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dmdcm/2011/4413/0/4413a132",
"title": "Towards 3D Communications: Real Time Emotion Driven 3D Virtual Facial Animation",
"doi": null,
"abstractUrl": "/proceedings-article/dmdcm/2011/4413a132/12OmNrHjqI9",
"parentPublication": {
"id": "proceedings/dmdcm/2011/4413/0",
"title": "Digital Media and Digital Content Management, Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvrv/2015/7673/0/7673a185",
"title": "Real-Time 2.5D Facial Cartoon Animation Based on Pose and Expression Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/icvrv/2015/7673a185/12OmNvSKNRM",
"parentPublication": {
"id": "proceedings/icvrv/2015/7673/0",
"title": "2015 International Conference on Virtual Reality and Visualization (ICVRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sibgrapi/2010/8420/0/05720342",
"title": "Eyes and Eyebrows Detection for Performance Driven Animation",
"doi": null,
"abstractUrl": "/proceedings-article/sibgrapi/2010/05720342/12OmNvoWV3q",
"parentPublication": {
"id": "proceedings/sibgrapi/2010/8420/0",
"title": "2010 23rd SIBGRAPI Conference on Graphics, Patterns and Images",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2014/4761/0/06890231",
"title": "Real-time control of 3D facial animation",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2014/06890231/12OmNyOHG1A",
"parentPublication": {
"id": "proceedings/icme/2014/4761/0",
"title": "2014 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a039",
"title": "HeadBox: A Facial Blendshape Animation Toolkit for the Microsoft Rocketbox Library",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a039/1CJeXP9uYta",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a429",
"title": "Real-time Expressive Avatar Animation Generation based on Monocular Videos",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a429/1J7Wj0kJrJm",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/05/08998352",
"title": "Using Facial Animation to Increase the Enfacement Illusion and Avatar Self-Identification",
"doi": null,
"abstractUrl": "/journal/tg/2020/05/08998352/1hpPCCB7Bte",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNzBOhX1",
"title": "2013 Humaine Association Conference on Affective Computing and Intelligent Interaction (ACII)",
"acronym": "acii",
"groupId": "1002992",
"volume": "0",
"displayVolume": "0",
"year": "2013",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNzUgdfd",
"doi": "10.1109/ACII.2013.85",
"title": "Reading Personality: Avatar vs. Human Faces",
"normalizedTitle": "Reading Personality: Avatar vs. Human Faces",
"abstract": "Studies have suggested that facial appearance leads to judgment of a person's character, e.g. wide-faced males are considered aggressive and untrustworthy, large eyes make a person appear honest and non-dominant, and short ears and nose give impressions of warmth and honesty. In this study we investigate whether we make similar judgment with an avatar's face. Using Second Life avatars as stimuli, we employ Paired-Comparison Tests to determine the implications of certain facial features. Our results suggest that people judge an avatar by its look, and such judgment is sensitive to eeriness and baby facedness of the avatar.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Studies have suggested that facial appearance leads to judgment of a person's character, e.g. wide-faced males are considered aggressive and untrustworthy, large eyes make a person appear honest and non-dominant, and short ears and nose give impressions of warmth and honesty. In this study we investigate whether we make similar judgment with an avatar's face. Using Second Life avatars as stimuli, we employ Paired-Comparison Tests to determine the implications of certain facial features. Our results suggest that people judge an avatar by its look, and such judgment is sensitive to eeriness and baby facedness of the avatar.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Studies have suggested that facial appearance leads to judgment of a person's character, e.g. wide-faced males are considered aggressive and untrustworthy, large eyes make a person appear honest and non-dominant, and short ears and nose give impressions of warmth and honesty. In this study we investigate whether we make similar judgment with an avatar's face. Using Second Life avatars as stimuli, we employ Paired-Comparison Tests to determine the implications of certain facial features. Our results suggest that people judge an avatar by its look, and such judgment is sensitive to eeriness and baby facedness of the avatar.",
"fno": "5048a479",
"keywords": [
"Avatars",
"Ear",
"Nose",
"Facial Features",
"Games",
"Economics",
"Personality",
"Physiognomy",
"Avatar",
"Perception"
],
"authors": [
{
"affiliation": "Coll. of Comput. & Inf. Sci., Rochester Inst. of Technol., Rochester, NY, USA",
"fullName": "Yuqiong Wang",
"givenName": null,
"surname": "Yuqiong Wang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Comput. Sci. Dept., Rochester Inst. of Technol., Rochester, NY, USA",
"fullName": "Joe Geigel",
"givenName": "Joe",
"surname": "Geigel",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Psychol. Dept., Rochester Inst. of Technol., Rochester, NY, USA",
"fullName": "Andrew Herbert",
"givenName": "Andrew",
"surname": "Herbert",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "acii",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2013-09-01T00:00:00",
"pubType": "proceedings",
"pages": "479-484",
"year": "2013",
"issn": "2156-8103",
"isbn": "978-0-7695-5048-0",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "5048a473",
"articleId": "12OmNwBBqbT",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "5048a485",
"articleId": "12OmNz5apIf",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/fg/2011/9140/0/05771364",
"title": "Facial expression recognition using emotion avatar image",
"doi": null,
"abstractUrl": "/proceedings-article/fg/2011/05771364/12OmNAi6vUx",
"parentPublication": {
"id": "proceedings/fg/2011/9140/0",
"title": "Face and Gesture 2011",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dmdcm/2011/4413/0/4413a132",
"title": "Towards 3D Communications: Real Time Emotion Driven 3D Virtual Facial Animation",
"doi": null,
"abstractUrl": "/proceedings-article/dmdcm/2011/4413a132/12OmNrHjqI9",
"parentPublication": {
"id": "proceedings/dmdcm/2011/4413/0",
"title": "Digital Media and Digital Content Management, Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2008/1971/0/04480759",
"title": "High-Fidelity Avatar Eye-Representation",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2008/04480759/12OmNrJAdQR",
"parentPublication": {
"id": "proceedings/vr/2008/1971/0",
"title": "IEEE Virtual Reality 2008",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2008/1971/0/04480758",
"title": "Creating a Speech Enabled Avatar from a Single Photograph",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2008/04480758/12OmNrMHOjV",
"parentPublication": {
"id": "proceedings/vr/2008/1971/0",
"title": "IEEE Virtual Reality 2008",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2017/6647/0/07892245",
"title": "Recognition and mapping of facial expressions to avatar by embedded photo reflective sensors in head mounted display",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892245/12OmNwkR5tU",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/04/ttg2013040591",
"title": "An Evaluation of Self-Avatar Eye Movement for Virtual Embodiment",
"doi": null,
"abstractUrl": "/journal/tg/2013/04/ttg2013040591/13rRUyYBlgz",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a770",
"title": "Emotional Empathy and Facial Mimicry of Avatar Faces",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a770/1CJdHd5yTSM",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/05/08998352",
"title": "Using Facial Animation to Increase the Enfacement Illusion and Avatar Self-Identification",
"doi": null,
"abstractUrl": "/journal/tg/2020/05/08998352/1hpPCCB7Bte",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/05/08998371",
"title": "The Impact of a Self-Avatar, Hand Collocation, and Hand Proximity on Embodiment and Stroop Interference",
"doi": null,
"abstractUrl": "/journal/tg/2020/05/08998371/1hrXiia6v9C",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2019/5023/0/502300c100",
"title": "Landmark-Guided Deformation Transfer of Template Facial Expressions for Automatic Generation of Avatar Blendshapes",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2019/502300c100/1i5mNnnOzlu",
"parentPublication": {
"id": "proceedings/iccvw/2019/5023/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision Workshop (ICCVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1CJcAaH6aYg",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"acronym": "vrw",
"groupId": "1836626",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1CJeXP9uYta",
"doi": "10.1109/VRW55335.2022.00015",
"title": "HeadBox: A Facial Blendshape Animation Toolkit for the Microsoft Rocketbox Library",
"normalizedTitle": "HeadBox: A Facial Blendshape Animation Toolkit for the Microsoft Rocketbox Library",
"abstract": "HeadBox is a series of opensource tools to do facial animation on the Microsoft Rocketbox avatar library. It includes a tool to create blendshapes out of the facial bones inside Maya and transfer the new blendshapes to the other avatars in the library. We have created a total of 15 visemes, 48 FACS, 30 for the Vive facial tracker. These blendshapes have been released with the original library. An additional Unity demo shows the use these tools with Openface and Oculus Lipsync.",
"abstracts": [
{
"abstractType": "Regular",
"content": "HeadBox is a series of opensource tools to do facial animation on the Microsoft Rocketbox avatar library. It includes a tool to create blendshapes out of the facial bones inside Maya and transfer the new blendshapes to the other avatars in the library. We have created a total of 15 visemes, 48 FACS, 30 for the Vive facial tracker. These blendshapes have been released with the original library. An additional Unity demo shows the use these tools with Openface and Oculus Lipsync.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "HeadBox is a series of opensource tools to do facial animation on the Microsoft Rocketbox avatar library. It includes a tool to create blendshapes out of the facial bones inside Maya and transfer the new blendshapes to the other avatars in the library. We have created a total of 15 visemes, 48 FACS, 30 for the Vive facial tracker. These blendshapes have been released with the original library. An additional Unity demo shows the use these tools with Openface and Oculus Lipsync.",
"fno": "840200a039",
"keywords": [
"Avatars",
"Computer Animation",
"Facial Blendshape Animation Toolkit",
"Head Box",
"Opensource Tools",
"Facial Animation",
"Microsoft Rocketbox Avatar Library",
"Facial Bones",
"Vive Facial Tracker",
"Original Library",
"Openface",
"Oculus Lipsync",
"Three Dimensional Displays",
"Conferences",
"Avatars",
"User Interfaces",
"Bones",
"Libraries",
"Facial Animation",
"Microsoft Rocketbox",
"Avatars",
"Virtual Humans",
"Open Source",
"Blendshapes",
"Animation",
"Facial",
"Rigging"
],
"authors": [
{
"affiliation": "Northeastern University",
"fullName": "Matias Volonte",
"givenName": "Matias",
"surname": "Volonte",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Microsoft",
"fullName": "Eyal Ofek",
"givenName": "Eyal",
"surname": "Ofek",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Microsoft",
"fullName": "Ken Jakubzak",
"givenName": "Ken",
"surname": "Jakubzak",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Microsoft",
"fullName": "Shawn Bruner",
"givenName": "Shawn",
"surname": "Bruner",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Microsoft",
"fullName": "Mar Gonzalez-Franco",
"givenName": "Mar",
"surname": "Gonzalez-Franco",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vrw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-03-01T00:00:00",
"pubType": "proceedings",
"pages": "39-42",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-8402-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [
{
"id": "1CJeXCuVGWQ",
"name": "pvrw202284020-09757409s1-mm_840200a039.zip",
"size": "31.7 MB",
"location": "https://www.computer.org/csdl/api/v1/extra/pvrw202284020-09757409s1-mm_840200a039.zip",
"__typename": "WebExtraType"
}
],
"adjacentArticles": {
"previous": {
"fno": "840200a031",
"articleId": "1CJeVETw35e",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "840200a043",
"articleId": "1CJcLOvkmfS",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/culture-computing/2011/4546/0/4546a121",
"title": "Thai Speech-Driven Facial Animation",
"doi": null,
"abstractUrl": "/proceedings-article/culture-computing/2011/4546a121/12OmNARiM0A",
"parentPublication": {
"id": "proceedings/culture-computing/2011/4546/0",
"title": "International Conference on Culture and Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccsee/2012/4647/3/4647c434",
"title": "A Survey of Computer Facial Animation Techniques",
"doi": null,
"abstractUrl": "/proceedings-article/iccsee/2012/4647c434/12OmNAXxXhU",
"parentPublication": {
"id": "proceedings/iccsee/2012/4647/3",
"title": "Computer Science and Electronics Engineering, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fg/2018/2335/0/233501a059",
"title": "OpenFace 2.0: Facial Behavior Analysis Toolkit",
"doi": null,
"abstractUrl": "/proceedings-article/fg/2018/233501a059/12OmNBNM991",
"parentPublication": {
"id": "proceedings/fg/2018/2335/0",
"title": "2018 13th IEEE International Conference on Automatic Face & Gesture Recognition (FG 2018)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sibgrapi/2010/8420/0/05720335",
"title": "3D Linear Facial Animation Based on Real Data",
"doi": null,
"abstractUrl": "/proceedings-article/sibgrapi/2010/05720335/12OmNqGiu2b",
"parentPublication": {
"id": "proceedings/sibgrapi/2010/8420/0",
"title": "2010 23rd SIBGRAPI Conference on Graphics, Patterns and Images",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ca/1996/7588/0/75880098",
"title": "Facial Animation",
"doi": null,
"abstractUrl": "/proceedings-article/ca/1996/75880098/12OmNvT2oR2",
"parentPublication": {
"id": "proceedings/ca/1996/7588/0",
"title": "Computer Animation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ca/1994/6240/0/00324006",
"title": "Langwidere: a new facial animation system",
"doi": null,
"abstractUrl": "/proceedings-article/ca/1994/00324006/12OmNwErpsG",
"parentPublication": {
"id": "proceedings/ca/1994/6240/0",
"title": "Proceedings of Computer Animation '94",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/svr/2017/3588/0/3588a078",
"title": "The Development of a Facial Animation System Based on Performance and the Use of an RGB-D Camera",
"doi": null,
"abstractUrl": "/proceedings-article/svr/2017/3588a078/12OmNxV4iA1",
"parentPublication": {
"id": "proceedings/svr/2017/3588/0",
"title": "2017 19th Symposium on Virtual and Augmented Reality (SVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ca/2002/1594/0/15940248",
"title": "Prototyping and Transforming Visemes for Animated Speech",
"doi": null,
"abstractUrl": "/proceedings-article/ca/2002/15940248/12OmNymjN2c",
"parentPublication": {
"id": "proceedings/ca/2002/1594/0",
"title": "Computer Animation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2005/03/v0341",
"title": "Creating Speech-Synchronized Animation",
"doi": null,
"abstractUrl": "/journal/tg/2005/03/v0341/13rRUxE04tq",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/05/08998352",
"title": "Using Facial Animation to Increase the Enfacement Illusion and Avatar Self-Identification",
"doi": null,
"abstractUrl": "/journal/tg/2020/05/08998352/1hpPCCB7Bte",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1CJcAaH6aYg",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"acronym": "vrw",
"groupId": "1836626",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1CJfmJhWzMQ",
"doi": "10.1109/VRW55335.2022.00130",
"title": "CV-Mora Based Lip Sync Facial Animations for Japanese Speech",
"normalizedTitle": "CV-Mora Based Lip Sync Facial Animations for Japanese Speech",
"abstract": "To generate authentic real-time facial animations using face mesh data, which corresponds to fifty-six consonant and vowel (CV) types of morae that form the basis of Japanese speech, we propose a new method. Our method produces facial expressions by the weighted addition of fifty-three face meshes based on the real-time mapping of voice streaming to registered morae. In the user study, results showed that facial expressions produced during Japanese speech were more natural using our method than those using popular methods to generate real-time English-based Oculus lip sync and volume intensity-based facial animations.",
"abstracts": [
{
"abstractType": "Regular",
"content": "To generate authentic real-time facial animations using face mesh data, which corresponds to fifty-six consonant and vowel (CV) types of morae that form the basis of Japanese speech, we propose a new method. Our method produces facial expressions by the weighted addition of fifty-three face meshes based on the real-time mapping of voice streaming to registered morae. In the user study, results showed that facial expressions produced during Japanese speech were more natural using our method than those using popular methods to generate real-time English-based Oculus lip sync and volume intensity-based facial animations.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "To generate authentic real-time facial animations using face mesh data, which corresponds to fifty-six consonant and vowel (CV) types of morae that form the basis of Japanese speech, we propose a new method. Our method produces facial expressions by the weighted addition of fifty-three face meshes based on the real-time mapping of voice streaming to registered morae. In the user study, results showed that facial expressions produced during Japanese speech were more natural using our method than those using popular methods to generate real-time English-based Oculus lip sync and volume intensity-based facial animations.",
"fno": "840200a558",
"keywords": [
"Computer Animation",
"Face Recognition",
"Mesh Generation",
"Speech Processing",
"Mora Based Lip Sync Facial Animations",
"Japanese Speech",
"Face Mesh Data",
"Facial Expressions",
"Face Meshes",
"Registered Morae",
"Volume Intensity Based Facial Animations",
"Voice Streaming",
"Consonant And Vowel",
"Three Dimensional Displays",
"Lips",
"Conferences",
"Virtual Reality",
"User Interfaces",
"Real Time Systems",
"Facial Animation",
"Computing Methodologies Computer Graphics Animation Procedural Animation",
"Human Centered Computing Human Computer Interaction HCI Interaction Paradigms Virtual Reality"
],
"authors": [
{
"affiliation": "Tokyo Metropolitan University",
"fullName": "Ryoto Kato",
"givenName": "Ryoto",
"surname": "Kato",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Tokyo Metropolitan University",
"fullName": "Yusuke Kikuchi",
"givenName": "Yusuke",
"surname": "Kikuchi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Tokyo Metropolitan University",
"fullName": "Vibol Yem",
"givenName": "Vibol",
"surname": "Yem",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "The University of Tokyo",
"fullName": "Yasushi Ikei",
"givenName": "Yasushi",
"surname": "Ikei",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vrw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-03-01T00:00:00",
"pubType": "proceedings",
"pages": "558-559",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-8402-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "840200a556",
"articleId": "1CJdXqzjctO",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "840200a560",
"articleId": "1CJf1glFEzu",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/dmdcm/2011/4413/0/4413a132",
"title": "Towards 3D Communications: Real Time Emotion Driven 3D Virtual Facial Animation",
"doi": null,
"abstractUrl": "/proceedings-article/dmdcm/2011/4413a132/12OmNrHjqI9",
"parentPublication": {
"id": "proceedings/dmdcm/2011/4413/0",
"title": "Digital Media and Digital Content Management, Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cgiv/2010/4166/0/4166a009",
"title": "Expressive MPEG-4 Facial Animation Using Quadratic Deformation Models",
"doi": null,
"abstractUrl": "/proceedings-article/cgiv/2010/4166a009/12OmNxH9Xgx",
"parentPublication": {
"id": "proceedings/cgiv/2010/4166/0",
"title": "2010 Seventh International Conference on Computer Graphics, Imaging and Visualization",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446364",
"title": "Empirical Evaluation of Virtual Human Conversational and Affective Animations on Visual Attention in Inter-Personal Simulations",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446364/13bd1hyoTxR",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2017/04/mcg2017040030",
"title": "Data-Driven Approach to Synthesizing Facial Animation Using Motion Capture",
"doi": null,
"abstractUrl": "/magazine/cg/2017/04/mcg2017040030/13rRUyeTVkv",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09992151",
"title": "Personalized Audio-Driven 3D Facial Animation Via Style-Content Disentanglement",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09992151/1JevBLSiUqA",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/assic/2022/6109/0/10088386",
"title": "Computer Vision Lip Reading(CV)",
"doi": null,
"abstractUrl": "/proceedings-article/assic/2022/10088386/1M4rEB951ny",
"parentPublication": {
"id": "proceedings/assic/2022/6109/0",
"title": "2022 International Conference on Advancements in Smart, Secure and Intelligent Computing (ASSIC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/05/08998352",
"title": "Using Facial Animation to Increase the Enfacement Illusion and Avatar Self-Identification",
"doi": null,
"abstractUrl": "/journal/tg/2020/05/08998352/1hpPCCB7Bte",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090662",
"title": "Perception of Head Motion Effect on Emotional Facial Expression in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090662/1jIxmuXW5Es",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a401",
"title": "Effects of Virtual Instructor’s Facial Expressions in a 3D Game on Japanese Language Learning",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a401/1tnXNtPK2Wc",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a413",
"title": "A pipeline for facial animations on low budget VR productions",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a413/1tnXUqfjdlu",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNCctfjA",
"title": "1993 (25th) Southeastern Symposium on System Theory",
"acronym": "ssst",
"groupId": "1000732",
"volume": "0",
"displayVolume": "0",
"year": "1993",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNxb5hxq",
"doi": "10.1109/SSST.1993.522766",
"title": "A heuristic approach to the computation of 3D-ray trajectories in step index optical fibers",
"normalizedTitle": "A heuristic approach to the computation of 3D-ray trajectories in step index optical fibers",
"abstract": "Three-dimensional-ray trajectories in an optical fiber are derived using a simple correspondence principle. The ray trajectories are linked to the mode number of the exact waveguide solutions. The analysis and simulations presented are in terms of dimensionless parameters needed to characterize the optical fiber. Specifically, this includes the V-parameter, the core index of refraction, and the numerical aperture. The curves for allowed radial and azimuthal mode numbers are presented in terms of dimensions that are normalized by the core radius. The approach is extensible to graded-index fibers.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Three-dimensional-ray trajectories in an optical fiber are derived using a simple correspondence principle. The ray trajectories are linked to the mode number of the exact waveguide solutions. The analysis and simulations presented are in terms of dimensionless parameters needed to characterize the optical fiber. Specifically, this includes the V-parameter, the core index of refraction, and the numerical aperture. The curves for allowed radial and azimuthal mode numbers are presented in terms of dimensions that are normalized by the core radius. The approach is extensible to graded-index fibers.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Three-dimensional-ray trajectories in an optical fiber are derived using a simple correspondence principle. The ray trajectories are linked to the mode number of the exact waveguide solutions. The analysis and simulations presented are in terms of dimensionless parameters needed to characterize the optical fiber. Specifically, this includes the V-parameter, the core index of refraction, and the numerical aperture. The curves for allowed radial and azimuthal mode numbers are presented in terms of dimensions that are normalized by the core radius. The approach is extensible to graded-index fibers.",
"fno": "00522766",
"keywords": [
"Optical Fibres",
"Refractive Index",
"Ray Tracing",
"Heuristic Programming",
"Correspondence Principle",
"Helmholtz Equations",
"Radial Mode Numbers",
"Heuristic",
"3 D Ray Trajectories",
"Step Index Optical Fibers",
"Correspondence Principle",
"Simulations",
"Dimensionless Parameters",
"V Parameter",
"Core Index Of Refraction",
"Numerical Aperture",
"Azimuthal Mode Numbers",
"Optical Computing",
"Optical Fibers",
"Optical Waveguides",
"Equations",
"Optical Refraction",
"Optical Propagation",
"Computational Modeling",
"Analytical Models",
"Apertures",
"Visualization"
],
"authors": [
{
"affiliation": "Dept. of Electr. & Comput. Eng., US Naval Postgraduate Sch., Monterey, CA, USA",
"fullName": "A. Nassopoulos",
"givenName": "A.",
"surname": "Nassopoulos",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Dept. of Electr. & Comput. Eng., US Naval Postgraduate Sch., Monterey, CA, USA",
"fullName": "R. Pieper",
"givenName": "R.",
"surname": "Pieper",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ssst",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "1993-01-01T00:00:00",
"pubType": "proceedings",
"pages": "179,180,181,182,183",
"year": "1993",
"issn": "0094-2898",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "00522765",
"articleId": "12OmNy3iFgy",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "00522767",
"articleId": "12OmNwcl7KM",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/lcn/1988/0891/0/00010209",
"title": "Proposed Federal Standard 1070",
"doi": null,
"abstractUrl": "/proceedings-article/lcn/1988/00010209/12OmNCxtyL8",
"parentPublication": {
"id": "proceedings/lcn/1988/0891/0",
"title": "Proceedings [1988] 13th Conference on Local Computer Networks",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iri/2014/5880/0/07051914",
"title": "Towards ray optics formalization of optical imaging systems",
"doi": null,
"abstractUrl": "/proceedings-article/iri/2014/07051914/12OmNvq5jzp",
"parentPublication": {
"id": "proceedings/iri/2014/5880/0",
"title": "2014 IEEE International Conference on Information Reuse and Integration (IRI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/chinacom/2014/5970/0/07054303",
"title": "Simulation step-size optimization in the split-step fourier simulation of polarized optical signal propagation through single mode optical fiber",
"doi": null,
"abstractUrl": "/proceedings-article/chinacom/2014/07054303/12OmNwEJ10X",
"parentPublication": {
"id": "proceedings/chinacom/2014/5970/0",
"title": "2014 9th International Conference on Communications and Networking in China (CHINACOM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ssst/1994/5320/0/00287827",
"title": "Graded-index linear tapers",
"doi": null,
"abstractUrl": "/proceedings-article/ssst/1994/00287827/12OmNySosHl",
"parentPublication": {
"id": "proceedings/ssst/1994/5320/0",
"title": "Proceedings of 26th Southeastern Symposium on System Theory",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/1992/2855/0/00223244",
"title": "Motion trajectories",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/1992/00223244/12OmNzA6GJ0",
"parentPublication": {
"id": "proceedings/cvpr/1992/2855/0",
"title": "Proceedings 1992 IEEE Computer Society Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iscc/2009/4672/0/05202360",
"title": "Impact of chromatic and modal dispersion on frequency response of optical multimode fibers",
"doi": null,
"abstractUrl": "/proceedings-article/iscc/2009/05202360/12OmNzUPpdr",
"parentPublication": {
"id": "proceedings/iscc/2009/4672/0",
"title": "2009 IEEE Symposium on Computers and Communications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bwcca/2014/4173/0/4173a371",
"title": "Optical Ray Tracing Based on Dijkstra Algorithm in Inhomogeneous Medium",
"doi": null,
"abstractUrl": "/proceedings-article/bwcca/2014/4173a371/12OmNzXFozK",
"parentPublication": {
"id": "proceedings/bwcca/2014/4173/0",
"title": "2014 Ninth International Conference on Broadband and Wireless Computing, Communication and Applications (BWCCA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/greencom-ithingscpscom/2013/5046/0/06682378",
"title": "Study on the Interaction of Optical Field and Transverse Acoustic Mode in Silicon Optical Fibers",
"doi": null,
"abstractUrl": "/proceedings-article/greencom-ithingscpscom/2013/06682378/12OmNzwZ6tL",
"parentPublication": {
"id": "proceedings/greencom-ithingscpscom/2013/5046/0",
"title": "2013 IEEE International Conference on Green Computing and Communications (GreenCom) and IEEE Internet of Things(iThings) and IEEE Cyber, Physical and Social Computing(CPSCom)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/04/07383324",
"title": "Effects of Configuration of Optical Combiner on Near-Field Depth Perception in Optical See-Through Head-Mounted Displays",
"doi": null,
"abstractUrl": "/journal/tg/2016/04/07383324/13rRUwI5Ugg",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/05/08642529",
"title": "Manufacturing Application-Driven Foveated Near-Eye Displays",
"doi": null,
"abstractUrl": "/journal/tg/2019/05/08642529/17PYEjG6pn1",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNxuXcDF",
"title": "2009 IEEE Symposium on Computers and Communications",
"acronym": "iscc",
"groupId": "1000156",
"volume": "0",
"displayVolume": "0",
"year": "2009",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNzUPpdr",
"doi": "10.1109/ISCC.2009.5202360",
"title": "Impact of chromatic and modal dispersion on frequency response of optical multimode fibers",
"normalizedTitle": "Impact of chromatic and modal dispersion on frequency response of optical multimode fibers",
"abstract": "In this work, we have studied chromatic and modal dispersion of silica graded-index optical fibers as a function of mode depending parameters and a launching condition in local area network (LAN) context. We have investigated mode-depending parameters, namely modal delay, modal attenuation and mode-coupling effects as a function of wavelength. We have proved that the number of excited mode groups depend on spot radius beam when the fiber is excited with a Gaussian input beam. On the other hand, we have demonstrated that we can get a good frequency response of system with a proper wavelength value and under an optimal launching condition in order to decrease the modal dispersion effects of multimode optical fiber. Finally, we have investigated the transfer function of MMF with taking into account chromatic and modal dispersion and considering two structured of studied optical fiber.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this work, we have studied chromatic and modal dispersion of silica graded-index optical fibers as a function of mode depending parameters and a launching condition in local area network (LAN) context. We have investigated mode-depending parameters, namely modal delay, modal attenuation and mode-coupling effects as a function of wavelength. We have proved that the number of excited mode groups depend on spot radius beam when the fiber is excited with a Gaussian input beam. On the other hand, we have demonstrated that we can get a good frequency response of system with a proper wavelength value and under an optimal launching condition in order to decrease the modal dispersion effects of multimode optical fiber. Finally, we have investigated the transfer function of MMF with taking into account chromatic and modal dispersion and considering two structured of studied optical fiber.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this work, we have studied chromatic and modal dispersion of silica graded-index optical fibers as a function of mode depending parameters and a launching condition in local area network (LAN) context. We have investigated mode-depending parameters, namely modal delay, modal attenuation and mode-coupling effects as a function of wavelength. We have proved that the number of excited mode groups depend on spot radius beam when the fiber is excited with a Gaussian input beam. On the other hand, we have demonstrated that we can get a good frequency response of system with a proper wavelength value and under an optimal launching condition in order to decrease the modal dispersion effects of multimode optical fiber. Finally, we have investigated the transfer function of MMF with taking into account chromatic and modal dispersion and considering two structured of studied optical fiber.",
"fno": "05202360",
"keywords": [
"Frequency Response",
"Optical Fibre Dispersion",
"Optical Fibre LAN",
"Silicon Compounds",
"Transfer Functions",
"Chromatic Dispersion",
"Modal Dispersion",
"Frequency Response",
"Optical Multimode Fibers",
"Silica Graded Index Optical Fibers",
"Mode Depending Parameters",
"Launching Condition",
"Local Area Network",
"LAN",
"Modal Delay",
"Modal Attenuation",
"Mode Coupling Effects",
"Excited Mode Groups",
"Spot Radius Beam",
"Gaussian Input Beam",
"Transfer Function",
"Si O Sub 2 Sub",
"Chromatic Dispersion",
"Frequency Response",
"Optical Fiber Dispersion",
"Optical Fiber LAN",
"Optical Fibers",
"Optical Attenuators",
"Silicon Compounds",
"Local Area Networks",
"Delay Effects",
"Propagation Delay",
"Graded Index Optical Fiber",
"Chromatic Dispersion",
"Modal Dispersion",
"LAN",
"Launching Condition"
],
"authors": [
{
"affiliation": "Tunisian Polytechnic school, EPT, BP 743-2078, La Marsa, Tunis Tunisia",
"fullName": "Hichem Mrabet",
"givenName": "Hichem",
"surname": "Mrabet",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "UHVC-IEMN/DOAE (CNRS UMR 8520), Le Mont Houy 59313 Valenciennes Cedex 9, France",
"fullName": "Iyad Dayoub",
"givenName": "Iyad",
"surname": "Dayoub",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Tunisian Polytechnic school, EPT, BP 743-2078, La Marsa, Tunis Tunisia",
"fullName": "Rabah Attia",
"givenName": "Rabah",
"surname": "Attia",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Detroit Mercy, 4001 W. McNichols RD, MI 48221, USA",
"fullName": "Nizar AL-Holou",
"givenName": "Nizar",
"surname": "AL-Holou",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "INRETS, 20 rue Elisee Reclus, 59650 Villeneuve D Ascq, France",
"fullName": "Charles Tatkeu",
"givenName": "Charles",
"surname": "Tatkeu",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iscc",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2009-07-01T00:00:00",
"pubType": "proceedings",
"pages": "188-194",
"year": "2009",
"issn": "1530-1346",
"isbn": "978-1-4244-4672-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "05202359",
"articleId": "12OmNqFJhTj",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "05202361",
"articleId": "12OmNAkWvc2",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iciev/2013/0400/0/06572530",
"title": "Dispersion and confinement loss control with decagonal photonic crystal fibers for wideband transmission systems",
"doi": null,
"abstractUrl": "/proceedings-article/iciev/2013/06572530/12OmNBsueaX",
"parentPublication": {
"id": "proceedings/iciev/2013/0400/0",
"title": "2013 2nd International Conference on Informatics, Electronics and Vision (ICIEV 2013)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iciev/2016/1269/0/07760128",
"title": "Design of an ultra-high nonlinear dispersion compensating Hybrid Hexagonal Photonic Crystal fiber",
"doi": null,
"abstractUrl": "/proceedings-article/iciev/2016/07760128/12OmNBv2CiO",
"parentPublication": {
"id": "proceedings/iciev/2016/1269/0",
"title": "2016 International Conference on Informatics, Electronics and Vision (ICIEV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bwcca/2011/4532/0/4532a313",
"title": "Mixed Chromatic Dispersion Compensation Methods for Combined HDWDM Systems",
"doi": null,
"abstractUrl": "/proceedings-article/bwcca/2011/4532a313/12OmNqJZgEU",
"parentPublication": {
"id": "proceedings/bwcca/2011/4532/0",
"title": "2011 International Conference on Broadband and Wireless Computing, Communication and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sicon/1993/1445/1/00515728",
"title": "Single-mode single-polarisation fibres with zero chromatic dispersion at /spl lambda/=1.55 /spl mu/m",
"doi": null,
"abstractUrl": "/proceedings-article/sicon/1993/00515728/12OmNvUsopV",
"parentPublication": {
"id": "proceedings/sicon/1993/1445/1",
"title": "Proceedings of IEEE Singapore International Conference on Networks/International Conference on Information Engineering '93",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iciev/2013/0400/0/06572575",
"title": "Profile optimization of dispersion shifted fiber based on optifiber design, simulation and performance analysis",
"doi": null,
"abstractUrl": "/proceedings-article/iciev/2013/06572575/12OmNvxsSQK",
"parentPublication": {
"id": "proceedings/iciev/2013/0400/0",
"title": "2013 2nd International Conference on Informatics, Electronics and Vision (ICIEV 2013)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cmc/2009/3501/2/3501b497",
"title": "Optimization Analysis of Transmission Performance of 10Gb/s Optical Signal Using Adaptive Decision Feedback Equalizer",
"doi": null,
"abstractUrl": "/proceedings-article/cmc/2009/3501b497/12OmNwnYFZ3",
"parentPublication": {
"id": "proceedings/cmc/2009/3501/2",
"title": "Communications and Mobile Computing, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iciev/2013/0400/0/06572528",
"title": "Modelling of dispersion flattened photonic crystal fibers for communication application",
"doi": null,
"abstractUrl": "/proceedings-article/iciev/2013/06572528/12OmNz2TCC1",
"parentPublication": {
"id": "proceedings/iciev/2013/0400/0",
"title": "2013 2nd International Conference on Informatics, Electronics and Vision (ICIEV 2013)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cecit/2021/3757/0/375700a373",
"title": "Chromatic Dispersion Monitoring for Long-haul 28 GBaud PDM-QPSK Optical Signal based on CNN",
"doi": null,
"abstractUrl": "/proceedings-article/cecit/2021/375700a373/1CdEKMf54nm",
"parentPublication": {
"id": "proceedings/cecit/2021/3757/0",
"title": "2021 2nd International Conference on Electronics, Communications and Information Technology (CECIT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icaml/2019/3908/0/390800a277",
"title": "Design of Bend-Limited Large-Mode Area Dispersion Shifted Few-Mode Fiber for Fast Communication",
"doi": null,
"abstractUrl": "/proceedings-article/icaml/2019/390800a277/1hrLJkYWEBW",
"parentPublication": {
"id": "proceedings/icaml/2019/3908/0",
"title": "2019 International Conference on Applied Machine Learning (ICAML)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fpl/2020/9902/0/990200a357",
"title": "High-Speed Chromatic Dispersion Compensation Filtering in FPGAs for Coherent Optical Communication",
"doi": null,
"abstractUrl": "/proceedings-article/fpl/2020/990200a357/1nTu8xR4gzS",
"parentPublication": {
"id": "proceedings/fpl/2020/9902/0",
"title": "2020 30th International Conference on Field-Programmable Logic and Applications (FPL)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNwwMf3w",
"title": "2010 23rd SIBGRAPI Conference on Graphics, Patterns and Images",
"acronym": "sibgrapi",
"groupId": "1000131",
"volume": "0",
"displayVolume": "0",
"year": "2010",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNzX6cf5",
"doi": "10.1109/SIBGRAPI.2010.50",
"title": "PyFibers: A Semi-automatic Tool for Contour Extraction from Cross Section Images of Photonic Crystal Fibers",
"normalizedTitle": "PyFibers: A Semi-automatic Tool for Contour Extraction from Cross Section Images of Photonic Crystal Fibers",
"abstract": "A photonic crystal fiber is a new type of optical fiber that presents an array of air holes running along its whole length and whose properties can be predicted upon precise knowledge of its cross section. Up to now, the cross section of these fibers has been estimated through manual segmentation of the contours in scanning electron microscopy (SEM) images. In this work, an image processing tool, called PyFibers, was developed, based on mathematical morphology (MM) operations. The tool extracts the contours from the images with minimal user intervention, and outputs a text file containing the contours coordinates and a DXF file with the contours. Either of these files can, subsequently, be used as input to softwares that evaluate the optical properties of the fibers.",
"abstracts": [
{
"abstractType": "Regular",
"content": "A photonic crystal fiber is a new type of optical fiber that presents an array of air holes running along its whole length and whose properties can be predicted upon precise knowledge of its cross section. Up to now, the cross section of these fibers has been estimated through manual segmentation of the contours in scanning electron microscopy (SEM) images. In this work, an image processing tool, called PyFibers, was developed, based on mathematical morphology (MM) operations. The tool extracts the contours from the images with minimal user intervention, and outputs a text file containing the contours coordinates and a DXF file with the contours. Either of these files can, subsequently, be used as input to softwares that evaluate the optical properties of the fibers.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "A photonic crystal fiber is a new type of optical fiber that presents an array of air holes running along its whole length and whose properties can be predicted upon precise knowledge of its cross section. Up to now, the cross section of these fibers has been estimated through manual segmentation of the contours in scanning electron microscopy (SEM) images. In this work, an image processing tool, called PyFibers, was developed, based on mathematical morphology (MM) operations. The tool extracts the contours from the images with minimal user intervention, and outputs a text file containing the contours coordinates and a DXF file with the contours. Either of these files can, subsequently, be used as input to softwares that evaluate the optical properties of the fibers.",
"fno": "05720385",
"keywords": [
"Feature Extraction",
"Holey Fibres",
"Image Segmentation",
"Photonic Crystals",
"Scanning Electron Microscopy",
"Semiautomatic Tool",
"Contour Extraction",
"Cross Section Image",
"Photonic Crystal Fiber",
"Optical Fiber",
"Air Hole",
"Image Segmentation",
"Electron Microscopy Image",
"Image Processing Tool",
"Py Fibers",
"Mathematical Morphology",
"Text File",
"DXF File",
"Optical Property",
"Pixel",
"Image Segmentation",
"Brightness",
"Image Edge Detection",
"Optical Imaging",
"Optical Sensors",
"Nonlinear Optics",
"Photonic Crystal Fiber",
"Microstructured Optical Fiber",
"Contour Extraction",
"Mathematical Morphology",
"Scanning Electron Microscopy"
],
"authors": [
{
"affiliation": null,
"fullName": "Anderson Mariano",
"givenName": "Anderson",
"surname": "Mariano",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Gabriela Castellano",
"givenName": "Gabriela",
"surname": "Castellano",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Cristiano M. B. Cordeiro",
"givenName": "Cristiano M. B.",
"surname": "Cordeiro",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "sibgrapi",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2010-08-01T00:00:00",
"pubType": "proceedings",
"pages": "323-330",
"year": "2010",
"issn": "1530-1834",
"isbn": "978-1-4244-8420-1",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "05720384",
"articleId": "12OmNApLGSk",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "05720386",
"articleId": "12OmNyQGS6t",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/uic-atc-scalcom/2015/7211/0/07518477",
"title": "Highly Birefringent Octagonal Photonic Crystal Fibers",
"doi": null,
"abstractUrl": "/proceedings-article/uic-atc-scalcom/2015/07518477/12OmNBpVQ6G",
"parentPublication": {
"id": "proceedings/uic-atc-scalcom/2015/7211/0",
"title": "2015 IEEE 12th Intl Conf on Ubiquitous Intelligence and Computing and 2015 IEEE 12th Intl Conf on Autonomic and Trusted Computing and 2015 IEEE 15th Intl Conf on Scalable Computing and Communications and Its Associated Workshops (UIC-ATC-ScalCom)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iciev/2013/0400/0/06572530",
"title": "Dispersion and confinement loss control with decagonal photonic crystal fibers for wideband transmission systems",
"doi": null,
"abstractUrl": "/proceedings-article/iciev/2013/06572530/12OmNBsueaX",
"parentPublication": {
"id": "proceedings/iciev/2013/0400/0",
"title": "2013 2nd International Conference on Informatics, Electronics and Vision (ICIEV 2013)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icccnt/2017/3038/0/08203910",
"title": "Zero dispersion photonic crystal fibers for nonlinear applications",
"doi": null,
"abstractUrl": "/proceedings-article/icccnt/2017/08203910/12OmNwE9OA2",
"parentPublication": {
"id": "proceedings/icccnt/2017/3038/0",
"title": "2017 8th International Conference on Computing, Communication and Networking Technologies (ICCCNT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cnsr/2009/3649/0/3649a252",
"title": "Multicriteria PCF Design: An Accurate Photonic Crystal Fiber Design Tool",
"doi": null,
"abstractUrl": "/proceedings-article/cnsr/2009/3649a252/12OmNwNwzMn",
"parentPublication": {
"id": "proceedings/cnsr/2009/3649/0",
"title": "Communication Networks and Services Research, Annual Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ssst/1993/3560/0/00522766",
"title": "A heuristic approach to the computation of 3D-ray trajectories in step index optical fibers",
"doi": null,
"abstractUrl": "/proceedings-article/ssst/1993/00522766/12OmNxb5hxq",
"parentPublication": {
"id": "proceedings/ssst/1993/3560/0",
"title": "1993 (25th) Southeastern Symposium on System Theory",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/eqec/2005/8973/0/01567230",
"title": "Cross-phase-modulation-induced instabilities and frequency shifts of ultrashort laser pulses in a photonic-crystal fiber",
"doi": null,
"abstractUrl": "/proceedings-article/eqec/2005/01567230/12OmNyp9MkO",
"parentPublication": {
"id": "proceedings/eqec/2005/8973/0",
"title": "2005 European Quantum Electronics Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iciev/2013/0400/0/06572528",
"title": "Modelling of dispersion flattened photonic crystal fibers for communication application",
"doi": null,
"abstractUrl": "/proceedings-article/iciev/2013/06572528/12OmNz2TCC1",
"parentPublication": {
"id": "proceedings/iciev/2013/0400/0",
"title": "2013 2nd International Conference on Informatics, Electronics and Vision (ICIEV 2013)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iscc/2009/4672/0/05202360",
"title": "Impact of chromatic and modal dispersion on frequency response of optical multimode fibers",
"doi": null,
"abstractUrl": "/proceedings-article/iscc/2009/05202360/12OmNzUPpdr",
"parentPublication": {
"id": "proceedings/iscc/2009/4672/0",
"title": "2009 IEEE Symposium on Computers and Communications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/greencom-ithingscpscom/2013/5046/0/06682378",
"title": "Study on the Interaction of Optical Field and Transverse Acoustic Mode in Silicon Optical Fibers",
"doi": null,
"abstractUrl": "/proceedings-article/greencom-ithingscpscom/2013/06682378/12OmNzwZ6tL",
"parentPublication": {
"id": "proceedings/greencom-ithingscpscom/2013/5046/0",
"title": "2013 IEEE International Conference on Green Computing and Communications (GreenCom) and IEEE Internet of Things(iThings) and IEEE Cyber, Physical and Social Computing(CPSCom)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/nt/2004/05/01344015",
"title": "On trading wavelengths with fibers: a cost-performance based study",
"doi": null,
"abstractUrl": "/journal/nt/2004/05/01344015/13rRUILtJwd",
"parentPublication": {
"id": "trans/nt",
"title": "IEEE/ACM Transactions on Networking",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNvk7JKB",
"title": "2013 IEEE International Conference on Green Computing and Communications (GreenCom) and IEEE Internet of Things(iThings) and IEEE Cyber, Physical and Social Computing(CPSCom)",
"acronym": "greencom-ithingscpscom",
"groupId": "1800308",
"volume": "0",
"displayVolume": "0",
"year": "2013",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNzwZ6tL",
"doi": "10.1109/GreenCom-iThings-CPSCom.2013.367",
"title": "Study on the Interaction of Optical Field and Transverse Acoustic Mode in Silicon Optical Fibers",
"normalizedTitle": "Study on the Interaction of Optical Field and Transverse Acoustic Mode in Silicon Optical Fibers",
"abstract": "The interaction of optical field and medium (transverse acoustic mode) leads to the inelastic scattering called forward stimulated Raman-like scattering (SRLS), the gain factor of SRLS has been theoretically studied in optical fibers with different Kerr nonlinearities and diameters. An analytical model of SRLS is established, considering all the acoustic modes. It is revealed the influence of Kerr nonlinearity on the gain and line shape of SRLS. With Kerr effect increasing, the gain of (anti-) Stokes beam increases, the line width of (anti-) Stokes beam broadens and the line shape profile on the anti-Stokes side of the pump beam are asymmetric, peak of gain deviating from central frequency shifts to lower frequency. The acousto-optic interaction is studied in microstructure fibers, Acoustic frequency variety of R01 mode is against to the fiber radius with a direct ratio, the interaction of light and sound is related to fiber diameter and the largest gain factor appears at a radius of 0.58 μm.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The interaction of optical field and medium (transverse acoustic mode) leads to the inelastic scattering called forward stimulated Raman-like scattering (SRLS), the gain factor of SRLS has been theoretically studied in optical fibers with different Kerr nonlinearities and diameters. An analytical model of SRLS is established, considering all the acoustic modes. It is revealed the influence of Kerr nonlinearity on the gain and line shape of SRLS. With Kerr effect increasing, the gain of (anti-) Stokes beam increases, the line width of (anti-) Stokes beam broadens and the line shape profile on the anti-Stokes side of the pump beam are asymmetric, peak of gain deviating from central frequency shifts to lower frequency. The acousto-optic interaction is studied in microstructure fibers, Acoustic frequency variety of R01 mode is against to the fiber radius with a direct ratio, the interaction of light and sound is related to fiber diameter and the largest gain factor appears at a radius of 0.58 μm.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The interaction of optical field and medium (transverse acoustic mode) leads to the inelastic scattering called forward stimulated Raman-like scattering (SRLS), the gain factor of SRLS has been theoretically studied in optical fibers with different Kerr nonlinearities and diameters. An analytical model of SRLS is established, considering all the acoustic modes. It is revealed the influence of Kerr nonlinearity on the gain and line shape of SRLS. With Kerr effect increasing, the gain of (anti-) Stokes beam increases, the line width of (anti-) Stokes beam broadens and the line shape profile on the anti-Stokes side of the pump beam are asymmetric, peak of gain deviating from central frequency shifts to lower frequency. The acousto-optic interaction is studied in microstructure fibers, Acoustic frequency variety of R01 mode is against to the fiber radius with a direct ratio, the interaction of light and sound is related to fiber diameter and the largest gain factor appears at a radius of 0.58 μm.",
"fno": "06682378",
"keywords": [
"Optical Fibers",
"Acoustics",
"Kerr Effect",
"Fiber Nonlinear Optics",
"Optical Fiber Communication",
"Gain Factor",
"Acoustic Mode",
"Interaction"
],
"authors": [
{
"affiliation": "Sci. & Technol. on Opt. Radiat. Lab., Beijing, China",
"fullName": "Jing Wang",
"givenName": null,
"surname": "Jing Wang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Sci. & Technol. on Opt. Radiat. Lab., Beijing, China",
"fullName": "Yuan-nan Xu",
"givenName": null,
"surname": "Yuan-nan Xu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Sci. & Technol. on Opt. Radiat. Lab., Beijing, China",
"fullName": "Yan-bing Dong",
"givenName": null,
"surname": "Yan-bing Dong",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "greencom-ithingscpscom",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2013-08-01T00:00:00",
"pubType": "proceedings",
"pages": "1967-1971",
"year": "2013",
"issn": null,
"isbn": "978-0-7695-5046-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "06682377",
"articleId": "12OmNynJMJd",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "06682379",
"articleId": "12OmNzC5Tkz",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/eisic/2013/5062/0/06657181",
"title": "OptaSense® Distributed Acoustic and Seismic Sensing Performance for Multi-threat, Multi-environment Border Monitoring",
"doi": null,
"abstractUrl": "/proceedings-article/eisic/2013/06657181/12OmNAle6GO",
"parentPublication": {
"id": "proceedings/eisic/2013/5062/0",
"title": "2013 European Intelligence and Security Informatics Conference (EISIC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ssst/1992/2665/0/00712310",
"title": "Experimental Investigation of the Optical Noise Sensing Capabilities of the Brillouin Fiber Ring Amplifier",
"doi": null,
"abstractUrl": "/proceedings-article/ssst/1992/00712310/12OmNAlvI3Q",
"parentPublication": {
"id": "proceedings/ssst/1992/2665/0",
"title": "The 24th Southeastern Symposium on System Theory and The 3rd Annual Symposium on Communications, Signal Processing Expert Systems, and ASIC VLSI Design",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icimt/2009/3922/0/3922a465",
"title": "Analysis of Phase Distortion of Optical OFDM Signal in Optical Fiber Transmission",
"doi": null,
"abstractUrl": "/proceedings-article/icimt/2009/3922a465/12OmNBPtJEq",
"parentPublication": {
"id": "proceedings/icimt/2009/3922/0",
"title": "Information and Multimedia Technology, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icicta/2011/4353/2/05750909",
"title": "All-Optical Logic XOR Gate Exploiting XPM and Polarization Rotation in Single Highly Nonlinear Fiber",
"doi": null,
"abstractUrl": "/proceedings-article/icicta/2011/05750909/12OmNs5rl1Y",
"parentPublication": {
"id": "icicta/2011/4353/2",
"title": "Intelligent Computation Technology and Automation, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/eqec/2005/8973/0/01567440",
"title": "Efficient polarization squeezing in optical fibers",
"doi": null,
"abstractUrl": "/proceedings-article/eqec/2005/01567440/12OmNxVlTJn",
"parentPublication": {
"id": "proceedings/eqec/2005/8973/0",
"title": "2005 European Quantum Electronics Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/eqec/2005/8973/0/01567446",
"title": "Generation of quantum correlation between co-propagating pulses in optical fibers",
"doi": null,
"abstractUrl": "/proceedings-article/eqec/2005/01567446/12OmNy2rRZy",
"parentPublication": {
"id": "proceedings/eqec/2005/8973/0",
"title": "2005 European Quantum Electronics Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/eqec/2005/8973/0/01567316",
"title": "Hysteresis phenomena in passively mode-locked fibre laser with nonlinear polarization rotation",
"doi": null,
"abstractUrl": "/proceedings-article/eqec/2005/01567316/12OmNyKrH9r",
"parentPublication": {
"id": "proceedings/eqec/2005/8973/0",
"title": "2005 European Quantum Electronics Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iscc/2009/4672/0/05202360",
"title": "Impact of chromatic and modal dispersion on frequency response of optical multimode fibers",
"doi": null,
"abstractUrl": "/proceedings-article/iscc/2009/05202360/12OmNzUPpdr",
"parentPublication": {
"id": "proceedings/iscc/2009/4672/0",
"title": "2009 IEEE Symposium on Computers and Communications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icispc/2020/8548/0/854800a062",
"title": "An Investigation on Kerr Nonlinear Compensation Technology Assisted by DSP in Optical Fiber Transmission Systems",
"doi": null,
"abstractUrl": "/proceedings-article/icispc/2020/854800a062/1u6KDo79VtK",
"parentPublication": {
"id": "proceedings/icispc/2020/8548/0",
"title": "2020 4th International Conference on Imaging, Signal Processing and Communications (ICISPC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icsgea/2021/3263/0/326300a134",
"title": "Sound Source Localization with Enhanced Distributed Acoustic Sensing Based on Fiber Bragg Gratings",
"doi": null,
"abstractUrl": "/proceedings-article/icsgea/2021/326300a134/1vb9lHguLMQ",
"parentPublication": {
"id": "proceedings/icsgea/2021/3263/0",
"title": "2021 6th International Conference on Smart Grid and Electrical Automation (ICSGEA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNAR1b0Z",
"title": "2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"acronym": "cvprw",
"groupId": "1001809",
"volume": "0",
"displayVolume": "0",
"year": "2017",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNAlNiEz",
"doi": "10.1109/CVPRW.2017.73",
"title": "3D Pose Regression Using Convolutional Neural Networks",
"normalizedTitle": "3D Pose Regression Using Convolutional Neural Networks",
"abstract": "3D pose estimation is a key component of many important computer vision tasks like autonomous navigation and robot manipulation. Current state-of-the-art approaches for 3D object pose estimation, like Viewpoints & Keypoints and Render for CNN, solve this problem by discretizing the pose space into bins and solving a pose-classification task. We argue that 3D pose is continuous and can be solved in a regression framework if done with the right representation, data augmentation and loss function. We modify a standard VGG network for the task of 3D pose regression and show competitive performance compared to state-of-the-art.",
"abstracts": [
{
"abstractType": "Regular",
"content": "3D pose estimation is a key component of many important computer vision tasks like autonomous navigation and robot manipulation. Current state-of-the-art approaches for 3D object pose estimation, like Viewpoints & Keypoints and Render for CNN, solve this problem by discretizing the pose space into bins and solving a pose-classification task. We argue that 3D pose is continuous and can be solved in a regression framework if done with the right representation, data augmentation and loss function. We modify a standard VGG network for the task of 3D pose regression and show competitive performance compared to state-of-the-art.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "3D pose estimation is a key component of many important computer vision tasks like autonomous navigation and robot manipulation. Current state-of-the-art approaches for 3D object pose estimation, like Viewpoints & Keypoints and Render for CNN, solve this problem by discretizing the pose space into bins and solving a pose-classification task. We argue that 3D pose is continuous and can be solved in a regression framework if done with the right representation, data augmentation and loss function. We modify a standard VGG network for the task of 3D pose regression and show competitive performance compared to state-of-the-art.",
"fno": "0733a494",
"keywords": [
"Three Dimensional Displays",
"Two Dimensional Displays",
"Azimuth",
"Solid Modeling",
"Training",
"Cameras"
],
"authors": [
{
"affiliation": null,
"fullName": "Siddharth Mahendran",
"givenName": "Siddharth",
"surname": "Mahendran",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Haider Ali",
"givenName": "Haider",
"surname": "Ali",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "René Vidal",
"givenName": "René",
"surname": "Vidal",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvprw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2017-07-01T00:00:00",
"pubType": "proceedings",
"pages": "494-495",
"year": "2017",
"issn": "2160-7516",
"isbn": "978-1-5386-0733-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "0733a492",
"articleId": "12OmNxw5Bxq",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "0733a496",
"articleId": "12OmNzQhP89",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cvpr/2017/0457/0/0457f759",
"title": "3D Human Pose Estimation = 2D Pose Estimation + Matching",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2017/0457f759/12OmNAKcNOh",
"parentPublication": {
"id": "proceedings/cvpr/2017/0457/0",
"title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2017/0457/0/0457f679",
"title": "3D Convolutional Neural Networks for Efficient and Robust Hand Pose Estimation from Single Depth Images",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2017/0457f679/12OmNBQC895",
"parentPublication": {
"id": "proceedings/cvpr/2017/0457/0",
"title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2017/0457/0/0457f689",
"title": "Lifting from the Deep: Convolutional 3D Pose Estimation from a Single Image",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2017/0457f689/12OmNBRbkrc",
"parentPublication": {
"id": "proceedings/cvpr/2017/0457/0",
"title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2017/1032/0/1032c621",
"title": "Compositional Human Pose Regression",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2017/1032c621/12OmNqBtiU5",
"parentPublication": {
"id": "proceedings/iccv/2017/1032/0",
"title": "2017 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2017/1034/0/1034c174",
"title": "3D Pose Regression Using Convolutional Neural Networks",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2017/1034c174/12OmNxEBzn3",
"parentPublication": {
"id": "proceedings/iccvw/2017/1034/0",
"title": "2017 IEEE International Conference on Computer Vision Workshop (ICCVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ism/2017/2937/0/2937a169",
"title": "Towards Efficient 3D Pose Retrieval and Reconstruction from 2D Landmarks",
"doi": null,
"abstractUrl": "/proceedings-article/ism/2017/2937a169/12OmNyS6RK2",
"parentPublication": {
"id": "proceedings/ism/2017/2937/0",
"title": "2017 IEEE International Symposium on Multimedia (ISM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2017/0457/0/0457b561",
"title": "3D Human Pose Estimation from a Single Image via Distance Matrix Regression",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2017/0457b561/12OmNzTH0Sa",
"parentPublication": {
"id": "proceedings/cvpr/2017/0457/0",
"title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2019/04/08338122",
"title": "Real-Time 3D Hand Pose Estimation with 3D Convolutional Neural Networks",
"doi": null,
"abstractUrl": "/journal/tp/2019/04/08338122/13rRUx0xPJX",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2018/6420/0/642000f147",
"title": "Dense 3D Regression for Hand Pose Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2018/642000f147/17D45WaTkeL",
"parentPublication": {
"id": "proceedings/cvpr/2018/6420/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2019/3293/0/329300d420",
"title": "Semantic Graph Convolutional Networks for 3D Human Pose Regression",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2019/329300d420/1gyrWM4kyk0",
"parentPublication": {
"id": "proceedings/cvpr/2019/3293/0",
"title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNyfdOIW",
"title": "2012 IEEE Symposium on 3D User Interfaces (3DUI)",
"acronym": "3dui",
"groupId": "1001623",
"volume": "0",
"displayVolume": "0",
"year": "2012",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNBvkdnk",
"doi": "10.1109/3DUI.2012.6184216",
"title": "Poster: Manipulation techniques of 3D objects represented as multi-viewpoint images in a 3D scene",
"normalizedTitle": "Poster: Manipulation techniques of 3D objects represented as multi-viewpoint images in a 3D scene",
"abstract": "In this poster, we explore manipulation of an object represented by an image-based rendering approach in a 3D scene. We focus on two manipulation techniques that address the problems with using an image-based rendering approach and the constraints imposed by implementing such a system on a mobile device. We present results from our preliminary experiments.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this poster, we explore manipulation of an object represented by an image-based rendering approach in a 3D scene. We focus on two manipulation techniques that address the problems with using an image-based rendering approach and the constraints imposed by implementing such a system on a mobile device. We present results from our preliminary experiments.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this poster, we explore manipulation of an object represented by an image-based rendering approach in a 3D scene. We focus on two manipulation techniques that address the problems with using an image-based rendering approach and the constraints imposed by implementing such a system on a mobile device. We present results from our preliminary experiments.",
"fno": "06184216",
"keywords": [
"Three Dimensional Displays",
"Rendering Computer Graphics",
"Mobile Handsets",
"Solid Modeling",
"Computational Modeling",
"Mobile Communication",
"I 3 6 Computer Graphics Methodology And Techniques Interaction Techniques",
"H 5 2 Information Interfaces And Presentation User Interfaces Interaction Styles"
],
"authors": [
{
"affiliation": null,
"fullName": "J. C. Yu",
"givenName": "J. C.",
"surname": "Yu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "G. Yamamoto",
"givenName": "G.",
"surname": "Yamamoto",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "J. Miyazaki",
"givenName": "J.",
"surname": "Miyazaki",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "M. Billinghurst",
"givenName": "M.",
"surname": "Billinghurst",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "H. Kato",
"givenName": "H.",
"surname": "Kato",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "3dui",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2012-03-01T00:00:00",
"pubType": "proceedings",
"pages": "171-172",
"year": "2012",
"issn": null,
"isbn": "978-1-4673-1204-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "06184215",
"articleId": "12OmNzFdt4N",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "06184217",
"articleId": "12OmNxG1yBH",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/acsat/2013/2758/0/2758a228",
"title": "3D Mobile Map Visualization Concept for Remote Rendered Dataset",
"doi": null,
"abstractUrl": "/proceedings-article/acsat/2013/2758a228/12OmNBCqbDK",
"parentPublication": {
"id": "proceedings/acsat/2013/2758/0",
"title": "2013 International Conference on Advanced Computer Science Applications and Technologies (ACSAT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dcc/2010/6425/0/05453470",
"title": "Depth Compression of 3D Object Represented by Layered Depth Image",
"doi": null,
"abstractUrl": "/proceedings-article/dcc/2010/05453470/12OmNBOUxkQ",
"parentPublication": {
"id": "proceedings/dcc/2010/6425/0",
"title": "2010 Data Compression Conference (DCC 2010)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvgip/2008/3476/0/3476a551",
"title": "Real-Time Rendering and Manipulation of Large Terrains",
"doi": null,
"abstractUrl": "/proceedings-article/icvgip/2008/3476a551/12OmNqFrGFh",
"parentPublication": {
"id": "proceedings/icvgip/2008/3476/0",
"title": "Computer Vision, Graphics & Image Processing, Indian Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2014/6184/0/06948467",
"title": "[Poster] Touch gestures for improved 3D object manipulation in mobile augmented reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2014/06948467/12OmNrkT7xo",
"parentPublication": {
"id": "proceedings/ismar/2014/6184/0",
"title": "2014 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvrv/2015/7673/0/7673a216",
"title": "Viewpoint-Predicting-Based Remote Rendering on Mobile Devices Using Multiple Depth Images",
"doi": null,
"abstractUrl": "/proceedings-article/icvrv/2015/7673a216/12OmNxcMSkQ",
"parentPublication": {
"id": "proceedings/icvrv/2015/7673/0",
"title": "2015 International Conference on Virtual Reality and Visualization (ICVRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2013/2869/0/06671804",
"title": "Interactive exploration of augmented aerial scenes with free-viewpoint image generation from pre-rendered images",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2013/06671804/12OmNzsJ7ks",
"parentPublication": {
"id": "proceedings/ismar/2013/2869/0",
"title": "2013 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446445",
"title": "A Framework for Virtual 3D Manipulation of Face in Video",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446445/13bd1AITnaH",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2023/9346/0/934600e410",
"title": "Text and Image Guided 3D Avatar Generation and Manipulation",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2023/934600e410/1KxUFsh4ZdS",
"parentPublication": {
"id": "proceedings/wacv/2023/9346/0",
"title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2023/9346/0/934600e329",
"title": "Control-NeRF: Editable Feature Volumes for Scene Rendering and Manipulation",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2023/934600e329/1L8qzXVyRlS",
"parentPublication": {
"id": "proceedings/wacv/2023/9346/0",
"title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/04/09668999",
"title": "Cross-Domain and Disentangled Face Manipulation With 3D Guidance",
"doi": null,
"abstractUrl": "/journal/tg/2023/04/09668999/1zTfZzq1wqY",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNAOsMGr",
"title": "2016 IEEE Eighth International Conference on Technology for Education (T4E)",
"acronym": "t4e",
"groupId": "1002941",
"volume": "0",
"displayVolume": "0",
"year": "2016",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNx5GTY6",
"doi": "10.1109/T4E.2016.014",
"title": "Geometry via Gestures: Learning 3D Geometry Using Gestures",
"normalizedTitle": "Geometry via Gestures: Learning 3D Geometry Using Gestures",
"abstract": "Concepts of 3-dimensional (3D) Geometry are challenging to grasp for school students. The skill of manipulating 3D objects and interpreting their structure and properties are difficult. Traditionally to teach topics that have three dimensions, 3D artifacts have been used. However the opportunity of the learner to interact during the construction and manipulation of 3D objects is desirable. In this paper, we present an application - Geometry via Gestures (G-v-G), which enables learners to interact with 3D objects using their gestures. We report observation and analysis from an exploratory study that was performed to identify the different aspects of 3D geometry that students could learn in the process of using the application. We also examined the students' perception of learning with the application during the study. The analysis indicates that students learn about structure and property of 3D geometrical objects after using G-v-G. In addition, students participating in the study, expressed keen interest in learning additional topics of geometry using gestures.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Concepts of 3-dimensional (3D) Geometry are challenging to grasp for school students. The skill of manipulating 3D objects and interpreting their structure and properties are difficult. Traditionally to teach topics that have three dimensions, 3D artifacts have been used. However the opportunity of the learner to interact during the construction and manipulation of 3D objects is desirable. In this paper, we present an application - Geometry via Gestures (G-v-G), which enables learners to interact with 3D objects using their gestures. We report observation and analysis from an exploratory study that was performed to identify the different aspects of 3D geometry that students could learn in the process of using the application. We also examined the students' perception of learning with the application during the study. The analysis indicates that students learn about structure and property of 3D geometrical objects after using G-v-G. In addition, students participating in the study, expressed keen interest in learning additional topics of geometry using gestures.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Concepts of 3-dimensional (3D) Geometry are challenging to grasp for school students. The skill of manipulating 3D objects and interpreting their structure and properties are difficult. Traditionally to teach topics that have three dimensions, 3D artifacts have been used. However the opportunity of the learner to interact during the construction and manipulation of 3D objects is desirable. In this paper, we present an application - Geometry via Gestures (G-v-G), which enables learners to interact with 3D objects using their gestures. We report observation and analysis from an exploratory study that was performed to identify the different aspects of 3D geometry that students could learn in the process of using the application. We also examined the students' perception of learning with the application during the study. The analysis indicates that students learn about structure and property of 3D geometrical objects after using G-v-G. In addition, students participating in the study, expressed keen interest in learning additional topics of geometry using gestures.",
"fno": "6115a026",
"keywords": [
"Computational Geometry",
"Computer Aided Instruction",
"Gesture Recognition",
"Mathematics Computing",
"Teaching",
"School Students",
"3 D Objects",
"Student Perception",
"G V G",
"3 D Geometrical Objects",
"Geometry Via Gestures",
"3 Dimensional Geometry",
"Three Dimensional Displays",
"Two Dimensional Displays",
"Geometry",
"Shape",
"Visualization",
"Solid Modeling",
"Context",
"3 D Geometry",
"Gestures",
"3 D Structure",
"3 D Property",
"Volume"
],
"authors": [
{
"affiliation": "IDP Educ. Technol., IIT Bombay, Mumbai, India",
"fullName": "Soumya Narayana",
"givenName": "Soumya",
"surname": "Narayana",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "IDP Educ. Technol., IIT Bombay, Mumbai, India",
"fullName": "Prajish Prasad",
"givenName": "Prajish",
"surname": "Prasad",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "IDP Educ. Technol., IIT Bombay, Mumbai, India",
"fullName": "T. G. Lakshmi",
"givenName": "T. G.",
"surname": "Lakshmi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "IDP Educ. Technol., IIT Bombay, Mumbai, India",
"fullName": "Sahana Murthy",
"givenName": "Sahana",
"surname": "Murthy",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "t4e",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2016-12-01T00:00:00",
"pubType": "proceedings",
"pages": "26-33",
"year": "2016",
"issn": null,
"isbn": "978-1-5090-6115-0",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "6115a019",
"articleId": "12OmNznkJW7",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "6115a034",
"articleId": "12OmNCfjezT",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icalt/2018/6049/0/604901a372",
"title": "GeoSolvAR: Augmented Reality Based Solution for Visualizing 3D Solids",
"doi": null,
"abstractUrl": "/proceedings-article/icalt/2018/604901a372/12OmNwMobbg",
"parentPublication": {
"id": "proceedings/icalt/2018/6049/0",
"title": "2018 IEEE 18th International Conference on Advanced Learning Technologies (ICALT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2015/06/06919312",
"title": "Learning 3D Object Templates by Quantizing Geometry and Appearance Spaces",
"doi": null,
"abstractUrl": "/journal/tp/2015/06/06919312/13rRUB7a1h1",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2018/3788/0/08545378",
"title": "3D Geometry-Aware Semantic Labeling of Outdoor Street Scenes",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2018/08545378/17D45VtKiwd",
"parentPublication": {
"id": "proceedings/icpr/2018/3788/0",
"title": "2018 24th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2019/4765/0/476500a144",
"title": "Hand ControlAR: An Augmented Reality Application for Learning 3D Geometry",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2019/476500a144/1gysoyOrm2A",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2019/4765/0",
"title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2020/1331/0/09102866",
"title": "Lossy Geometry Compression Of 3d Point Cloud Data Via An Adaptive Octree-Guided Network",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2020/09102866/1kwr58J4kWQ",
"parentPublication": {
"id": "proceedings/icme/2020/1331/0",
"title": "2020 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800b057",
"title": "Learning Unsupervised Hierarchical Part Decomposition of 3D Objects From a Single RGB Image",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800b057/1m3nrPwotuE",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800m2533",
"title": "DSGN: Deep Stereo Geometry Network for 3D Object Detection",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800m2533/1m3ntst40ta",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2022/10/09462521",
"title": "View-Aware Geometry-Structure Joint Learning for Single-View 3D Shape Reconstruction",
"doi": null,
"abstractUrl": "/journal/tp/2022/10/09462521/1uDSvbmzJQc",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2021/0191/0/019100c110",
"title": "3D Shapes Local Geometry Codes Learning with SDF",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2021/019100c110/1yNi23FEXja",
"parentPublication": {
"id": "proceedings/iccvw/2021/0191/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision Workshops (ICCVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2021/2688/0/268800a453",
"title": "Synergy between 3DMM and 3D Landmarks for Accurate 3D Facial Geometry",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2021/268800a453/1zWEnuGbFte",
"parentPublication": {
"id": "proceedings/3dv/2021/2688/0",
"title": "2021 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "13bd1eJgoia",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2018",
"__typename": "ProceedingType"
},
"article": {
"id": "13bd1AITnaH",
"doi": "10.1109/VR.2018.8446445",
"title": "A Framework for Virtual 3D Manipulation of Face in Video",
"normalizedTitle": "A Framework for Virtual 3D Manipulation of Face in Video",
"abstract": "This paper presents a framework that enables a user to manipulate his/her face shape three-dimensionally in video. Existing face manipulation applications and methods have some limitations: single photo, manipulation on image domain, or limited deformation. In the proposed framework, face is tracked from video by using landmark tracking and fitting 3d morphable face model to image, and the face model is further deformed according to the user input with mesh deformation method and rendered with texture from the frame image onto the camera preview. Therefore, unlike conventional applications and researches for face manipulation, the proposed framework allows the user to perform free-form 3d deformation of face in video and to view the deformed face at various viewpoints.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper presents a framework that enables a user to manipulate his/her face shape three-dimensionally in video. Existing face manipulation applications and methods have some limitations: single photo, manipulation on image domain, or limited deformation. In the proposed framework, face is tracked from video by using landmark tracking and fitting 3d morphable face model to image, and the face model is further deformed according to the user input with mesh deformation method and rendered with texture from the frame image onto the camera preview. Therefore, unlike conventional applications and researches for face manipulation, the proposed framework allows the user to perform free-form 3d deformation of face in video and to view the deformed face at various viewpoints.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper presents a framework that enables a user to manipulate his/her face shape three-dimensionally in video. Existing face manipulation applications and methods have some limitations: single photo, manipulation on image domain, or limited deformation. In the proposed framework, face is tracked from video by using landmark tracking and fitting 3d morphable face model to image, and the face model is further deformed according to the user input with mesh deformation method and rendered with texture from the frame image onto the camera preview. Therefore, unlike conventional applications and researches for face manipulation, the proposed framework allows the user to perform free-form 3d deformation of face in video and to view the deformed face at various viewpoints.",
"fno": "08446445",
"keywords": [
"Face",
"Three Dimensional Displays",
"Shape",
"Solid Modeling",
"Deformable Models",
"Strain",
"Cameras",
"Computing Methodologies Computer Graphics Graphics Systems And Interface Mixed Augmented Reality"
],
"authors": [
{
"affiliation": "Hanyang University",
"fullName": "Jungsik Park",
"givenName": "Jungsik",
"surname": "Park",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Hanyang University",
"fullName": "Jong-II Park",
"givenName": "Jong-II",
"surname": "Park",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2018-03-01T00:00:00",
"pubType": "proceedings",
"pages": "649-650",
"year": "2018",
"issn": null,
"isbn": "978-1-5386-3365-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08446318",
"articleId": "13bd1AITna8",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08446173",
"articleId": "13bd1eW2l9y",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icdh/2014/4284/0/4284a146",
"title": "A Fast 3-D Face Reconstruction Method",
"doi": null,
"abstractUrl": "/proceedings-article/icdh/2014/4284a146/12OmNvjgWRV",
"parentPublication": {
"id": "proceedings/icdh/2014/4284/0",
"title": "2014 5th International Conference on Digital Home (ICDH)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2014/5209/0/5209e477",
"title": "Pose Independent Face Recognition by Localizing Local Binary Patterns via Deformation Components",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2014/5209e477/12OmNvq5jv0",
"parentPublication": {
"id": "proceedings/icpr/2014/5209/0",
"title": "2014 22nd International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2014/5209/0/5209c257",
"title": "3D Face Reconstruction via Feature Point Depth Estimation and Shape Deformation",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2014/5209c257/12OmNvwTGE3",
"parentPublication": {
"id": "proceedings/icpr/2014/5209/0",
"title": "2014 22nd International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cw/2012/4814/0/4814a273",
"title": "Towards Reconstructing a 3D Face Model from an Uncontrolled Video Sequence",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2012/4814a273/12OmNx5piPH",
"parentPublication": {
"id": "proceedings/cw/2012/4814/0",
"title": "2012 International Conference on Cyberworlds",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2012/2216/0/06460370",
"title": "Fast-accurate 3D face model generation using a single video camera",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2012/06460370/12OmNz4Bdhq",
"parentPublication": {
"id": "proceedings/icpr/2012/2216/0",
"title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sibgrapi/2014/4258/0/4258a001",
"title": "3D Face Reconstruction from Video Using 3D Morphable Model and Silhouette",
"doi": null,
"abstractUrl": "/proceedings-article/sibgrapi/2014/4258a001/12OmNz5JBYT",
"parentPublication": {
"id": "proceedings/sibgrapi/2014/4258/0",
"title": "2014 27th SIBGRAPI Conference on Graphics, Patterns and Images (SIBGRAPI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2011/0529/0/05981841",
"title": "Photorealistic 3D face modeling on a smartphone",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2011/05981841/12OmNzd7bgz",
"parentPublication": {
"id": "proceedings/cvprw/2011/0529/0",
"title": "CVPR 2011 WORKSHOPS",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2018/6420/0/642000h336",
"title": "Alive Caricature from 2D to 3D",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2018/642000h336/17D45W2Wyzi",
"parentPublication": {
"id": "proceedings/cvpr/2018/6420/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/07/08580421",
"title": "CaricatureShop: Personalized and Photorealistic Caricature Sketching",
"doi": null,
"abstractUrl": "/journal/tg/2020/07/08580421/17D45XfSEU4",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iciev-&-icivpr/2020/9331/0/09306533",
"title": "Towards Detailed 3D Modeling: Mesh Super-Resolution via Deformation",
"doi": null,
"abstractUrl": "/proceedings-article/iciev-&-icivpr/2020/09306533/1qcicuvmpcQ",
"parentPublication": {
"id": "proceedings/iciev-&-icivpr/2020/9331/0",
"title": "2020 Joint 9th International Conference on Informatics, Electronics & Vision (ICIEV) and 2020 4th International Conference on Imaging, Vision & Pattern Recognition (icIVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "13bd1eJgoia",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2018",
"__typename": "ProceedingType"
},
"article": {
"id": "13bd1sx4Zta",
"doi": "10.1109/VR.2018.8446317",
"title": "Analysis of Proximity-Based Multimodal Feedback for 3D Selection in Immersive Virtual Environments",
"normalizedTitle": "Analysis of Proximity-Based Multimodal Feedback for 3D Selection in Immersive Virtual Environments",
"abstract": "Interaction tasks in virtual reality (VR) such as three-dimensional (3D) selection or manipulation of objects often suffer from reduced performance due to missing or different feedback provided by VR systems than during corresponding realworld interactions. Vibrotactile and auditory feedback have been suggested as additional perceptual cues complementing the visual channel to improve interaction in VR. However, it has rarely been shown that multimodal feedback improves performance or reduces errors during 3D object selection. Only little research has been conducted in the area of proximity-based multimodal feedback, in which stimulus intensities depend on spatiotemporal relations between input device and the virtual target object. In this paper, we analyzed the effects of unimodal and bimodal feedback provided through the visual, auditory and tactile modalities, while users perform 3D object selections in VEs, by comparing both binary and continuous proximity-based feedback. We conducted a Fitts' Law experiment and evaluated the different feedback approaches. The results show that the feedback types affect ballistic and correction phases of the selection movement, and significantly influence the user performance.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Interaction tasks in virtual reality (VR) such as three-dimensional (3D) selection or manipulation of objects often suffer from reduced performance due to missing or different feedback provided by VR systems than during corresponding realworld interactions. Vibrotactile and auditory feedback have been suggested as additional perceptual cues complementing the visual channel to improve interaction in VR. However, it has rarely been shown that multimodal feedback improves performance or reduces errors during 3D object selection. Only little research has been conducted in the area of proximity-based multimodal feedback, in which stimulus intensities depend on spatiotemporal relations between input device and the virtual target object. In this paper, we analyzed the effects of unimodal and bimodal feedback provided through the visual, auditory and tactile modalities, while users perform 3D object selections in VEs, by comparing both binary and continuous proximity-based feedback. We conducted a Fitts' Law experiment and evaluated the different feedback approaches. The results show that the feedback types affect ballistic and correction phases of the selection movement, and significantly influence the user performance.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Interaction tasks in virtual reality (VR) such as three-dimensional (3D) selection or manipulation of objects often suffer from reduced performance due to missing or different feedback provided by VR systems than during corresponding realworld interactions. Vibrotactile and auditory feedback have been suggested as additional perceptual cues complementing the visual channel to improve interaction in VR. However, it has rarely been shown that multimodal feedback improves performance or reduces errors during 3D object selection. Only little research has been conducted in the area of proximity-based multimodal feedback, in which stimulus intensities depend on spatiotemporal relations between input device and the virtual target object. In this paper, we analyzed the effects of unimodal and bimodal feedback provided through the visual, auditory and tactile modalities, while users perform 3D object selections in VEs, by comparing both binary and continuous proximity-based feedback. We conducted a Fitts' Law experiment and evaluated the different feedback approaches. The results show that the feedback types affect ballistic and correction phases of the selection movement, and significantly influence the user performance.",
"fno": "08446317",
"keywords": [
"Feedback",
"Haptic Interfaces",
"Human Computer Interaction",
"Virtual Reality",
"Proximity Based Multimodal Feedback",
"3 D Selection",
"Immersive Virtual Environments",
"Interaction Tasks",
"Virtual Reality",
"Missing Feedback",
"VR Systems",
"Vibrotactile Feedback",
"Auditory Feedback",
"3 D Object Selection",
"Virtual Target Object",
"Unimodal Feedback",
"Bimodal Feedback",
"Visual Modalities",
"Auditory Modalities",
"Tactile Modalities",
"Binary Proximity Based Feedback",
"Continuous Proximity Based Feedback",
"Selection Movement",
"User Performance",
"Perceptual Cues",
"Fitts Law Experiment",
"Three Dimensional Displays",
"Task Analysis",
"Visualization",
"Haptic Interfaces",
"Feeds",
"Performance Evaluation",
"Two Dimensional Displays",
"H 5 2 Information Interfaces And Presentation User Interfaces Input Devices And Strategies",
"Evaluation"
],
"authors": [
{
"affiliation": "Universität Hamburg",
"fullName": "Oscar Ariza",
"givenName": "Oscar",
"surname": "Ariza",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Central Florida",
"fullName": "Gerd Bruder",
"givenName": "Gerd",
"surname": "Bruder",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Universität Hamburg",
"fullName": "Nicholas Katzakis",
"givenName": "Nicholas",
"surname": "Katzakis",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Universität Hamburg",
"fullName": "Frank Steinicke",
"givenName": "Frank",
"surname": "Steinicke",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2018-03-01T00:00:00",
"pubType": "proceedings",
"pages": "327-334",
"year": "2018",
"issn": null,
"isbn": "978-1-5386-3365-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08447559",
"articleId": "13bd1fHrlS0",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08447552",
"articleId": "13bd1tl2oml",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2016/0836/0/07504679",
"title": "Keynote speaker: Towards immersive multimodal display: Interactive auditory rendering for complex virtual environments",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2016/07504679/12OmNC1GugS",
"parentPublication": {
"id": "proceedings/vr/2016/0836/0",
"title": "2016 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2022/9617/0/961700a093",
"title": "Exploring the Design Space for Immersive Embodiment in Dance",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2022/961700a093/1CJc1vWLV6w",
"parentPublication": {
"id": "proceedings/vr/2022/9617/0",
"title": "2022 IEEE on Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a085",
"title": "MEinVR: Multimodal Interaction Paradigms in Immersive Exploration",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a085/1J7W98ABKwM",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798119",
"title": "The Effect of Elastic Feedback on the Perceived User Experience and Presence of Travel Methods in Immersive Environments",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798119/1cJ0Ka6I3jq",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/05/08998292",
"title": "Immersive Process Model Exploration in Virtual Reality",
"doi": null,
"abstractUrl": "/journal/tg/2020/05/08998292/1hpPCy1gJoI",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2020/5608/0/09089598",
"title": "Implementation and Evaluation of Touch-based Interaction Using Electrovibration Haptic Feedback in Virtual Environments",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2020/09089598/1jIxb4ZNizS",
"parentPublication": {
"id": "proceedings/vr/2020/5608/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2020/5608/0/09089663",
"title": "Slicing-Volume: Hybrid 3D/2D Multi-target Selection Technique for Dense Virtual Environments",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2020/09089663/1jIxdJFH8as",
"parentPublication": {
"id": "proceedings/vr/2020/5608/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ds-rt/2020/7343/0/09213691",
"title": "Laying the path to consumer-level immersive simulation environments",
"doi": null,
"abstractUrl": "/proceedings-article/ds-rt/2020/09213691/1nHRMLS54By",
"parentPublication": {
"id": "proceedings/ds-rt/2020/7343/0",
"title": "2020 IEEE/ACM 24th International Symposium on Distributed Simulation and Real Time Applications (DS-RT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/02/09222313",
"title": "ShuttleSpace: Exploring and Analyzing Movement Trajectory in Immersive Visualization",
"doi": null,
"abstractUrl": "/journal/tg/2021/02/09222313/1nTr29xEpkk",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a189",
"title": "Effects of Different Auditory Feedback Frequencies in Virtual Reality 3D Pointing Tasks",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a189/1tnWFBgcYAo",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "17D45VtKirt",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"acronym": "cvpr",
"groupId": "1000147",
"volume": "0",
"displayVolume": "0",
"year": "2018",
"__typename": "ProceedingType"
},
"article": {
"id": "17D45Xtvp98",
"doi": "10.1109/CVPR.2018.00104",
"title": "3D Object Detection with Latent Support Surfaces",
"normalizedTitle": "3D Object Detection with Latent Support Surfaces",
"abstract": "We develop a 3D object detection algorithm that uses latent support surfaces to capture contextual relationships in indoor scenes. Existing 3D representations for RGB-D images capture the local shape and appearance of object categories, but have limited power to represent objects with different visual styles. The detection of small objects is also challenging because the search space is very large in 3D scenes. However, we observe that much of the shape variation within 3D object categories can be explained by the location of a latent support surface, and smaller objects are often supported by larger objects. Therefore, we explicitly use latent support surfaces to better represent the 3D appearance of large objects, and provide contextual cues to improve the detection of small objects. We evaluate our model with 19 object categories from the SUN RGB-D database, and demonstrate state-of-the-art performance.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We develop a 3D object detection algorithm that uses latent support surfaces to capture contextual relationships in indoor scenes. Existing 3D representations for RGB-D images capture the local shape and appearance of object categories, but have limited power to represent objects with different visual styles. The detection of small objects is also challenging because the search space is very large in 3D scenes. However, we observe that much of the shape variation within 3D object categories can be explained by the location of a latent support surface, and smaller objects are often supported by larger objects. Therefore, we explicitly use latent support surfaces to better represent the 3D appearance of large objects, and provide contextual cues to improve the detection of small objects. We evaluate our model with 19 object categories from the SUN RGB-D database, and demonstrate state-of-the-art performance.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We develop a 3D object detection algorithm that uses latent support surfaces to capture contextual relationships in indoor scenes. Existing 3D representations for RGB-D images capture the local shape and appearance of object categories, but have limited power to represent objects with different visual styles. The detection of small objects is also challenging because the search space is very large in 3D scenes. However, we observe that much of the shape variation within 3D object categories can be explained by the location of a latent support surface, and smaller objects are often supported by larger objects. Therefore, we explicitly use latent support surfaces to better represent the 3D appearance of large objects, and provide contextual cues to improve the detection of small objects. We evaluate our model with 19 object categories from the SUN RGB-D database, and demonstrate state-of-the-art performance.",
"fno": "642000a937",
"keywords": [
"Image Colour Analysis",
"Image Representation",
"Object Detection",
"Search Problems",
"Latent Support Surfaces",
"Indoor Scenes",
"3 D Representations",
"RGB D Images",
"Local Shape",
"Local Appearance",
"Visual Styles",
"Search Space",
"3 D Scenes",
"3 D Object Categories",
"3 D Appearance",
"Contextual Cues",
"SUN RGB D Database",
"3 D Object Detection Algorithm",
"Three Dimensional Displays",
"Object Detection",
"Feature Extraction",
"Two Dimensional Displays",
"Solid Modeling",
"Shape",
"Proposals"
],
"authors": [
{
"affiliation": null,
"fullName": "Zhile Ren",
"givenName": "Zhile",
"surname": "Ren",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Erik B. Sudderth",
"givenName": "Erik B.",
"surname": "Sudderth",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2018-06-01T00:00:00",
"pubType": "proceedings",
"pages": "937-946",
"year": "2018",
"issn": null,
"isbn": "978-1-5386-6420-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "642000a928",
"articleId": "17D45XeKgnH",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "642000a947",
"articleId": "17D45VN31h0",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cvpr/2017/0457/0/0457a398",
"title": "Amodal Detection of 3D Objects: Inferring 3D Bounding Boxes from 2D Ones in RGB-Depth Images",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2017/0457a398/12OmNvAiSEn",
"parentPublication": {
"id": "proceedings/cvpr/2017/0457/0",
"title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2018/6420/0/642000a918",
"title": "Frustum PointNets for 3D Object Detection from RGB-D Data",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2018/642000a918/17D45WK5Ang",
"parentPublication": {
"id": "proceedings/cvpr/2018/6420/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2021/04/08883058",
"title": "Weakly-Supervised Learning of Category-Specific 3D Object Shapes",
"doi": null,
"abstractUrl": "/journal/tp/2021/04/08883058/1epRQBbBei4",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2019/4803/0/480300c212",
"title": "3D-RelNet: Joint Object and Relational Network for 3D Prediction",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2019/480300c212/1hVl8eX0Hok",
"parentPublication": {
"id": "proceedings/iccv/2019/4803/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2020/6553/0/09093316",
"title": "3D Hand Pose Estimation with Disentangled Cross-Modal Latent Space",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2020/09093316/1jPbFBfZZAI",
"parentPublication": {
"id": "proceedings/wacv/2020/6553/0",
"title": "2020 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800d193",
"title": "HOnnotate: A Method for 3D Annotation of Hand and Object Poses",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800d193/1m3nmGf4ktq",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800k0444",
"title": "MLCVNet: Multi-Level Context VoteNet for 3D Object Detection",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800k0444/1m3o9xtotGM",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800g876",
"title": "Joint Spatial-Temporal Optimization for Stereo 3D Object Tracking",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800g876/1m3osz6Ch7W",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2020/10/08737746",
"title": "Clouds of Oriented Gradients for 3D Detection of Objects, Surfaces, and Indoor Scene Layouts",
"doi": null,
"abstractUrl": "/journal/tp/2020/10/08737746/1mP22G2vmOA",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900m2778",
"title": "KeypointDeformer: Unsupervised 3D Keypoint Discovery for Shape Control",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900m2778/1yeLHtZPkZi",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1qpzz6dhLLq",
"title": "2020 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)",
"acronym": "aivr",
"groupId": "1830004",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1qpzAYILRRe",
"doi": "10.1109/AIVR50618.2020.00073",
"title": "Workload, Presence and Task Performance of Virtual Object Manipulation on WebVR",
"normalizedTitle": "Workload, Presence and Task Performance of Virtual Object Manipulation on WebVR",
"abstract": "WebVR technology is widely used as a visualization approach to display virtual objects on 2D webpages. Much of the current literature on virtual object manipulation on the 2D screen pays particular attention to task performance, but few studies focus on users' psychological feedback and no literature aims at its relationship with task performance. This paper compares manipulation modes with different degrees of freedom (DoF) in translation and rotation on WebVR to explore users' workload and presence by self-reported data, and task performance by measuring completion time and error rate. The experiment results present that the increase of DoF is associated with lower perceived workload, while people may feel a higher level of presence during tasks. Additionally, this study only finds a positive correlation between workload and efficiency, and a negative correlation between presence and efficiency, which means that when feeling less workload or more presence, people tend to spend less time to complete translation and rotation tasks on WebVR.",
"abstracts": [
{
"abstractType": "Regular",
"content": "WebVR technology is widely used as a visualization approach to display virtual objects on 2D webpages. Much of the current literature on virtual object manipulation on the 2D screen pays particular attention to task performance, but few studies focus on users' psychological feedback and no literature aims at its relationship with task performance. This paper compares manipulation modes with different degrees of freedom (DoF) in translation and rotation on WebVR to explore users' workload and presence by self-reported data, and task performance by measuring completion time and error rate. The experiment results present that the increase of DoF is associated with lower perceived workload, while people may feel a higher level of presence during tasks. Additionally, this study only finds a positive correlation between workload and efficiency, and a negative correlation between presence and efficiency, which means that when feeling less workload or more presence, people tend to spend less time to complete translation and rotation tasks on WebVR.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "WebVR technology is widely used as a visualization approach to display virtual objects on 2D webpages. Much of the current literature on virtual object manipulation on the 2D screen pays particular attention to task performance, but few studies focus on users' psychological feedback and no literature aims at its relationship with task performance. This paper compares manipulation modes with different degrees of freedom (DoF) in translation and rotation on WebVR to explore users' workload and presence by self-reported data, and task performance by measuring completion time and error rate. The experiment results present that the increase of DoF is associated with lower perceived workload, while people may feel a higher level of presence during tasks. Additionally, this study only finds a positive correlation between workload and efficiency, and a negative correlation between presence and efficiency, which means that when feeling less workload or more presence, people tend to spend less time to complete translation and rotation tasks on WebVR.",
"fno": "746300a358",
"keywords": [
"Data Visualisation",
"Haptic Interfaces",
"Human Computer Interaction",
"Internet",
"Virtual Reality",
"Web Sites",
"Task Performance",
"Virtual Object Manipulation",
"Web VR Technology",
"Perceived Workload",
"Visualization",
"2 D Web Pages",
"Degrees Of Freedom",
"Touch Based Interfaces",
"Task Analysis",
"Psychology",
"Correlation",
"Three Dimensional Displays",
"Two Dimensional Displays",
"Correlation Coefficient",
"Error Analysis",
"Workload",
"Presence",
"Task Performance",
"Virtual Object Manipulation",
"Degree Of Freedom",
"Web VR"
],
"authors": [
{
"affiliation": "Xi’an Jiaotong-Liverpool University,Design School,Suzhou,China",
"fullName": "Wenxin Sun",
"givenName": "Wenxin",
"surname": "Sun",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Xi’an Jiaotong-Liverpool University,Design School,Suzhou,China",
"fullName": "Mengjie Huang",
"givenName": "Mengjie",
"surname": "Huang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Xi’an Jiaotong-Liverpool University,School of Advanced Technology,Suzhou,China",
"fullName": "Rui Yang",
"givenName": "Rui",
"surname": "Yang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Xi’an Jiaotong-Liverpool University,Design School,Suzhou,China",
"fullName": "Jingjing Zhang",
"givenName": "Jingjing",
"surname": "Zhang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Xi’an Jiaotong-Liverpool University,Design School,Suzhou,China",
"fullName": "Liu Wang",
"givenName": "Liu",
"surname": "Wang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Liverpool,Department of Civil Engineering and Industrial Design,Liverpool,United Kingdom",
"fullName": "Ji Han",
"givenName": "Ji",
"surname": "Han",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Xi’an Jiaotong-Liverpool University,School of Advanced Technology,Suzhou,China",
"fullName": "Yong Yue",
"givenName": "Yong",
"surname": "Yue",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "aivr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-12-01T00:00:00",
"pubType": "proceedings",
"pages": "358-361",
"year": "2020",
"issn": null,
"isbn": "978-1-7281-7463-1",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "746300a351",
"articleId": "1qpzzTXUIgw",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "746300a362",
"articleId": "1qpzDD2wZiM",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/isuvr/2017/3091/0/3091a010",
"title": "Duplication Based Distance-Free Freehand Virtual Object Manipulation",
"doi": null,
"abstractUrl": "/proceedings-article/isuvr/2017/3091a010/12OmNApcu9E",
"parentPublication": {
"id": "proceedings/isuvr/2017/3091/0",
"title": "2017 International Symposium on Ubiquitous Virtual Reality (ISUVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/crv/2017/2818/0/2818a329",
"title": "4-DoF Tracking for Robot Fine Manipulation Tasks",
"doi": null,
"abstractUrl": "/proceedings-article/crv/2017/2818a329/12OmNz61cYb",
"parentPublication": {
"id": "proceedings/crv/2017/2818/0",
"title": "2017 14th Conference on Computer and Robot Vision (CRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2016/0842/0/07460065",
"title": "Smartwatch-assisted robust 6-DOF hand tracker for object manipulation in HMD-based augmented reality",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2016/07460065/12OmNzlUKES",
"parentPublication": {
"id": "proceedings/3dui/2016/0842/0",
"title": "2016 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/03/ttg2012030369",
"title": "Integrality and Separability of Multitouch Interaction Techniques in 3D Manipulation Tasks",
"doi": null,
"abstractUrl": "/journal/tg/2012/03/ttg2012030369/13rRUx0gepW",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2019/2838/0/283800a170",
"title": "Relationships between Oculo-Motor Mesures as Task-evoked Mental Workloads During an Manipulation Task",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2019/283800a170/1cMFbe8mHx6",
"parentPublication": {
"id": "proceedings/iv/2019/2838/0",
"title": "2019 23rd International Conference Information Visualisation (IV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2019/4803/0/480300h677",
"title": "CDPN: Coordinates-Based Disentangled Pose Network for Real-Time RGB-Based 6-DoF Object Pose Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2019/480300h677/1hVlCutzsLS",
"parentPublication": {
"id": "proceedings/iccv/2019/4803/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/05/08998368",
"title": "On Motor Performance in Virtual 3D Object Manipulation",
"doi": null,
"abstractUrl": "/journal/tg/2020/05/08998368/1hrXfCmEWHe",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/02/09144483",
"title": "Evaluating the Effects of Non-Isomorphic Rotation on 3D Manipulation Tasks in Mixed Reality Simulation",
"doi": null,
"abstractUrl": "/journal/tg/2022/02/09144483/1lClltCZfOg",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2021/0158/0/015800a403",
"title": "Separation, Composition, or Hybrid? – Comparing Collaborative 3D Object Manipulation Techniques for Handheld Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2021/015800a403/1yeD8DDATSw",
"parentPublication": {
"id": "proceedings/ismar/2021/0158/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900e495",
"title": "ManipulaTHOR: A Framework for Visual Object Manipulation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900e495/1yeKXyjY3Wo",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNAXxXaK",
"title": "2017 IEEE International Conference on Computer Vision (ICCV)",
"acronym": "iccv",
"groupId": "1000149",
"volume": "0",
"displayVolume": "0",
"year": "2017",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNBrlPxE",
"doi": "10.1109/ICCV.2017.110",
"title": "Focal Track: Depth and Accommodation with Oscillating Lens Deformation",
"normalizedTitle": "Focal Track: Depth and Accommodation with Oscillating Lens Deformation",
"abstract": "The focal track sensor is a monocular and computationally efficient depth sensor that is based on defocus controlled by a liquid membrane lens. It synchronizes small lens oscillations with a photosensor to produce real-time depth maps by means of differential defocus, and it couples these oscillations with bigger lens deformations that adapt the defocus working range to track objects over large axial distances. To create the focal track sensor, we derive a texture-invariant family of equations that relate image derivatives to scene depth when a lens changes its focal length differentially. Based on these equations, we design a feed-forward sequence of computations that: robustly incorporates image derivatives at multiple scales; produces confidence maps along with depth; and can be trained endto- end to mitigate against noise, aberrations, and other non-idealities. Our prototype with 1-inch optics produces depth and confidence maps at 100 frames per second over an axial range of more than 75cm.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The focal track sensor is a monocular and computationally efficient depth sensor that is based on defocus controlled by a liquid membrane lens. It synchronizes small lens oscillations with a photosensor to produce real-time depth maps by means of differential defocus, and it couples these oscillations with bigger lens deformations that adapt the defocus working range to track objects over large axial distances. To create the focal track sensor, we derive a texture-invariant family of equations that relate image derivatives to scene depth when a lens changes its focal length differentially. Based on these equations, we design a feed-forward sequence of computations that: robustly incorporates image derivatives at multiple scales; produces confidence maps along with depth; and can be trained endto- end to mitigate against noise, aberrations, and other non-idealities. Our prototype with 1-inch optics produces depth and confidence maps at 100 frames per second over an axial range of more than 75cm.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The focal track sensor is a monocular and computationally efficient depth sensor that is based on defocus controlled by a liquid membrane lens. It synchronizes small lens oscillations with a photosensor to produce real-time depth maps by means of differential defocus, and it couples these oscillations with bigger lens deformations that adapt the defocus working range to track objects over large axial distances. To create the focal track sensor, we derive a texture-invariant family of equations that relate image derivatives to scene depth when a lens changes its focal length differentially. Based on these equations, we design a feed-forward sequence of computations that: robustly incorporates image derivatives at multiple scales; produces confidence maps along with depth; and can be trained endto- end to mitigate against noise, aberrations, and other non-idealities. Our prototype with 1-inch optics produces depth and confidence maps at 100 frames per second over an axial range of more than 75cm.",
"fno": "1032a966",
"keywords": [
"Aberrations",
"Image Colour Analysis",
"Image Sensors",
"Image Texture",
"Lenses",
"Optical Design Techniques",
"Optical Focusing",
"Focal Length",
"Scene Depth",
"Defocus Working Range",
"Bigger Lens Deformations",
"Differential Defocus",
"Real Time Depth Maps",
"Lens Oscillations",
"Liquid Membrane Lens",
"Computationally Efficient Depth Sensor",
"Monocular Depth Sensor",
"Focal Track Sensor",
"Oscillating Lens Deformation",
"Confidence Maps",
"Image Derivatives",
"Lenses",
"Mathematical Model",
"Strain",
"Oscillators",
"Optical Imaging",
"Robot Sensing Systems",
"Optical Sensors"
],
"authors": [
{
"affiliation": null,
"fullName": "Qi Guo",
"givenName": "Qi",
"surname": "Guo",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Emma Alexander",
"givenName": "Emma",
"surname": "Alexander",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Todd Zickler",
"givenName": "Todd",
"surname": "Zickler",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iccv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2017-10-01T00:00:00",
"pubType": "proceedings",
"pages": "966-974",
"year": "2017",
"issn": "2380-7504",
"isbn": "978-1-5386-1032-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "1032a957",
"articleId": "12OmNy5R3sS",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "1032a975",
"articleId": "12OmNBpEeLC",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iccp/2018/2526/0/08368469",
"title": "Focal sweep imaging with multi-focal diffractive optics",
"doi": null,
"abstractUrl": "/proceedings-article/iccp/2018/08368469/12OmNBV9Ii2",
"parentPublication": {
"id": "proceedings/iccp/2018/2526/0",
"title": "2018 IEEE International Conference on Computational Photography (ICCP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2014/5209/0/5209a202",
"title": "Non-frontal Camera Calibration Using Focal Stack Imagery",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2014/5209a202/12OmNC943xl",
"parentPublication": {
"id": "proceedings/icpr/2014/5209/0",
"title": "2014 22nd International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccp/2013/6463/0/06528316",
"title": "What does an aberrated photo tell us about the lens and the scene?",
"doi": null,
"abstractUrl": "/proceedings-article/iccp/2013/06528316/12OmNCesrcF",
"parentPublication": {
"id": "proceedings/iccp/2013/6463/0",
"title": "2013 IEEE International Conference on Computational Photography (ICCP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccp/2010/7023/0/05585101",
"title": "Spectral Focal Sweep: Extended depth of field from chromatic aberrations",
"doi": null,
"abstractUrl": "/proceedings-article/iccp/2010/05585101/12OmNrYCXIW",
"parentPublication": {
"id": "proceedings/iccp/2010/7023/0",
"title": "2010 IEEE International Conference on Computational Photography (ICCP 2010)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccp/2009/4534/0/05559009",
"title": "Image destabilization: Programmable defocus using lens and sensor motion",
"doi": null,
"abstractUrl": "/proceedings-article/iccp/2009/05559009/12OmNvxsSTw",
"parentPublication": {
"id": "proceedings/iccp/2009/4534/0",
"title": "IEEE International Conference on Computational Photography (ICCP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ised/2014/6965/0/6965a025",
"title": "Plasmonic Lens Based on Elliptically Tapered Metallic Nano Slits",
"doi": null,
"abstractUrl": "/proceedings-article/ised/2014/6965a025/12OmNwdtw8Y",
"parentPublication": {
"id": "proceedings/ised/2014/6965/0",
"title": "2014 Fifth International Symposium on Electronic System Design (ISED)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2015/8391/0/8391d505",
"title": "Extended Depth of Field Catadioptric Imaging Using Focal Sweep",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2015/8391d505/12OmNxEjXTz",
"parentPublication": {
"id": "proceedings/iccv/2015/8391/0",
"title": "2015 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccp/2013/6463/0/06528302",
"title": "Focal sweep videography with deformable optics",
"doi": null,
"abstractUrl": "/proceedings-article/iccp/2013/06528302/12OmNxUMHo6",
"parentPublication": {
"id": "proceedings/iccp/2013/6463/0",
"title": "2013 IEEE International Conference on Computational Photography (ICCP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/eqec/2005/8973/0/01567514",
"title": "Near-field-light lens for nano-focusing of atoms",
"doi": null,
"abstractUrl": "/proceedings-article/eqec/2005/01567514/12OmNzvz6Ip",
"parentPublication": {
"id": "proceedings/eqec/2005/8973/0",
"title": "2005 European Quantum Electronics Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2020/8128/0/812800b206",
"title": "A Novel Depth from Defocus Framework Based on a Thick Lens Camera Model",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2020/812800b206/1qyxoyCLZbW",
"parentPublication": {
"id": "proceedings/3dv/2020/8128/0",
"title": "2020 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNxV4iwa",
"title": "2014 IEEE International Conference on Computational Photography (ICCP)",
"acronym": "iccp",
"groupId": "1800125",
"volume": "0",
"displayVolume": "0",
"year": "2014",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNzDNtsg",
"doi": "10.1109/ICCPHOT.2014.6831818",
"title": "Which side of the focal plane are you on?",
"normalizedTitle": "Which side of the focal plane are you on?",
"abstract": "Defocus blur is an indicator for the depth structure of a scene. However, given a single input image from a conventional camera one cannot distinguish between blurred objects lying in front or behind the focal plane, as they may be subject to exactly the same amount of blur. In this paper we address this limitation by exploiting coded apertures. Previous work in this area focuses on setups where the scene is placed either entirely in front or entirely behind the focal plane. We demonstrate that asymmetric apertures result in unique blurs for all distances from the camera. To exploit asymmetric apertures we propose an algorithm that can unambiguously estimate scene depth and texture from a single input image. One of the main advantages of our method is that, within the same depth range, we can work with less blurred data than in other methods. The technique is tested on both synthetic and real images.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Defocus blur is an indicator for the depth structure of a scene. However, given a single input image from a conventional camera one cannot distinguish between blurred objects lying in front or behind the focal plane, as they may be subject to exactly the same amount of blur. In this paper we address this limitation by exploiting coded apertures. Previous work in this area focuses on setups where the scene is placed either entirely in front or entirely behind the focal plane. We demonstrate that asymmetric apertures result in unique blurs for all distances from the camera. To exploit asymmetric apertures we propose an algorithm that can unambiguously estimate scene depth and texture from a single input image. One of the main advantages of our method is that, within the same depth range, we can work with less blurred data than in other methods. The technique is tested on both synthetic and real images.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Defocus blur is an indicator for the depth structure of a scene. However, given a single input image from a conventional camera one cannot distinguish between blurred objects lying in front or behind the focal plane, as they may be subject to exactly the same amount of blur. In this paper we address this limitation by exploiting coded apertures. Previous work in this area focuses on setups where the scene is placed either entirely in front or entirely behind the focal plane. We demonstrate that asymmetric apertures result in unique blurs for all distances from the camera. To exploit asymmetric apertures we propose an algorithm that can unambiguously estimate scene depth and texture from a single input image. One of the main advantages of our method is that, within the same depth range, we can work with less blurred data than in other methods. The technique is tested on both synthetic and real images.",
"fno": "06831818",
"keywords": [
"Apertures",
"Estimation",
"Kernel",
"Cameras",
"Measurement",
"Shape",
"Lenses"
],
"authors": [
{
"affiliation": "Institut für Informatik und Angewandte Mathematik, Universität Bern, Switzerland",
"fullName": "Anita Sellent",
"givenName": "Anita",
"surname": "Sellent",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Institut für Informatik und Angewandte Mathematik, Universität Bern, Switzerland",
"fullName": "Paolo Favaro",
"givenName": "Paolo",
"surname": "Favaro",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iccp",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2014-05-01T00:00:00",
"pubType": "proceedings",
"pages": "1-8",
"year": "2014",
"issn": null,
"isbn": "978-1-4799-5188-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "06831817",
"articleId": "12OmNy4r3Xa",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "06831819",
"articleId": "12OmNzgeLBy",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iccp/2018/2526/0/08368469",
"title": "Focal sweep imaging with multi-focal diffractive optics",
"doi": null,
"abstractUrl": "/proceedings-article/iccp/2018/08368469/12OmNBV9Ii2",
"parentPublication": {
"id": "proceedings/iccp/2018/2526/0",
"title": "2018 IEEE International Conference on Computational Photography (ICCP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dv/2015/8332/0/8332a326",
"title": "Optimal Camera Parameters for Depth from Defocus",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2015/8332a326/12OmNC8MsHb",
"parentPublication": {
"id": "proceedings/3dv/2015/8332/0",
"title": "2015 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccp/2010/7023/0/05585101",
"title": "Spectral Focal Sweep: Extended depth of field from chromatic aberrations",
"doi": null,
"abstractUrl": "/proceedings-article/iccp/2010/05585101/12OmNrYCXIW",
"parentPublication": {
"id": "proceedings/iccp/2010/7023/0",
"title": "2010 IEEE International Conference on Computational Photography (ICCP 2010)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/crv/2016/2491/0/2491a281",
"title": "Blur Calibration for Depth from Defocus",
"doi": null,
"abstractUrl": "/proceedings-article/crv/2016/2491a281/12OmNs59JP3",
"parentPublication": {
"id": "proceedings/crv/2016/2491/0",
"title": "2016 13th Conference on Computer and Robot Vision (CRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sibgrapi-t/2011/4549/0/06076747",
"title": "A gentle introduction to coded computational photography",
"doi": null,
"abstractUrl": "/proceedings-article/sibgrapi-t/2011/06076747/12OmNx6xHtq",
"parentPublication": {
"id": "proceedings/sibgrapi-t/2011/4549/0",
"title": "2011 24th SIBGRAPI Conference on Graphics, Patterns, and Images Tutorials",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2015/8391/0/8391d505",
"title": "Extended Depth of Field Catadioptric Imaging Using Focal Sweep",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2015/8391d505/12OmNxEjXTz",
"parentPublication": {
"id": "proceedings/iccv/2015/8391/0",
"title": "2015 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccp/2013/6463/0/06528302",
"title": "Focal sweep videography with deformable optics",
"doi": null,
"abstractUrl": "/proceedings-article/iccp/2013/06528302/12OmNxUMHo6",
"parentPublication": {
"id": "proceedings/iccp/2013/6463/0",
"title": "2013 IEEE International Conference on Computational Photography (ICCP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2018/6420/0/642000g393",
"title": "Aperture Supervision for Monocular Depth Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2018/642000g393/17D45WIXbNB",
"parentPublication": {
"id": "proceedings/cvpr/2018/6420/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/acpr/2017/3354/0/3354a250",
"title": "Focal Stack Representation and Focus Manipulation",
"doi": null,
"abstractUrl": "/proceedings-article/acpr/2017/3354a250/17D45Xi9rWm",
"parentPublication": {
"id": "proceedings/acpr/2017/3354/0",
"title": "2017 4th IAPR Asian Conference on Pattern Recognition (ACPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/05/08999805",
"title": "Illuminated Focus: Vision Augmentation using Spatial Defocusing via Focal Sweep Eyeglasses and High-Speed Projector",
"doi": null,
"abstractUrl": "/journal/tg/2020/05/08999805/1hpPCtKIAaA",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNC1GueH",
"title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)",
"acronym": "icpr",
"groupId": "1000545",
"volume": "0",
"displayVolume": "0",
"year": "2012",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNzZWbH0",
"doi": "",
"title": "Removal of dust artifacts in focal stack image sequences",
"normalizedTitle": "Removal of dust artifacts in focal stack image sequences",
"abstract": "We propose a technique for removing the appearance of sensor dust in a focal stack image sequence captured with multiple focus settings. Our method is based on the key observation that sensor dust artifacts shift in image position with respect to focus setting, which allows scene information occluded by dust in one image to be inferred from other images in the focal stack. To deal with complications arising from differences in local defocus blur among the images, we analyze the relative blur among corresponding image regions in detecting and removing dust artifacts. Our results show improvements over the state-of-art technique for automatic removal of sensor dust.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We propose a technique for removing the appearance of sensor dust in a focal stack image sequence captured with multiple focus settings. Our method is based on the key observation that sensor dust artifacts shift in image position with respect to focus setting, which allows scene information occluded by dust in one image to be inferred from other images in the focal stack. To deal with complications arising from differences in local defocus blur among the images, we analyze the relative blur among corresponding image regions in detecting and removing dust artifacts. Our results show improvements over the state-of-art technique for automatic removal of sensor dust.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We propose a technique for removing the appearance of sensor dust in a focal stack image sequence captured with multiple focus settings. Our method is based on the key observation that sensor dust artifacts shift in image position with respect to focus setting, which allows scene information occluded by dust in one image to be inferred from other images in the focal stack. To deal with complications arising from differences in local defocus blur among the images, we analyze the relative blur among corresponding image regions in detecting and removing dust artifacts. Our results show improvements over the state-of-art technique for automatic removal of sensor dust.",
"fno": "06460700",
"keywords": [
"Dust",
"Image Denoising",
"Image Restoration",
"Image Sensors",
"Image Sequences",
"Natural Scenes",
"Object Detection",
"Optical Focusing",
"Focal Stack Image Sequence",
"Sensor Dust Artifacts Shift",
"Image Position Shift",
"Focus Setting",
"Scene Information Occlusion",
"Local Defocus Blur Image",
"Dust Artifact Detection",
"Lenses",
"Cameras",
"Image Sequences",
"Integrated Optics",
"Optical Imaging",
"Optical Sensors"
],
"authors": [
{
"affiliation": "Zhejiang University",
"fullName": "Chen Li",
"givenName": "Chen",
"surname": "Li",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Zhejiang University",
"fullName": "Kun Zhou",
"givenName": "Kun",
"surname": "Zhou",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Microsoft Research Asia",
"fullName": "Stephen Lin",
"givenName": "Stephen",
"surname": "Lin",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2012-11-01T00:00:00",
"pubType": "proceedings",
"pages": "2602-2605",
"year": "2012",
"issn": "1051-4651",
"isbn": "978-1-4673-2216-4",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "06460699",
"articleId": "12OmNyRPgBQ",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "06460701",
"articleId": "12OmNzwHvuk",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iccp/2011/707/0/05753120",
"title": "Modeling and removing spatially-varying optical blur",
"doi": null,
"abstractUrl": "/proceedings-article/iccp/2011/05753120/12OmNAFWOQu",
"parentPublication": {
"id": "proceedings/iccp/2011/707/0",
"title": "IEEE International Conference on Computational Photography (ICCP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cdciem/2012/4639/0/4639a638",
"title": "Spray Dust Removal Device Based on the Image Contrast",
"doi": null,
"abstractUrl": "/proceedings-article/cdciem/2012/4639a638/12OmNAKM03b",
"parentPublication": {
"id": "proceedings/cdciem/2012/4639/0",
"title": "Computer Distributed Control and Intelligent Environmental Monitoring, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2016/0836/0/07504749",
"title": "SharpView: Improved clarity of defocussed content on optical see-through head-mounted displays",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2016/07504749/12OmNBBhN9g",
"parentPublication": {
"id": "proceedings/vr/2016/0836/0",
"title": "2016 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2016/0842/0/07460049",
"title": "SharpView: Improved clarity of defocused content on optical see-through head-mounted displays",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2016/07460049/12OmNBWzHQi",
"parentPublication": {
"id": "proceedings/3dui/2016/0842/0",
"title": "2016 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2017/1032/0/1032a966",
"title": "Focal Track: Depth and Accommodation with Oscillating Lens Deformation",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2017/1032a966/12OmNBrlPxE",
"parentPublication": {
"id": "proceedings/iccv/2017/1032/0",
"title": "2017 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2014/5209/0/5209a202",
"title": "Non-frontal Camera Calibration Using Focal Stack Imagery",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2014/5209a202/12OmNC943xl",
"parentPublication": {
"id": "proceedings/icpr/2014/5209/0",
"title": "2014 22nd International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccp/2012/1662/0/06215219",
"title": "Variable focus video: Reconstructing depth and video for dynamic scenes",
"doi": null,
"abstractUrl": "/proceedings-article/iccp/2012/06215219/12OmNvD8RD9",
"parentPublication": {
"id": "proceedings/iccp/2012/1662/0",
"title": "2012 IEEE International Conference on Computational Photography (ICCP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2015/8391/0/8391d505",
"title": "Extended Depth of Field Catadioptric Imaging Using Focal Sweep",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2015/8391d505/12OmNxEjXTz",
"parentPublication": {
"id": "proceedings/iccv/2015/8391/0",
"title": "2015 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2007/1179/0/04270285",
"title": "Removal of Image Artifacts Due to Sensor Dust",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2007/04270285/12OmNxdDFOl",
"parentPublication": {
"id": "proceedings/cvpr/2007/1179/0",
"title": "2007 IEEE Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/05/08999805",
"title": "Illuminated Focus: Vision Augmentation using Spatial Defocusing via Focal Sweep Eyeglasses and High-Speed Projector",
"doi": null,
"abstractUrl": "/journal/tg/2020/05/08999805/1hpPCtKIAaA",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNyUWQR6",
"title": "Virtual Reality Annual International Symposium",
"acronym": "vrais",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "1993",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNAPSMme",
"doi": "10.1109/VRAIS.1993.380793",
"title": "Presence in immersive virtual environments",
"normalizedTitle": "Presence in immersive virtual environments",
"abstract": "Immersive virtual environments (IVEs) provide a tightly coupled human-computer interface; input to the sensory organs of the human participant are directly generated through computer displays, in the visual, auditory, tactile and haptic modalities. Some of the results of a pilot experimental study of presence in IVEs are outlined. This is a contribution to a project involved in constructing a system for architectural walkthrough, where architects and their clients are able to navigate through and effect changes to a virtual building interior. Emphasis is placed on the interface provided by the virtual environment generator (VEG) to the human user, and initially on the problem of the establishment of the presence of the human inside the virtual environment (VE).",
"abstracts": [
{
"abstractType": "Regular",
"content": "Immersive virtual environments (IVEs) provide a tightly coupled human-computer interface; input to the sensory organs of the human participant are directly generated through computer displays, in the visual, auditory, tactile and haptic modalities. Some of the results of a pilot experimental study of presence in IVEs are outlined. This is a contribution to a project involved in constructing a system for architectural walkthrough, where architects and their clients are able to navigate through and effect changes to a virtual building interior. Emphasis is placed on the interface provided by the virtual environment generator (VEG) to the human user, and initially on the problem of the establishment of the presence of the human inside the virtual environment (VE).",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Immersive virtual environments (IVEs) provide a tightly coupled human-computer interface; input to the sensory organs of the human participant are directly generated through computer displays, in the visual, auditory, tactile and haptic modalities. Some of the results of a pilot experimental study of presence in IVEs are outlined. This is a contribution to a project involved in constructing a system for architectural walkthrough, where architects and their clients are able to navigate through and effect changes to a virtual building interior. Emphasis is placed on the interface provided by the virtual environment generator (VEG) to the human user, and initially on the problem of the establishment of the presence of the human inside the virtual environment (VE).",
"fno": "00380793",
"keywords": [
"Virtual Environment Generator",
"Immersive Virtual Environments",
"Tightly Coupled Human Computer Interface",
"Sensory Organs",
"Computer Displays",
"Visual",
"Auditory",
"Tactile",
"Haptic",
"Architectural Walkthrough",
"Virtual Building Interior"
],
"authors": [
{
"affiliation": "Dept. of Comput. Sci., QMW Univ. of London, UK",
"fullName": "M. Slater",
"givenName": "M.",
"surname": "Slater",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Dept. of Comput. Sci., QMW Univ. of London, UK",
"fullName": "M. Usoh",
"givenName": "M.",
"surname": "Usoh",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vrais",
"isOpenAccess": false,
"showRecommendedArticles": false,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "1993-09-01T00:00:00",
"pubType": "proceedings",
"pages": "90-96",
"year": "1993",
"issn": null,
"isbn": "0-7803-1363-1",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "00380792",
"articleId": "12OmNx8wTlJ",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "00380794",
"articleId": "12OmNzC5SWw",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNscfI2G",
"title": "Pattern Recognition, International Conference on",
"acronym": "icpr",
"groupId": "1000545",
"volume": "3",
"displayVolume": "0",
"year": "2004",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNBpEeZ6",
"doi": "10.1109/ICPR.2004.1334454",
"title": "Pattern perception in animals remote from man",
"normalizedTitle": "Pattern perception in animals remote from man",
"abstract": "Summary form only given, as follows. Humans, with the massive computational power of the cerebral cortex, have managed to solve most of the problems that make pattern recognition such a difficult task. Other animals are not so well endowed with processing power: an insect brain, for example, has 105 to 106 neurons compared with our 1011. Nevertheless, they still have to recognise predators, prey and conspecifics, and find their way around the world. Often this means that they have to cut corners, using what machinery they have in economical ways. Typically this means tailoring their recognition systems to just those features that really matter, rather than going for the general purpose mechanism that primates have achieved. In this talk I will examine some of the ingenious and sometimes strange solutions that animals such as insects, spiders, crabs and molluscs have come up with to simplify the tasks of pattern recognition, while still satisfying their requirements of their often complex behaviour.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Summary form only given, as follows. Humans, with the massive computational power of the cerebral cortex, have managed to solve most of the problems that make pattern recognition such a difficult task. Other animals are not so well endowed with processing power: an insect brain, for example, has 105 to 106 neurons compared with our 1011. Nevertheless, they still have to recognise predators, prey and conspecifics, and find their way around the world. Often this means that they have to cut corners, using what machinery they have in economical ways. Typically this means tailoring their recognition systems to just those features that really matter, rather than going for the general purpose mechanism that primates have achieved. In this talk I will examine some of the ingenious and sometimes strange solutions that animals such as insects, spiders, crabs and molluscs have come up with to simplify the tasks of pattern recognition, while still satisfying their requirements of their often complex behaviour.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Summary form only given, as follows. Humans, with the massive computational power of the cerebral cortex, have managed to solve most of the problems that make pattern recognition such a difficult task. Other animals are not so well endowed with processing power: an insect brain, for example, has 105 to 106 neurons compared with our 1011. Nevertheless, they still have to recognise predators, prey and conspecifics, and find their way around the world. Often this means that they have to cut corners, using what machinery they have in economical ways. Typically this means tailoring their recognition systems to just those features that really matter, rather than going for the general purpose mechanism that primates have achieved. In this talk I will examine some of the ingenious and sometimes strange solutions that animals such as insects, spiders, crabs and molluscs have come up with to simplify the tasks of pattern recognition, while still satisfying their requirements of their often complex behaviour.",
"fno": "01334454",
"keywords": [
"Animals",
"Pattern Recognition",
"Insects",
"Humans",
"Cerebral Cortex",
"Energy Management",
"Neurons",
"Machinery",
"Power System Economics",
"Power Generation Economics"
],
"authors": [
{
"affiliation": "University of Sussex",
"fullName": "M.F. Land",
"givenName": "M.F.",
"surname": "Land",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2004-10-01T00:00:00",
"pubType": "proceedings",
"pages": "1",
"year": "2004",
"issn": "1051-4651",
"isbn": "0-7695-2128-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "01334453",
"articleId": "12OmNAg7k1U",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "212830008",
"articleId": "12OmNzaQomj",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iccv/1993/3870/0/00378181",
"title": "A system for automatic vectorization and interpretation of map-drawings",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/1993/00378181/12OmNApLGD9",
"parentPublication": {
"id": "proceedings/iccv/1993/3870/0",
"title": "1993 (4th) International Conference on Computer Vision",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hicss/1996/7324/2/00495427",
"title": "Genetically optimized neural network classifiers for bankruptcy prediction-an empirical study",
"doi": null,
"abstractUrl": "/proceedings-article/hicss/1996/00495427/12OmNBKEymp",
"parentPublication": {
"id": "proceedings/hicss/1996/7324/2",
"title": "Proceedings of HICSS-29: 29th Hawaii International Conference on System Sciences",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/tai/1998/5214/0/00744864",
"title": "Diagnostics of dynamical systems by recognizing the default and abnormal pattern",
"doi": null,
"abstractUrl": "/proceedings-article/tai/1998/00744864/12OmNBf94XG",
"parentPublication": {
"id": "proceedings/tai/1998/5214/0",
"title": "Proceedings of 10th International Conference on Tools with Artificial Intelligence (ICTA'98)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2008/2174/0/04760945",
"title": "The 5th IAPR workshop on pattern recognition in remote sensing",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2008/04760945/12OmNs0C9Lf",
"parentPublication": {
"id": "proceedings/icpr/2008/2174/0",
"title": "ICPR 2008 19th International Conference on Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/camp/1993/5420/0/00622502",
"title": "An integrated system architecture for binary image understanding",
"doi": null,
"abstractUrl": "/proceedings-article/camp/1993/00622502/12OmNz5JCcp",
"parentPublication": {
"id": "proceedings/camp/1993/5420/0",
"title": "1993 Computer Architectures for Machine Perception",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cit/2009/3836/1/05328106",
"title": "Keynote Speeches - Volume 1",
"doi": null,
"abstractUrl": "/proceedings-article/cit/2009/05328106/12OmNz5apCI",
"parentPublication": {
"id": "proceedings/cit/2009/3836/1",
"title": "2009 Ninth IEEE International Conference on Computer and Information Technology. CIT 2009",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ispan/2009/3908/0/3908a426",
"title": "Designing Hi-Facsimile Briefing Remote-Controller with Bluetooth and Speech-Recognition Processor",
"doi": null,
"abstractUrl": "/proceedings-article/ispan/2009/3908a426/12OmNzsrwmn",
"parentPublication": {
"id": "proceedings/ispan/2009/3908/0",
"title": "Parallel Architectures, Algorithms, and Networks, International Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/acctcs/2022/0034/0/003400a152",
"title": "Discussion and Analysis on the Design of Electric Power Whole-Process Video Visualization Monitoring System",
"doi": null,
"abstractUrl": "/proceedings-article/acctcs/2022/003400a152/1F6UhYQZChO",
"parentPublication": {
"id": "proceedings/acctcs/2022/0034/0",
"title": "2022 2nd Asia-Pacific Conference on Communications Technology and Computer Science (ACCTCS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/05/08998379",
"title": "Animals in Virtual Environments",
"doi": null,
"abstractUrl": "/journal/tg/2020/05/08998379/1hrXhy1IFpu",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sges/2020/8550/0/855000a169",
"title": "Backup generator output control method of off-grid system based on renewable energy in remote island",
"doi": null,
"abstractUrl": "/proceedings-article/sges/2020/855000a169/1rITG9TKJPi",
"parentPublication": {
"id": "proceedings/sges/2020/8550/0",
"title": "2020 International Conference on Smart Grids and Energy Systems (SGES)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNwJPMXt",
"title": "Optoelectronics and Image Processing, International Conference on",
"acronym": "icoip",
"groupId": "1800228",
"volume": "1",
"displayVolume": "1",
"year": "2010",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNyOq4Zj",
"doi": "10.1109/ICOIP.2010.327",
"title": "A Video Tracking System for Limb Motion Measurement in Small Animals",
"normalizedTitle": "A Video Tracking System for Limb Motion Measurement in Small Animals",
"abstract": "Motion analysis can provide useful information for biomedical researches. In this paper, we have presented a video tracking system for limb motion measurement in small animals. The system employs a consumer-grade video camera to record the locomotion of a rat, which can reduce the cost of the system. Three matching methods are available for different application. Moreover, a new algorithm to forecast the joint positions based on the polynomial fitting and improved Kalman filtering algorithms has been proposed, not only for limiting the region of matching, but also for marker occlusion and various maneuverability of the limb. Two methods are also applied to detect mismatch for ensuring a reliable tracking. Once 2D markers positions are collected, kinematic characteristics of the motion can be derived. Finally, the experimental results of the tracking system are presented.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Motion analysis can provide useful information for biomedical researches. In this paper, we have presented a video tracking system for limb motion measurement in small animals. The system employs a consumer-grade video camera to record the locomotion of a rat, which can reduce the cost of the system. Three matching methods are available for different application. Moreover, a new algorithm to forecast the joint positions based on the polynomial fitting and improved Kalman filtering algorithms has been proposed, not only for limiting the region of matching, but also for marker occlusion and various maneuverability of the limb. Two methods are also applied to detect mismatch for ensuring a reliable tracking. Once 2D markers positions are collected, kinematic characteristics of the motion can be derived. Finally, the experimental results of the tracking system are presented.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Motion analysis can provide useful information for biomedical researches. In this paper, we have presented a video tracking system for limb motion measurement in small animals. The system employs a consumer-grade video camera to record the locomotion of a rat, which can reduce the cost of the system. Three matching methods are available for different application. Moreover, a new algorithm to forecast the joint positions based on the polynomial fitting and improved Kalman filtering algorithms has been proposed, not only for limiting the region of matching, but also for marker occlusion and various maneuverability of the limb. Two methods are also applied to detect mismatch for ensuring a reliable tracking. Once 2D markers positions are collected, kinematic characteristics of the motion can be derived. Finally, the experimental results of the tracking system are presented.",
"fno": "4252a181",
"keywords": [
"Tracking Markers",
"2 D Motion Analysis",
"Small Animal",
"Image Processing"
],
"authors": [
{
"affiliation": null,
"fullName": "Qi Xu",
"givenName": "Qi",
"surname": "Xu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Cong Cai",
"givenName": "Cong",
"surname": "Cai",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Houlun Zhou",
"givenName": "Houlun",
"surname": "Zhou",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Hangkong Ren",
"givenName": "Hangkong",
"surname": "Ren",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icoip",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2010-11-01T00:00:00",
"pubType": "proceedings",
"pages": "181-184",
"year": "2010",
"issn": null,
"isbn": "978-0-7695-4252-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "4252a177",
"articleId": "12OmNxb5hti",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "4252a185",
"articleId": "12OmNzJbQWw",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icpr/2002/1695/2/169520024",
"title": "Tracking Multiple Animals in Wildlife Footage",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2002/169520024/12OmNAYoKx3",
"parentPublication": {
"id": "proceedings/icpr/2002/1695/2",
"title": "Pattern Recognition, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bmei/2008/3118/2/3118b076",
"title": "Fluorescent Optical Imaging of Small Animals Using Filtered Back-projection 3D Surface Reconstruction Method",
"doi": null,
"abstractUrl": "/proceedings-article/bmei/2008/3118b076/12OmNCwCLs1",
"parentPublication": {
"id": "proceedings/bmei/2008/3118/2",
"title": "BioMedical Engineering and Informatics, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/1990/2062/1/00118109",
"title": "Time-recursive motion estimation using dynamical models for motion prediction",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/1990/00118109/12OmNwIYZE0",
"parentPublication": {
"id": "proceedings/icpr/1990/2062/1",
"title": "Proceedings 10th International Conference on Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/csie/2009/3507/6/3507f085",
"title": "Using Rao-Blackwellised Particle Filter Track 3D Arm Motion Based on Hierarchical Limb Model",
"doi": null,
"abstractUrl": "/proceedings-article/csie/2009/3507f085/12OmNy9Prgb",
"parentPublication": {
"id": null,
"title": null,
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bsn/2010/4065/0/4065a117",
"title": "3D Upper Limb Motion Modeling and Estimation Using Wearable Micro-sensors",
"doi": null,
"abstractUrl": "/proceedings-article/bsn/2010/4065a117/12OmNzYNN53",
"parentPublication": {
"id": "proceedings/bsn/2010/4065/0",
"title": "Wearable and Implantable Body Sensor Networks, International Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/1997/03/mcg1997030022",
"title": "Animals with Anatomy",
"doi": null,
"abstractUrl": "/magazine/cg/1997/03/mcg1997030022/13rRUxNmPIr",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iisa/2018/8161/0/08633693",
"title": "Extending Upper Limb User Interactions in AR, VR and MR Headsets Employing a Custom-Made Wearable Device",
"doi": null,
"abstractUrl": "/proceedings-article/iisa/2018/08633693/17D45WYQJ59",
"parentPublication": {
"id": "proceedings/iisa/2018/8161/0",
"title": "2018 9th International Conference on Information, Intelligence, Systems and Applications (IISA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a576",
"title": "Magic Mirror on the Wall: Reflecting the Realities of Lower Limb Rehabilitation in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a576/1J7WeNTcphe",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hdis/2022/9144/0/09991504",
"title": "Intrackability Detection Based Upper-Limb Motion Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/hdis/2022/09991504/1JwQ2OIIXa8",
"parentPublication": {
"id": "proceedings/hdis/2022/9144/0",
"title": "2022 International Conference on High Performance Big Data and Intelligent Systems (HDIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isaiam/2022/8541/0/854100a140",
"title": "Motion Pattern Recognition of Lower Limb Exoskeleton Based on SAPSO-SVM",
"doi": null,
"abstractUrl": "/proceedings-article/isaiam/2022/854100a140/1MTTdpOP1bG",
"parentPublication": {
"id": "proceedings/isaiam/2022/8541/0",
"title": "2022 2nd International Symposium on Artificial Intelligence and its Application on Media (ISAIAM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNzn3955",
"title": "Proceedings 1988 IEEE International Symposium on Intelligent Control",
"acronym": "isic",
"groupId": "1000388",
"volume": "0",
"displayVolume": "0",
"year": "1988",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNzIUfQB",
"doi": "10.1109/ISIC.1988.65421",
"title": "Multisensor integration in biological systems",
"normalizedTitle": "Multisensor integration in biological systems",
"abstract": "The authors discuss the integration of sensory information in biological systems. In particular, they consider the structure in vertebrate animals that utilizes multiple sensory inputs to orient the sensor platform, i.e. the body or the head, toward objects of interest. This structure is known as the optic tectum in lower vertebrates and the superior colliculus in mammals. The representation of the various sensory modalities on the tectum follows the maplike image format of the retina. This requires in some cases a considerable transformation from the original representation of the sensory input available from the other sensors. As an example, the authors present a detailed discussion of the visual/acoustic object localization system of the barn owl along with a model for the adaptive coregistration of the coordinate systems of the visual and acoustic maps on the tectum.<>",
"abstracts": [
{
"abstractType": "Regular",
"content": "The authors discuss the integration of sensory information in biological systems. In particular, they consider the structure in vertebrate animals that utilizes multiple sensory inputs to orient the sensor platform, i.e. the body or the head, toward objects of interest. This structure is known as the optic tectum in lower vertebrates and the superior colliculus in mammals. The representation of the various sensory modalities on the tectum follows the maplike image format of the retina. This requires in some cases a considerable transformation from the original representation of the sensory input available from the other sensors. As an example, the authors present a detailed discussion of the visual/acoustic object localization system of the barn owl along with a model for the adaptive coregistration of the coordinate systems of the visual and acoustic maps on the tectum.<>",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The authors discuss the integration of sensory information in biological systems. In particular, they consider the structure in vertebrate animals that utilizes multiple sensory inputs to orient the sensor platform, i.e. the body or the head, toward objects of interest. This structure is known as the optic tectum in lower vertebrates and the superior colliculus in mammals. The representation of the various sensory modalities on the tectum follows the maplike image format of the retina. This requires in some cases a considerable transformation from the original representation of the sensory input available from the other sensors. As an example, the authors present a detailed discussion of the visual/acoustic object localization system of the barn owl along with a model for the adaptive coregistration of the coordinate systems of the visual and acoustic maps on the tectum.",
"fno": "00065421",
"keywords": [
"Biocybernetics",
"Biology",
"Hearing",
"Vision",
"Hearing",
"Biological Systems",
"Sensory Information",
"Optic Tectum",
"Superior Colliculus",
"Retina",
"Visual Acoustic Object Localization System",
"Biological Systems",
"Optical Sensors",
"Biomedical Optical Imaging",
"Marine Animals",
"Visual System",
"Animal Structures",
"Biosensors",
"Laser Radar",
"Infrared Sensors",
"Neurons"
],
"authors": [
{
"affiliation": "David Sarnoff Res. Center, Princeton, NJ, USA",
"fullName": "J.J. Gelfand",
"givenName": "J.J.",
"surname": "Gelfand",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "David Sarnoff Res. Center, Princeton, NJ, USA",
"fullName": "J.C. Pearson",
"givenName": "J.C.",
"surname": "Pearson",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "David Sarnoff Res. Center, Princeton, NJ, USA",
"fullName": "C.D. Spence",
"givenName": "C.D.",
"surname": "Spence",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "W.E. Sullivan",
"givenName": "W.E.",
"surname": "Sullivan",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "isic",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "1988-01-01T00:00:00",
"pubType": "proceedings",
"pages": "147,148,149,150,151,152,153",
"year": "1988",
"issn": "2158-9860",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "00065420",
"articleId": "12OmNrGb2jM",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "00065422",
"articleId": "12OmNzvQHT0",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/lssa/2006/0277/0/04015840",
"title": "Indium Phosphide Optical MEMS for Chemical and Biological Sensing",
"doi": null,
"abstractUrl": "/proceedings-article/lssa/2006/04015840/12OmNA0dMFu",
"parentPublication": {
"id": "proceedings/lssa/2006/0277/0",
"title": "2006 IEEE/NLM Life Science Systems and Applications Workshop",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icassp/2004/8484/1/01326026",
"title": "Multisensor MELPe using parameter substitution",
"doi": null,
"abstractUrl": "/proceedings-article/icassp/2004/01326026/12OmNBCHMIa",
"parentPublication": {
"id": "proceedings/icassp/2004/8484/1",
"title": "2004 IEEE International Conference on Acoustics, Speech, and Signal Processing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dft/2004/2241/0/01347826",
"title": "Defect avoidance in a 3-D heterogeneous sensor [acoustic/seismic/active pixel/IR imaging sensor array]",
"doi": null,
"abstractUrl": "/proceedings-article/dft/2004/01347826/12OmNrHjqKY",
"parentPublication": {
"id": "proceedings/dft/2004/2241/0",
"title": "19th IEEE International Symposium on Defect and Fault Tolerance in VLSI Systems, 2004. DFT 2004. Proceedings.",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/waina/2010/4019/0/4019a991",
"title": "Detecting Suspicious Motion with Nonimaging Sensors",
"doi": null,
"abstractUrl": "/proceedings-article/waina/2010/4019a991/12OmNx3Zjjx",
"parentPublication": {
"id": "proceedings/waina/2010/4019/0",
"title": "2010 IEEE 24th International Conference on Advanced Information Networking and Applications Workshops",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ijcnn/2009/3548/0/05178941",
"title": "Investigating the properties of optimal sensory and motor synergies in a nonlinear model of arm dynamics",
"doi": null,
"abstractUrl": "/proceedings-article/ijcnn/2009/05178941/12OmNyKa6cS",
"parentPublication": {
"id": "proceedings/ijcnn/2009/3548/0",
"title": "Neural Networks, IEEE - INNS - ENNS International Joint Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ssst/1990/2038/0/00138212",
"title": "Image segmentation by multisensor data-fusion",
"doi": null,
"abstractUrl": "/proceedings-article/ssst/1990/00138212/12OmNzICELU",
"parentPublication": {
"id": "proceedings/ssst/1990/2038/0",
"title": "Proceedings The Twenty-Second Southeastern Symposium on System Theory",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/acssc/1993/4120/0/00342514",
"title": "MUltiSensor Target Recognition System (MUSTRS)",
"doi": null,
"abstractUrl": "/proceedings-article/acssc/1993/00342514/12OmNzayNmr",
"parentPublication": {
"id": "proceedings/acssc/1993/4120/0",
"title": "Proceedings of 27th Asilomar Conference on Signals, Systems and Computers",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/microneuro/1999/0043/0/00430021",
"title": "Energy and Information Processing in Biological and Silicon Sensory Systems",
"doi": null,
"abstractUrl": "/proceedings-article/microneuro/1999/00430021/12OmNznkJXW",
"parentPublication": {
"id": "proceedings/microneuro/1999/0043/0",
"title": "Microelectronics for Neural Networks and Fuzzy Systems, International Conference on/Microelectronics for Neural, Fuzzy, and Bio-Inspired Systems, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/05/08998379",
"title": "Animals in Virtual Environments",
"doi": null,
"abstractUrl": "/journal/tg/2020/05/08998379/1hrXhy1IFpu",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1qcibXBSvDi",
"title": "2020 Joint 9th International Conference on Informatics, Electronics & Vision (ICIEV) and 2020 4th International Conference on Imaging, Vision & Pattern Recognition (icIVPR)",
"acronym": "iciev-&-icivpr",
"groupId": "1802578",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1qcie8QjZIY",
"doi": "10.1109/ICIEVicIVPR48672.2020.9306528",
"title": "Virtual and Augmented Reality Animals in Smart and Playful Cities: (Invited Paper)",
"normalizedTitle": "Virtual and Augmented Reality Animals in Smart and Playful Cities: (Invited Paper)",
"abstract": "Our future urban environments are smart. Sensors and actuators are embedded in these environments and their inhabitants. We have an Internet of Things, where the `Things' include objects, cars, tools, buildings, street furniture, and whatever can be equipped with sensors and actuators, including human and non-human animals. Augmented humans and augmented animals have their senses extended with digital technology. Their smart wearables connected with the smart environment make humans and animals smarter. Rather than on living animals, in this survey paper we focus on non-living virtual and augmented reality non-human animals that will inhabit our smart and playable urban environments. They will co-exist with robotic animals and (digitally augmented) humans and non-human animals. We include observations on augmented humans interacting with virtual and augmented reality animals. The paper is meant to raise awareness for the possibilities of augmented reality to introduce virtual animals for social, entertainment, and educational reasons.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Our future urban environments are smart. Sensors and actuators are embedded in these environments and their inhabitants. We have an Internet of Things, where the `Things' include objects, cars, tools, buildings, street furniture, and whatever can be equipped with sensors and actuators, including human and non-human animals. Augmented humans and augmented animals have their senses extended with digital technology. Their smart wearables connected with the smart environment make humans and animals smarter. Rather than on living animals, in this survey paper we focus on non-living virtual and augmented reality non-human animals that will inhabit our smart and playable urban environments. They will co-exist with robotic animals and (digitally augmented) humans and non-human animals. We include observations on augmented humans interacting with virtual and augmented reality animals. The paper is meant to raise awareness for the possibilities of augmented reality to introduce virtual animals for social, entertainment, and educational reasons.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Our future urban environments are smart. Sensors and actuators are embedded in these environments and their inhabitants. We have an Internet of Things, where the `Things' include objects, cars, tools, buildings, street furniture, and whatever can be equipped with sensors and actuators, including human and non-human animals. Augmented humans and augmented animals have their senses extended with digital technology. Their smart wearables connected with the smart environment make humans and animals smarter. Rather than on living animals, in this survey paper we focus on non-living virtual and augmented reality non-human animals that will inhabit our smart and playable urban environments. They will co-exist with robotic animals and (digitally augmented) humans and non-human animals. We include observations on augmented humans interacting with virtual and augmented reality animals. The paper is meant to raise awareness for the possibilities of augmented reality to introduce virtual animals for social, entertainment, and educational reasons.",
"fno": "09306528",
"keywords": [
"Augmented Reality",
"Computer Animation",
"Virtual Reality",
"Augmented Reality",
"Future Urban Environments",
"Actuators",
"Internet Of Things",
"Street Furniture",
"Sensors",
"Nonhuman Animals",
"Augmented Humans",
"Augmented Animals",
"Smart Wearables",
"Smart Environment",
"Living Animals",
"Smart Environments",
"Playable Urban Environments",
"Robotic Animals",
"Virtual Animals",
"Animals",
"Urban Areas",
"Augmented Reality",
"Dogs",
"Actuators",
"Smart Cities",
"Intelligent Sensors",
"Animal Computer Interaction",
"Augmented Humans",
"Augmented Animals",
"Virtual Animals",
"Augmented Reality",
"Smart Cities",
"Playable Cities",
"Entertainment Technology",
"Sensors",
"Actuators",
"Street Furniture"
],
"authors": [
{
"affiliation": "University of Twente,Enschede,the Netherlands",
"fullName": "Anton Nijholt",
"givenName": "Anton",
"surname": "Nijholt",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iciev-&-icivpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-08-01T00:00:00",
"pubType": "proceedings",
"pages": "1-7",
"year": "2020",
"issn": null,
"isbn": "978-1-7281-9331-1",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "09306666",
"articleId": "1qcifBk6vm0",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09306546",
"articleId": "1qcifsuPSzC",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/rew/2017/3488/0/3488a423",
"title": "Agile with Animals: Towards a Development Method",
"doi": null,
"abstractUrl": "/proceedings-article/rew/2017/3488a423/12OmNANTAys",
"parentPublication": {
"id": "proceedings/rew/2017/3488/0",
"title": "2017 IEEE 25th International Requirements Engineering Conference Workshops (REW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2017/0457/0/0457f524",
"title": "3D Menagerie: Modeling the 3D Shape and Pose of Animals",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2017/0457f524/12OmNBJw9Rf",
"parentPublication": {
"id": "proceedings/cvpr/2017/0457/0",
"title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aipr/2009/5146/0/05466303",
"title": "Remote detection of humans and animals",
"doi": null,
"abstractUrl": "/proceedings-article/aipr/2009/05466303/12OmNxzMnPZ",
"parentPublication": {
"id": "proceedings/aipr/2009/5146/0",
"title": "2009 IEEE Applied Imagery Pattern Recognition Workshop (AIPR 2009)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iciev-iscmht/2017/1023/0/08338610",
"title": "Playable cities: A short survey (Keynote paper)",
"doi": null,
"abstractUrl": "/proceedings-article/iciev-iscmht/2017/08338610/12OmNz5s0Qd",
"parentPublication": {
"id": "proceedings/iciev-iscmht/2017/1023/0",
"title": "2017 6th International Conference on Informatics, Electronics and Vision & 2017 7th International Symposium in Computational Medical and Health Technology (ICIEV-ISCMHT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ithings-greencom-cpscom-smartdata/2019/2980/0/298000a848",
"title": "Three-Dimensional Simulation for Training Autonomous Vehicles in Smart City Environments",
"doi": null,
"abstractUrl": "/proceedings-article/ithings-greencom-cpscom-smartdata/2019/298000a848/1ehBHIfsG6k",
"parentPublication": {
"id": "proceedings/ithings-greencom-cpscom-smartdata/2019/2980/0",
"title": "2019 International Conference on Internet of Things (iThings) and IEEE Green Computing and Communications (GreenCom) and IEEE Cyber, Physical and Social Computing (CPSCom) and IEEE Smart Data (SmartData)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/acii/2019/3888/0/08925472",
"title": "Pose-Informed Face Alignment for Extreme Head Pose Variations in Animals",
"doi": null,
"abstractUrl": "/proceedings-article/acii/2019/08925472/1fHGBBxU1NK",
"parentPublication": {
"id": "proceedings/acii/2019/3888/0",
"title": "2019 8th International Conference on Affective Computing and Intelligent Interaction (ACII)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2019/4765/0/476500a400",
"title": "Investigating Augmented Reality Animals as Companions",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2019/476500a400/1gysk9h5xzW",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2019/4765/0",
"title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090664",
"title": "Augmented Reality Animals: Are They Our Future Companions?",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090664/1jIxAo6n9jq",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800m2383",
"title": "Learning From Synthetic Animals",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800m2383/1m3nkH0jTaM",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iciev-&-icivpr/2020/9331/0/09306546",
"title": "Keynote Talk 4: Virtual and Augmented Reality Animals in Smart and Playful Cities",
"doi": null,
"abstractUrl": "/proceedings-article/iciev-&-icivpr/2020/09306546/1qcifsuPSzC",
"parentPublication": {
"id": "proceedings/iciev-&-icivpr/2020/9331/0",
"title": "2020 Joint 9th International Conference on Informatics, Electronics & Vision (ICIEV) and 2020 4th International Conference on Imaging, Vision & Pattern Recognition (icIVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1tnWwqMuCzu",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"acronym": "vrw",
"groupId": "1836626",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1tnWCWkHcQg",
"doi": "10.1109/VRW52623.2021.00010",
"title": "Collective Intelligence of Autonomous Animals in VR Hunting",
"normalizedTitle": "Collective Intelligence of Autonomous Animals in VR Hunting",
"abstract": "In the scenario of a VR hunting game, autonomous behaviour of in-game animals is essential. In this study, new adaptive steering algorithms are designed for autonomous animals to navigate around the environment, with a research focus on collective intelligence in decision making and tactical actions. Advanced strategies for a group of autonomous animals are developed in order to simulate a more realistic forest environment. Computational experiments and comparisons with animation results are presented, accompanied by a demo video, which show significant advantages over previous work. The new models and algorithms can also be used for autonomous motion controls for other XR-based training.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In the scenario of a VR hunting game, autonomous behaviour of in-game animals is essential. In this study, new adaptive steering algorithms are designed for autonomous animals to navigate around the environment, with a research focus on collective intelligence in decision making and tactical actions. Advanced strategies for a group of autonomous animals are developed in order to simulate a more realistic forest environment. Computational experiments and comparisons with animation results are presented, accompanied by a demo video, which show significant advantages over previous work. The new models and algorithms can also be used for autonomous motion controls for other XR-based training.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In the scenario of a VR hunting game, autonomous behaviour of in-game animals is essential. In this study, new adaptive steering algorithms are designed for autonomous animals to navigate around the environment, with a research focus on collective intelligence in decision making and tactical actions. Advanced strategies for a group of autonomous animals are developed in order to simulate a more realistic forest environment. Computational experiments and comparisons with animation results are presented, accompanied by a demo video, which show significant advantages over previous work. The new models and algorithms can also be used for autonomous motion controls for other XR-based training.",
"fno": "405700a014",
"keywords": [
"Computer Animation",
"Computer Based Training",
"Decision Making",
"Mobile Robots",
"Motion Control",
"Virtual Reality",
"Collective Intelligence",
"Autonomous Animals",
"Animation Results",
"Autonomous Motion Controls",
"VR Hunting Game",
"Autonomous Behaviour",
"In Game Animals",
"Adaptive Steering Algorithms",
"Solid Modeling",
"Adaptation Models",
"Animals",
"Navigation",
"Conferences",
"Computational Modeling",
"Games",
"Virtual Reality",
"Kinematics",
"Rigid Body Dynamics",
"Collective Intelligence"
],
"authors": [
{
"affiliation": "Nanyang Technological University,School of Computer Science and Engineering",
"fullName": "Kangqiao Zhao",
"givenName": "Kangqiao",
"surname": "Zhao",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Sino-Singapore International Joint Research Institute",
"fullName": "Feng Lin",
"givenName": "Feng",
"surname": "Lin",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Nanyang Technological University,School of Computer Science and Engineering",
"fullName": "Hock Soon Seah",
"givenName": "Hock Soon",
"surname": "Seah",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vrw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-03-01T00:00:00",
"pubType": "proceedings",
"pages": "14-22",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-4057-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [
{
"id": "1tnWCJj8pMI",
"name": "pvrw202140570-09419159s1-mm_405700a014.zip",
"size": "30.4 MB",
"location": "https://www.computer.org/csdl/api/v1/extra/pvrw202140570-09419159s1-mm_405700a014.zip",
"__typename": "WebExtraType"
}
],
"adjacentArticles": {
"previous": {
"fno": "405700a006",
"articleId": "1tnX9D9JTJS",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "405700a023",
"articleId": "1tnXWEmiofK",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icic/2012/1985/0/06258057",
"title": "A Real-Time, Interactive Simulation Environment for Unmanned Ground Vehicles: The Autonomous Navigation Virtual Environment Laboratory (ANVEL)",
"doi": null,
"abstractUrl": "/proceedings-article/icic/2012/06258057/12OmNAXPyny",
"parentPublication": {
"id": "proceedings/icic/2012/1985/0",
"title": "2012 Fifth International Conference on Information and Computing Science",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2017/0457/0/0457f524",
"title": "3D Menagerie: Modeling the 3D Shape and Pose of Animals",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2017/0457f524/12OmNBJw9Rf",
"parentPublication": {
"id": "proceedings/cvpr/2017/0457/0",
"title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/soac/1990/2031/0/00082186",
"title": "'Deercare' - A deer population and hunting simulation model",
"doi": null,
"abstractUrl": "/proceedings-article/soac/1990/00082186/12OmNBQ2VTT",
"parentPublication": {
"id": "proceedings/soac/1990/2031/0",
"title": "Proceedings of the 1990 Symposium on Applied Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ca/1994/6240/0/00323999",
"title": "Computer simulations of adaptive behavior in animals",
"doi": null,
"abstractUrl": "/proceedings-article/ca/1994/00323999/12OmNs0C9Yw",
"parentPublication": {
"id": "proceedings/ca/1994/6240/0",
"title": "Proceedings of Computer Animation '94",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2018/7202/0/720200a524",
"title": "Augmented Treasure Hunting Generator for Edutainment",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2018/720200a524/17D45XvMcdz",
"parentPublication": {
"id": "proceedings/iv/2018/7202/0",
"title": "2018 22nd International Conference Information Visualisation (IV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibm/2021/0126/0/09669656",
"title": "Swarm Inverse Reinforcement Learning for Biological Systems",
"doi": null,
"abstractUrl": "/proceedings-article/bibm/2021/09669656/1A9VSLLCfss",
"parentPublication": {
"id": "proceedings/bibm/2021/0126/0",
"title": "2021 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iseeie/2022/6874/0/687400a183",
"title": "Simulation Bicycle Arcade Game with VR Bike",
"doi": null,
"abstractUrl": "/proceedings-article/iseeie/2022/687400a183/1FWmGZwZdV6",
"parentPublication": {
"id": "proceedings/iseeie/2022/6874/0",
"title": "2022 International Symposium on Electrical, Electronics and Information Engineering (ISEEIE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800m2383",
"title": "Learning From Synthetic Animals",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800m2383/1m3nkH0jTaM",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sbgames/2020/8432/0/843200a047",
"title": "Development of an Autonomous Agent based on Reinforcement Learning for a Digital Fighting Game",
"doi": null,
"abstractUrl": "/proceedings-article/sbgames/2020/843200a047/1pQIKKYIcvK",
"parentPublication": {
"id": "proceedings/sbgames/2020/8432/0",
"title": "2020 19th Brazilian Symposium on Computer Games and Digital Entertainment (SBGames)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvrv/2020/0497/0/049700a278",
"title": "Design and production of monster hunting game based on virtual reality technology",
"doi": null,
"abstractUrl": "/proceedings-article/icvrv/2020/049700a278/1vg89TLPjsQ",
"parentPublication": {
"id": "proceedings/icvrv/2020/0497/0",
"title": "2020 International Conference on Virtual Reality and Visualization (ICVRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNwl8GIE",
"title": "2015 IEEE 15th International Conference on Bioinformatics and Bioengineering (BIBE)",
"acronym": "bibe",
"groupId": "1000075",
"volume": "0",
"displayVolume": "0",
"year": "2015",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNqGRG9l",
"doi": "10.1109/BIBE.2015.7367660",
"title": "Inner ear boundary motion during bone conduction stimulation — Indications for inner ear compression and fluid inertia",
"normalizedTitle": "Inner ear boundary motion during bone conduction stimulation — Indications for inner ear compression and fluid inertia",
"abstract": "A finite element model of a whole human head was developed to study sound transmission by bone conducted sound. The model comprises tissues as bone, brain and soft tissues. With this model, the motion of the bone surrounding the inner ear was investigated. This was done by defining an imaginary box encapsulating the inner ear and analyzing the motion of the opposing sides. According to this analysis, the motion over the surface area was smooth and regular. However, when comparing the motions at the opposing sides the magnitudes differed significantly. This cannot be explained by regular damping of the wave transmission but originates in the complex wave motion in the bone. It also implies that inner ear compression is probably more important for bone conduction hearing than predicted with models using a constant magnitude of the vibration in the bone around the inner ear.",
"abstracts": [
{
"abstractType": "Regular",
"content": "A finite element model of a whole human head was developed to study sound transmission by bone conducted sound. The model comprises tissues as bone, brain and soft tissues. With this model, the motion of the bone surrounding the inner ear was investigated. This was done by defining an imaginary box encapsulating the inner ear and analyzing the motion of the opposing sides. According to this analysis, the motion over the surface area was smooth and regular. However, when comparing the motions at the opposing sides the magnitudes differed significantly. This cannot be explained by regular damping of the wave transmission but originates in the complex wave motion in the bone. It also implies that inner ear compression is probably more important for bone conduction hearing than predicted with models using a constant magnitude of the vibration in the bone around the inner ear.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "A finite element model of a whole human head was developed to study sound transmission by bone conducted sound. The model comprises tissues as bone, brain and soft tissues. With this model, the motion of the bone surrounding the inner ear was investigated. This was done by defining an imaginary box encapsulating the inner ear and analyzing the motion of the opposing sides. According to this analysis, the motion over the surface area was smooth and regular. However, when comparing the motions at the opposing sides the magnitudes differed significantly. This cannot be explained by regular damping of the wave transmission but originates in the complex wave motion in the bone. It also implies that inner ear compression is probably more important for bone conduction hearing than predicted with models using a constant magnitude of the vibration in the bone around the inner ear.",
"fno": "07367660",
"keywords": [
"Ear",
"Vibrations",
"Bones",
"Finite Element Analysis",
"Fluids",
"Auditory System"
],
"authors": [
{
"affiliation": "Department of Clinical and Experimental Medicine, Linköping University, 58185 Linköping, Sweden",
"fullName": "Stefan Stenfelt",
"givenName": "Stefan",
"surname": "Stenfelt",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Division of Mechanical System Engineering, Incheon National University, Incheon, Korea",
"fullName": "Namkeun Kim",
"givenName": "Namkeun",
"surname": "Kim",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "bibe",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2015-11-01T00:00:00",
"pubType": "proceedings",
"pages": "1-5",
"year": "2015",
"issn": null,
"isbn": "978-1-4673-7983-0",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07367659",
"articleId": "12OmNwnYG0M",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07367661",
"articleId": "12OmNzXWZF9",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/isspit/2006/9753/0/04042281",
"title": "On Equalization of Bone Conducted Speech for Improved Speech Quality",
"doi": null,
"abstractUrl": "/proceedings-article/isspit/2006/04042281/12OmNAObbDg",
"parentPublication": {
"id": "proceedings/isspit/2006/9753/0",
"title": "2006 IEEE International Symposium on Signal Processing and Information Technology",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icassp/2004/8484/4/01326796",
"title": "Auditory information processing with nerve-action potentials",
"doi": null,
"abstractUrl": "/proceedings-article/icassp/2004/01326796/12OmNs0C9R7",
"parentPublication": {
"id": "proceedings/icassp/2004/8484/4",
"title": "2004 IEEE International Conference on Acoustics, Speech, and Signal Processing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sitis/2013/3211/0/3211a818",
"title": "Bone-Conduction-Based Brain Computer Interface Paradigm -- EEG Signal Processing, Feature Extraction and Classification",
"doi": null,
"abstractUrl": "/proceedings-article/sitis/2013/3211a818/12OmNvrdI51",
"parentPublication": {
"id": "proceedings/sitis/2013/3211/0",
"title": "2013 International Conference on Signal-Image Technology & Internet-Based Systems (SITIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cbms/2018/6060/0/606001a012",
"title": "Development and Validation of a Virtual Reality Tutor to Teach Clinically Oriented Surgical Anatomy of the Ear",
"doi": null,
"abstractUrl": "/proceedings-article/cbms/2018/606001a012/12OmNwNwzNm",
"parentPublication": {
"id": "proceedings/cbms/2018/6060/0",
"title": "2018 IEEE 31st International Symposium on Computer-Based Medical Systems (CBMS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibe/2015/7983/0/07367728",
"title": "Extending inner-ear anatomical concepts in the Foundational Model of Anatomy (FMA) ontology",
"doi": null,
"abstractUrl": "/proceedings-article/bibe/2015/07367728/12OmNxQOjCg",
"parentPublication": {
"id": "proceedings/bibe/2015/7983/0",
"title": "2015 IEEE 15th International Conference on Bioinformatics and Bioengineering (BIBE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccit/2009/3896/0/3896a511",
"title": "Coordinate direction normalization using point cloud projection density for 3D ear",
"doi": null,
"abstractUrl": "/proceedings-article/iccit/2009/3896a511/12OmNxWLTp8",
"parentPublication": {
"id": "proceedings/iccit/2009/3896/0",
"title": "Convergence Information Technology, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ccict/2022/7224/0/722400a543",
"title": "Ear disease detection using R-CNN",
"doi": null,
"abstractUrl": "/proceedings-article/ccict/2022/722400a543/1HpDUbwaynm",
"parentPublication": {
"id": "proceedings/ccict/2022/7224/0",
"title": "2022 Fifth International Conference on Computational Intelligence and Communication Technologies (CCICT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/percom/2023/5378/0/10099317",
"title": "hEARt: Motion-resilient Heart Rate Monitoring with In-ear Microphones",
"doi": null,
"abstractUrl": "/proceedings-article/percom/2023/10099317/1MrG9KjVBew",
"parentPublication": {
"id": "proceedings/percom/2023/5378/0",
"title": "2023 IEEE International Conference on Pervasive Computing and Communications (PerCom)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798218",
"title": "Auditory Spatial Perception Using Bone Conduction Headphones along with Fitted Head Related Transfer Functions",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798218/1cJ0Ydq9p7O",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icid/2020/1481/0/440500a263",
"title": "Research on aerial refueling tanker docking and positioning with ear tags",
"doi": null,
"abstractUrl": "/proceedings-article/icid/2020/440500a263/1taFt2SQ3rG",
"parentPublication": {
"id": "proceedings/icid/2020/1481/0",
"title": "2020 International Conference on Intelligent Design (ICID)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNvlxJwR",
"title": "2017 IEEE 30th International Symposium on Computer-Based Medical Systems (CBMS)",
"acronym": "cbms",
"groupId": "1000153",
"volume": "0",
"displayVolume": "0",
"year": "2017",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNrJiCGD",
"doi": "10.1109/CBMS.2017.168",
"title": "HEAR?INFO: A Modern Mobile-Web Platform Addressed to Hard-of-Hearing Elderly Individuals",
"normalizedTitle": "HEAR?INFO: A Modern Mobile-Web Platform Addressed to Hard-of-Hearing Elderly Individuals",
"abstract": "In the concept of the hearing loss awareness, a modern mobile-web platform is hereby presented, aiming to offer constant online access to individuals who are hard of hearing, while presenting them regularly updated information concerning their condition. This information is presented via a specific modified interface, taking into account the special needs of the specific community. After a thorough research in GUI, the software requirements substitute or supplement the lack of integrated sound systems, with visual modifications, caption text and even specially chosen colours. Different applications, including auditory tests and exercises are considered, aiming to the self-awareness and broadening of an individual's fund of knowledge.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In the concept of the hearing loss awareness, a modern mobile-web platform is hereby presented, aiming to offer constant online access to individuals who are hard of hearing, while presenting them regularly updated information concerning their condition. This information is presented via a specific modified interface, taking into account the special needs of the specific community. After a thorough research in GUI, the software requirements substitute or supplement the lack of integrated sound systems, with visual modifications, caption text and even specially chosen colours. Different applications, including auditory tests and exercises are considered, aiming to the self-awareness and broadening of an individual's fund of knowledge.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In the concept of the hearing loss awareness, a modern mobile-web platform is hereby presented, aiming to offer constant online access to individuals who are hard of hearing, while presenting them regularly updated information concerning their condition. This information is presented via a specific modified interface, taking into account the special needs of the specific community. After a thorough research in GUI, the software requirements substitute or supplement the lack of integrated sound systems, with visual modifications, caption text and even specially chosen colours. Different applications, including auditory tests and exercises are considered, aiming to the self-awareness and broadening of an individual's fund of knowledge.",
"fno": "1710a570",
"keywords": [
"Geriatrics",
"Hearing",
"Human Computer Interaction",
"Internet",
"Medical Information Systems",
"Mobile Computing",
"User Interfaces",
"HEAR",
"Hard Of Hearing Elderly Individuals",
"Hearing Loss Awareness",
"Constant Online Access",
"Mobile Web Platform",
"Software Requirements",
"Auditory System",
"Hearing Loss Mobile Web Platform Self Awareness Self Management"
],
"authors": [
{
"affiliation": null,
"fullName": "Penelope Ioannidou",
"givenName": "Penelope",
"surname": "Ioannidou",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Panagiotis Katrakazas",
"givenName": "Panagiotis",
"surname": "Katrakazas",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Stefanos Kollias",
"givenName": "Stefanos",
"surname": "Kollias",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Michail Sarafidis",
"givenName": "Michail",
"surname": "Sarafidis",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Dimitrios Koutsouris",
"givenName": "Dimitrios",
"surname": "Koutsouris",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cbms",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2017-06-01T00:00:00",
"pubType": "proceedings",
"pages": "570-575",
"year": "2017",
"issn": "2372-9198",
"isbn": "978-1-5386-1710-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "1710a564",
"articleId": "12OmNvRU0dW",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "1710a576",
"articleId": "12OmNzV70C7",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icmla/2016/6167/0/07838118",
"title": "Inferring Hearing Loss from Learned Speech Kernels",
"doi": null,
"abstractUrl": "/proceedings-article/icmla/2016/07838118/12OmNBf94Wo",
"parentPublication": {
"id": "proceedings/icmla/2016/6167/0",
"title": "2016 15th IEEE International Conference on Machine Learning and Applications (ICMLA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icassp/2002/7402/4/05745533",
"title": "Signal processing, hearing aid design, and the psychoacoustic turing test",
"doi": null,
"abstractUrl": "/proceedings-article/icassp/2002/05745533/12OmNrAv3Gu",
"parentPublication": {
"id": "proceedings/icassp/2002/7402/4",
"title": "Proceedings of International Conference on Acoustics, Speech and Signal Processing (CASSP'02)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wse/2001/1399/0/00988783",
"title": "Requirements for maintaining Web access for hearing-impaired individuals",
"doi": null,
"abstractUrl": "/proceedings-article/wse/2001/00988783/12OmNwpGgPW",
"parentPublication": {
"id": "proceedings/wse/2001/1399/0",
"title": "Proceedings 3rd International Workshop on Web Site Evolution. WSE 2001",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sive/2018/5713/0/08577076",
"title": "The 3D Tune-In Toolkit – 3D audio spatialiser, hearing loss and hearing aid simulations",
"doi": null,
"abstractUrl": "/proceedings-article/sive/2018/08577076/17D45VsBU4f",
"parentPublication": {
"id": "proceedings/sive/2018/5713/0",
"title": "2018 IEEE 4th VR Workshop on Sonic Interactions for Virtual Environments (SIVE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sitis/2018/9385/0/938500a725",
"title": "A Big-Data Informed Model Approach to Hearing Health Policy Decision Making",
"doi": null,
"abstractUrl": "/proceedings-article/sitis/2018/938500a725/19RSwBbZGxO",
"parentPublication": {
"id": "proceedings/sitis/2018/9385/0",
"title": "2018 14th International Conference on Signal-Image Technology & Internet-Based Systems (SITIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/contie/2021/0821/0/082100a180",
"title": "Educational inclusion through ICT for hearing impaired students in EIS",
"doi": null,
"abstractUrl": "/proceedings-article/contie/2021/082100a180/1B12nQPdpU4",
"parentPublication": {
"id": "proceedings/contie/2021/0821/0",
"title": "2021 4th International Conference on Inclusive Technology and Education (CONTIE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cncit/2022/5296/0/529600a014",
"title": "A Study of Hearing Aid Fitting Formulas Based on Polynomial Approximation",
"doi": null,
"abstractUrl": "/proceedings-article/cncit/2022/529600a014/1GNuyw8GC76",
"parentPublication": {
"id": "proceedings/cncit/2022/5296/0",
"title": "2022 International Conference on Networks, Communications and Information Technology (CNCIT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/services/2019/3851/0/385100a307",
"title": "Improving Hearing Healthcare with Big Data Analytics of Real-Time Hearing Aid Data",
"doi": null,
"abstractUrl": "/proceedings-article/services/2019/385100a307/1cTIczQh5vi",
"parentPublication": {
"id": "proceedings/services/2019/3851/2642-939X",
"title": "2019 IEEE World Congress on Services (SERVICES)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/acit-csii-bcd/2017/3302/0/3302a053",
"title": "Voice Recognition and Information Transmission System for Hearing Impaired People",
"doi": null,
"abstractUrl": "/proceedings-article/acit-csii-bcd/2017/3302a053/1cdOxOiVQZi",
"parentPublication": {
"id": "proceedings/acit-csii-bcd/2017/3302/0",
"title": "2017 5th Intl Conf on Applied Computing and Information Technology/4th Intl Conf on Computational Science/Intelligence and Applied Informatics/2nd Intl Conf on Big Data, Cloud Computing, Data Science (ACIT-CSII-BCD)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icise/2020/2261/0/226100a241",
"title": "The application design of hearing aid parameters auto adaptive system for hearing impaired children based on android terminal",
"doi": null,
"abstractUrl": "/proceedings-article/icise/2020/226100a241/1tnYlALODdu",
"parentPublication": {
"id": "proceedings/icise/2020/2261/0",
"title": "2020 International Conference on Information Science and Education (ICISE-IE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNyjLoSc",
"title": "2015 IEEE Frontiers in Education Conference (FIE)",
"acronym": "fie",
"groupId": "1000297",
"volume": "0",
"displayVolume": "0",
"year": "2015",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNzahcka",
"doi": "10.1109/FIE.2015.7344327",
"title": "Enhancing the educational experience for deaf and hard of hearing students in software engineering",
"normalizedTitle": "Enhancing the educational experience for deaf and hard of hearing students in software engineering",
"abstract": "Software engineering is largely a communication-driven, team-oriented discipline. There are numerous hurdles for ensuring proper communication and interaction between all project stakeholders, including physical, technological, and cultural barriers. These obstructions not only affect software engineering in industry, but in academia as well. One possible issue that is often overlooked in software engineering education is how to best educate Deaf and hard-of-hearing (Deaf/HoH) students, and how to fully engage them in the classroom. In this paper, we present our experiences in teaching software engineering to Deaf/HoH students. In the classroom, these students work very closely in activities and on project teams with their hearing peers. We also present recommendations for creating a more robust software engineering educational experience for not only Deaf/HoH students, but for hearing students as well. We encourage instructors not only in software engineering programs, but in other computing disciplines to consider our recommendations and observations in order to enhance the educational experience for all students in the classroom, whether Deaf/HoH or hearing.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Software engineering is largely a communication-driven, team-oriented discipline. There are numerous hurdles for ensuring proper communication and interaction between all project stakeholders, including physical, technological, and cultural barriers. These obstructions not only affect software engineering in industry, but in academia as well. One possible issue that is often overlooked in software engineering education is how to best educate Deaf and hard-of-hearing (Deaf/HoH) students, and how to fully engage them in the classroom. In this paper, we present our experiences in teaching software engineering to Deaf/HoH students. In the classroom, these students work very closely in activities and on project teams with their hearing peers. We also present recommendations for creating a more robust software engineering educational experience for not only Deaf/HoH students, but for hearing students as well. We encourage instructors not only in software engineering programs, but in other computing disciplines to consider our recommendations and observations in order to enhance the educational experience for all students in the classroom, whether Deaf/HoH or hearing.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Software engineering is largely a communication-driven, team-oriented discipline. There are numerous hurdles for ensuring proper communication and interaction between all project stakeholders, including physical, technological, and cultural barriers. These obstructions not only affect software engineering in industry, but in academia as well. One possible issue that is often overlooked in software engineering education is how to best educate Deaf and hard-of-hearing (Deaf/HoH) students, and how to fully engage them in the classroom. In this paper, we present our experiences in teaching software engineering to Deaf/HoH students. In the classroom, these students work very closely in activities and on project teams with their hearing peers. We also present recommendations for creating a more robust software engineering educational experience for not only Deaf/HoH students, but for hearing students as well. We encourage instructors not only in software engineering programs, but in other computing disciplines to consider our recommendations and observations in order to enhance the educational experience for all students in the classroom, whether Deaf/HoH or hearing.",
"fno": "07344327",
"keywords": [
"Auditory System",
"Software Engineering",
"Stakeholders",
"Industries",
"Education",
"Robustness",
"Assistive Technology"
],
"authors": [
{
"affiliation": "Software Engineering Department, Rochester Institute of Technology, USA",
"fullName": "Daniel E. Krutz",
"givenName": "Daniel E.",
"surname": "Krutz",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Software Engineering Department, Rochester Institute of Technology, USA",
"fullName": "Samuel A. Malachowsky",
"givenName": "Samuel A.",
"surname": "Malachowsky",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Software Engineering Department, Rochester Institute of Technology, USA",
"fullName": "Scott D. Jones",
"givenName": "Scott D.",
"surname": "Jones",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "St. Mary's School for the Deaf, Buffalo, NY, USA",
"fullName": "Jayme A. Kaplan",
"givenName": "Jayme A.",
"surname": "Kaplan",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "fie",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2015-10-01T00:00:00",
"pubType": "proceedings",
"pages": "1-9",
"year": "2015",
"issn": null,
"isbn": "978-1-4799-8454-1",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07344326",
"articleId": "12OmNzXFoDi",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07344328",
"articleId": "12OmNxeut5g",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/pci/2012/4825/0/4825a369",
"title": "Secure Wireless Infrastructures and Mobile Learning for Deaf and Hard-of-Hearing Students",
"doi": null,
"abstractUrl": "/proceedings-article/pci/2012/4825a369/12OmNBpVPSE",
"parentPublication": {
"id": "proceedings/pci/2012/4825/0",
"title": "2012 16th Panhellenic Conference on Informatics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/respect/2015/0151/0/07296524",
"title": "On the benefits of specialized settings for deaf and hard of hearing students in computing",
"doi": null,
"abstractUrl": "/proceedings-article/respect/2015/07296524/12OmNrJAeat",
"parentPublication": {
"id": "proceedings/respect/2015/0151/0",
"title": "2015 Research in Equity and Sustained Participation in Engineering, Computing, and Technology (RESPECT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dexa/2009/3763/0/3763a279",
"title": "Accessible Multimodal Web Pages with Sign Language Translations for Deaf and Hard of Hearing Users",
"doi": null,
"abstractUrl": "/proceedings-article/dexa/2009/3763a279/12OmNvkYxaJ",
"parentPublication": {
"id": "proceedings/dexa/2009/3763/0",
"title": "2009 20th International Workshop on Database and Expert Systems Application. DEXA 2009",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fie/2013/5261/0/06685176",
"title": "Pedagogical application of RFID technology for hard of hearing children during mathematics and science learning activities",
"doi": null,
"abstractUrl": "/proceedings-article/fie/2013/06685176/12OmNwCJOXD",
"parentPublication": {
"id": "proceedings/fie/2013/5261/0",
"title": "2013 IEEE Frontiers in Education Conference (FIE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/acit-csi/2016/4871/0/07916982",
"title": "Evaluating Multi-Modal Speech Visualization Application for Deaf and Hard of Hearing People",
"doi": null,
"abstractUrl": "/proceedings-article/acit-csi/2016/07916982/12OmNz61cWD",
"parentPublication": {
"id": "proceedings/acit-csi/2016/4871/0",
"title": "2016 4th Intl. Conf. on Applied Computing and Information Technology (ACIT), 3rd Intl. Conf. on Computational Science/Intelligence and Applied Informatics (CSII), and 1st Intl. Conf. on Big Data, Cloud Computing, Data Science & Engineering (BCD)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aiccsa/2018/9120/0/08612893",
"title": "Social Networking Sites and Deaf and Hard of Hearing People in Jordan: Characteristics and Preferences",
"doi": null,
"abstractUrl": "/proceedings-article/aiccsa/2018/08612893/17D45VtKizp",
"parentPublication": {
"id": "proceedings/aiccsa/2018/9120/0",
"title": "2018 IEEE/ACS 15th International Conference on Computer Systems and Applications (AICCSA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icalt/2022/9519/0/951900a284",
"title": "Online Library for Deaf and Hard of Hearing Students",
"doi": null,
"abstractUrl": "/proceedings-article/icalt/2022/951900a284/1FUUiRIccmY",
"parentPublication": {
"id": "proceedings/icalt/2022/9519/0",
"title": "2022 International Conference on Advanced Learning Technologies (ICALT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2019/4765/0/476500a431",
"title": "Augmenting Communication Between Hearing Parents and Deaf Children",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2019/476500a431/1gyslDmdEJy",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2019/4765/0",
"title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fie/2019/1746/0/09028376",
"title": "A Metaphorical Debugger Model to support deaf and hearing impaired in Java programming learning",
"doi": null,
"abstractUrl": "/proceedings-article/fie/2019/09028376/1ifftxLNOpy",
"parentPublication": {
"id": "proceedings/fie/2019/1746/0",
"title": "2019 IEEE Frontiers in Education Conference (FIE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2021/1838/0/255600a588",
"title": "Multi-modal Spatial Object Localization in Virtual Reality for Deaf and Hard-of-Hearing People",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2021/255600a588/1tuAGAPl3Tq",
"parentPublication": {
"id": "proceedings/vr/2021/1838/0",
"title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "17D45VtKisK",
"title": "2018 17th Brazilian Symposium on Computer Games and Digital Entertainment (SBGames)",
"acronym": "sbgames",
"groupId": "1800056",
"volume": "0",
"displayVolume": "0",
"year": "2018",
"__typename": "ProceedingType"
},
"article": {
"id": "17D45WaTkny",
"doi": "10.1109/SBGAMES.2018.00034",
"title": "Representing Sentiment Using Colors and Particles to Provide Accessibility for Deaf and Hard of Hearing Players",
"normalizedTitle": "Representing Sentiment Using Colors and Particles to Provide Accessibility for Deaf and Hard of Hearing Players",
"abstract": "Providing game accessibility to deaf or hard of hearing players is still an issue in the game industry. The most common access feature developed to provide accessibility for players with this type of disability is to implement closed captions and other textual information to detail sentiments. This paper presents another approach to this problem. Based on the study of colors, this work uses the particle system provided by Unity engine and combines these elements into different scenarios where colorful particles propagate in different directions and with different speeds and forms. This paper proposes that these scenarios can express different sentiments. The proposal evaluation was performed through user interviews. The results present a set of scenarios that can be used by game designers to visually express the intended sentiment of the music or game environment sounds and a Unity plugin that support this task.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Providing game accessibility to deaf or hard of hearing players is still an issue in the game industry. The most common access feature developed to provide accessibility for players with this type of disability is to implement closed captions and other textual information to detail sentiments. This paper presents another approach to this problem. Based on the study of colors, this work uses the particle system provided by Unity engine and combines these elements into different scenarios where colorful particles propagate in different directions and with different speeds and forms. This paper proposes that these scenarios can express different sentiments. The proposal evaluation was performed through user interviews. The results present a set of scenarios that can be used by game designers to visually express the intended sentiment of the music or game environment sounds and a Unity plugin that support this task.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Providing game accessibility to deaf or hard of hearing players is still an issue in the game industry. The most common access feature developed to provide accessibility for players with this type of disability is to implement closed captions and other textual information to detail sentiments. This paper presents another approach to this problem. Based on the study of colors, this work uses the particle system provided by Unity engine and combines these elements into different scenarios where colorful particles propagate in different directions and with different speeds and forms. This paper proposes that these scenarios can express different sentiments. The proposal evaluation was performed through user interviews. The results present a set of scenarios that can be used by game designers to visually express the intended sentiment of the music or game environment sounds and a Unity plugin that support this task.",
"fno": "960500a221",
"keywords": [
"Computer Games",
"Ergonomics",
"Handicapped Aids",
"Music",
"Game Industry",
"Common Access Feature",
"Closed Captions",
"Textual Information",
"Particle System",
"Colorful Particles",
"Game Designers",
"Intended Sentiment",
"Game Accessibility",
"Sentiment Representation",
"Hard Of Hearing Players",
"Unity Engine",
"Games",
"Auditory System",
"Visualization",
"Engines",
"Music",
"Deafness",
"Ear",
"Accessibility",
"Deaf",
"Hard Of Hearing",
"Game Component"
],
"authors": [
{
"affiliation": null,
"fullName": "João Marcos Epifânio da Silva",
"givenName": "João Marcos Epifânio",
"surname": "da Silva",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Arthur de Castro Callado",
"givenName": "Arthur de Castro",
"surname": "Callado",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Paulyne Matthews Jucá",
"givenName": "Paulyne Matthews",
"surname": "Jucá",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "sbgames",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2018-10-01T00:00:00",
"pubType": "proceedings",
"pages": "221-22109",
"year": "2018",
"issn": null,
"isbn": "978-1-5386-9605-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "960500a212",
"articleId": "17D45XH89q7",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "960500a231",
"articleId": "17D45XreC5E",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/caapwd/1992/2730/0/00217396",
"title": "A computer sound cue indicator for hearing impaired people",
"doi": null,
"abstractUrl": "/proceedings-article/caapwd/1992/00217396/12OmNrMHOi5",
"parentPublication": {
"id": "proceedings/caapwd/1992/2730/0",
"title": "Proceedings of the Johns Hopkins National Search for Computing Applications to Assist Persons with Disabilities",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wse/2001/1399/0/00988784",
"title": "The case for the use of plain English to increase web accessibility",
"doi": null,
"abstractUrl": "/proceedings-article/wse/2001/00988784/12OmNwK7o7C",
"parentPublication": {
"id": "proceedings/wse/2001/1399/0",
"title": "Proceedings 3rd International Workshop on Web Site Evolution. WSE 2001",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sbgames/2011/4648/0/4648a053",
"title": "An Analysis of Information Conveyed through Audio in an FPS Game and Its Impact on Deaf Players Experience",
"doi": null,
"abstractUrl": "/proceedings-article/sbgames/2011/4648a053/12OmNwwuDPp",
"parentPublication": {
"id": "proceedings/sbgames/2011/4648/0",
"title": "2011 Brazilian Symposium on Games and Digital Entertainment",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/svr/2015/7204/0/7204a099",
"title": "A 3D Simulation System for Emergency Evacuation in Offshore Platforms",
"doi": null,
"abstractUrl": "/proceedings-article/svr/2015/7204a099/12OmNyRg4EF",
"parentPublication": {
"id": "proceedings/svr/2015/7204/0",
"title": "2015 XVII Symposium on Virtual and Augmented Reality (SVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/itme/2018/7744/0/774400b055",
"title": "The Influence of Dunqiang Qigong on Hearing and Blood Sugar Lever of Patients with Diabetes Deafness",
"doi": null,
"abstractUrl": "/proceedings-article/itme/2018/774400b055/17D45XDIXWx",
"parentPublication": {
"id": "proceedings/itme/2018/7744/0",
"title": "2018 9th International Conference on Information Technology in Medicine and Education (ITME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/05/08998298",
"title": "EarVR: Using Ear Haptics in Virtual Reality for Deaf and Hard-of-Hearing People",
"doi": null,
"abstractUrl": "/journal/tg/2020/05/08998298/1hrXce2Kmhq",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/05/08998401",
"title": "Superhuman Hearing - Virtual Prototyping of Artificial Hearing: a Case Study on Interactions and Acoustic Beamforming",
"doi": null,
"abstractUrl": "/journal/tg/2020/05/08998401/1hrXgAAK6NW",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fie/2019/1746/0/09028376",
"title": "A Metaphorical Debugger Model to support deaf and hearing impaired in Java programming learning",
"doi": null,
"abstractUrl": "/proceedings-article/fie/2019/09028376/1ifftxLNOpy",
"parentPublication": {
"id": "proceedings/fie/2019/1746/0",
"title": "2019 IEEE Frontiers in Education Conference (FIE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090404",
"title": "Addressing Deaf or Hard-of-Hearing People in Avatar-Based Mixed Reality Collaboration Systems",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090404/1jIxucASqv6",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sbgames/2021/0189/0/018900a191",
"title": "UtopicSense: a tool to support the use of synesthesia as an assistive resource in 2D games",
"doi": null,
"abstractUrl": "/proceedings-article/sbgames/2021/018900a191/1zusqttQ6R2",
"parentPublication": {
"id": "proceedings/sbgames/2021/0189/0",
"title": "2021 20th Brazilian Symposium on Computer Games and Digital Entertainment (SBGames)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "17QjJbuquxq",
"title": "2018 IEEE International Symposium on Signal Processing and Information Technology (ISSPIT)",
"acronym": "isspit",
"groupId": "1001026",
"volume": "0",
"displayVolume": "0",
"year": "2018",
"__typename": "ProceedingType"
},
"article": {
"id": "17QjJcN2ztN",
"doi": "10.1109/ISSPIT.2018.8642632",
"title": "Deep Learning-Based Hazardous Sound Classification for the Hard of Hearing and Deaf",
"normalizedTitle": "Deep Learning-Based Hazardous Sound Classification for the Hard of Hearing and Deaf",
"abstract": "The hard of hearing or deaf can only access limited auditory information in dangerous situations. Therefore, development of a system for sensing hazardous auditory information may be of great help to them. However, such systems have focused on effective signal transduction when a hazardous sound is detected, and the classification of hazardous sounds has been less investigated. The present study was conducted to classify sounds by using Recurrent Neural Network (RNN)-based models, Convolutional Neural Network (CNN)-based models, the combination of the two models, and ensemble models prepared by combining various models. The experimental results showed that the accuracy of the 3-layer Long Short-Term Memory (LSTM) model was 97.63% and that of the ensemble model was 98.00%. As an attempt at real-life application of the developed model, a warning system was prepared by using Raspberry Pi and a vibrator.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The hard of hearing or deaf can only access limited auditory information in dangerous situations. Therefore, development of a system for sensing hazardous auditory information may be of great help to them. However, such systems have focused on effective signal transduction when a hazardous sound is detected, and the classification of hazardous sounds has been less investigated. The present study was conducted to classify sounds by using Recurrent Neural Network (RNN)-based models, Convolutional Neural Network (CNN)-based models, the combination of the two models, and ensemble models prepared by combining various models. The experimental results showed that the accuracy of the 3-layer Long Short-Term Memory (LSTM) model was 97.63% and that of the ensemble model was 98.00%. As an attempt at real-life application of the developed model, a warning system was prepared by using Raspberry Pi and a vibrator.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The hard of hearing or deaf can only access limited auditory information in dangerous situations. Therefore, development of a system for sensing hazardous auditory information may be of great help to them. However, such systems have focused on effective signal transduction when a hazardous sound is detected, and the classification of hazardous sounds has been less investigated. The present study was conducted to classify sounds by using Recurrent Neural Network (RNN)-based models, Convolutional Neural Network (CNN)-based models, the combination of the two models, and ensemble models prepared by combining various models. The experimental results showed that the accuracy of the 3-layer Long Short-Term Memory (LSTM) model was 97.63% and that of the ensemble model was 98.00%. As an attempt at real-life application of the developed model, a warning system was prepared by using Raspberry Pi and a vibrator.",
"fno": "08642632",
"keywords": [
"Acoustic Signal Processing",
"Convolutional Neural Nets",
"Handicapped Aids",
"Hearing",
"Learning Artificial Intelligence",
"Recurrent Neural Nets",
"Signal Classification",
"Hazardous Sound Classification",
"Hazardous Auditory Information",
"Ensemble Model",
"Warning System",
"Hard Of Hearing",
"Deaf",
"Signal Transduction",
"Deep Learning",
"Recurrent Neural Network",
"Convolutional Neural Network",
"Predictive Models",
"Feature Extraction",
"Data Models",
"Hardware",
"Auditory System",
"Artificial Neural Networks",
"Automobiles",
"Environmental Sound",
"Sound Classification",
"Assistive Technology"
],
"authors": [
{
"affiliation": "Applied Acoustics Lab., Korea Science Academy of KAIST Busan, Republic of Korea",
"fullName": "Hyewon Suh",
"givenName": "Hyewon",
"surname": "Suh",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Applied Acoustics Lab., Korea Science Academy of KAIST Busan, Republic of Korea",
"fullName": "Seungwook Seo",
"givenName": "Seungwook",
"surname": "Seo",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Applied Acoustics Lab., Korea Science Academy of KAIST Busan, Republic of Korea",
"fullName": "Young H. Kim",
"givenName": "Young H.",
"surname": "Kim",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "isspit",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2018-12-01T00:00:00",
"pubType": "proceedings",
"pages": "59-63",
"year": "2018",
"issn": "2162-7843",
"isbn": "978-1-5386-7568-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08642782",
"articleId": "17QjJfe3aGk",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08642755",
"articleId": "17QjJfcUM9S",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ism/2011/4589/0/4589a204",
"title": "Sound Zone Control in an Interactive Table System Environment",
"doi": null,
"abstractUrl": "/proceedings-article/ism/2011/4589a204/12OmNAlNiyH",
"parentPublication": {
"id": "proceedings/ism/2011/4589/0",
"title": "2011 IEEE International Symposium on Multimedia",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/haptics/2002/1489/0/14890097",
"title": "Integrating Multimodal Information about Surface Texture via a Probe: Relative Contributions of Haptic and Touch-Produced Sound Sources",
"doi": null,
"abstractUrl": "/proceedings-article/haptics/2002/14890097/12OmNBBzohw",
"parentPublication": {
"id": "proceedings/haptics/2002/1489/0",
"title": "Haptic Interfaces for Virtual Environment and Teleoperator Systems, International Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cbms/2017/1710/0/1710a570",
"title": "HEAR?INFO: A Modern Mobile-Web Platform Addressed to Hard-of-Hearing Elderly Individuals",
"doi": null,
"abstractUrl": "/proceedings-article/cbms/2017/1710a570/12OmNrJiCGD",
"parentPublication": {
"id": "proceedings/cbms/2017/1710/0",
"title": "2017 IEEE 30th International Symposium on Computer-Based Medical Systems (CBMS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ichi/2015/9548/0/9548a525",
"title": "In-Situ Measurement and Prediction of Hearing Aid Outcomes Using Mobile Phones",
"doi": null,
"abstractUrl": "/proceedings-article/ichi/2015/9548a525/12OmNwoPtAi",
"parentPublication": {
"id": "proceedings/ichi/2015/9548/0",
"title": "2015 International Conference on Healthcare Informatics (ICHI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sbgames/2018/9605/0/960500a221",
"title": "Representing Sentiment Using Colors and Particles to Provide Accessibility for Deaf and Hard of Hearing Players",
"doi": null,
"abstractUrl": "/proceedings-article/sbgames/2018/960500a221/17D45WaTkny",
"parentPublication": {
"id": "proceedings/sbgames/2018/9605/0",
"title": "2018 17th Brazilian Symposium on Computer Games and Digital Entertainment (SBGames)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icalt/2022/9519/0/951900a284",
"title": "Online Library for Deaf and Hard of Hearing Students",
"doi": null,
"abstractUrl": "/proceedings-article/icalt/2022/951900a284/1FUUiRIccmY",
"parentPublication": {
"id": "proceedings/icalt/2022/9519/0",
"title": "2022 International Conference on Advanced Learning Technologies (ICALT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/05/08998298",
"title": "EarVR: Using Ear Haptics in Virtual Reality for Deaf and Hard-of-Hearing People",
"doi": null,
"abstractUrl": "/journal/tg/2020/05/08998298/1hrXce2Kmhq",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sitis/2019/5686/0/568600a146",
"title": "Machine Learning Based Detection of Hearing Loss Using Auditory Perception Responses",
"doi": null,
"abstractUrl": "/proceedings-article/sitis/2019/568600a146/1j9xAR4e1Gw",
"parentPublication": {
"id": "proceedings/sitis/2019/5686/0",
"title": "2019 15th International Conference on Signal-Image Technology & Internet-Based Systems (SITIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090404",
"title": "Addressing Deaf or Hard-of-Hearing People in Avatar-Based Mixed Reality Collaboration Systems",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090404/1jIxucASqv6",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2021/1838/0/255600a582",
"title": "Head Up Visualization of Spatial Sound Sources in Virtual Reality for Deaf and Hard-of-Hearing People",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2021/255600a582/1tuAPlsZnMc",
"parentPublication": {
"id": "proceedings/vr/2021/1838/0",
"title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1tuAeQeDJja",
"title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1tuAGAPl3Tq",
"doi": "10.1109/VR50410.2021.00084",
"title": "Multi-modal Spatial Object Localization in Virtual Reality for Deaf and Hard-of-Hearing People",
"normalizedTitle": "Multi-modal Spatial Object Localization in Virtual Reality for Deaf and Hard-of-Hearing People",
"abstract": "Information visualization techniques play an important role in Virtual Reality (VR) because they improve task performance, support cognitive processes, and eventually increase the feeling of immersion. Deaf and Hard-of-Hearing (DHH) persons have special needs for information presentation because they feel and perceive VR environments differently. Therefore, it is necessary to pay attention to requirements about presenting information in VR for this group of users. Previous research showed that adding special features and using haptic methods helps DHH persons to do VR tasks better. In this paper, we propose a novel Omni-directional particle visualization method and also evaluate multi-modal presentation methods in VR for DHH persons, such as audio, visual, haptic, and a combination of them (AVH). Additionally, we compare the results with the results of persons without hearing problems. The methods for information presentation in our study focus on spatial object localization in VR. Our user studies show that both DHH persons and persons without hearing problems were able to do VR tasks significantly faster using AVH. Also, we found out that DHH persons can do visual-related VR tasks faster than persons without hearing problems by using our new proposed visualization method. Our results suggest that the benefits of using audio among persons without hearing problems and the benefits of using vision among DHH persons cause an interesting balance in the results of AVH between both groups. Finally, our qualitative and quantitative evaluation indicates that both groups of participants preferred and enjoyed AVH modality more than other modalities.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Information visualization techniques play an important role in Virtual Reality (VR) because they improve task performance, support cognitive processes, and eventually increase the feeling of immersion. Deaf and Hard-of-Hearing (DHH) persons have special needs for information presentation because they feel and perceive VR environments differently. Therefore, it is necessary to pay attention to requirements about presenting information in VR for this group of users. Previous research showed that adding special features and using haptic methods helps DHH persons to do VR tasks better. In this paper, we propose a novel Omni-directional particle visualization method and also evaluate multi-modal presentation methods in VR for DHH persons, such as audio, visual, haptic, and a combination of them (AVH). Additionally, we compare the results with the results of persons without hearing problems. The methods for information presentation in our study focus on spatial object localization in VR. Our user studies show that both DHH persons and persons without hearing problems were able to do VR tasks significantly faster using AVH. Also, we found out that DHH persons can do visual-related VR tasks faster than persons without hearing problems by using our new proposed visualization method. Our results suggest that the benefits of using audio among persons without hearing problems and the benefits of using vision among DHH persons cause an interesting balance in the results of AVH between both groups. Finally, our qualitative and quantitative evaluation indicates that both groups of participants preferred and enjoyed AVH modality more than other modalities.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Information visualization techniques play an important role in Virtual Reality (VR) because they improve task performance, support cognitive processes, and eventually increase the feeling of immersion. Deaf and Hard-of-Hearing (DHH) persons have special needs for information presentation because they feel and perceive VR environments differently. Therefore, it is necessary to pay attention to requirements about presenting information in VR for this group of users. Previous research showed that adding special features and using haptic methods helps DHH persons to do VR tasks better. In this paper, we propose a novel Omni-directional particle visualization method and also evaluate multi-modal presentation methods in VR for DHH persons, such as audio, visual, haptic, and a combination of them (AVH). Additionally, we compare the results with the results of persons without hearing problems. The methods for information presentation in our study focus on spatial object localization in VR. Our user studies show that both DHH persons and persons without hearing problems were able to do VR tasks significantly faster using AVH. Also, we found out that DHH persons can do visual-related VR tasks faster than persons without hearing problems by using our new proposed visualization method. Our results suggest that the benefits of using audio among persons without hearing problems and the benefits of using vision among DHH persons cause an interesting balance in the results of AVH between both groups. Finally, our qualitative and quantitative evaluation indicates that both groups of participants preferred and enjoyed AVH modality more than other modalities.",
"fno": "255600a588",
"keywords": [
"Cognition",
"Data Visualisation",
"Handicapped Aids",
"Haptic Interfaces",
"Hearing",
"User Interfaces",
"Virtual Reality",
"DHH Persons",
"Hearing Problems",
"Visual Related VR Tasks",
"Multimodal Spatial Object Localization",
"Virtual Reality",
"Information Visualization Techniques",
"Information Presentation",
"VR Environments",
"Adding Special Features",
"Using Haptic Methods",
"Novel Omni Directional Particle Visualization Method",
"Multimodal Presentation Methods",
"Location Awareness",
"Visualization",
"Three Dimensional Displays",
"Cognitive Processes",
"Auditory System",
"Virtual Reality",
"User Interfaces"
],
"authors": [
{
"affiliation": "Institute of Visual Computing and Human-Centered Technology, Vienna University of Technology,Vienna,Austria",
"fullName": "Mohammadreza Mirzaei",
"givenName": "Mohammadreza",
"surname": "Mirzaei",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Aarhus University,Department of Computer Science,Denmark",
"fullName": "Peter Kán",
"givenName": "Peter",
"surname": "Kán",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Institute of Visual Computing and Human-Centered Technology, Vienna University of Technology,Vienna,Austria",
"fullName": "Hannes Kaufmann",
"givenName": "Hannes",
"surname": "Kaufmann",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-03-01T00:00:00",
"pubType": "proceedings",
"pages": "588-596",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-1838-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [
{
"id": "1tuAFRqEE5G",
"name": "pvr202118380-09417786s1-mm_255600a588.zip",
"size": "135 MB",
"location": "https://www.computer.org/csdl/api/v1/extra/pvr202118380-09417786s1-mm_255600a588.zip",
"__typename": "WebExtraType"
}
],
"adjacentArticles": {
"previous": {
"fno": "255600a582",
"articleId": "1tuAPlsZnMc",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "255600a597",
"articleId": "1tuBa9r3ZYc",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/bibm/2017/3050/0/08217846",
"title": "Exploratory textual analysis of consumer health languages for people who are D/deaf and hard of hearing",
"doi": null,
"abstractUrl": "/proceedings-article/bibm/2017/08217846/12OmNyv7m2k",
"parentPublication": {
"id": "proceedings/bibm/2017/3050/0",
"title": "2017 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/acit-csi/2016/4871/0/07916982",
"title": "Evaluating Multi-Modal Speech Visualization Application for Deaf and Hard of Hearing People",
"doi": null,
"abstractUrl": "/proceedings-article/acit-csi/2016/07916982/12OmNz61cWD",
"parentPublication": {
"id": "proceedings/acit-csi/2016/4871/0",
"title": "2016 4th Intl. Conf. on Applied Computing and Information Technology (ACIT), 3rd Intl. Conf. on Computational Science/Intelligence and Applied Informatics (CSII), and 1st Intl. Conf. on Big Data, Cloud Computing, Data Science & Engineering (BCD)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fie/2015/8454/0/07344327",
"title": "Enhancing the educational experience for deaf and hard of hearing students in software engineering",
"doi": null,
"abstractUrl": "/proceedings-article/fie/2015/07344327/12OmNzahcka",
"parentPublication": {
"id": "proceedings/fie/2015/8454/0",
"title": "2015 IEEE Frontiers in Education Conference (FIE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aiccsa/2018/9120/0/08612893",
"title": "Social Networking Sites and Deaf and Hard of Hearing People in Jordan: Characteristics and Preferences",
"doi": null,
"abstractUrl": "/proceedings-article/aiccsa/2018/08612893/17D45VtKizp",
"parentPublication": {
"id": "proceedings/aiccsa/2018/9120/0",
"title": "2018 IEEE/ACS 15th International Conference on Computer Systems and Applications (AICCSA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icalt/2022/9519/0/951900a284",
"title": "Online Library for Deaf and Hard of Hearing Students",
"doi": null,
"abstractUrl": "/proceedings-article/icalt/2022/951900a284/1FUUiRIccmY",
"parentPublication": {
"id": "proceedings/icalt/2022/9519/0",
"title": "2022 International Conference on Advanced Learning Technologies (ICALT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2019/4765/0/476500a431",
"title": "Augmenting Communication Between Hearing Parents and Deaf Children",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2019/476500a431/1gyslDmdEJy",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2019/4765/0",
"title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/05/08998298",
"title": "EarVR: Using Ear Haptics in Virtual Reality for Deaf and Hard-of-Hearing People",
"doi": null,
"abstractUrl": "/journal/tg/2020/05/08998298/1hrXce2Kmhq",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090404",
"title": "Addressing Deaf or Hard-of-Hearing People in Avatar-Based Mixed Reality Collaboration Systems",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090404/1jIxucASqv6",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2021/1838/0/255600a582",
"title": "Head Up Visualization of Spatial Sound Sources in Virtual Reality for Deaf and Hard-of-Hearing People",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2021/255600a582/1tuAPlsZnMc",
"parentPublication": {
"id": "proceedings/vr/2021/1838/0",
"title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvrv/2020/0497/0/049700a213",
"title": "Design of Auxiliary Hearing Compensation System Based on Bluetooth for Deaf Children",
"doi": null,
"abstractUrl": "/proceedings-article/icvrv/2020/049700a213/1vg7WQmVvCU",
"parentPublication": {
"id": "proceedings/icvrv/2020/0497/0",
"title": "2020 International Conference on Virtual Reality and Visualization (ICVRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1vg7AGzvxNC",
"title": "2020 International Conference on Virtual Reality and Visualization (ICVRV)",
"acronym": "icvrv",
"groupId": "1800579",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1vg7WQmVvCU",
"doi": "10.1109/ICVRV51359.2020.00051",
"title": "Design of Auxiliary Hearing Compensation System Based on Bluetooth for Deaf Children",
"normalizedTitle": "Design of Auxiliary Hearing Compensation System Based on Bluetooth for Deaf Children",
"abstract": "this paper studies a kind of hearing aid device based on Bluetooth technology and single chip microcomputer. According to the degree of hearing loss of hearing-impaired children, appropriate gain is selected to realize sound amplification, so that children with hearing impairment can hear the outside sound clearly and provide convenience for their life. This design adopts the design idea of the host and slave, the audio amplification parameters in the host are wireless transmitted to the slave through Bluetooth technology, and the audio code coding chip is given instructions. By selecting the appropriate audio signal amplification gain, the audio signal is amplified and processed, so that the hearing-impaired children can hear the external sound clearly. In this design scheme, the system hardware module design mainly includes the system control module, audio module, power module, Bluetooth communication module, etc., among which the audio module also includes the microphone input signal conditioning circuit, audio codec circuit, audio interface circuit, etc. The software platform uses the STM32F103xx firmware library provided by STMicroelectronics Company, and uses the library function provided by it and the corresponding instance to modify and debug.",
"abstracts": [
{
"abstractType": "Regular",
"content": "this paper studies a kind of hearing aid device based on Bluetooth technology and single chip microcomputer. According to the degree of hearing loss of hearing-impaired children, appropriate gain is selected to realize sound amplification, so that children with hearing impairment can hear the outside sound clearly and provide convenience for their life. This design adopts the design idea of the host and slave, the audio amplification parameters in the host are wireless transmitted to the slave through Bluetooth technology, and the audio code coding chip is given instructions. By selecting the appropriate audio signal amplification gain, the audio signal is amplified and processed, so that the hearing-impaired children can hear the external sound clearly. In this design scheme, the system hardware module design mainly includes the system control module, audio module, power module, Bluetooth communication module, etc., among which the audio module also includes the microphone input signal conditioning circuit, audio codec circuit, audio interface circuit, etc. The software platform uses the STM32F103xx firmware library provided by STMicroelectronics Company, and uses the library function provided by it and the corresponding instance to modify and debug.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "this paper studies a kind of hearing aid device based on Bluetooth technology and single chip microcomputer. According to the degree of hearing loss of hearing-impaired children, appropriate gain is selected to realize sound amplification, so that children with hearing impairment can hear the outside sound clearly and provide convenience for their life. This design adopts the design idea of the host and slave, the audio amplification parameters in the host are wireless transmitted to the slave through Bluetooth technology, and the audio code coding chip is given instructions. By selecting the appropriate audio signal amplification gain, the audio signal is amplified and processed, so that the hearing-impaired children can hear the external sound clearly. In this design scheme, the system hardware module design mainly includes the system control module, audio module, power module, Bluetooth communication module, etc., among which the audio module also includes the microphone input signal conditioning circuit, audio codec circuit, audio interface circuit, etc. The software platform uses the STM32F103xx firmware library provided by STMicroelectronics Company, and uses the library function provided by it and the corresponding instance to modify and debug.",
"fno": "049700a213",
"keywords": [
"Audio Coding",
"Bluetooth",
"Codecs",
"Firmware",
"Handicapped Aids",
"Hearing",
"Hearing Aids",
"Low Power Electronics",
"Microcontrollers",
"Microphones",
"Microprocessor Chips",
"Radio Equipment",
"Signal Conditioning Circuits",
"Sound Amplification",
"Hearing Impairment",
"Slave",
"Audio Amplification Parameters",
"Bluetooth Technology",
"Audio Code Coding Chip",
"Appropriate Audio Signal Amplification Gain",
"Hearing Impaired Children",
"External Sound",
"Design Scheme",
"System Hardware Module Design",
"System Control Module",
"Audio Module",
"Bluetooth Communication Module",
"Microphone Input Signal Conditioning Circuit",
"Audio Codec Circuit",
"Audio Interface Circuit",
"Auxiliary Hearing Compensation System",
"Deaf Children",
"Hearing Aid Device",
"Single Chip Microcomputer",
"Hearing Loss",
"Appropriate Gain",
"Wireless Communication",
"Bluetooth",
"Prototypes",
"Multichip Modules",
"Auditory System",
"Virtual Reality",
"Software",
"Bluetooth",
"Audio Codec",
"Audio Amplifier",
"STM 32 F 103",
"VS 1053 B"
],
"authors": [
{
"affiliation": "College of Cyberspace Security Changchun University,ChangChun,China",
"fullName": "Dawei Xu",
"givenName": "Dawei",
"surname": "Xu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "College of Cyberspace Security Changchun University,ChangChun,China",
"fullName": "Jiaqi Gao",
"givenName": "Jiaqi",
"surname": "Gao",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "College of Cyberspace Security Changchun University,ChangChun,China",
"fullName": "Fudong Wu",
"givenName": "Fudong",
"surname": "Wu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Electronic Information Engineering College Changchun University,ChangChun,China",
"fullName": "Lijuan Shi",
"givenName": "Lijuan",
"surname": "Shi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "College of Cyberspace Security Changchun University,ChangChun,China",
"fullName": "Yijie She",
"givenName": "Yijie",
"surname": "She",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "College of computer science and technology Changchun University,ChangChun,China",
"fullName": "Zhao Jian",
"givenName": "Zhao",
"surname": "Jian",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icvrv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-11-01T00:00:00",
"pubType": "proceedings",
"pages": "213-217",
"year": "2020",
"issn": "2375-141X",
"isbn": "978-1-6654-0497-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "049700a209",
"articleId": "1vg7WJJ3xTi",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "049700a218",
"articleId": "1vg7X9XOGl2",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/lcn/2010/8387/0/p356hoene",
"title": "Optimally using the Bluetooth subband codec",
"doi": null,
"abstractUrl": "/proceedings-article/lcn/2010/p356hoene/12OmNAS9zPj",
"parentPublication": {
"id": "proceedings/lcn/2010/8387/0",
"title": "IEEE Local Computer Network Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2010/6237/0/05444757",
"title": "Simulating hearing loss in virtual training",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2010/05444757/12OmNAYoKvN",
"parentPublication": {
"id": "proceedings/vr/2010/6237/0",
"title": "2010 IEEE Virtual Reality Conference (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icalt/2013/5009/0/5009a212",
"title": "Development of Speech Training Aid System for Hearing-Impaired Children",
"doi": null,
"abstractUrl": "/proceedings-article/icalt/2013/5009a212/12OmNB9t6ma",
"parentPublication": {
"id": "proceedings/icalt/2013/5009/0",
"title": "2013 IEEE 13th International Conference on Advanced Learning Technologies (ICALT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/csci/2016/5510/0/07881327",
"title": "6dB SNR Improved 64 Channel Hearing Aid Development Using CSR8675 Bluetooth Chip",
"doi": null,
"abstractUrl": "/proceedings-article/csci/2016/07881327/12OmNvSbBHM",
"parentPublication": {
"id": "proceedings/csci/2016/5510/0",
"title": "2016 International Conference on Computational Science and Computational Intelligence (CSCI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vlsid/2016/8700/0/8700a579",
"title": "FPGA-based Design of a Hearing Aid with Frequency Response Selection through Audio Input",
"doi": null,
"abstractUrl": "/proceedings-article/vlsid/2016/8700a579/12OmNvTBB7v",
"parentPublication": {
"id": "proceedings/vlsid/2016/8700/0",
"title": "2016 29th International Conference on VLSI Design and 2016 15th International Conference on Embedded Systems (VLSID)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sive/2018/5713/0/08577076",
"title": "The 3D Tune-In Toolkit – 3D audio spatialiser, hearing loss and hearing aid simulations",
"doi": null,
"abstractUrl": "/proceedings-article/sive/2018/08577076/17D45VsBU4f",
"parentPublication": {
"id": "proceedings/sive/2018/5713/0",
"title": "2018 IEEE 4th VR Workshop on Sonic Interactions for Virtual Environments (SIVE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2019/4765/0/476500a431",
"title": "Augmenting Communication Between Hearing Parents and Deaf Children",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2019/476500a431/1gyslDmdEJy",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2019/4765/0",
"title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/05/08998298",
"title": "EarVR: Using Ear Haptics in Virtual Reality for Deaf and Hard-of-Hearing People",
"doi": null,
"abstractUrl": "/journal/tg/2020/05/08998298/1hrXce2Kmhq",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icise/2020/2261/0/226100a241",
"title": "The application design of hearing aid parameters auto adaptive system for hearing impaired children based on android terminal",
"doi": null,
"abstractUrl": "/proceedings-article/icise/2020/226100a241/1tnYlALODdu",
"parentPublication": {
"id": "proceedings/icise/2020/2261/0",
"title": "2020 International Conference on Information Science and Education (ICISE-IE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2021/1838/0/255600a588",
"title": "Multi-modal Spatial Object Localization in Virtual Reality for Deaf and Hard-of-Hearing People",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2021/255600a588/1tuAGAPl3Tq",
"parentPublication": {
"id": "proceedings/vr/2021/1838/0",
"title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNvAiSpZ",
"title": "2015 IEEE Virtual Reality (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2015",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNx5Yvk2",
"doi": "10.1109/VR.2015.7223322",
"title": "Perceiving mass in mixed reality through pseudo-haptic rendering of Newton's third law",
"normalizedTitle": "Perceiving mass in mixed reality through pseudo-haptic rendering of Newton's third law",
"abstract": "In mixed reality, real objects can be used to interact with virtual objects. However, unlike in the real world, real objects do not encounter any opposite reaction force when pushing against virtual objects. The lack of reaction force during manipulation prevents users from perceiving the mass of virtual objects. Although this could be addressed by equipping real objects with force-feedback devices, such a solution remains complex and impractical. In this work, we present a technique to produce an illusion of mass without any active force-feedback mechanism. This is achieved by simulating the effects of this reaction force in a purely visual way. A first study demonstrates that our technique indeed allows users to differentiate light virtual objects from heavy virtual objects. In addition, it shows that the illusion is immediately effective, with no prior training. In a second study, we measure the lowest mass difference (JND) that can be perceived with this technique. The effectiveness and ease of implementation of our solution provides an opportunity to enhance mixed reality interaction at no additional cost.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In mixed reality, real objects can be used to interact with virtual objects. However, unlike in the real world, real objects do not encounter any opposite reaction force when pushing against virtual objects. The lack of reaction force during manipulation prevents users from perceiving the mass of virtual objects. Although this could be addressed by equipping real objects with force-feedback devices, such a solution remains complex and impractical. In this work, we present a technique to produce an illusion of mass without any active force-feedback mechanism. This is achieved by simulating the effects of this reaction force in a purely visual way. A first study demonstrates that our technique indeed allows users to differentiate light virtual objects from heavy virtual objects. In addition, it shows that the illusion is immediately effective, with no prior training. In a second study, we measure the lowest mass difference (JND) that can be perceived with this technique. The effectiveness and ease of implementation of our solution provides an opportunity to enhance mixed reality interaction at no additional cost.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In mixed reality, real objects can be used to interact with virtual objects. However, unlike in the real world, real objects do not encounter any opposite reaction force when pushing against virtual objects. The lack of reaction force during manipulation prevents users from perceiving the mass of virtual objects. Although this could be addressed by equipping real objects with force-feedback devices, such a solution remains complex and impractical. In this work, we present a technique to produce an illusion of mass without any active force-feedback mechanism. This is achieved by simulating the effects of this reaction force in a purely visual way. A first study demonstrates that our technique indeed allows users to differentiate light virtual objects from heavy virtual objects. In addition, it shows that the illusion is immediately effective, with no prior training. In a second study, we measure the lowest mass difference (JND) that can be perceived with this technique. The effectiveness and ease of implementation of our solution provides an opportunity to enhance mixed reality interaction at no additional cost.",
"fno": "07223322",
"keywords": [
"Cloning",
"Force",
"Springs",
"Visualization",
"Virtual Reality",
"Haptic Interfaces",
"Damping",
"Pseudo Haptics",
"Mass Perception",
"Physically Based Simulation",
"Mixed Reality"
],
"authors": [
{
"affiliation": "LIMSI-CNRS Univ. Paris-Sud",
"fullName": "Paul Issartel",
"givenName": "Paul",
"surname": "Issartel",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Department of Mathematics, Florida State University",
"fullName": "Florimond Gueniat",
"givenName": "Florimond",
"surname": "Gueniat",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "INRIA, Univ. Grenoble Alpes, LIG, CNRS, LIG",
"fullName": "Sabine Coquillart",
"givenName": "Sabine",
"surname": "Coquillart",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "LIMSI-CNRS, Univ. Paris-Sud",
"fullName": "Mehdi Ammi",
"givenName": "Mehdi",
"surname": "Ammi",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2015-03-01T00:00:00",
"pubType": "proceedings",
"pages": "41-46",
"year": "2015",
"issn": null,
"isbn": "978-1-4799-1727-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07223321",
"articleId": "12OmNCeaPWO",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07223323",
"articleId": "12OmNzZmZu4",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cw/2012/4814/0/4814a157",
"title": "Stable Dynamic Algorithm Based on Virtual Coupling for 6-DOF Haptic Rendering",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2012/4814a157/12OmNqJ8tk6",
"parentPublication": {
"id": "proceedings/cw/2012/4814/0",
"title": "2012 International Conference on Cyberworlds",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/whc/2007/2738/0/04145247",
"title": "Development of Fingertip Type Non-grounding Force Feedback Display",
"doi": null,
"abstractUrl": "/proceedings-article/whc/2007/04145247/12OmNx5Yv8J",
"parentPublication": {
"id": "proceedings/whc/2007/2738/0",
"title": "2007 2nd Joint EuroHaptics Conference and Symposium on Haptic Interfaces for Virtual Environments and Teleoperator Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446403",
"title": "Effect of Electrical Stimulation Haptic Feedback on Perceptions of Softness-Hardness and Stickiness While Touching a Virtual Object",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446403/13bd1eSlytA",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446516",
"title": "Softness-Hardness and Stickiness Feedback Using Electrical Stimulation While Touching a Virtual Object",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446516/13bd1fWcuDz",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/th/2014/03/06736085",
"title": "Stability of Haptic Systems with Projection-Based Force Reflection",
"doi": null,
"abstractUrl": "/journal/th/2014/03/06736085/13rRUwInv4A",
"parentPublication": {
"id": "trans/th",
"title": "IEEE Transactions on Haptics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/th/2016/03/07469346",
"title": "The Effect of Global and Local Damping on the Perception of Hardness",
"doi": null,
"abstractUrl": "/journal/th/2016/03/07469346/13rRUwcS1D8",
"parentPublication": {
"id": "trans/th",
"title": "IEEE Transactions on Haptics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/th/2016/03/07452650",
"title": "Non-Colocated Kinesthetic Display Limits Compliance Discrimination in the Absence of Terminal Force Cues",
"doi": null,
"abstractUrl": "/journal/th/2016/03/07452650/13rRUwjoNxb",
"parentPublication": {
"id": "trans/th",
"title": "IEEE Transactions on Haptics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/th/2014/01/06674293",
"title": "Grip Force Control during Virtual Object Interaction: Effect of Force Feedback, Accuracy Demands, and Training",
"doi": null,
"abstractUrl": "/journal/th/2014/01/06674293/13rRUxAASW5",
"parentPublication": {
"id": "trans/th",
"title": "IEEE Transactions on Haptics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/05/08998563",
"title": "Pseudo-Haptic Display of Mass and Mass Distribution During Object Rotation in Virtual Reality",
"doi": null,
"abstractUrl": "/journal/tg/2020/05/08998563/1hx2CTjPZII",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090530",
"title": "A Just Noticeable Difference for Perceiving Virtual Surfaces through Haptic Interaction",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090530/1jIxtOsYn16",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNz5JC3u",
"title": "2007 2nd Joint EuroHaptics Conference and Symposium on Haptic Interfaces for Virtual Environments and Teleoperator Systems",
"acronym": "whc",
"groupId": "1001635",
"volume": "0",
"displayVolume": "0",
"year": "2007",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNxFJXRu",
"doi": "10.1109/WHC.2007.48",
"title": "Evaluation of Human Performance with Kinematic and Haptic Errors",
"normalizedTitle": "Evaluation of Human Performance with Kinematic and Haptic Errors",
"abstract": "In teleoperation systems, link flexion results in kinematic errors, such that the position mapping between the master motion and slave motion is not correct. For haptic feedback teleoperators, this also causes errors in the direction of force feedback to the operator. This study examines human ability to compensate for rotational kinematic/haptic errors, where the mapping between master and slave kinematics and haptics has an error of 5 degrees. Using a 2-degree-of-freedom haptic system, with a virtual environment representing the slave robot and environment, subjects performed object tracing tasks on either a square or a circle with various combinations of correct and incorrect kinematics, and correct, incorrect, and absent haptic feedback. A point-to-point targeting task was performed between each tracing task to minimize the possibility of aftereffects. The results showed no significance for using different object shapes for the tracing task or for having haptic feedback in the targeting task. Incorrect haptic feedback proved to be comparable to having correct haptic feedback under our experimental conditions",
"abstracts": [
{
"abstractType": "Regular",
"content": "In teleoperation systems, link flexion results in kinematic errors, such that the position mapping between the master motion and slave motion is not correct. For haptic feedback teleoperators, this also causes errors in the direction of force feedback to the operator. This study examines human ability to compensate for rotational kinematic/haptic errors, where the mapping between master and slave kinematics and haptics has an error of 5 degrees. Using a 2-degree-of-freedom haptic system, with a virtual environment representing the slave robot and environment, subjects performed object tracing tasks on either a square or a circle with various combinations of correct and incorrect kinematics, and correct, incorrect, and absent haptic feedback. A point-to-point targeting task was performed between each tracing task to minimize the possibility of aftereffects. The results showed no significance for using different object shapes for the tracing task or for having haptic feedback in the targeting task. Incorrect haptic feedback proved to be comparable to having correct haptic feedback under our experimental conditions",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In teleoperation systems, link flexion results in kinematic errors, such that the position mapping between the master motion and slave motion is not correct. For haptic feedback teleoperators, this also causes errors in the direction of force feedback to the operator. This study examines human ability to compensate for rotational kinematic/haptic errors, where the mapping between master and slave kinematics and haptics has an error of 5 degrees. Using a 2-degree-of-freedom haptic system, with a virtual environment representing the slave robot and environment, subjects performed object tracing tasks on either a square or a circle with various combinations of correct and incorrect kinematics, and correct, incorrect, and absent haptic feedback. A point-to-point targeting task was performed between each tracing task to minimize the possibility of aftereffects. The results showed no significance for using different object shapes for the tracing task or for having haptic feedback in the targeting task. Incorrect haptic feedback proved to be comparable to having correct haptic feedback under our experimental conditions",
"fno": "04145155",
"keywords": [
"Force Feedback",
"Haptic Interfaces",
"Robot Kinematics",
"Telerobotics",
"Human Performance Evaluation",
"Haptic Errors",
"Kinematic Errors",
"Position Mapping",
"Haptic Feedback Teleoperators",
"Force Feedback",
"2 Degree Of Freedom",
"Slave Robot",
"Point To Point Targeting",
"Humans",
"Kinematics",
"Haptic Interfaces",
"Master Slave",
"Force Feedback",
"Error Correction",
"Teleoperators",
"Virtual Environment",
"Robots",
"Shape"
],
"authors": [
{
"affiliation": "Johns Hopkins University",
"fullName": "Tomonori Yamamoto",
"givenName": "Tomonori",
"surname": "Yamamoto",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Johns Hopkins University",
"fullName": "Allison M. Okamura",
"givenName": "Allison M.",
"surname": "Okamura",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "whc",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2007-03-01T00:00:00",
"pubType": "proceedings",
"pages": "78-83",
"year": "2007",
"issn": null,
"isbn": "0-7695-2738-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "27380072",
"articleId": "12OmNvmowLM",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "27380084",
"articleId": "12OmNynsbCO",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/achi/2009/3529/0/3529a348",
"title": "Control Concept for a Hydraulic Mobile Machine Using a Haptic Operating Device",
"doi": null,
"abstractUrl": "/proceedings-article/achi/2009/3529a348/12OmNC1GujQ",
"parentPublication": {
"id": "proceedings/achi/2009/3529/0",
"title": "International Conference on Advances in Computer-Human Interaction",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icca/2003/7777/0/01595164",
"title": "Design and Implementation of Force-feedback System via Ethernet",
"doi": null,
"abstractUrl": "/proceedings-article/icca/2003/01595164/12OmNwCJOJI",
"parentPublication": {
"id": "proceedings/icca/2003/7777/0",
"title": "4th International Conference on Control and Automation. Final Program and Book of Abstracts",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/whc/2007/2738/0/04145189",
"title": "Effects of Translational and Gripping Force Feedback are Decoupled in a 4-Degree-of-Freedom Telemanipulator",
"doi": null,
"abstractUrl": "/proceedings-article/whc/2007/04145189/12OmNwdtwdo",
"parentPublication": {
"id": "proceedings/whc/2007/2738/0",
"title": "2007 2nd Joint EuroHaptics Conference and Symposium on Haptic Interfaces for Virtual Environments and Teleoperator Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/whc/2007/2738/0/04145219",
"title": "Enhancing Transparency of a Position-Exchange Teleoperator",
"doi": null,
"abstractUrl": "/proceedings-article/whc/2007/04145219/12OmNxXl5zB",
"parentPublication": {
"id": "proceedings/whc/2007/2738/0",
"title": "2007 2nd Joint EuroHaptics Conference and Symposium on Haptic Interfaces for Virtual Environments and Teleoperator Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/haptics/2002/1489/0/14890319",
"title": "Human Kinematic Factor for Haptic Manipulation: The Wrist to Thumb",
"doi": null,
"abstractUrl": "/proceedings-article/haptics/2002/14890319/12OmNyGtjiS",
"parentPublication": {
"id": "proceedings/haptics/2002/1489/0",
"title": "Haptic Interfaces for Virtual Environment and Teleoperator Systems, International Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/haptics/2010/6821/0/05444612",
"title": "Haptic system design for MRI-guided needle based prostate brachytherapy",
"doi": null,
"abstractUrl": "/proceedings-article/haptics/2010/05444612/12OmNyQYttN",
"parentPublication": {
"id": "proceedings/haptics/2010/6821/0",
"title": "2010 IEEE Haptics Symposium (Formerly known as Symposium on Haptic Interfaces for Virtual Environment and Teleoperator Systems)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/haptics/2008/2005/0/04479978",
"title": "Haptic Implications of Tool Flexibility in Surgical Teleoperation",
"doi": null,
"abstractUrl": "/proceedings-article/haptics/2008/04479978/12OmNylboNh",
"parentPublication": {
"id": "proceedings/haptics/2008/2005/0",
"title": "IEEE Haptics Symposium 2008",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/whc/2007/2738/0/04145201",
"title": "A Novel Planar 3-DOF Hard-Soft Haptic Teleoperator",
"doi": null,
"abstractUrl": "/proceedings-article/whc/2007/04145201/12OmNzXFoFa",
"parentPublication": {
"id": "proceedings/whc/2007/2738/0",
"title": "2007 2nd Joint EuroHaptics Conference and Symposium on Haptic Interfaces for Virtual Environments and Teleoperator Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798139",
"title": "Human, Virtual Human, Bump! A Preliminary Study on Haptic Feedback",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798139/1cJ157IzTri",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icisce/2020/6406/0/640600b433",
"title": "The Force Feedback and Master-Slave Teleoperation Robot for Live Working",
"doi": null,
"abstractUrl": "/proceedings-article/icisce/2020/640600b433/1x3kVaJ2Qa4",
"parentPublication": {
"id": "proceedings/icisce/2020/6406/0",
"title": "2020 7th International Conference on Information Science and Control Engineering (ICISCE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "13bd1eJgoia",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2018",
"__typename": "ProceedingType"
},
"article": {
"id": "13bd1fph1xN",
"doi": "10.1109/VR.2018.8446550",
"title": "AnimationVR - Interactive Controller-Based Animating in Virtual Reality",
"normalizedTitle": "AnimationVR - Interactive Controller-Based Animating in Virtual Reality",
"abstract": "Animating with keyframes gives animators a lot of control but they can be tedious and complicated to work with. Currently, different solutions try to simplify animation creation by recording the natural hand movements of the user. However, most of the solutions are bound to 2D animations [1] or suffer from a low workflow speed [2]. The proposed Unity plugin Animation Vr uses the HTC Vive system to enable the puppeteering animation technique in VR while still allowing for a fast workflow speed by utilizing the controllers of the VR system. Also, Animation Vr is written for easy integration in already existing Unity projects. The plugin was evaluated with four animation experts. The consensus was that Animation Vr increases the workflow speed while decreasing the animation precision. This tradeoff makes it useful for storyboarding in professional environments. Additionally, the plugin could improve the understanding of VR storytelling as the animators would create and instantly review the animations in the correct medium. The experts also noted the ease of use of the puppeteering technique which could enable beginners to create complex animations with little to no experience with Animation Vr. Additionally, the accessibility for animation beginners could improve the communication in animation teams between animators and directors.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Animating with keyframes gives animators a lot of control but they can be tedious and complicated to work with. Currently, different solutions try to simplify animation creation by recording the natural hand movements of the user. However, most of the solutions are bound to 2D animations [1] or suffer from a low workflow speed [2]. The proposed Unity plugin Animation Vr uses the HTC Vive system to enable the puppeteering animation technique in VR while still allowing for a fast workflow speed by utilizing the controllers of the VR system. Also, Animation Vr is written for easy integration in already existing Unity projects. The plugin was evaluated with four animation experts. The consensus was that Animation Vr increases the workflow speed while decreasing the animation precision. This tradeoff makes it useful for storyboarding in professional environments. Additionally, the plugin could improve the understanding of VR storytelling as the animators would create and instantly review the animations in the correct medium. The experts also noted the ease of use of the puppeteering technique which could enable beginners to create complex animations with little to no experience with Animation Vr. Additionally, the accessibility for animation beginners could improve the communication in animation teams between animators and directors.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Animating with keyframes gives animators a lot of control but they can be tedious and complicated to work with. Currently, different solutions try to simplify animation creation by recording the natural hand movements of the user. However, most of the solutions are bound to 2D animations [1] or suffer from a low workflow speed [2]. The proposed Unity plugin Animation Vr uses the HTC Vive system to enable the puppeteering animation technique in VR while still allowing for a fast workflow speed by utilizing the controllers of the VR system. Also, Animation Vr is written for easy integration in already existing Unity projects. The plugin was evaluated with four animation experts. The consensus was that Animation Vr increases the workflow speed while decreasing the animation precision. This tradeoff makes it useful for storyboarding in professional environments. Additionally, the plugin could improve the understanding of VR storytelling as the animators would create and instantly review the animations in the correct medium. The experts also noted the ease of use of the puppeteering technique which could enable beginners to create complex animations with little to no experience with Animation Vr. Additionally, the accessibility for animation beginners could improve the communication in animation teams between animators and directors.",
"fno": "08446550",
"keywords": [
"Computer Animation",
"Virtual Reality",
"Animation Creation",
"Unity Plugin Animation Vr",
"Puppeteering Animation Technique",
"VR System",
"Animation Precision",
"VR Storytelling",
"Complex Animations",
"Virtual Reality",
"Animation VR",
"Interactive Controller Based Animating",
"Animation",
"Virtual Reality",
"Interviews",
"Robots",
"Two Dimensional Displays",
"Indexes",
"Programming",
"Human Centered Computing Human Computer Interaction HCI Interactive Systems And Tools User Interface Programming"
],
"authors": [
{
"affiliation": "University of Hamburg",
"fullName": "Daniel Vogel",
"givenName": "Daniel",
"surname": "Vogel",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Hamburg",
"fullName": "Paul Lubos",
"givenName": "Paul",
"surname": "Lubos",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Hamburg",
"fullName": "Frank Steinicke",
"givenName": "Frank",
"surname": "Steinicke",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2018-03-01T00:00:00",
"pubType": "proceedings",
"pages": "1-1",
"year": "2018",
"issn": null,
"isbn": "978-1-5386-3365-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08446469",
"articleId": "13bd1eSlysL",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08446239",
"articleId": "13bd1tMztYb",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ca/1995/7062/0/70620050",
"title": "Creating animations using virtual reality ThatcherWorld: a case study",
"doi": null,
"abstractUrl": "/proceedings-article/ca/1995/70620050/12OmNrJiCSy",
"parentPublication": {
"id": "proceedings/ca/1995/7062/0",
"title": "Computer Animation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446332",
"title": "AnimationVR - Interactive Controller-Based Animating in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446332/13bd1ftOBCY",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2017/04/mcg2017040030",
"title": "Data-Driven Approach to Synthesizing Facial Animation Using Motion Capture",
"doi": null,
"abstractUrl": "/magazine/cg/2017/04/mcg2017040030/13rRUyeTVkv",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/anivae/2018/6511/0/08587268",
"title": "AnimationVR - Interactive Controller-based Animating in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/anivae/2018/08587268/17D45Xtvpbo",
"parentPublication": {
"id": "proceedings/anivae/2018/6511/0",
"title": "2018 IEEE 1st Workshop on Animation in Virtual and Augmented Environments (ANIVAE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a056",
"title": "Physics-based character animation for Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a056/1CJdEcF4PjG",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ism/2022/7172/0/717200a218",
"title": "Emotionally Expressive Motion Controller for Virtual Character Locomotion Animations",
"doi": null,
"abstractUrl": "/proceedings-article/ism/2022/717200a218/1KaHFw8d8VW",
"parentPublication": {
"id": "proceedings/ism/2022/7172/0",
"title": "2022 IEEE International Symposium on Multimedia (ISM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aivr/2022/5725/0/572500a064",
"title": "Smart Motion Trails for Animating in VR",
"doi": null,
"abstractUrl": "/proceedings-article/aivr/2022/572500a064/1KmFbVCEHxm",
"parentPublication": {
"id": "proceedings/aivr/2022/5725/0",
"title": "2022 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/co/2020/04/09063224",
"title": "Is Immersive Virtual Reality the Ultimate Interface for 3D Animators?",
"doi": null,
"abstractUrl": "/magazine/co/2020/04/09063224/1iUHNooSIUM",
"parentPublication": {
"id": "mags/co",
"title": "Computer",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090665",
"title": "Authoring-by-Doing: Animating Work Instructions for Industrial Virtual Reality Learning Environments",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090665/1jIxyiybRDO",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvrv/2019/4752/0/09213044",
"title": "Multiple Character Motion Adaptation in Virtual Cities Using Procedural Animation",
"doi": null,
"abstractUrl": "/proceedings-article/icvrv/2019/09213044/1nHRVadEuac",
"parentPublication": {
"id": "proceedings/icvrv/2019/4752/0",
"title": "2019 International Conference on Virtual Reality and Visualization (ICVRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1CJbEwHHqEg",
"title": "2022 IEEE on Conference Virtual Reality and 3D User Interfaces (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1CJcrwDUDgk",
"doi": "10.1109/VR51125.2022.00048",
"title": "Virtual Reality Observations: Using Virtual Reality to Augment Lab-Based Shoulder Surfing Research",
"normalizedTitle": "Virtual Reality Observations: Using Virtual Reality to Augment Lab-Based Shoulder Surfing Research",
"abstract": "Given the difficulties of studying the shoulder surfing resistance of authentication systems in a live setting, researchers often ask study participants to shoulder surf authentications by watching two-dimensional (2D) video recordings of a user authenticating. How-ever, these video recordings do not provide participants with a realistic shoulder surfing experience, creating uncertainty in the value and validity of lab-based shoulder surfing experiments. In this work, we exploit the unique characteristics of virtual reality (VR) and study the use of non-immersive/immersive VR recordings for shoulder surfing research. We conducted a user study (N=18) to explore the strengths and weaknesses of such a VR-based shoulder surfing research approach. Our results suggest that immersive VR observations result in a more realistic shoulder surfing experience, in a significantly higher sense of being part of the authentication environment, in a greater feeling of spatial presence, and in a higher level of involvement than 2D video observations without impacting participants’ observation performance. This suggests that studying shoulder surfing in VR is advantageous in many ways compared to currently used approaches, e.g., participants can freely choose their observation angle rather than being limited to a fixed observation angle as done in current methods. We discuss the strengths and weaknesses of using VR for shoulder surfing research and conclude with four recommendations to help researchers decide when (and when not) to employ VR for shoulder surfing research in the authentication research domain.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Given the difficulties of studying the shoulder surfing resistance of authentication systems in a live setting, researchers often ask study participants to shoulder surf authentications by watching two-dimensional (2D) video recordings of a user authenticating. How-ever, these video recordings do not provide participants with a realistic shoulder surfing experience, creating uncertainty in the value and validity of lab-based shoulder surfing experiments. In this work, we exploit the unique characteristics of virtual reality (VR) and study the use of non-immersive/immersive VR recordings for shoulder surfing research. We conducted a user study (N=18) to explore the strengths and weaknesses of such a VR-based shoulder surfing research approach. Our results suggest that immersive VR observations result in a more realistic shoulder surfing experience, in a significantly higher sense of being part of the authentication environment, in a greater feeling of spatial presence, and in a higher level of involvement than 2D video observations without impacting participants’ observation performance. This suggests that studying shoulder surfing in VR is advantageous in many ways compared to currently used approaches, e.g., participants can freely choose their observation angle rather than being limited to a fixed observation angle as done in current methods. We discuss the strengths and weaknesses of using VR for shoulder surfing research and conclude with four recommendations to help researchers decide when (and when not) to employ VR for shoulder surfing research in the authentication research domain.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Given the difficulties of studying the shoulder surfing resistance of authentication systems in a live setting, researchers often ask study participants to shoulder surf authentications by watching two-dimensional (2D) video recordings of a user authenticating. How-ever, these video recordings do not provide participants with a realistic shoulder surfing experience, creating uncertainty in the value and validity of lab-based shoulder surfing experiments. In this work, we exploit the unique characteristics of virtual reality (VR) and study the use of non-immersive/immersive VR recordings for shoulder surfing research. We conducted a user study (N=18) to explore the strengths and weaknesses of such a VR-based shoulder surfing research approach. Our results suggest that immersive VR observations result in a more realistic shoulder surfing experience, in a significantly higher sense of being part of the authentication environment, in a greater feeling of spatial presence, and in a higher level of involvement than 2D video observations without impacting participants’ observation performance. This suggests that studying shoulder surfing in VR is advantageous in many ways compared to currently used approaches, e.g., participants can freely choose their observation angle rather than being limited to a fixed observation angle as done in current methods. We discuss the strengths and weaknesses of using VR for shoulder surfing research and conclude with four recommendations to help researchers decide when (and when not) to employ VR for shoulder surfing research in the authentication research domain.",
"fno": "961700a291",
"keywords": [
"Authorisation",
"Computer Crime",
"Video Recording",
"Virtual Reality",
"Virtual Reality Observations",
"Augment Lab Based Shoulder Surfing Research",
"Shoulder Surfing Resistance",
"Two Dimensional Video Recordings",
"Realistic Shoulder Surfing Experience",
"2 D Video Recordings",
"VR Based Shoulder Shoulder Surfing Research",
"User Authentication",
"Shoulder Surf Authentications",
"Resistance",
"Privacy",
"Uncertainty",
"Three Dimensional Displays",
"Two Dimensional Displays",
"Authentication",
"Virtual Reality",
"Shoulder Surfing",
"Authentication"
],
"authors": [
{
"affiliation": "University of Glasgow",
"fullName": "Florian Mathis",
"givenName": "Florian",
"surname": "Mathis",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Glasgow",
"fullName": "Joseph O’Hagan",
"givenName": "Joseph",
"surname": "O’Hagan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Glasgow",
"fullName": "Mohamed Khamis",
"givenName": "Mohamed",
"surname": "Khamis",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Edinburgh",
"fullName": "Kami Vaniea",
"givenName": "Kami",
"surname": "Vaniea",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-03-01T00:00:00",
"pubType": "proceedings",
"pages": "291-300",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-9617-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [
{
"id": "1CJcnGSApbO",
"name": "pvr202296170-09756826s1-mm_961700a291.zip",
"size": "30.9 MB",
"location": "https://www.computer.org/csdl/api/v1/extra/pvr202296170-09756826s1-mm_961700a291.zip",
"__typename": "WebExtraType"
}
],
"adjacentArticles": {
"previous": {
"fno": "961700a281",
"articleId": "1CJbRovjGSI",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "961700a301",
"articleId": "1CJc2tCNdBu",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iscmi/2015/9819/0/9819a134",
"title": "Enhanced Virtual Password Authentication Scheme Resistant to Shoulder Surfing",
"doi": null,
"abstractUrl": "/proceedings-article/iscmi/2015/9819a134/12OmNASraAu",
"parentPublication": {
"id": "proceedings/iscmi/2015/9819/0",
"title": "2015 Second International Conference on Soft Computing and Machine Intelligence (ISCMI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ainaw/2007/2847/2/284720467",
"title": "S3PAS: A Scalable Shoulder-Surfing Resistant Textual-Graphical Password Authentication Scheme",
"doi": null,
"abstractUrl": "/proceedings-article/ainaw/2007/284720467/12OmNBOll8r",
"parentPublication": {
"id": "proceedings/ainaw/2007/2847/2",
"title": "Advanced Information Networking and Applications Workshops, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/securware/2009/3668/0/05211005",
"title": "A PIN Entry Scheme Resistant to Recording-Based Shoulder-Surfing",
"doi": null,
"abstractUrl": "/proceedings-article/securware/2009/05211005/12OmNrJiCGV",
"parentPublication": {
"id": "proceedings/securware/2009/3668/0",
"title": "2009 Third International Conference on Emerging Security Information, Systems and Technologies",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cw/2010/4215/0/4215a194",
"title": "A New Graphical Password Scheme Resistant to Shoulder-Surfing",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2010/4215a194/12OmNy5R3Em",
"parentPublication": {
"id": "proceedings/cw/2010/4215/0",
"title": "2010 International Conference on Cyberworlds",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dsn-w/2013/0181/0/06615517",
"title": "SAFE: Shoulder-surfing attack filibustered with ease",
"doi": null,
"abstractUrl": "/proceedings-article/dsn-w/2013/06615517/12OmNyr8Yik",
"parentPublication": {
"id": "proceedings/dsn-w/2013/0181/0",
"title": "2013 43rd Annual IEEE/IFIP Conference on Dependable Systems and Networks Workshop (DSN-W)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/imis/2016/0984/0/0984a514",
"title": "Invisible Secure Keypad Solution Resilient against Shoulder Surfing Attacks",
"doi": null,
"abstractUrl": "/proceedings-article/imis/2016/0984a514/12OmNzb7Zie",
"parentPublication": {
"id": "proceedings/imis/2016/0984/0",
"title": "2016 10th International Conference on Innovative Mobile and Internet Services in Ubiquitous Computing (IMIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tq/2018/02/07429722",
"title": "A Shoulder Surfing Resistant Graphical Authentication System",
"doi": null,
"abstractUrl": "/journal/tq/2018/02/07429722/13rRUEgaruc",
"parentPublication": {
"id": "trans/tq",
"title": "IEEE Transactions on Dependable and Secure Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a576",
"title": "Understanding Shoulder Surfer Behavior Using Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a576/1CJd33f4h4k",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cscloud-edgecom/2019/1661/0/166100a151",
"title": "Efficient Shoulder Surfing Resistant PIN Authentication Scheme Based on Localized Tactile Feedback",
"doi": null,
"abstractUrl": "/proceedings-article/cscloud-edgecom/2019/166100a151/1dPoFIuXiJa",
"parentPublication": {
"id": "proceedings/cscloud-edgecom/2019/1661/0",
"title": "2019 6th IEEE International Conference on Cyber Security and Cloud Computing (CSCloud)/ 2019 5th IEEE International Conference on Edge Computing and Scalable Cloud (EdgeCom)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fie/2019/1746/0/09028560",
"title": "Security and Privacy Education for STEM Undergraduates: A Shoulder Surfing Course Project",
"doi": null,
"abstractUrl": "/proceedings-article/fie/2019/09028560/1iff8TkqrTO",
"parentPublication": {
"id": "proceedings/fie/2019/1746/0",
"title": "2019 IEEE Frontiers in Education Conference (FIE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1pBMeBWXAZ2",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"acronym": "ismar-adjunct",
"groupId": "1810084",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1pBMhwWNphC",
"doi": "10.1109/ISMAR-Adjunct51615.2020.00079",
"title": "User Study on Virtual Reality for Design Reviews in Architecture",
"normalizedTitle": "User Study on Virtual Reality for Design Reviews in Architecture",
"abstract": "Virtual reality is a candidate to become the preferred interface for architectural design review, but the effectiveness and usability of such systems is still an issue. We put together a multidisciplinary team to implement a test methodology and system to compare VR with 2D interaction, with a coherent test platform using Rhinoceros as industry-standard CAD software. A direct and valid comparison of the two setups is made possible by using the same software for both conditions. We designed and modeled three similar CAD models of a 2 two-story villa (1 for the training and 2 for the test) and we implanted 13 artificial errors, simulating common CAD issues. Users were asked to find the errors in a 10 minutes fixed-time session for each setup respectively. We completed our test with 10 students from the design and architecture faculty, with proven experience of the 2D version of the CAD. We did not find any significant differences between the two modalities in cognitive workload, but the user preference was clearly towards VR. The presented work may provide interesting insights for future human-centered studies and to improve future VR architectural applications.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Virtual reality is a candidate to become the preferred interface for architectural design review, but the effectiveness and usability of such systems is still an issue. We put together a multidisciplinary team to implement a test methodology and system to compare VR with 2D interaction, with a coherent test platform using Rhinoceros as industry-standard CAD software. A direct and valid comparison of the two setups is made possible by using the same software for both conditions. We designed and modeled three similar CAD models of a 2 two-story villa (1 for the training and 2 for the test) and we implanted 13 artificial errors, simulating common CAD issues. Users were asked to find the errors in a 10 minutes fixed-time session for each setup respectively. We completed our test with 10 students from the design and architecture faculty, with proven experience of the 2D version of the CAD. We did not find any significant differences between the two modalities in cognitive workload, but the user preference was clearly towards VR. The presented work may provide interesting insights for future human-centered studies and to improve future VR architectural applications.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Virtual reality is a candidate to become the preferred interface for architectural design review, but the effectiveness and usability of such systems is still an issue. We put together a multidisciplinary team to implement a test methodology and system to compare VR with 2D interaction, with a coherent test platform using Rhinoceros as industry-standard CAD software. A direct and valid comparison of the two setups is made possible by using the same software for both conditions. We designed and modeled three similar CAD models of a 2 two-story villa (1 for the training and 2 for the test) and we implanted 13 artificial errors, simulating common CAD issues. Users were asked to find the errors in a 10 minutes fixed-time session for each setup respectively. We completed our test with 10 students from the design and architecture faculty, with proven experience of the 2D version of the CAD. We did not find any significant differences between the two modalities in cognitive workload, but the user preference was clearly towards VR. The presented work may provide interesting insights for future human-centered studies and to improve future VR architectural applications.",
"fno": "767500a283",
"keywords": [
"Architecture",
"CAD",
"Civil Engineering Computing",
"Cognition",
"Human Computer Interaction",
"Virtual Reality",
"Two Story Villa",
"Virtual Reality",
"Architectural Design Review",
"Industry Standard CAD Software",
"CAD Models",
"VR Architectural Applications",
"Architecture",
"Training",
"Solid Modeling",
"Two Dimensional Displays",
"Software",
"Usability",
"Augmented Reality",
"Virtual Reality",
"Architecture",
"Design Review",
"Evaluation",
"User Preference",
"Workload"
],
"authors": [
{
"affiliation": "Polytechnic University of Bari",
"fullName": "Michele Fiorentino",
"givenName": "Michele",
"surname": "Fiorentino",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Kassel",
"fullName": "Elisa Maria Klose",
"givenName": "Elisa Maria",
"surname": "Klose",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Polytechnic University of Bari",
"fullName": "Maria Lucia V. Alemanno",
"givenName": "Maria",
"surname": "Lucia V. Alemanno",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Polytechnic University of Bari",
"fullName": "Isabella Giordano",
"givenName": "Isabella",
"surname": "Giordano",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Polytechnic University of Bari",
"fullName": "Alessandro De Bellis",
"givenName": "Alessandro De",
"surname": "Bellis",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Polytechnic University of Bari",
"fullName": "Ilaria Cavaliere",
"givenName": "Ilaria",
"surname": "Cavaliere",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Polytechnic University of Bari",
"fullName": "Dario Costantino",
"givenName": "Dario",
"surname": "Costantino",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Polytechnic University of Bari",
"fullName": "Giuseppe Fallacara",
"givenName": "Giuseppe",
"surname": "Fallacara",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Kassel",
"fullName": "Oliver Straeter",
"givenName": "Oliver",
"surname": "Straeter",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Mindesk Inc",
"fullName": "Gabriele Sorrento",
"givenName": "Gabriele",
"surname": "Sorrento",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar-adjunct",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-11-01T00:00:00",
"pubType": "proceedings",
"pages": "283-288",
"year": "2020",
"issn": null,
"isbn": "978-1-7281-7675-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "767500a277",
"articleId": "1pBMfQFtTC8",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "767500a289",
"articleId": "1pBMiBe8hlC",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/csci/2016/5510/0/07881370",
"title": "Virtual Reality for Digital User Experience and Interactive Learning Based on User Satisfaction: A Pilot Study",
"doi": null,
"abstractUrl": "/proceedings-article/csci/2016/07881370/12OmNApcupQ",
"parentPublication": {
"id": "proceedings/csci/2016/5510/0",
"title": "2016 International Conference on Computational Science and Computational Intelligence (CSCI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2015/1727/0/07223452",
"title": "Shark punch: A virtual reality game for aquatic rehabilitation",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2015/07223452/12OmNqGA54Q",
"parentPublication": {
"id": "proceedings/vr/2015/1727/0",
"title": "2015 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2002/1492/0/14920287",
"title": "Tinmith-Hand: Unified User Interface Technology for Mobile Outdoor Augmented Reality and Indoor Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2002/14920287/12OmNqH9htu",
"parentPublication": {
"id": "proceedings/vr/2002/1492/0",
"title": "Proceedings IEEE Virtual Reality 2002",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icalt/2018/6049/0/604901a385",
"title": "Virtual Reality Learning Environments for Vocational Education: A Comparison Study with Conventional Instructional Media on Knowledge Retention",
"doi": null,
"abstractUrl": "/proceedings-article/icalt/2018/604901a385/12OmNzVoBD5",
"parentPublication": {
"id": "proceedings/icalt/2018/6049/0",
"title": "2018 IEEE 18th International Conference on Advanced Learning Technologies (ICALT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/imet/2022/7016/0/09929500",
"title": "Interactive Historical Documentary in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/imet/2022/09929500/1HYuTheBVYY",
"parentPublication": {
"id": "proceedings/imet/2022/7016/0",
"title": "2022 International Conference on Interactive Media, Smart Systems and Emerging Technologies (IMET)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vs-games/2019/4540/0/08864583",
"title": "Comparison of a Gamified and Non-Gamified Virtual Reality Training Assembly Task",
"doi": null,
"abstractUrl": "/proceedings-article/vs-games/2019/08864583/1e5ZtdBx66c",
"parentPublication": {
"id": "proceedings/vs-games/2019/4540/0",
"title": "2019 11th International Conference on Virtual Worlds and Games for Serious Applications (VS-Games)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2019/4765/0/476500a047",
"title": "A User Experience Study of Locomotion Design in Virtual Reality Between Adult and Minor Users",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2019/476500a047/1gyskG3tsTS",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2019/4765/0",
"title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/05/08998292",
"title": "Immersive Process Model Exploration in Virtual Reality",
"doi": null,
"abstractUrl": "/journal/tg/2020/05/08998292/1hpPCy1gJoI",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2020/8508/0/850800a627",
"title": "The Cognitive Load and Usability of Three Walking Metaphors for Consumer Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2020/850800a627/1pysyecdlzq",
"parentPublication": {
"id": "proceedings/ismar/2020/8508/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2021/1838/0/255600a131",
"title": "Diegetic Tool Management in a Virtual Reality Training Simulation",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2021/255600a131/1tuAgbFhYCQ",
"parentPublication": {
"id": "proceedings/vr/2021/1838/0",
"title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1rUIVdcRaJW",
"title": "2020 IEEE International Conference on Healthcare Informatics (ICHI)",
"acronym": "ichi",
"groupId": "1803080",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1rUJ2FLVdEk",
"doi": "10.1109/ICHI48887.2020.9374344",
"title": "Integrating 3D and 2D Views of Medical Image Data in Virtual Reality for Efficient Navigation",
"normalizedTitle": "Integrating 3D and 2D Views of Medical Image Data in Virtual Reality for Efficient Navigation",
"abstract": "Medical imaging techniques, e.g., CT or MRI, produce large sets of 2D and 3D data for diagnosis and operative planning. Virtual Reality (VR) could provide a more intuitive and efficient way of interacting with such data. While 3D representations are well suited to show 3D spatial relationships, 2D image slices are important to show details, i.e., both 3D and 2D are required. While researchers have investigated separated approaches for presenting 3D and 2D data in VR, we present a solution that closely integrates 3D and 2D and provides an intuitive and efficient interaction technique for navigating the information by syncing the position of a 3D cursor inside the 3D representation to matching 2D images on different axes. We conducted an empirical evaluation comparing our approach to navigating the data in 2D and found that the VR method was significantly faster, although, at the cost of significantly increased task load.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Medical imaging techniques, e.g., CT or MRI, produce large sets of 2D and 3D data for diagnosis and operative planning. Virtual Reality (VR) could provide a more intuitive and efficient way of interacting with such data. While 3D representations are well suited to show 3D spatial relationships, 2D image slices are important to show details, i.e., both 3D and 2D are required. While researchers have investigated separated approaches for presenting 3D and 2D data in VR, we present a solution that closely integrates 3D and 2D and provides an intuitive and efficient interaction technique for navigating the information by syncing the position of a 3D cursor inside the 3D representation to matching 2D images on different axes. We conducted an empirical evaluation comparing our approach to navigating the data in 2D and found that the VR method was significantly faster, although, at the cost of significantly increased task load.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Medical imaging techniques, e.g., CT or MRI, produce large sets of 2D and 3D data for diagnosis and operative planning. Virtual Reality (VR) could provide a more intuitive and efficient way of interacting with such data. While 3D representations are well suited to show 3D spatial relationships, 2D image slices are important to show details, i.e., both 3D and 2D are required. While researchers have investigated separated approaches for presenting 3D and 2D data in VR, we present a solution that closely integrates 3D and 2D and provides an intuitive and efficient interaction technique for navigating the information by syncing the position of a 3D cursor inside the 3D representation to matching 2D images on different axes. We conducted an empirical evaluation comparing our approach to navigating the data in 2D and found that the VR method was significantly faster, although, at the cost of significantly increased task load.",
"fno": "09374344",
"keywords": [
"Biomedical MRI",
"Computerised Tomography",
"Image Reconstruction",
"Medical Image Processing",
"Virtual Reality",
"2 D Image Slices",
"Interaction Technique",
"Matching 2 D Images",
"VR Method",
"Medical Image Data",
"Virtual Reality",
"3 D Spatial Relationships",
"MRI",
"CT",
"Three Dimensional Displays",
"Navigation",
"Two Dimensional Displays",
"Virtual Reality",
"Planning",
"Task Analysis",
"Medical Diagnostic Imaging",
"Medical Imaging",
"Radiology",
"Image Navigation",
"Spatial Interaction",
"Virtual Display Space"
],
"authors": [
{
"affiliation": "TU Kaiserslautern,Serious Games Engineering,Kaiserslautern,Germany",
"fullName": "Johannes Klonig",
"givenName": "Johannes",
"surname": "Klonig",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "TU Kaiserslautern,Serious Games Engineering,Kaiserslautern,Germany",
"fullName": "Marc Herrlich",
"givenName": "Marc",
"surname": "Herrlich",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ichi",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-11-01T00:00:00",
"pubType": "proceedings",
"pages": "1-7",
"year": "2020",
"issn": null,
"isbn": "978-1-7281-5382-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "09374396",
"articleId": "1rUJ0Nk7Ufm",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09374361",
"articleId": "1rUIY4mxwUo",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/3dv/2017/2610/0/261001a402",
"title": "3D Shape Induction from 2D Views of Multiple Objects",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2017/261001a402/12OmNBLdKOR",
"parentPublication": {
"id": "proceedings/3dv/2017/2610/0",
"title": "2017 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2014/3624/0/06798876",
"title": "Poster: Exploring 3D volumetric medical data using mobile devices",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2014/06798876/12OmNvAiSsV",
"parentPublication": {
"id": "proceedings/3dui/2014/3624/0",
"title": "2014 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bdva/2015/7343/0/07314295",
"title": "Hybrid-Dimensional Visualization and Interaction - Integrating 2D and 3D Visualization with Semi-Immersive Navigation Techniques",
"doi": null,
"abstractUrl": "/proceedings-article/bdva/2015/07314295/12OmNzBOi7E",
"parentPublication": {
"id": "proceedings/bdva/2015/7343/0",
"title": "2015 Big Data Visual Analytics (BDVA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sose/2017/6320/0/07943298",
"title": "A 2D and 3D Indoor Mapping Approach for Virtual Navigation Services",
"doi": null,
"abstractUrl": "/proceedings-article/sose/2017/07943298/12OmNzcxZuL",
"parentPublication": {
"id": "proceedings/sose/2017/6320/0",
"title": "2017 11th IEEE Symposium on Service-Oriented System Engineering (SOSE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2022/5325/0/532500a064",
"title": "VRContour: Bringing Contour Delineations of Medical Structures Into Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2022/532500a064/1JrRc4SdYgU",
"parentPublication": {
"id": "proceedings/ismar/2022/5325/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2019/4803/0/480300d731",
"title": "Enhancing 2D Representation via Adjacent Views for 3D Shape Retrieval",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2019/480300d731/1hVlyfuMEg0",
"parentPublication": {
"id": "proceedings/iccv/2019/4803/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2020/5608/0/09089611",
"title": "Above Surface Interaction for Multiscale Navigation in Mobile Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2020/09089611/1jIxbotiTUk",
"parentPublication": {
"id": "proceedings/vr/2020/5608/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2020/5608/0/09089663",
"title": "Slicing-Volume: Hybrid 3D/2D Multi-target Selection Technique for Dense Virtual Environments",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2020/09089663/1jIxdJFH8as",
"parentPublication": {
"id": "proceedings/vr/2020/5608/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/searis/2018/6272/0/09180231",
"title": "VD1: a technical approach to a hybrid 2D and 3D desktop environment",
"doi": null,
"abstractUrl": "/proceedings-article/searis/2018/09180231/1mK7jmn4lbO",
"parentPublication": {
"id": "proceedings/searis/2018/6272/0",
"title": "2018 IEEE 11th Workshop on Software Engineering and Architectures for Real-time Interactive Systems (SEARIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2020/8508/0/850800a713",
"title": "An In-Depth Exploration of the Effect of 2D/3D Views and Controller Types on First Person Shooter Games in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2020/850800a713/1pysuzo9dLi",
"parentPublication": {
"id": "proceedings/ismar/2020/8508/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1xPsim7PuRq",
"title": "2021 IEEE 4th International Conference on Multimedia Information Processing and Retrieval (MIPR)",
"acronym": "mipr",
"groupId": "1825825",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1xPso0QWilO",
"doi": "10.1109/MIPR51284.2021.00018",
"title": "FPX-G: First Person Exploration for Graph",
"normalizedTitle": "FPX-G: First Person Exploration for Graph",
"abstract": "Data exploration is a fundamental user task in the information seeking process. In data exploration, users have ambiguous information needs, and they traverse across the data for gathering information. In this paper, a novel data exploration system, called FPX-G, is proposed that uses virtual reality (VR) technology. VR-based data exploration (or immersive analytics) is a recent trend in data analytics, and the existing work approaches involve aggregated information in an interactive and 3D manner. However, exploration for individual pieces of data scarcely has been approached. Traditional data exploration is done on 2D displays, therefore space is limited, and there is no depth. FPX-G fully utilizes 3D space to make individual piece of data visible in the user’s line of sight. In this paper, the data structure in FPX-G is designed as a graph, and the data exploration process is modeled as graph traversal. To utilize the capability of VR, FPX-G provides a first person view-based interface from which users can look at individual pieces of data and can walk through the data (like walking in a library). In addition to the walking mechanism, to deal with limited physical space in a room, FPX-G introduces eye-tracking technology for traversing data through a graph. A simulation-based evaluation reveals that FPX-G provides a significantly efficient interface for exploring data compared with the traditional 2D interface.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Data exploration is a fundamental user task in the information seeking process. In data exploration, users have ambiguous information needs, and they traverse across the data for gathering information. In this paper, a novel data exploration system, called FPX-G, is proposed that uses virtual reality (VR) technology. VR-based data exploration (or immersive analytics) is a recent trend in data analytics, and the existing work approaches involve aggregated information in an interactive and 3D manner. However, exploration for individual pieces of data scarcely has been approached. Traditional data exploration is done on 2D displays, therefore space is limited, and there is no depth. FPX-G fully utilizes 3D space to make individual piece of data visible in the user’s line of sight. In this paper, the data structure in FPX-G is designed as a graph, and the data exploration process is modeled as graph traversal. To utilize the capability of VR, FPX-G provides a first person view-based interface from which users can look at individual pieces of data and can walk through the data (like walking in a library). In addition to the walking mechanism, to deal with limited physical space in a room, FPX-G introduces eye-tracking technology for traversing data through a graph. A simulation-based evaluation reveals that FPX-G provides a significantly efficient interface for exploring data compared with the traditional 2D interface.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Data exploration is a fundamental user task in the information seeking process. In data exploration, users have ambiguous information needs, and they traverse across the data for gathering information. In this paper, a novel data exploration system, called FPX-G, is proposed that uses virtual reality (VR) technology. VR-based data exploration (or immersive analytics) is a recent trend in data analytics, and the existing work approaches involve aggregated information in an interactive and 3D manner. However, exploration for individual pieces of data scarcely has been approached. Traditional data exploration is done on 2D displays, therefore space is limited, and there is no depth. FPX-G fully utilizes 3D space to make individual piece of data visible in the user’s line of sight. In this paper, the data structure in FPX-G is designed as a graph, and the data exploration process is modeled as graph traversal. To utilize the capability of VR, FPX-G provides a first person view-based interface from which users can look at individual pieces of data and can walk through the data (like walking in a library). In addition to the walking mechanism, to deal with limited physical space in a room, FPX-G introduces eye-tracking technology for traversing data through a graph. A simulation-based evaluation reveals that FPX-G provides a significantly efficient interface for exploring data compared with the traditional 2D interface.",
"fno": "186500a070",
"keywords": [
"Data Analysis",
"Data Structures",
"Data Visualisation",
"Information Retrieval",
"Virtual Reality",
"First Person Exploration",
"Fundamental User Task",
"Information Seeking Process",
"Ambiguous Information Needs",
"Gathering Information",
"Novel Data Exploration System",
"Called FPX G",
"Data Analytics",
"Individual Piece",
"Traditional Data Exploration",
"Data Structure",
"Data Exploration Process",
"Graph Traversal",
"Person View Based Interface",
"Legged Locomotion",
"Solid Modeling",
"Three Dimensional Displays",
"Tracking",
"Two Dimensional Displays",
"Virtual Reality",
"Information Processing",
"Graph Exploration",
"Virtual Reality",
"First Person View",
"Eye Tracking Based Input"
],
"authors": [
{
"affiliation": "Nagoya University,Nagoya,Japan",
"fullName": "Takahiro Komamizu",
"givenName": "Takahiro",
"surname": "Komamizu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Nagoya University,Nagoya,Japan",
"fullName": "Shoi Ito",
"givenName": "Shoi",
"surname": "Ito",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Nagoya University,Nagoya,Japan",
"fullName": "Yasuhiro Ogawa",
"givenName": "Yasuhiro",
"surname": "Ogawa",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Nagoya University,Nagoya,Japan",
"fullName": "Katsuhiko Toyama",
"givenName": "Katsuhiko",
"surname": "Toyama",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "mipr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-09-01T00:00:00",
"pubType": "proceedings",
"pages": "70-76",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-1865-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "186500a063",
"articleId": "1xPsjXkDspq",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "186500a077",
"articleId": "1xPslTvcEyQ",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/sc/2016/8815/0/8815a830",
"title": "G-Store: High-Performance Graph Store for Trillion-Edge Processing",
"doi": null,
"abstractUrl": "/proceedings-article/sc/2016/8815a830/12OmNxFaLks",
"parentPublication": {
"id": "proceedings/sc/2016/8815/0",
"title": "SC16: International Conference for High Performance Computing, Networking, Storage and Analysis (SC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2013/4795/0/06549349",
"title": "Visual exploration of the infinite canvas",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2013/06549349/12OmNzn38XE",
"parentPublication": {
"id": "proceedings/vr/2013/4795/0",
"title": "2013 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2018/7592/0/08699200",
"title": "Effective Free Field of View Scene Exploration in VR and AR",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699200/19F1SrRS4vK",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2018/7592/0",
"title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2022/9617/0/961700a222",
"title": "Design and Evaluation of Travel and Orientation Techniques for Desk VR",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2022/961700a222/1CJc05Lu2LS",
"parentPublication": {
"id": "proceedings/vr/2022/9617/0",
"title": "2022 IEEE on Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2022/5325/0/532500a758",
"title": "Infinite Virtual Space Exploration Using Space Tiling and Perceivable Reset at Fixed Positions",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2022/532500a758/1JrRneazFCw",
"parentPublication": {
"id": "proceedings/ismar/2022/5325/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2023/4815/0/481500a094",
"title": "An EEG-based Experiment on VR Sickness and Postural Instability While Walking in Virtual Environments",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2023/481500a094/1MNgWtYsR5S",
"parentPublication": {
"id": "proceedings/vr/2023/4815/0",
"title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798143",
"title": "Estimating Detection Thresholds for Desktop-Scale Hand Redirection in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798143/1cJ0GRxSQwM",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/05/08998292",
"title": "Immersive Process Model Exploration in Virtual Reality",
"doi": null,
"abstractUrl": "/journal/tg/2020/05/08998292/1hpPCy1gJoI",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2020/8508/0/850800a713",
"title": "An In-Depth Exploration of the Effect of 2D/3D Views and Controller Types on First Person Shooter Games in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2020/850800a713/1pysuzo9dLi",
"parentPublication": {
"id": "proceedings/ismar/2020/8508/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/09/09359481",
"title": "The Effect of Exploration Mode and Frame of Reference in Immersive Analytics",
"doi": null,
"abstractUrl": "/journal/tg/2022/09/09359481/1rlAR6130Dm",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNrMHOd6",
"title": "2016 49th Hawaii International Conference on System Sciences (HICSS)",
"acronym": "hicss",
"groupId": "1000730",
"volume": "0",
"displayVolume": "0",
"year": "2016",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNCdk2HE",
"doi": "10.1109/HICSS.2016.235",
"title": "Risk Taking in Online Crowdsourcing Tournaments",
"normalizedTitle": "Risk Taking in Online Crowdsourcing Tournaments",
"abstract": "Rankings and tournaments are often used to incentivize task completion and participation in online innovation and design contests and prediction markets. One of the main challenges for platform operators is to encourage high quality contributions and effort. In this study we illustrate that in such tournaments, the participants' ranks interfere with risk taking behavior. We present an online experiment accompanying the FIFA World Cup 2014, considering the interplay of different tournament modes (individual and team rankings), the relative rank, tournament progress, and risk taking. We find that subjects take higher risk as the tournament progresses, where this increase is stronger for subjects competing individually, compared to those competing as teams.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Rankings and tournaments are often used to incentivize task completion and participation in online innovation and design contests and prediction markets. One of the main challenges for platform operators is to encourage high quality contributions and effort. In this study we illustrate that in such tournaments, the participants' ranks interfere with risk taking behavior. We present an online experiment accompanying the FIFA World Cup 2014, considering the interplay of different tournament modes (individual and team rankings), the relative rank, tournament progress, and risk taking. We find that subjects take higher risk as the tournament progresses, where this increase is stronger for subjects competing individually, compared to those competing as teams.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Rankings and tournaments are often used to incentivize task completion and participation in online innovation and design contests and prediction markets. One of the main challenges for platform operators is to encourage high quality contributions and effort. In this study we illustrate that in such tournaments, the participants' ranks interfere with risk taking behavior. We present an online experiment accompanying the FIFA World Cup 2014, considering the interplay of different tournament modes (individual and team rankings), the relative rank, tournament progress, and risk taking. We find that subjects take higher risk as the tournament progresses, where this increase is stronger for subjects competing individually, compared to those competing as teams.",
"fno": "5670b851",
"keywords": [
"Crowdsourcing",
"Technological Innovation",
"Companies",
"Correlation",
"Incentive Schemes"
],
"authors": [
{
"affiliation": null,
"fullName": "Tim Straub",
"givenName": "Tim",
"surname": "Straub",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Timm Teubner",
"givenName": "Timm",
"surname": "Teubner",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Christof Weinhardt",
"givenName": "Christof",
"surname": "Weinhardt",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "hicss",
"isOpenAccess": true,
"showRecommendedArticles": true,
"showBuyMe": false,
"hasPdf": true,
"pubDate": "2016-01-01T00:00:00",
"pubType": "proceedings",
"pages": "1851-1860",
"year": "2016",
"issn": "1530-1605",
"isbn": "978-0-7695-5670-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "5670b841",
"articleId": "12OmNxd4ttb",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "5670b861",
"articleId": "12OmNrJAdUI",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iciii/2011/4523/3/4523c058",
"title": "Review and Prospect of Online Innovation Contest",
"doi": null,
"abstractUrl": "/proceedings-article/iciii/2011/4523c058/12OmNAi6vRH",
"parentPublication": {
"id": "proceedings/iciii/2011/4523/3",
"title": "International Conference on Information Management, Innovation Management and Industrial Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fie/2017/5920/0/08190496",
"title": "Zui Quan pedagogy: The art of risk taking in the classroom",
"doi": null,
"abstractUrl": "/proceedings-article/fie/2017/08190496/12OmNxGj9Vr",
"parentPublication": {
"id": "proceedings/fie/2017/5920/0",
"title": "2017 IEEE Frontiers in Education Conference (FIE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aiccsa/2016/4320/0/07945793",
"title": "How can crowdsourcing help in crisis situations? Missing kids case study",
"doi": null,
"abstractUrl": "/proceedings-article/aiccsa/2016/07945793/12OmNzlUKmo",
"parentPublication": {
"id": "proceedings/aiccsa/2016/4320/0",
"title": "2016 IEEE/ACS 13th International Conference of Computer Systems and Applications (AICCSA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/big-data/2017/2715/0/08258370",
"title": "Gamified crowdsourcing for disaster risk management",
"doi": null,
"abstractUrl": "/proceedings-article/big-data/2017/08258370/17D45WgziQ3",
"parentPublication": {
"id": "proceedings/big-data/2017/2715/0",
"title": "2017 IEEE International Conference on Big Data (Big Data)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tm/2019/04/08387487",
"title": "Reward or Penalty: Aligning Incentives of Stakeholders in Crowdsourcing",
"doi": null,
"abstractUrl": "/journal/tm/2019/04/08387487/189vjEcsLSg",
"parentPublication": {
"id": "trans/tm",
"title": "IEEE Transactions on Mobile Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fie/2018/1174/0/08659136",
"title": "Exploring Pedagogical Risk-Taking of Engineering Faculty",
"doi": null,
"abstractUrl": "/proceedings-article/fie/2018/08659136/18j95blNnTG",
"parentPublication": {
"id": "proceedings/fie/2018/1174/0",
"title": "2018 IEEE Frontiers in Education Conference (FIE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/sc/5555/01/09854201",
"title": "Two-stage Bilateral Online Priority Assignment in Spatio-temporal Crowdsourcing",
"doi": null,
"abstractUrl": "/journal/sc/5555/01/09854201/1FJ0FlCuU4o",
"parentPublication": {
"id": "trans/sc",
"title": "IEEE Transactions on Services Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cbi/2019/0650/1/065001a452",
"title": "How IT-Related Financial Innovation Influences Bank Risk-Taking: Results from an Empirical Analysis of Patent Applications",
"doi": null,
"abstractUrl": "/proceedings-article/cbi/2019/065001a452/1cI6tZPytfG",
"parentPublication": {
"id": "cbi/2019/0650/1",
"title": "2019 IEEE 21st Conference on Business Informatics (CBI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/nt/2020/06/09180340",
"title": "Two-Stage Game Design of Payoff Decision-Making Scheme for Crowdsourcing Dilemmas",
"doi": null,
"abstractUrl": "/journal/nt/2020/06/09180340/1mK2W78TFoA",
"parentPublication": {
"id": "trans/nt",
"title": "IEEE/ACM Transactions on Networking",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mlise/2021/1736/0/173600a229",
"title": "An Adaptive Network-Based Fuzzy Inference System to Intellectual Property Risk Assessment in Crowdsourcing Design",
"doi": null,
"abstractUrl": "/proceedings-article/mlise/2021/173600a229/1yOW67xTbMY",
"parentPublication": {
"id": "proceedings/mlise/2021/1736/0",
"title": "2021 International Conference on Machine Learning and Intelligent Systems Engineering (MLISE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNBhpS6P",
"title": "2012 International Conference on Privacy, Security, Risk and Trust (PASSAT)",
"acronym": "passat-socialcom",
"groupId": "1800612",
"volume": "0",
"displayVolume": "0",
"year": "2012",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNx5pj2h",
"doi": "10.1109/SocialCom-PASSAT.2012.29",
"title": "Social and Emotional Turn Taking for Embodied Conversational Agents",
"normalizedTitle": "Social and Emotional Turn Taking for Embodied Conversational Agents",
"abstract": "In this doctoral consortium paper I describe the theme of my research, the model-based generation of consistent emotional turn taking behavior in virtual human conversations and the evaluation of this behavior. My goal is to investigate and generate convincing social behavior in embodied conversational agents.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this doctoral consortium paper I describe the theme of my research, the model-based generation of consistent emotional turn taking behavior in virtual human conversations and the evaluation of this behavior. My goal is to investigate and generate convincing social behavior in embodied conversational agents.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this doctoral consortium paper I describe the theme of my research, the model-based generation of consistent emotional turn taking behavior in virtual human conversations and the evaluation of this behavior. My goal is to investigate and generate convincing social behavior in embodied conversational agents.",
"fno": "06406356",
"keywords": [
"Behavioural Sciences Computing",
"Multi Agent Systems",
"Model Based Generation",
"Virtual Human Conversation",
"Social Behavior",
"Embodied Conversational Agent",
"Consistent Emotional Turn Taking Behavior",
"Humans",
"Computational Modeling",
"Mirrors",
"Solid Modeling",
"Conferences",
"Educational Institutions",
"Human Computer Interaction",
"Social Interaction",
"Conversation",
"Emotion",
"Turn Taking",
"Virtual Humans",
"Human Computer Interaction"
],
"authors": [
{
"affiliation": null,
"fullName": "Merijn Bruijnes",
"givenName": "Merijn",
"surname": "Bruijnes",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "passat-socialcom",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2012-09-01T00:00:00",
"pubType": "proceedings",
"pages": "977-978",
"year": "2012",
"issn": null,
"isbn": "978-1-4673-5638-1",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "06406355",
"articleId": "12OmNCeK2cD",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "06406357",
"articleId": "12OmNxEjXWi",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ictai/2008/3440/2/3440b357",
"title": "GeoDialogue: A Software Agent Enabling Collaborative Dialogues between a User and a Conversational GIS",
"doi": null,
"abstractUrl": "/proceedings-article/ictai/2008/3440b357/12OmNAHEpD0",
"parentPublication": {
"id": "proceedings/ictai/2008/3440/2",
"title": "2008 20th IEEE International Conference on Tools with Artificial Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vlhcc/2010/8485/0/05635289",
"title": "Model-Driven Research in Human-Centric Computing",
"doi": null,
"abstractUrl": "/proceedings-article/vlhcc/2010/05635289/12OmNBTJIE0",
"parentPublication": {
"id": "proceedings/vlhcc/2010/8485/0",
"title": "2010 IEEE Symposium on Visual Languages and Human-Centric Computing (VL/HCC 2010)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2008/1971/0/04480801",
"title": "Conversational Pointing Gestures for Virtual Reality Interaction: Implications from an Empirical Study",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2008/04480801/12OmNs0C9Gl",
"parentPublication": {
"id": "proceedings/vr/2008/1971/0",
"title": "IEEE Virtual Reality 2008",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/casa/2003/1934/0/19340011",
"title": "Crafting the Illusion of Meaning: Template-Based Specification of Embodied Conversational Behavior",
"doi": null,
"abstractUrl": "/proceedings-article/casa/2003/19340011/12OmNs5rkRo",
"parentPublication": {
"id": "proceedings/casa/2003/1934/0",
"title": "Computer Animation and Social Agents, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icalt/2012/4702/0/4702a686",
"title": "Embodied Conversational Human-Machine Interface with Wearable Body Sensors for Improving Geography Teaching",
"doi": null,
"abstractUrl": "/proceedings-article/icalt/2012/4702a686/12OmNx3q6Yd",
"parentPublication": {
"id": "proceedings/icalt/2012/4702/0",
"title": "Advanced Learning Technologies, IEEE International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/acii/2013/5048/0/5048a624",
"title": "Affective Conversational Models: Interpersonal Stance in a Police Interview Context",
"doi": null,
"abstractUrl": "/proceedings-article/acii/2013/5048a624/12OmNzuZUEe",
"parentPublication": {
"id": "proceedings/acii/2013/5048/0",
"title": "2013 Humaine Association Conference on Affective Computing and Intelligent Interaction (ACII)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ta/2021/02/08700489",
"title": "Embodied Robot Models for Interdisciplinary Emotion Research",
"doi": null,
"abstractUrl": "/journal/ta/2021/02/08700489/19xNibj044E",
"parentPublication": {
"id": "trans/ta",
"title": "IEEE Transactions on Affective Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/irc/2021/3416/0/341600a043",
"title": "Integration of Scene Image and Conversational Text for Human-Robot Dialogue",
"doi": null,
"abstractUrl": "/proceedings-article/irc/2021/341600a043/1ANLMZK56q4",
"parentPublication": {
"id": "proceedings/irc/2021/3416/0",
"title": "2021 Fifth IEEE International Conference on Robotic Computing (IRC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2022/9062/0/09956536",
"title": "Detection of Free-Standing Conversational Groups with Graph Convolutional Networks",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2022/09956536/1IHpVoqkok8",
"parentPublication": {
"id": "proceedings/icpr/2022/9062/0",
"title": "2022 26th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNscfI2d",
"title": "Information Technology, Computer Engineering and Management Sciences, International Conference of",
"acronym": "icm",
"groupId": "1800613",
"volume": "2",
"displayVolume": "0",
"year": "2011",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNzwHvsL",
"doi": "10.1109/ICM.2011.133",
"title": "Behavioural Approaches to Safety Management in Underground Mines",
"normalizedTitle": "Behavioural Approaches to Safety Management in Underground Mines",
"abstract": "Mine accidents and injuries are complex and generally characterized by several factors starting from personal to technical, and technical to social characteristics. This paper is therefore sought to examine the role of behavioral factors on the occurrence of mine accidents and injuries through a case study. Data were collected from two neighboring underground coal mines operating under a large public sector organization of Inner Mongolia. High-low plots and t-test were done to explore the differences between behavioral characteristics of accident involved and noninvolved workers. How these differences could cause accidents/injuries in mines was estimated through structural equation modeling. The case study results show that accident group of workers are more job dissatisfied, negatively affected, and highly risk taking compared to the non-accident group of workers. Apart from direct influences to work injuries, negative emotions and job dissatisfaction make workers to take more risks and behave unsafely.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Mine accidents and injuries are complex and generally characterized by several factors starting from personal to technical, and technical to social characteristics. This paper is therefore sought to examine the role of behavioral factors on the occurrence of mine accidents and injuries through a case study. Data were collected from two neighboring underground coal mines operating under a large public sector organization of Inner Mongolia. High-low plots and t-test were done to explore the differences between behavioral characteristics of accident involved and noninvolved workers. How these differences could cause accidents/injuries in mines was estimated through structural equation modeling. The case study results show that accident group of workers are more job dissatisfied, negatively affected, and highly risk taking compared to the non-accident group of workers. Apart from direct influences to work injuries, negative emotions and job dissatisfaction make workers to take more risks and behave unsafely.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Mine accidents and injuries are complex and generally characterized by several factors starting from personal to technical, and technical to social characteristics. This paper is therefore sought to examine the role of behavioral factors on the occurrence of mine accidents and injuries through a case study. Data were collected from two neighboring underground coal mines operating under a large public sector organization of Inner Mongolia. High-low plots and t-test were done to explore the differences between behavioral characteristics of accident involved and noninvolved workers. How these differences could cause accidents/injuries in mines was estimated through structural equation modeling. The case study results show that accident group of workers are more job dissatisfied, negatively affected, and highly risk taking compared to the non-accident group of workers. Apart from direct influences to work injuries, negative emotions and job dissatisfaction make workers to take more risks and behave unsafely.",
"fno": "4522b324",
"keywords": [
"Safety Management",
"Behavioral Approaches",
"Occupational Injury",
"Mine"
],
"authors": [
{
"affiliation": null,
"fullName": "Zhangliang Chen",
"givenName": "Zhangliang",
"surname": "Chen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Liqiang Ma",
"givenName": "Liqiang",
"surname": "Ma",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Yufeng Sun",
"givenName": "Yufeng",
"surname": "Sun",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icm",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2011-09-01T00:00:00",
"pubType": "proceedings",
"pages": "324-327",
"year": "2011",
"issn": null,
"isbn": "978-0-7695-4522-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "4522b320",
"articleId": "12OmNwtn3uf",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "4522b328",
"articleId": "12OmNqJq4qW",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iciii/2011/4523/1/4523a448",
"title": "Game Analysis on Punishing and Preventing Unsafe Behaviors in Coal Mines",
"doi": null,
"abstractUrl": "/proceedings-article/iciii/2011/4523a448/12OmNAoDi94",
"parentPublication": {
"id": "proceedings/iciii/2011/4523/1",
"title": "International Conference on Information Management, Innovation Management and Industrial Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdma/2012/4772/0/4772a356",
"title": "Effects of Bull Bars on Head and Lower Extremity Injuries in Vehicle-Pedestrian Collision",
"doi": null,
"abstractUrl": "/proceedings-article/icdma/2012/4772a356/12OmNvk7JQh",
"parentPublication": {
"id": "proceedings/icdma/2012/4772/0",
"title": "2012 Third International Conference on Digital Manufacturing & Automation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cason/2010/4202/0/4202a452",
"title": "Wireless Mobile Monitoring System for Tram Rail Transport in Underground Coal Mine Based on WMN",
"doi": null,
"abstractUrl": "/proceedings-article/cason/2010/4202a452/12OmNwEJ0LE",
"parentPublication": {
"id": "proceedings/cason/2010/4202/0",
"title": "Computational Aspects of Social Networks, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/csci/2015/9795/0/9795a340",
"title": "Measures for the Improvement of Construction Work Accident Information Service Contents in CPMS: Focused on Analysis of Construction Work Accidents Big Data",
"doi": null,
"abstractUrl": "/proceedings-article/csci/2015/9795a340/12OmNwdL7rl",
"parentPublication": {
"id": "proceedings/csci/2015/9795/0",
"title": "2015 International Conference on Computational Science and Computational Intelligence (CSCI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icica/2014/3966/0/3966a305",
"title": "Smart Helmet Using RF and WSN Technology for Underground Mines Safety",
"doi": null,
"abstractUrl": "/proceedings-article/icica/2014/3966a305/12OmNwudQMh",
"parentPublication": {
"id": "proceedings/icica/2014/3966/0",
"title": "2014 International Conference on Intelligent Computing Applications (ICICA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icoip/2010/4252/1/4252a160",
"title": "A Study on Pedestrian Injuries Based on Minivan and Sedan Real-World Accidents",
"doi": null,
"abstractUrl": "/proceedings-article/icoip/2010/4252a160/12OmNxEjXXF",
"parentPublication": {
"id": "proceedings/icoip/2010/4252/2",
"title": "Optoelectronics and Image Processing, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmtma/2013/4932/0/4932a218",
"title": "An Investigation on Child Occupant Safety in Passenger Vehicles Based on Accident Data from Changsha, China",
"doi": null,
"abstractUrl": "/proceedings-article/icmtma/2013/4932a218/12OmNxFaLcH",
"parentPublication": {
"id": "proceedings/icmtma/2013/4932/0",
"title": "2013 Fifth International Conference on Measuring Technology and Mechatronics Automation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isdea/2010/8333/2/05743430",
"title": "Risk Assessment Based on Accident Theory in Urban Railway Transportation",
"doi": null,
"abstractUrl": "/proceedings-article/isdea/2010/05743430/12OmNxWcHlo",
"parentPublication": {
"id": "proceedings/isdea/2010/8333/2",
"title": "2010 International Conference on Intelligent System Design and Engineering Application",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/chase/2018/7206/0/720600a031",
"title": "A Wearable System for Situational Awareness Estimation in Underground Mines",
"doi": null,
"abstractUrl": "/proceedings-article/chase/2018/720600a031/181W9onlXJD",
"parentPublication": {
"id": "proceedings/chase/2018/7206/0",
"title": "2018 IEEE/ACM International Conference on Connected Health: Applications, Systems and Engineering Technologies (CHASE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/conisoft/2021/4361/0/436100a017",
"title": "Information Requirements for Ventilation Systems in Underground Mines in Mexico",
"doi": null,
"abstractUrl": "/proceedings-article/conisoft/2021/436100a017/1zHIpotBRpS",
"parentPublication": {
"id": "proceedings/conisoft/2021/4361/0",
"title": "2021 9th International Conference in Software Engineering Research and Innovation (CONISOFT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1AIMDCWTrry",
"title": "2021 IEEE/ACM Conference on Connected Health: Applications, Systems and Engineering Technologies (CHASE)",
"acronym": "chase",
"groupId": "1814404",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1AIMIxRcJag",
"doi": "10.1109/CHASE52844.2021.00013",
"title": "Detection and Analysis of Interrupted Behaviors by Public Policy Interventions during COVID-19",
"normalizedTitle": "Detection and Analysis of Interrupted Behaviors by Public Policy Interventions during COVID-19",
"abstract": "In most countries around the world, various public policies and guidelines, such as social distancing and stay-at-home orders, have been put in place to slow down the spreading of COVID-19. Relying on traditional surveys to assess policy impacts on community level behavior changes may lead to biased results, and limit fine-grained understanding of human behavior dynamics over time. We propose to leverage mobile sensing to capture people's behavior footprints amid the COVID-19 pandemic, and understand their collective behavior changes with respect to existing policies. Specifically, we propose to extract a rich set of behavioral markers from raw mobile sensing data, including mobility, social interactions, physical activities, and health states, and apply them in a generalized behavior change analysis framework to measure and detect community level behavior changes in an epidemic context. We present how to combine change point detection algorithm and interrupted time series analysis to automatically detect three different measurements of behavior changes (e.g., level, trend, and variance changes), and provide insights supported by statistical inference. A case study using a dataset that we collected from a large mobile sensing study conducted in the United States is shown to demonstrate the proposed framework and method.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In most countries around the world, various public policies and guidelines, such as social distancing and stay-at-home orders, have been put in place to slow down the spreading of COVID-19. Relying on traditional surveys to assess policy impacts on community level behavior changes may lead to biased results, and limit fine-grained understanding of human behavior dynamics over time. We propose to leverage mobile sensing to capture people's behavior footprints amid the COVID-19 pandemic, and understand their collective behavior changes with respect to existing policies. Specifically, we propose to extract a rich set of behavioral markers from raw mobile sensing data, including mobility, social interactions, physical activities, and health states, and apply them in a generalized behavior change analysis framework to measure and detect community level behavior changes in an epidemic context. We present how to combine change point detection algorithm and interrupted time series analysis to automatically detect three different measurements of behavior changes (e.g., level, trend, and variance changes), and provide insights supported by statistical inference. A case study using a dataset that we collected from a large mobile sensing study conducted in the United States is shown to demonstrate the proposed framework and method.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In most countries around the world, various public policies and guidelines, such as social distancing and stay-at-home orders, have been put in place to slow down the spreading of COVID-19. Relying on traditional surveys to assess policy impacts on community level behavior changes may lead to biased results, and limit fine-grained understanding of human behavior dynamics over time. We propose to leverage mobile sensing to capture people's behavior footprints amid the COVID-19 pandemic, and understand their collective behavior changes with respect to existing policies. Specifically, we propose to extract a rich set of behavioral markers from raw mobile sensing data, including mobility, social interactions, physical activities, and health states, and apply them in a generalized behavior change analysis framework to measure and detect community level behavior changes in an epidemic context. We present how to combine change point detection algorithm and interrupted time series analysis to automatically detect three different measurements of behavior changes (e.g., level, trend, and variance changes), and provide insights supported by statistical inference. A case study using a dataset that we collected from a large mobile sensing study conducted in the United States is shown to demonstrate the proposed framework and method.",
"fno": "396500a046",
"keywords": [
"Behavioural Sciences Computing",
"Diseases",
"Epidemics",
"Health Care",
"Medical Computing",
"Mobile Computing",
"Sensor Fusion",
"Statistical Analysis",
"Time Series",
"Public Policy Interventions",
"Community Level Behavior Changes",
"Human Behavior Dynamics",
"Mobile Sensing Data",
"COVID 19 Pandemic",
"Generalized Behavior Change Analysis Framework",
"Change Point Detection Algorithm",
"Interrupted Time Series Analysis",
"Interrupted Behavior Analysis",
"Interrupted Behavior Detection",
"Social Interactions",
"Physical Activities",
"Health States",
"Mobility",
"Statistical Inference",
"Behavioral Marker Extraction",
"COVID 19",
"Pandemics",
"Time Series Analysis",
"Human Factors",
"Market Research",
"Time Measurement",
"Social Factors",
"Behavior Change",
"Change Point Detection",
"COVID 19",
"Interrupted Time Series Analysis",
"Mobile Sensing"
],
"authors": [
{
"affiliation": "University of Virginia,Engineering Systems and Environment,Charlottesville,VA,USA,22901",
"fullName": "Guimin Dong",
"givenName": "Guimin",
"surname": "Dong",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Virginia,Engineering Systems and Environment,Charlottesville,VA,USA,22901",
"fullName": "Lihua Cai",
"givenName": "Lihua",
"surname": "Cai",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Virginia,Engineering Systems and Environment,Charlottesville,VA,USA,22901",
"fullName": "Shashwat Kumar",
"givenName": "Shashwat",
"surname": "Kumar",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Virginia,Engineering Systems and Environment,Charlottesville,VA,USA,22901",
"fullName": "Debajyoti Datta",
"givenName": "Debajyoti",
"surname": "Datta",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Virginia,Engineering Systems and Environment,Charlottesville,VA,USA,22901",
"fullName": "Laura E. Barnes",
"givenName": "Laura E.",
"surname": "Barnes",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Virginia,Engineering Systems and Environment,Charlottesville,VA,USA,22901",
"fullName": "Mehdi Boukhechba",
"givenName": "Mehdi",
"surname": "Boukhechba",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "chase",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-12-01T00:00:00",
"pubType": "proceedings",
"pages": "46-57",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-3965-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "396500a035",
"articleId": "1AIMKbpCUrm",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "396500a058",
"articleId": "1AIMGA1B8Nq",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/big-data/2021/3902/0/09671479",
"title": "An Analysis of COVID-19 Knowledge Graph Construction and Applications",
"doi": null,
"abstractUrl": "/proceedings-article/big-data/2021/09671479/1A8gYFo843m",
"parentPublication": {
"id": "proceedings/big-data/2021/3902/0",
"title": "2021 IEEE International Conference on Big Data (Big Data)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/it/2022/01/09717336",
"title": "iTrace: When IOTA Meets COVID-19 Contact Tracing",
"doi": null,
"abstractUrl": "/magazine/it/2022/01/09717336/1BaW3h0sFLW",
"parentPublication": {
"id": "mags/it",
"title": "IT Professional",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/big-data/2022/8045/0/10020497",
"title": "The relationship between Twitter sentiment and mobility during the COVID-19 pandemic",
"doi": null,
"abstractUrl": "/proceedings-article/big-data/2022/10020497/1KfRuG4QnlK",
"parentPublication": {
"id": "proceedings/big-data/2022/8045/0",
"title": "2022 IEEE International Conference on Big Data (Big Data)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/co/2023/03/10058769",
"title": "Observing Human Mobility Internationally During COVID-19",
"doi": null,
"abstractUrl": "/magazine/co/2023/03/10058769/1LdkhyVTx2E",
"parentPublication": {
"id": "mags/co",
"title": "Computer",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/co/2020/10/09206411",
"title": "Computer Education in the Age of COVID-19",
"doi": null,
"abstractUrl": "/magazine/co/2020/10/09206411/1npxLw1ejII",
"parentPublication": {
"id": "mags/co",
"title": "Computer",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/so/2021/02/09268454",
"title": "Gendered Experiences of Software Engineers During the COVID-19 Crisis",
"doi": null,
"abstractUrl": "/magazine/so/2021/02/09268454/1p1cdN4NYKk",
"parentPublication": {
"id": "mags/so",
"title": "IEEE Software",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibm/2020/6215/0/09313281",
"title": "Consumer Demand Modeling During COVID-19 Pandemic",
"doi": null,
"abstractUrl": "/proceedings-article/bibm/2020/09313281/1qmfYzauzf2",
"parentPublication": {
"id": "proceedings/bibm/2020/6215/0",
"title": "2020 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/eitt/2020/9171/0/917100a013",
"title": "An Investigation on Online Learning for K12 in Rural Areas in China during COVID-19 Pandemic",
"doi": null,
"abstractUrl": "/proceedings-article/eitt/2020/917100a013/1qyxsw2Z7So",
"parentPublication": {
"id": "proceedings/eitt/2020/9171/0",
"title": "2020 Ninth International Conference of Educational Innovation through Technology (EITT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/big-data/2020/6251/0/09378028",
"title": "Leveraging Natural Language Processing to Mine Issues on Twitter During the COVID-19 Pandemic",
"doi": null,
"abstractUrl": "/proceedings-article/big-data/2020/09378028/1s64R4Lte00",
"parentPublication": {
"id": "proceedings/big-data/2020/6251/0",
"title": "2020 IEEE International Conference on Big Data (Big Data)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/big-data/2020/6251/0/09378374",
"title": "Country-wide Mobility Changes Observed Using Mobile Phone Data During COVID-19 Pandemic",
"doi": null,
"abstractUrl": "/proceedings-article/big-data/2020/09378374/1s64i3CxhOE",
"parentPublication": {
"id": "proceedings/big-data/2020/6251/0",
"title": "2020 IEEE International Conference on Big Data (Big Data)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1KpBpkR1qBq",
"title": "2022 3rd International Conference on Education, Knowledge and Information Management (ICEKIM)",
"acronym": "icekim",
"groupId": "1841184",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1KpBZv1EgJG",
"doi": "10.1109/ICEKIM55072.2022.00046",
"title": "Effect Perception of Online and Offline Mixed Teaching among College Students during the Epidemic: A Study from the Perspective of Social Distancing",
"normalizedTitle": "Effect Perception of Online and Offline Mixed Teaching among College Students during the Epidemic: A Study from the Perspective of Social Distancing",
"abstract": "The world is currently in the normalization of epidemic prevention. In order to reduce the risk of COVID-19 transmission and maintain social distancing, colleges have deepened the development of online and offline mixed teaching. And that makes such teaching mode become a mainstream. This paper uses structural equation model (SEM) to establish the relationship structure among various influencing factors and online and offline mixed teaching. By the methods of questionnaire surveys and SPSS and AMOS data processing, the results can be concluded that \"social distancing\" has a significant and positive effect on the \"learning environment\". \"Learning environment\" has a significant and positive effect on the \"teaching quality\", \"Students' attitude\" has a significant and positive effect on the \"teaching quality\", and \"Social distancing\" plays a negative role in regulating the relationship between \"students' attitude\" and \"teaching quality\". Finally, the paper puts forward the novel suggestions for college students' online and offline mixed teaching.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The world is currently in the normalization of epidemic prevention. In order to reduce the risk of COVID-19 transmission and maintain social distancing, colleges have deepened the development of online and offline mixed teaching. And that makes such teaching mode become a mainstream. This paper uses structural equation model (SEM) to establish the relationship structure among various influencing factors and online and offline mixed teaching. By the methods of questionnaire surveys and SPSS and AMOS data processing, the results can be concluded that \"social distancing\" has a significant and positive effect on the \"learning environment\". \"Learning environment\" has a significant and positive effect on the \"teaching quality\", \"Students' attitude\" has a significant and positive effect on the \"teaching quality\", and \"Social distancing\" plays a negative role in regulating the relationship between \"students' attitude\" and \"teaching quality\". Finally, the paper puts forward the novel suggestions for college students' online and offline mixed teaching.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The world is currently in the normalization of epidemic prevention. In order to reduce the risk of COVID-19 transmission and maintain social distancing, colleges have deepened the development of online and offline mixed teaching. And that makes such teaching mode become a mainstream. This paper uses structural equation model (SEM) to establish the relationship structure among various influencing factors and online and offline mixed teaching. By the methods of questionnaire surveys and SPSS and AMOS data processing, the results can be concluded that \"social distancing\" has a significant and positive effect on the \"learning environment\". \"Learning environment\" has a significant and positive effect on the \"teaching quality\", \"Students' attitude\" has a significant and positive effect on the \"teaching quality\", and \"Social distancing\" plays a negative role in regulating the relationship between \"students' attitude\" and \"teaching quality\". Finally, the paper puts forward the novel suggestions for college students' online and offline mixed teaching.",
"fno": "166600a176",
"keywords": [
"Blended Learning",
"Diseases",
"Educational Institutions",
"Epidemics",
"Statistical Analysis",
"Teaching",
"AMOS Data Processing",
"College Students",
"COVID 19 Transmission",
"Offline Mixed Teaching",
"Social Distancing",
"SPSS",
"Student Attitude",
"Teaching Mode",
"Teaching Quality",
"COVID 19",
"Epidemics",
"Numerical Analysis",
"Education",
"Human Factors",
"Data Processing",
"Social Factors",
"Online And Offline Mixed Teaching",
"Social Distancing",
"Structural Equation Model"
],
"authors": [
{
"affiliation": "Wuhan University of Technology,School of Automation,Wuhan,China",
"fullName": "HaoHua Xia",
"givenName": "HaoHua",
"surname": "Xia",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icekim",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-01-01T00:00:00",
"pubType": "proceedings",
"pages": "176-181",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-1666-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "166600a172",
"articleId": "1KpByWEufLi",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "166600a182",
"articleId": "1KpBrpefNQs",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/ises/2021/8753/0/875300a349",
"title": "Smart Camera for Enforcing Social Distancing",
"doi": null,
"abstractUrl": "/proceedings-article/ises/2021/875300a349/1APpRZbtyr6",
"parentPublication": {
"id": "proceedings/ises/2021/8753/0",
"title": "2021 IEEE International Symposium on Smart Electronic Systems (iSES)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dasc-picom-cbdcom-cyberscitech/2021/2174/0/217400a901",
"title": "A Depth Camera-based Warning System Design for Social distancing Detection",
"doi": null,
"abstractUrl": "/proceedings-article/dasc-picom-cbdcom-cyberscitech/2021/217400a901/1BLntm30oms",
"parentPublication": {
"id": "proceedings/dasc-picom-cbdcom-cyberscitech/2021/2174/0",
"title": "2021 IEEE Intl Conf on Dependable, Autonomic and Secure Computing, Intl Conf on Pervasive Intelligence and Computing, Intl Conf on Cloud and Big Data Computing, Intl Conf on Cyber Science and Technology Congress (DASC/PiCom/CBDCom/CyberSciTech)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icise-ie/2021/3829/0/382900a201",
"title": "Exploration and Practice of Online-Offline Mixed Teaching Mode Based on Internet + Intelligent Technology in Post- epidemic Period : — Take the course of phytogeography as an example",
"doi": null,
"abstractUrl": "/proceedings-article/icise-ie/2021/382900a201/1C8GMP5MvJK",
"parentPublication": {
"id": "proceedings/icise-ie/2021/3829/0",
"title": "2021 2nd International Conference on Information Science and Education (ICISE-IE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/csci/2021/5841/0/584100b066",
"title": "Effects of Social Distancing Intention, Affective Risk Perception, and Cabin Fever Syndrome on Perceived Value of E-learning : Type of submission: Late Breaking Paper / Most relevant symposium: CSCI-ISED",
"doi": null,
"abstractUrl": "/proceedings-article/csci/2021/584100b066/1EpLwSMG9H2",
"parentPublication": {
"id": "proceedings/csci/2021/5841/0",
"title": "2021 International Conference on Computational Science and Computational Intelligence (CSCI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iciscet/2022/6044/0/604400a200",
"title": "Research and Practice of Curriculum Teaching Reform in Colleges and Universities in Post-epidemic Era",
"doi": null,
"abstractUrl": "/proceedings-article/iciscet/2022/604400a200/1HbbUUbbgNa",
"parentPublication": {
"id": "proceedings/iciscet/2022/6044/0",
"title": "2022 International Conference on Information System, Computing and Educational Technology (ICISCET)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ai4i/2020/8701/0/870100a066",
"title": "A Queue Management Approach for Social Distancing and Contact Tracing",
"doi": null,
"abstractUrl": "/proceedings-article/ai4i/2020/870100a066/1oJ0rfi1zPy",
"parentPublication": {
"id": "proceedings/ai4i/2020/8701/0",
"title": "2020 Third International Conference on Artificial Intelligence for Industries (AI4I)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/eitt/2020/9171/0/917100a024",
"title": "Analysis of the college students' online learning status and implementation strategies during the epidemic",
"doi": null,
"abstractUrl": "/proceedings-article/eitt/2020/917100a024/1qyxpVO8Bkk",
"parentPublication": {
"id": "proceedings/eitt/2020/9171/0",
"title": "2020 Ninth International Conference of Educational Innovation through Technology (EITT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cste/2021/2663/0/266300a014",
"title": "Research and Practice of Hybrid Mixed Teaching Mode in COVID - 19 Times",
"doi": null,
"abstractUrl": "/proceedings-article/cste/2021/266300a014/1wG5QxdZ72w",
"parentPublication": {
"id": "proceedings/cste/2021/2663/0",
"title": "2021 3rd International Conference on Computer Science and Technologies in Education (CSTE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/eimss/2021/2707/0/270700a141",
"title": "Research on Online Teaching Quality during the Epidemic Period in Chinese Universities-Based on Student Feedback of English Course in Glasgow College, UESTC",
"doi": null,
"abstractUrl": "/proceedings-article/eimss/2021/270700a141/1yEZRDZFGow",
"parentPublication": {
"id": "proceedings/eimss/2021/2707/0",
"title": "2021 International Conference on Education, Information Management and Service Science (EIMSS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fie/2021/3851/0/09637196",
"title": "A Trans-regional Online and Offline Fusion Lab Teaching Practice Through Cross-university Cooperation",
"doi": null,
"abstractUrl": "/proceedings-article/fie/2021/09637196/1zuwmKBB6Fi",
"parentPublication": {
"id": "proceedings/fie/2021/3851/0",
"title": "2021 IEEE Frontiers in Education Conference (FIE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1yBEZe3hqyQ",
"title": "2021 International Conference on Cyberworlds (CW)",
"acronym": "cw",
"groupId": "1000175",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1yBF0L6Dd8k",
"doi": "10.1109/CW52790.2021.00038",
"title": "Entering a new Dimension in Virtual Reality Research: An Overview of Existing Toolkits, their Features and Challenges",
"normalizedTitle": "Entering a new Dimension in Virtual Reality Research: An Overview of Existing Toolkits, their Features and Challenges",
"abstract": "Virtual reality becomes a medium to be explored for itself, to study human factors and human behavior within these worlds, and to infer possible behavior in the real world. Among many advantages, building test routines in virtual environments remains a challenge due to the lack of established procedures and toolkits. To encourage research in this direction and lower the barrier to entry, it is necessary to simplify the process of setting up a research environment in virtual reality by providing appropriate toolkits. This paper discusses what challenges need to be overcome, what features might be relevant, and compares available toolkits.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Virtual reality becomes a medium to be explored for itself, to study human factors and human behavior within these worlds, and to infer possible behavior in the real world. Among many advantages, building test routines in virtual environments remains a challenge due to the lack of established procedures and toolkits. To encourage research in this direction and lower the barrier to entry, it is necessary to simplify the process of setting up a research environment in virtual reality by providing appropriate toolkits. This paper discusses what challenges need to be overcome, what features might be relevant, and compares available toolkits.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Virtual reality becomes a medium to be explored for itself, to study human factors and human behavior within these worlds, and to infer possible behavior in the real world. Among many advantages, building test routines in virtual environments remains a challenge due to the lack of established procedures and toolkits. To encourage research in this direction and lower the barrier to entry, it is necessary to simplify the process of setting up a research environment in virtual reality by providing appropriate toolkits. This paper discusses what challenges need to be overcome, what features might be relevant, and compares available toolkits.",
"fno": "406500a180",
"keywords": [
"Human Factors",
"Virtual Reality",
"Appropriate Toolkits",
"Virtual Reality Research",
"Human Factors",
"Human Behavior",
"Buildings",
"Virtual Environments",
"Human Factors",
"Research Tools",
"Virtual Reality",
"Human Factors",
"Human Behavior"
],
"authors": [
{
"affiliation": "Karlsruhe University of Applied Sciences,Faculty of Computer Science and Business Information Systems,Karlsruhe,Germany",
"fullName": "Matthias Wölfel",
"givenName": "Matthias",
"surname": "Wölfel",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Karlsruhe University of Applied Sciences,Faculty of Computer Science and Business Information Systems,Karlsruhe,Germany",
"fullName": "Daniel Hepperle",
"givenName": "Daniel",
"surname": "Hepperle",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Karlsruhe University of Applied Sciences,Faculty of Computer Science and Business Information Systems,Karlsruhe,Germany",
"fullName": "Christian Felix Purps",
"givenName": "Christian Felix",
"surname": "Purps",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Karlsruhe University of Applied Sciences,Faculty of Computer Science and Business Information Systems,Karlsruhe,Germany",
"fullName": "Jonas Deuchler",
"givenName": "Jonas",
"surname": "Deuchler",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Karlsruhe University of Applied Sciences,Faculty of Computer Science and Business Information Systems,Karlsruhe,Germany",
"fullName": "Wladimir Hettmann",
"givenName": "Wladimir",
"surname": "Hettmann",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-09-01T00:00:00",
"pubType": "proceedings",
"pages": "180-187",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-4065-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "406500a174",
"articleId": "1yBF4BU6MBq",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "406500a188",
"articleId": "1yBF5P8nrXy",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iccnea/2017/3981/0/3981a143",
"title": "Application Research of Virtual 3D Animation Technology in the Design of Human Computer Interface",
"doi": null,
"abstractUrl": "/proceedings-article/iccnea/2017/3981a143/12OmNyGbIjP",
"parentPublication": {
"id": "proceedings/iccnea/2017/3981/0",
"title": "2017 International Conference on Computer Network, Electronic and Automation (ICCNEA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrais/1995/7084/0/70840028",
"title": "Realizing the full potential of virtual reality: human factors issues that could stand in the way",
"doi": null,
"abstractUrl": "/proceedings-article/vrais/1995/70840028/12OmNylKB6n",
"parentPublication": {
"id": "proceedings/vrais/1995/7084/0",
"title": "Virtual Reality Annual International Symposium",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wetice/2000/0798/0/07980012",
"title": "CVE Technology Development Based on Real World Application and User Needs",
"doi": null,
"abstractUrl": "/proceedings-article/wetice/2000/07980012/12OmNzayNCO",
"parentPublication": {
"id": "proceedings/wetice/2000/0798/0",
"title": "Proceedings IEEE 9th International Workshops on Enabling Technologies: Infrastructure for Collaborative Enterprises (WET ICE 2000)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a845",
"title": "Virtual Reality Sonification Training System Can Improve a Novice's Forehand Return of Serve in Tennis",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a845/1J7Wlv8mvKM",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2022/5325/0/532500a141",
"title": "Petting a cat helps you incarnate the avatar: Influence of the emotions over embodiment in VR",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2022/532500a141/1JrRepqALbW",
"parentPublication": {
"id": "proceedings/ismar/2022/5325/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798220",
"title": "RetroTracker: Upgrading Existing Virtual Reality Tracking Systems",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798220/1cJ0G4IStqM",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/05/08998361",
"title": "Presence, Mixed Reality, and Risk-Taking Behavior: A Study in Safety Interventions",
"doi": null,
"abstractUrl": "/journal/tg/2020/05/08998361/1hpPBmpcsXm",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2020/5608/0/09089452",
"title": "Real and Virtual Environment Mismatching Induces Arousal and Alters Movement Behavior",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2020/09089452/1jIxcobDHi0",
"parentPublication": {
"id": "proceedings/vr/2020/5608/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090623",
"title": "Using Screen Capture Video to Understand Learning in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090623/1jIxukBU3g4",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aivr/2020/7463/0/746300a068",
"title": "Verbal Mimicry Predicts Social Distance and Social Attraction to an Outgroup Member in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/aivr/2020/746300a068/1qpzC44fheg",
"parentPublication": {
"id": "proceedings/aivr/2020/7463/0",
"title": "2020 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1G9DtzCwrjW",
"title": "2022 IEEE International Conference on Multimedia and Expo (ICME)",
"acronym": "icme",
"groupId": "1000477",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1G9EkarxxgQ",
"doi": "10.1109/ICME52920.2022.9859870",
"title": "Feature-Guided Blind Face Restoration with GAN Prior",
"normalizedTitle": "Feature-Guided Blind Face Restoration with GAN Prior",
"abstract": "Blind face restoration (BFR) aims to restore high-quality face images from inputs with complex degradation, which is key to extensive applications. Existing methods usually learn a black-box mapping to achieve the goal, which however often produce over-smoothed results. This work proposes a novel feature-guided framework via leveraging prior from a pre-trained Generative Adversarial Network (GAN) model to recover reasonable textures. Furthermore, we design a feature fusion module to guide the generator with low-level spatial content information in the degraded input, for the sake of holding the consistency on both facial structure and background details. The proposed method can be seamlessly integrated with a learned GAN through a simple yet effective principle to recover realistic results under complex degradation circumstances. Extensive comparisons demonstrate the superiority of our strategy over other state-of-the-art methods in terms of restoration quality and training cost.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Blind face restoration (BFR) aims to restore high-quality face images from inputs with complex degradation, which is key to extensive applications. Existing methods usually learn a black-box mapping to achieve the goal, which however often produce over-smoothed results. This work proposes a novel feature-guided framework via leveraging prior from a pre-trained Generative Adversarial Network (GAN) model to recover reasonable textures. Furthermore, we design a feature fusion module to guide the generator with low-level spatial content information in the degraded input, for the sake of holding the consistency on both facial structure and background details. The proposed method can be seamlessly integrated with a learned GAN through a simple yet effective principle to recover realistic results under complex degradation circumstances. Extensive comparisons demonstrate the superiority of our strategy over other state-of-the-art methods in terms of restoration quality and training cost.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Blind face restoration (BFR) aims to restore high-quality face images from inputs with complex degradation, which is key to extensive applications. Existing methods usually learn a black-box mapping to achieve the goal, which however often produce over-smoothed results. This work proposes a novel feature-guided framework via leveraging prior from a pre-trained Generative Adversarial Network (GAN) model to recover reasonable textures. Furthermore, we design a feature fusion module to guide the generator with low-level spatial content information in the degraded input, for the sake of holding the consistency on both facial structure and background details. The proposed method can be seamlessly integrated with a learned GAN through a simple yet effective principle to recover realistic results under complex degradation circumstances. Extensive comparisons demonstrate the superiority of our strategy over other state-of-the-art methods in terms of restoration quality and training cost.",
"fno": "09859870",
"keywords": [
"Face Recognition",
"Feature Extraction",
"Image Representation",
"Image Restoration",
"Learning Artificial Intelligence",
"Feature Guided Blind Face Restoration",
"GAN Prior",
"High Quality Face Images",
"Black Box Mapping",
"Novel Feature Guided Framework",
"Pre Trained Generative Adversarial Network Model",
"Feature Fusion Module",
"Low Level Spatial Content Information",
"Degraded Input",
"Facial Structure",
"Background Details",
"Learned GAN",
"Complex Degradation Circumstances",
"Restoration Quality",
"Training Cost",
"Degradation",
"Training",
"Costs",
"Generative Adversarial Networks",
"Generators",
"Image Restoration",
"Faces",
"Blind Face Restoration",
"Feature Guidance",
"Feature Fusion",
"GAN Prior"
],
"authors": [
{
"affiliation": "College of Intelligence and Computing, Tianjin University,Tianjin,China",
"fullName": "Zhengzhang Hou",
"givenName": "Zhengzhang",
"surname": "Hou",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "College of Intelligence and Computing, Tianjin University,Tianjin,China",
"fullName": "Liang Li",
"givenName": "Liang",
"surname": "Li",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "College of Intelligence and Computing, Tianjin University,Tianjin,China",
"fullName": "Xiaojie Guo",
"givenName": "Xiaojie",
"surname": "Guo",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icme",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-07-01T00:00:00",
"pubType": "proceedings",
"pages": "1-6",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-8563-0",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "09859746",
"articleId": "1G9DO06Irn2",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09859906",
"articleId": "1G9DNDucpGg",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iccv/2011/1101/0/0459",
"title": "Close the loop: Joint blind image restoration and recognition with sparse representation prior",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2011/0459/12OmNrJRPmh",
"parentPublication": {
"id": "proceedings/iccv/2011/1101/0",
"title": "2011 IEEE International Conference on Computer Vision (ICCV 2011)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/navcomp/2014/1899/0/1899a018",
"title": "Underwater Single Image Restoration Using Dark Channel Prior",
"doi": null,
"abstractUrl": "/proceedings-article/navcomp/2014/1899a018/12OmNsdo6v1",
"parentPublication": {
"id": "proceedings/navcomp/2014/1899/0",
"title": "2014 Symposium on Automation and Computation for Naval, Offshore and Subsea (NAVCOMP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2022/8739/0/873900a666",
"title": "Unpaired Face Restoration via Learnable Cross-Quality Shift",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2022/873900a666/1G56Pg7qwSc",
"parentPublication": {
"id": "proceedings/cvprw/2022/8739/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2023/05/09921338",
"title": "Learning Dual Memory Dictionaries for Blind Face Restoration",
"doi": null,
"abstractUrl": "/journal/tp/2023/05/09921338/1HxShKI7cEo",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2021/07/08968618",
"title": "Physics-Based Generative Adversarial Models for Image Restoration and Beyond",
"doi": null,
"abstractUrl": "/journal/tp/2021/07/08968618/1gQYv5StsRy",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fg/2020/3079/0/307900a024",
"title": "Dual-Attention GAN for Large-Pose Face Frontalization",
"doi": null,
"abstractUrl": "/proceedings-article/fg/2020/307900a024/1kecHPwIBLa",
"parentPublication": {
"id": "proceedings/fg/2020/3079/0/",
"title": "2020 15th IEEE International Conference on Automatic Face and Gesture Recognition (FG 2020) (FG)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fg/2020/3079/0/307900a085",
"title": "ATFaceGAN: Single Face Image Restoration and Recognition from Atmospheric Turbulence",
"doi": null,
"abstractUrl": "/proceedings-article/fg/2020/307900a085/1kecHT7g7JK",
"parentPublication": {
"id": "proceedings/fg/2020/3079/0/",
"title": "2020 15th IEEE International Conference on Automatic Face and Gesture Recognition (FG 2020) (FG)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2022/11/09547753",
"title": "Exploiting Deep Generative Prior for Versatile Image Restoration and Manipulation",
"doi": null,
"abstractUrl": "/journal/tp/2022/11/09547753/1x9Tvtcuj5e",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2021/4899/0/489900a294",
"title": "Toward Interactive Modulation for Photo-Realistic Image Restoration",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2021/489900a294/1yVzVvE7Ipq",
"parentPublication": {
"id": "proceedings/cvprw/2021/4899/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900a672",
"title": "GAN Prior Embedded Network for Blind Face Restoration in the Wild",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900a672/1yeIXi2vdh6",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1H1gVMlkl32",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"acronym": "cvpr",
"groupId": "1000147",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1H1kORDwthu",
"doi": "10.1109/CVPR52688.2022.01099",
"title": "DO-GAN: A Double Oracle Framework for Generative Adversarial Networks",
"normalizedTitle": "DO-GAN: A Double Oracle Framework for Generative Adversarial Networks",
"abstract": "In this paper, we propose a new approach to train Generative Adversarial Networks (GANs) where we deploy a double-oracle framework using the generator and discriminator oracles. GAN is essentially a two-player zero-sum game between the generator and the discriminator. Training GANs is challenging as a pure Nash equilibrium may not exist and even finding the mixed Nash equilibrium is difficult as GANs have a large-scale strategy space. In DO-GAN, we extend the double oracle framework to GANs. We first generalize the players' strategies as the trained models of generator and discriminator from the best response oracles. We then compute the meta-strategies using a linear program. For scalability of the framework where multiple generators and discriminator best responses are stored in the memory, we propose two solutions: 1) pruning the weakly-dominated players' strategies to keep the oracles from becoming intractable; 2) applying continual learning to retain the previous knowledge of the networks. We apply our framework to established GAN architectures such as vanilla GAN, Deep Convolutional GAN, Spectral Normalization GAN and Stacked GAN. Finally, we conduct experiments on MNIST, CIFAR-10 and CelebA datasets and show that DO-GAN variants have significant improvements in both subjective qualitative evaluation and quantitative metrics, compared with their respective GAN architectures.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this paper, we propose a new approach to train Generative Adversarial Networks (GANs) where we deploy a double-oracle framework using the generator and discriminator oracles. GAN is essentially a two-player zero-sum game between the generator and the discriminator. Training GANs is challenging as a pure Nash equilibrium may not exist and even finding the mixed Nash equilibrium is difficult as GANs have a large-scale strategy space. In DO-GAN, we extend the double oracle framework to GANs. We first generalize the players' strategies as the trained models of generator and discriminator from the best response oracles. We then compute the meta-strategies using a linear program. For scalability of the framework where multiple generators and discriminator best responses are stored in the memory, we propose two solutions: 1) pruning the weakly-dominated players' strategies to keep the oracles from becoming intractable; 2) applying continual learning to retain the previous knowledge of the networks. We apply our framework to established GAN architectures such as vanilla GAN, Deep Convolutional GAN, Spectral Normalization GAN and Stacked GAN. Finally, we conduct experiments on MNIST, CIFAR-10 and CelebA datasets and show that DO-GAN variants have significant improvements in both subjective qualitative evaluation and quantitative metrics, compared with their respective GAN architectures.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this paper, we propose a new approach to train Generative Adversarial Networks (GANs) where we deploy a double-oracle framework using the generator and discriminator oracles. GAN is essentially a two-player zero-sum game between the generator and the discriminator. Training GANs is challenging as a pure Nash equilibrium may not exist and even finding the mixed Nash equilibrium is difficult as GANs have a large-scale strategy space. In DO-GAN, we extend the double oracle framework to GANs. We first generalize the players' strategies as the trained models of generator and discriminator from the best response oracles. We then compute the meta-strategies using a linear program. For scalability of the framework where multiple generators and discriminator best responses are stored in the memory, we propose two solutions: 1) pruning the weakly-dominated players' strategies to keep the oracles from becoming intractable; 2) applying continual learning to retain the previous knowledge of the networks. We apply our framework to established GAN architectures such as vanilla GAN, Deep Convolutional GAN, Spectral Normalization GAN and Stacked GAN. Finally, we conduct experiments on MNIST, CIFAR-10 and CelebA datasets and show that DO-GAN variants have significant improvements in both subjective qualitative evaluation and quantitative metrics, compared with their respective GAN architectures.",
"fno": "694600l1265",
"keywords": [
"Convolutional Neural Nets",
"Deep Learning Artificial Intelligence",
"Face Recognition",
"Game Theory",
"Linear Programming",
"Two Player Zero Sum Game",
"Pure Nash Equilibrium",
"Mixed Nash Equilibrium",
"Large Scale Strategy Space",
"Double Oracle Framework",
"Vanilla GAN",
"Deep Convolutional GAN",
"Spectral Normalization GAN",
"Stacked GAN",
"DO GAN Variants",
"Generative Adversarial Networks",
"Double Oracle Framework",
"Discriminator Oracles",
"Generator Oracles",
"Linear Program",
"Weakly Dominated Player Strategies",
"Continual Learning",
"MNIST Dataset",
"CIFAR 10 Dataset",
"Celeb A Dataset",
"Measurement",
"Training",
"Scalability",
"Optimization Methods",
"Computer Architecture",
"Games",
"Generative Adversarial Networks"
],
"authors": [
{
"affiliation": "Nanyang Technological University,School of Computer Science and Engineering,Singapore",
"fullName": "Aye Phyu Phyu Aung",
"givenName": "Aye Phyu",
"surname": "Phyu Aung",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Nanyang Technological University,School of Computer Science and Engineering,Singapore",
"fullName": "Xinrun Wang",
"givenName": "Xinrun",
"surname": "Wang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Nanyang Technological University,School of Computer Science and Engineering,Singapore",
"fullName": "Runsheng Yu",
"givenName": "Runsheng",
"surname": "Yu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Nanyang Technological University,School of Computer Science and Engineering,Singapore",
"fullName": "Bo An",
"givenName": "Bo",
"surname": "An",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Institute for Infocomm Research, A *STAR,Singapore",
"fullName": "Senthilnath Jayavelu",
"givenName": "Senthilnath",
"surname": "Jayavelu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Nanyang Technological University,School of Computer Science and Engineering,Singapore",
"fullName": "Xiaoli Li",
"givenName": "Xiaoli",
"surname": "Li",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-06-01T00:00:00",
"pubType": "proceedings",
"pages": "11265-11274",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-6946-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [
{
"id": "1H1kOEzYuju",
"name": "pcvpr202269460-09878743s1-mm_694600l1265.zip",
"size": "13.6 MB",
"location": "https://www.computer.org/csdl/api/v1/extra/pcvpr202269460-09878743s1-mm_694600l1265.zip",
"__typename": "WebExtraType"
}
],
"adjacentArticles": {
"previous": {
"fno": "694600l1255",
"articleId": "1H1jst5lvJm",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "694600l1275",
"articleId": "1H1iMYhIV7W",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/spw/2018/8276/0/634901a089",
"title": "Detecting Deceptive Reviews Using Generative Adversarial Networks",
"doi": null,
"abstractUrl": "/proceedings-article/spw/2018/634901a089/12UTFwbJKOG",
"parentPublication": {
"id": "proceedings/spw/2018/8276/0",
"title": "2018 IEEE Security and Privacy Workshops (SPW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2018/6420/0/642000i513",
"title": "Multi-agent Diverse Generative Adversarial Networks",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2018/642000i513/17D45WXIkDV",
"parentPublication": {
"id": "proceedings/cvpr/2018/6420/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2018/6420/0/642000j455",
"title": "ST-GAN: Spatial Transformer Generative Adversarial Networks for Image Compositing",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2018/642000j455/17D45XacGkf",
"parentPublication": {
"id": "proceedings/cvpr/2018/6420/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2018/6420/0/642000a821",
"title": "FaceID-GAN: Learning a Symmetry Three-Player GAN for Identity-Preserving Face Synthesis",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2018/642000a821/17D45Xh13pk",
"parentPublication": {
"id": "proceedings/cvpr/2018/6420/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tq/5555/01/10012320",
"title": "RDP-GAN: A Rényi-Differential Privacy Based Generative Adversarial Network",
"doi": null,
"abstractUrl": "/journal/tq/5555/01/10012320/1JNmQL4l7lm",
"parentPublication": {
"id": "trans/tq",
"title": "IEEE Transactions on Dependable and Secure Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdar/2019/3014/0/301400a178",
"title": "TH-GAN: Generative Adversarial Network Based Transfer Learning for Historical Chinese Character Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/icdar/2019/301400a178/1h81u6jDzSE",
"parentPublication": {
"id": "proceedings/icdar/2019/3014/0",
"title": "2019 International Conference on Document Analysis and Recognition (ICDAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2020/6553/0/09093525",
"title": "FX-GAN: Self-Supervised GAN Learning via Feature Exchange",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2020/09093525/1jPbxvOsk6s",
"parentPublication": {
"id": "proceedings/wacv/2020/6553/0",
"title": "2020 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2020/1331/0/09102779",
"title": "A Multi-Player Minimax Game for Generative Adversarial Networks",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2020/09102779/1kwr6BsKnRu",
"parentPublication": {
"id": "proceedings/icme/2020/1331/0",
"title": "2020 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800h796",
"title": "MSG-GAN: Multi-Scale Gradients for Generative Adversarial Networks",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800h796/1m3oneHfgTS",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2020/7168/0/716800i382",
"title": "On Positive-Unlabeled Classification in GAN",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2020/716800i382/1m3or4aYmUo",
"parentPublication": {
"id": "proceedings/cvpr/2020/7168/0",
"title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1gyr6w5YIIU",
"title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"acronym": "cvpr",
"groupId": "1000147",
"volume": "0",
"displayVolume": "0",
"year": "2019",
"__typename": "ProceedingType"
},
"article": {
"id": "1gyriUTXkcM",
"doi": "10.1109/CVPR.2019.01174",
"title": "R²GAN: Cross-Modal Recipe Retrieval With Generative Adversarial Network",
"normalizedTitle": "R²GAN: Cross-Modal Recipe Retrieval With Generative Adversarial Network",
"abstract": "Representing procedure text such as recipe for crossmodal retrieval is inherently a difficult problem, not mentioning to generate image from recipe for visualization. This paper studies a new version of GAN, named Recipe Retrieval Generative Adversarial Network (R2GAN), to explore the feasibility of generating image from procedure text for retrieval problem. The motivation of using GAN is twofold: learning compatible cross-modal features in an adversarial way, and explanation of search results by showing the images generated from recipes. The novelty of R2GAN comes from architecture design, specifically a GAN with one generator and dual discriminators is used, which makes the generation of image from recipe a feasible idea. Furthermore, empowered by the generated images, a two-level ranking loss in both embedding and image spaces are considered. These add-ons not only result in excellent retrieval performance, but also generate close-to-realistic food images useful for explaining ranking of recipes. On recipe1M dataset, R2GAN demonstrates high scalability to data size, outperforms all the existing approaches, and generates images intuitive for human to interpret the search results.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Representing procedure text such as recipe for crossmodal retrieval is inherently a difficult problem, not mentioning to generate image from recipe for visualization. This paper studies a new version of GAN, named Recipe Retrieval Generative Adversarial Network (R2GAN), to explore the feasibility of generating image from procedure text for retrieval problem. The motivation of using GAN is twofold: learning compatible cross-modal features in an adversarial way, and explanation of search results by showing the images generated from recipes. The novelty of R2GAN comes from architecture design, specifically a GAN with one generator and dual discriminators is used, which makes the generation of image from recipe a feasible idea. Furthermore, empowered by the generated images, a two-level ranking loss in both embedding and image spaces are considered. These add-ons not only result in excellent retrieval performance, but also generate close-to-realistic food images useful for explaining ranking of recipes. On recipe1M dataset, R2GAN demonstrates high scalability to data size, outperforms all the existing approaches, and generates images intuitive for human to interpret the search results.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Representing procedure text such as recipe for crossmodal retrieval is inherently a difficult problem, not mentioning to generate image from recipe for visualization. This paper studies a new version of GAN, named Recipe Retrieval Generative Adversarial Network (R2GAN), to explore the feasibility of generating image from procedure text for retrieval problem. The motivation of using GAN is twofold: learning compatible cross-modal features in an adversarial way, and explanation of search results by showing the images generated from recipes. The novelty of R2GAN comes from architecture design, specifically a GAN with one generator and dual discriminators is used, which makes the generation of image from recipe a feasible idea. Furthermore, empowered by the generated images, a two-level ranking loss in both embedding and image spaces are considered. These add-ons not only result in excellent retrieval performance, but also generate close-to-realistic food images useful for explaining ranking of recipes. On recipe1M dataset, R2GAN demonstrates high scalability to data size, outperforms all the existing approaches, and generates images intuitive for human to interpret the search results.",
"fno": "329300l1469",
"keywords": [
"Feature Extraction",
"Information Retrieval",
"Learning Artificial Intelligence",
"Neural Nets",
"Text Analysis",
"Recipe 1 M Dataset",
"R 2 GAN",
"2 GAN",
"Procedure Text",
"Crossmodal Retrieval",
"Image Generation",
"Retrieval Problem",
"Cross Modal Features",
"Close To Realistic Food Images",
"Recipe Retrieval Generative Adversarial Network",
"Visualization",
"Image Resolution",
"Image Synthesis",
"Scalability",
"Network Architecture",
"Generative Adversarial Networks",
"Robustness",
"Recognition Detection",
"Categorization",
"Retrieval",
"Image And Video Synthesis",
"Representation Learning",
"Vision Language"
],
"authors": [
{
"affiliation": "City Univ. of Hong Kong",
"fullName": "Bin Zhu",
"givenName": "Bin",
"surname": "Zhu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "City Univ. of Hong Kong",
"fullName": "Chong-Wah Ngo",
"givenName": "Chong-Wah",
"surname": "Ngo",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "City Univ. of Hong Kong",
"fullName": "Jingjing Chen",
"givenName": "Jingjing",
"surname": "Chen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "City Univ. of Hong Kong",
"fullName": "Yanbin Hao",
"givenName": "Yanbin",
"surname": "Hao",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2019-06-01T00:00:00",
"pubType": "proceedings",
"pages": "11469-11478",
"year": "2019",
"issn": null,
"isbn": "978-1-7281-3293-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "329300l1459",
"articleId": "1gyrEAHR4Vq",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "329300l1479",
"articleId": "1gys9dHa3VC",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iccv/2021/2812/0/281200o4041",
"title": "Omni-GAN: On the Secrets of cGANs and Beyond",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200o4041/1BmLcpOiAXS",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iwecai/2022/7997/0/799700a281",
"title": "Mask Removal Algorithm Using GAN Based Models",
"doi": null,
"abstractUrl": "/proceedings-article/iwecai/2022/799700a281/1CugwO4Xbfa",
"parentPublication": {
"id": "proceedings/iwecai/2022/7997/0",
"title": "2022 3rd International Conference on Electronic Communication and Artificial Intelligence (IWECAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2023/03/09793670",
"title": "Learning Structural Representations for Recipe Generation and Food Retrieval",
"doi": null,
"abstractUrl": "/journal/tp/2023/03/09793670/1E5LBXy2kOA",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdh/2022/5478/0/547800a050",
"title": "VCL-GAN: A Variational Contrastive Learning Generative Adversarial Network for Image Synthesis",
"doi": null,
"abstractUrl": "/proceedings-article/icdh/2022/547800a050/1JeDs4zZhqE",
"parentPublication": {
"id": "proceedings/icdh/2022/5478/0",
"title": "2022 9th International Conference on Digital Home (ICDH)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdar/2019/3014/0/301400a178",
"title": "TH-GAN: Generative Adversarial Network Based Transfer Learning for Historical Chinese Character Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/icdar/2019/301400a178/1h81u6jDzSE",
"parentPublication": {
"id": "proceedings/icdar/2019/3014/0",
"title": "2019 International Conference on Document Analysis and Recognition (ICDAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2022/09/09440692",
"title": "SymReg-GAN: Symmetric Image Registration With Generative Adversarial Networks",
"doi": null,
"abstractUrl": "/journal/tp/2022/09/09440692/1tTp7JF3Mty",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2021/8808/0/09412202",
"title": "DFH-GAN: A Deep Face Hashing with Generative Adversarial Network",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2021/09412202/1tmjLtNfwlO",
"parentPublication": {
"id": "proceedings/icpr/2021/8808/0",
"title": "2020 25th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2021/0477/0/047700d188",
"title": "LT-GAN: Self-Supervised GAN with Latent Transformation Detection",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2021/047700d188/1uqGqafzEOI",
"parentPublication": {
"id": "proceedings/wacv/2021/0477/0",
"title": "2021 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/sc/2022/06/09495171",
"title": "Learning TFIDF Enhanced Joint Embedding for Recipe-Image Cross-Modal Retrieval Service",
"doi": null,
"abstractUrl": "/journal/sc/2022/06/09495171/1vyje4tbQtO",
"parentPublication": {
"id": "trans/sc",
"title": "IEEE Transactions on Services Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900p5470",
"title": "Revamping Cross-Modal Recipe Retrieval with Hierarchical Transformers and Self-supervised Learning",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900p5470/1yeLl4npFD2",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1pP3sSVh3BS",
"title": "2020 IEEE 32nd International Conference on Tools with Artificial Intelligence (ICTAI)",
"acronym": "ictai",
"groupId": "1000763",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1pP3td7Dhuw",
"doi": "10.1109/ICTAI50040.2020.00144",
"title": "VC-GAN: Classifying Vessel Types by Maritime Trajectories using Generative Adversarial Networks",
"normalizedTitle": "VC-GAN: Classifying Vessel Types by Maritime Trajectories using Generative Adversarial Networks",
"abstract": "As maritime transport is the backbone of global trade, it is important to ensure the safety and security for sea transportation effectively. However, the dependence on experienced human operators for maritime surveillance does not scale in terms of coverage. While the ship information and trajectory data from the Automatic Identification System (AIS) can be used to automate maritime surveillance, the AIS data may be modified deliberately or accidentally, resulting in difficulties in the detection of illegal maritime activities. We have developed VC-GAN for Vessel Classification using Generative Adversarial Networks to identify vessel types based solely on the vessel trajectories in areas of interest. Our VC-GAN framework adversarially trains a multi-class classifier by learning from the labelled AIS data to classify the types of the vessels of interest, as well as to detect the out-of-class vessels. We evaluated the proposed VC-GAN method on two maritime datasets in Europe and Southeast Asia. The experimental results showed that VC-GAN significantly outperformed other vessel classification methods, especially in detecting out-of-class vessels.",
"abstracts": [
{
"abstractType": "Regular",
"content": "As maritime transport is the backbone of global trade, it is important to ensure the safety and security for sea transportation effectively. However, the dependence on experienced human operators for maritime surveillance does not scale in terms of coverage. While the ship information and trajectory data from the Automatic Identification System (AIS) can be used to automate maritime surveillance, the AIS data may be modified deliberately or accidentally, resulting in difficulties in the detection of illegal maritime activities. We have developed VC-GAN for Vessel Classification using Generative Adversarial Networks to identify vessel types based solely on the vessel trajectories in areas of interest. Our VC-GAN framework adversarially trains a multi-class classifier by learning from the labelled AIS data to classify the types of the vessels of interest, as well as to detect the out-of-class vessels. We evaluated the proposed VC-GAN method on two maritime datasets in Europe and Southeast Asia. The experimental results showed that VC-GAN significantly outperformed other vessel classification methods, especially in detecting out-of-class vessels.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "As maritime transport is the backbone of global trade, it is important to ensure the safety and security for sea transportation effectively. However, the dependence on experienced human operators for maritime surveillance does not scale in terms of coverage. While the ship information and trajectory data from the Automatic Identification System (AIS) can be used to automate maritime surveillance, the AIS data may be modified deliberately or accidentally, resulting in difficulties in the detection of illegal maritime activities. We have developed VC-GAN for Vessel Classification using Generative Adversarial Networks to identify vessel types based solely on the vessel trajectories in areas of interest. Our VC-GAN framework adversarially trains a multi-class classifier by learning from the labelled AIS data to classify the types of the vessels of interest, as well as to detect the out-of-class vessels. We evaluated the proposed VC-GAN method on two maritime datasets in Europe and Southeast Asia. The experimental results showed that VC-GAN significantly outperformed other vessel classification methods, especially in detecting out-of-class vessels.",
"fno": "922800a923",
"keywords": [
"Image Classification",
"Learning Artificial Intelligence",
"Marine Engineering",
"Marine Safety",
"Neural Nets",
"Ships",
"Vessel Types",
"Maritime Trajectories",
"Generative Adversarial Networks",
"Maritime Transport",
"Global Trade",
"Sea Transportation",
"Experienced Human Operators",
"Maritime Surveillance",
"Ship Information",
"Trajectory Data",
"Automatic Identification System",
"Illegal Maritime Activities",
"Vessel Trajectories",
"VC GAN Framework",
"Multiclass Classifier",
"Labelled AIS Data",
"Out Of Class Vessels",
"VC GAN Method",
"Maritime Datasets",
"Vessel Classification Methods",
"Surveillance",
"Transportation",
"Tools",
"Generative Adversarial Networks",
"Trajectory",
"Security",
"Artificial Intelligence",
"Generative Adversarial Networks",
"Maritime Trajectories",
"Vessel Type Classification"
],
"authors": [
{
"affiliation": "Institute of Data Science, National University of Singapore,Singapore",
"fullName": "Dan Li",
"givenName": "Dan",
"surname": "Li",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Institute of Data Science, National University of Singapore,Singapore",
"fullName": "Hang Liu",
"givenName": "Hang",
"surname": "Liu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "School of Computing Institute of Data Science, National University of Singapore,Singapore",
"fullName": "See-Kiong Ng",
"givenName": "See-Kiong",
"surname": "Ng",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ictai",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-11-01T00:00:00",
"pubType": "proceedings",
"pages": "923-928",
"year": "2020",
"issn": null,
"isbn": "978-1-7281-9228-4",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "922800a917",
"articleId": "1pP3vMvGpQA",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "922800a929",
"articleId": "1pP3BLWWvMQ",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iciii/2009/3876/3/3876c611",
"title": "Vessel Real-Time Monitoring System Based on AIS Temporal Database",
"doi": null,
"abstractUrl": "/proceedings-article/iciii/2009/3876c611/12OmNC8uRmw",
"parentPublication": {
"id": "proceedings/iciii/2009/3876/3",
"title": "International Conference on Information Management, Innovation Management and Industrial Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/acsat/2013/2758/0/2758a213",
"title": "Anomaly Detection in Vessel Tracking Using Support Vector Machines (SVMs)",
"doi": null,
"abstractUrl": "/proceedings-article/acsat/2013/2758a213/12OmNCf1Du6",
"parentPublication": {
"id": "proceedings/acsat/2013/2758/0",
"title": "2013 International Conference on Advanced Computer Science Applications and Technologies (ACSAT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isspit/2017/4662/0/08388628",
"title": "Detecting maneuvers in maritime observation data with CUSUM",
"doi": null,
"abstractUrl": "/proceedings-article/isspit/2017/08388628/12OmNrAMESy",
"parentPublication": {
"id": "proceedings/isspit/2017/4662/0",
"title": "2017 IEEE International Symposium on Signal Processing and Information Technology (ISSPIT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/big-data/2015/9926/0/07363866",
"title": "Contextual verification for false alarm reduction in maritime anomaly detection",
"doi": null,
"abstractUrl": "/proceedings-article/big-data/2015/07363866/12OmNroijkL",
"parentPublication": {
"id": "proceedings/big-data/2015/9926/0",
"title": "2015 IEEE International Conference on Big Data (Big Data)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/big-data/2015/9926/0/07363894",
"title": "A document-based data model for large scale computational maritime situational awareness",
"doi": null,
"abstractUrl": "/proceedings-article/big-data/2015/07363894/12OmNwp74vN",
"parentPublication": {
"id": "proceedings/big-data/2015/9926/0",
"title": "2015 IEEE International Conference on Big Data (Big Data)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/big-data/2015/9926/0/07363883",
"title": "Maritime situation analysis framework: Vessel interaction classification and anomaly detection",
"doi": null,
"abstractUrl": "/proceedings-article/big-data/2015/07363883/12OmNyr8YyU",
"parentPublication": {
"id": "proceedings/big-data/2015/9926/0",
"title": "2015 IEEE International Conference on Big Data (Big Data)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dasc-picom-datacom-cyberscitech/2018/7518/0/751800a981",
"title": "Countering Real-Time Stream Poisoning: An Architecture for Detecting Vessel Spoofing in Streams of AIS Data",
"doi": null,
"abstractUrl": "/proceedings-article/dasc-picom-datacom-cyberscitech/2018/751800a981/17D45X2fUFO",
"parentPublication": {
"id": "proceedings/dasc-picom-datacom-cyberscitech/2018/7518/0",
"title": "2018 IEEE 16th Intl Conf on Dependable, Autonomic and Secure Computing, 16th Intl Conf on Pervasive Intelligence and Computing, 4th Intl Conf on Big Data Intelligence and Computing and Cyber Science and Technology Congress(DASC/PiCom/DataCom/CyberSciTech)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ithings-greencom-cpscom-smartdata-cybermatics/2021/1762/0/176200a314",
"title": "Intelligent Data-Driven Vessel Trajectory Prediction in Marine Transportation Cyber-Physical System",
"doi": null,
"abstractUrl": "/proceedings-article/ithings-greencom-cpscom-smartdata-cybermatics/2021/176200a314/1AIMvpskXhS",
"parentPublication": {
"id": "proceedings/ithings-greencom-cpscom-smartdata-cybermatics/2021/1762/0",
"title": "2021 IEEE International Conferences on Internet of Things (iThings) and IEEE Green Computing & Communications (GreenCom) and IEEE Cyber, Physical & Social Computing (CPSCom) and IEEE Smart Data (SmartData) and IEEE Congress on Cybermatics (Cybermatics)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dasc-picom-cbdcom-cyberscitech/2022/6297/0/09927993",
"title": "Edge Computing-Enabled Multi-Sensor Data Fusion for Intelligent Surveillance in Maritime Transportation Systems",
"doi": null,
"abstractUrl": "/proceedings-article/dasc-picom-cbdcom-cyberscitech/2022/09927993/1J4CoFJlsas",
"parentPublication": {
"id": "proceedings/dasc-picom-cbdcom-cyberscitech/2022/6297/0",
"title": "2022 IEEE Intl Conf on Dependable, Autonomic and Secure Computing, Intl Conf on Pervasive Intelligence and Computing, Intl Conf on Cloud and Big Data Computing, Intl Conf on Cyber Science and Technology Congress (DASC/PiCom/CBDCom/CyberSciTech)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/avss/2021/3396/0/09663739",
"title": "A comprehensive maritime benchmark dataset for detection, tracking and threat recognition",
"doi": null,
"abstractUrl": "/proceedings-article/avss/2021/09663739/1zUZ84uqIdq",
"parentPublication": {
"id": "proceedings/avss/2021/3396/0",
"title": "2021 17th IEEE International Conference on Advanced Video and Signal Based Surveillance (AVSS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1uiluGq0Oo8",
"title": "2021 IEEE International Conference on Multimedia and Expo (ICME)",
"acronym": "icme",
"groupId": "1000477",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1uim9HTyCSk",
"doi": "10.1109/ICME51207.2021.9428140",
"title": "CI-GAN : Co-Clustering By Information Maximizing Generative Adversarial Networks",
"normalizedTitle": "CI-GAN : Co-Clustering By Information Maximizing Generative Adversarial Networks",
"abstract": "Simultaneously clustering rows and columns of a matrix, co-clustering can exploit the complex relationships between two different domains and identify groups of distinct nature. In this work, we introduce CI-GAN, a novel GAN-based approach for co-clustering. The model exploits two distinct GAN that cluster each domain independently and combines them intelligently by maximizing the mutual information between the input data and the generated co-clusters. From the experiments constructed with image, audio, and text datasets, it is found that such a systematic way of sharing information between the networks can improve co-clustering performance substantially; when CI-GAN is compared against five standard algorithms, it consistently reports the highest accuracy on both synthetic and real datasets.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Simultaneously clustering rows and columns of a matrix, co-clustering can exploit the complex relationships between two different domains and identify groups of distinct nature. In this work, we introduce CI-GAN, a novel GAN-based approach for co-clustering. The model exploits two distinct GAN that cluster each domain independently and combines them intelligently by maximizing the mutual information between the input data and the generated co-clusters. From the experiments constructed with image, audio, and text datasets, it is found that such a systematic way of sharing information between the networks can improve co-clustering performance substantially; when CI-GAN is compared against five standard algorithms, it consistently reports the highest accuracy on both synthetic and real datasets.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Simultaneously clustering rows and columns of a matrix, co-clustering can exploit the complex relationships between two different domains and identify groups of distinct nature. In this work, we introduce CI-GAN, a novel GAN-based approach for co-clustering. The model exploits two distinct GAN that cluster each domain independently and combines them intelligently by maximizing the mutual information between the input data and the generated co-clusters. From the experiments constructed with image, audio, and text datasets, it is found that such a systematic way of sharing information between the networks can improve co-clustering performance substantially; when CI-GAN is compared against five standard algorithms, it consistently reports the highest accuracy on both synthetic and real datasets.",
"fno": "09428140",
"keywords": [
"Gallium Compounds",
"Pattern Clustering",
"Text Analysis",
"CI GAN",
"Novel GAN Based Approach",
"Distinct GAN",
"Cluster Each Domain",
"Mutual Information",
"Generated Co Clusters",
"Co Clustering Performance",
"Information Maximizing Generative Adversarial Networks",
"Distinct Nature",
"Systematics",
"Conferences",
"Tools",
"Generative Adversarial Networks",
"Data Models",
"Mutual Information",
"Standards",
"Co Clustering",
"Generative Modeling"
],
"authors": [
{
"affiliation": "University of Waterloo,David R. Cheriton School of Computer Science",
"fullName": "Jaejun Lee",
"givenName": "Jaejun",
"surname": "Lee",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Samsung Research America",
"fullName": "Hyun Chul Lee",
"givenName": "Hyun Chul",
"surname": "Lee",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Samsung Research America",
"fullName": "Tomasz Palczewski",
"givenName": "Tomasz",
"surname": "Palczewski",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icme",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-07-01T00:00:00",
"pubType": "proceedings",
"pages": "1-6",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-3864-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "09428211",
"articleId": "1uilWILMawE",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09428133",
"articleId": "1uimdG3tbva",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cvpr/2022/6946/0/694600l1265",
"title": "DO-GAN: A Double Oracle Framework for Generative Adversarial Networks",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2022/694600l1265/1H1kORDwthu",
"parentPublication": {
"id": "proceedings/cvpr/2022/6946/0",
"title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tk/5555/01/09965611",
"title": "BL-GAN: Semi-Supervised Bug Localization Via Generative Adversarial Network",
"doi": null,
"abstractUrl": "/journal/tk/5555/01/09965611/1IHMMPUiMnu",
"parentPublication": {
"id": "trans/tk",
"title": "IEEE Transactions on Knowledge & Data Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ipdps/2019/1246/0/124600a866",
"title": "MD-GAN: Multi-Discriminator Generative Adversarial Networks for Distributed Datasets",
"doi": null,
"abstractUrl": "/proceedings-article/ipdps/2019/124600a866/1cYhReGG1YA",
"parentPublication": {
"id": "proceedings/ipdps/2019/1246/0",
"title": "2019 IEEE International Parallel and Distributed Processing Symposium (IPDPS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2022/03/09187695",
"title": "DE-GAN: A Conditional Generative Adversarial Network for Document Enhancement",
"doi": null,
"abstractUrl": "/journal/tp/2022/03/09187695/1mVFmWt3gGs",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ictai/2020/9228/0/922800a923",
"title": "VC-GAN: Classifying Vessel Types by Maritime Trajectories using Generative Adversarial Networks",
"doi": null,
"abstractUrl": "/proceedings-article/ictai/2020/922800a923/1pP3td7Dhuw",
"parentPublication": {
"id": "proceedings/ictai/2020/9228/0",
"title": "2020 IEEE 32nd International Conference on Tools with Artificial Intelligence (ICTAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ictai/2020/9228/0/922800a545",
"title": "PHC-GAN: Physical Constraint Generative Adversarial Network for Single Image Dehazing",
"doi": null,
"abstractUrl": "/proceedings-article/ictai/2020/922800a545/1pP3unJfNjG",
"parentPublication": {
"id": "proceedings/ictai/2020/9228/0",
"title": "2020 IEEE 32nd International Conference on Tools with Artificial Intelligence (ICTAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2022/09/09440692",
"title": "SymReg-GAN: Symmetric Image Registration With Generative Adversarial Networks",
"doi": null,
"abstractUrl": "/journal/tp/2022/09/09440692/1tTp7JF3Mty",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2021/8808/0/09411979",
"title": "MBD-GAN: Model-based image deblurring with a generative adversarial network",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2021/09411979/1tmjO23bJzW",
"parentPublication": {
"id": "proceedings/icpr/2021/8808/0",
"title": "2020 25th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cbms/2021/4121/0/412100a079",
"title": "Ret-GAN: Retinal Image Enhancement using Generative Adversarial Networks",
"doi": null,
"abstractUrl": "/proceedings-article/cbms/2021/412100a079/1vb8Vz0ZV0A",
"parentPublication": {
"id": "proceedings/cbms/2021/4121/0",
"title": "2021 IEEE 34th International Symposium on Computer-Based Medical Systems (CBMS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccea/2021/2616/0/261600a352",
"title": "Implement Music Generation with GAN: A Systematic Review",
"doi": null,
"abstractUrl": "/proceedings-article/iccea/2021/261600a352/1y4ouUXQ1a0",
"parentPublication": {
"id": "proceedings/iccea/2021/2616/0",
"title": "2021 International Conference on Computer Engineering and Application (ICCEA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1vb8N7rSv0A",
"title": "2021 IEEE 34th International Symposium on Computer-Based Medical Systems (CBMS)",
"acronym": "cbms",
"groupId": "1000153",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1vb8Vz0ZV0A",
"doi": "10.1109/CBMS52027.2021.00082",
"title": "Ret-GAN: Retinal Image Enhancement using Generative Adversarial Networks",
"normalizedTitle": "Ret-GAN: Retinal Image Enhancement using Generative Adversarial Networks",
"abstract": "With over 200K cases in the U.S. alone, retinal disorders are the most common cause of irreversible blindness. This serves as a primary aim to analyze automated screening tools to detect retinal disorders. We analyze the OCT dataset (84, 484 images) and enhance the images by using Generative Adversarial Networks (GANs). This work specifically focuses on enhancing the quality of source (training) images for better algorithm validation/testing<sup>1</sup><sup>1</sup>Authors contributed equally to the work.. We synthesize super resolution-based images using generators, discriminators and the adversarial nature of the GANs. The performance of the Ret-GAN is validated by PSNR, SSIM, and loss functions. To test the Ret-GAN generated images, we train a convolutional neural network (CNN) with the original dataset images and super-resolution images. We achieve an accuracy of 0.9825 on Ret-GAN generated image data, and 0.9525 on the original data. We statistically analyze the CNN with a number of evaluation metrics to further validate the results. The proposed scheme is compared to benchmark research findings on the same dataset. Our results are encouraging.",
"abstracts": [
{
"abstractType": "Regular",
"content": "With over 200K cases in the U.S. alone, retinal disorders are the most common cause of irreversible blindness. This serves as a primary aim to analyze automated screening tools to detect retinal disorders. We analyze the OCT dataset (84, 484 images) and enhance the images by using Generative Adversarial Networks (GANs). This work specifically focuses on enhancing the quality of source (training) images for better algorithm validation/testing<sup>1</sup><sup>1</sup>Authors contributed equally to the work.. We synthesize super resolution-based images using generators, discriminators and the adversarial nature of the GANs. The performance of the Ret-GAN is validated by PSNR, SSIM, and loss functions. To test the Ret-GAN generated images, we train a convolutional neural network (CNN) with the original dataset images and super-resolution images. We achieve an accuracy of 0.9825 on Ret-GAN generated image data, and 0.9525 on the original data. We statistically analyze the CNN with a number of evaluation metrics to further validate the results. The proposed scheme is compared to benchmark research findings on the same dataset. Our results are encouraging.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "With over 200K cases in the U.S. alone, retinal disorders are the most common cause of irreversible blindness. This serves as a primary aim to analyze automated screening tools to detect retinal disorders. We analyze the OCT dataset (84, 484 images) and enhance the images by using Generative Adversarial Networks (GANs). This work specifically focuses on enhancing the quality of source (training) images for better algorithm validation/testing11Authors contributed equally to the work.. We synthesize super resolution-based images using generators, discriminators and the adversarial nature of the GANs. The performance of the Ret-GAN is validated by PSNR, SSIM, and loss functions. To test the Ret-GAN generated images, we train a convolutional neural network (CNN) with the original dataset images and super-resolution images. We achieve an accuracy of 0.9825 on Ret-GAN generated image data, and 0.9525 on the original data. We statistically analyze the CNN with a number of evaluation metrics to further validate the results. The proposed scheme is compared to benchmark research findings on the same dataset. Our results are encouraging.",
"fno": "412100a079",
"keywords": [
"Biomedical Optical Imaging",
"Convolutional Neural Nets",
"Eye",
"Image Enhancement",
"Image Resolution",
"Medical Image Processing",
"Optical Tomography",
"CNN",
"Superresolution Based Images",
"Image Data",
"Superresolution Images",
"Dataset Images",
"Convolutional Neural Network",
"Adversarial Nature",
"Source Images",
"Retinal Disorders",
"Generative Adversarial Networks",
"Retinal Image Enhancement",
"Ret GAN",
"Temperature 200 0 K",
"Training",
"Measurement",
"Superresolution",
"Tools",
"Retina",
"Generative Adversarial Networks",
"Generators",
"Ret GAN",
"Retinal Disorders",
"Generative Adversarial Networks"
],
"authors": [
{
"affiliation": "KC's PAMI Research Lab - Computer Science, University of South Dakota,Vermillion,SD,USA,57069",
"fullName": "K.C. Santosh",
"givenName": "K.C.",
"surname": "Santosh",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "KIIT University,Department of Electronics Engineering,Bhubaneswar,India",
"fullName": "Sourodip Ghosh",
"givenName": "Sourodip",
"surname": "Ghosh",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "KIIT University,Department of Electronics Engineering,Bhubaneswar,India",
"fullName": "Moinak Bose",
"givenName": "Moinak",
"surname": "Bose",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cbms",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-06-01T00:00:00",
"pubType": "proceedings",
"pages": "79-84",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-4121-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "412100a074",
"articleId": "1vb8UDb5mNy",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "412100a085",
"articleId": "1vb8VLHyorm",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icpr/2018/3788/0/08545881",
"title": "MMGAN: Manifold-Matching Generative Adversarial Networks",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2018/08545881/17D45WHONmN",
"parentPublication": {
"id": "proceedings/icpr/2018/3788/0",
"title": "2018 24th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2018/6420/0/642000j455",
"title": "ST-GAN: Spatial Transformer Generative Adversarial Networks for Image Compositing",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2018/642000j455/17D45XacGkf",
"parentPublication": {
"id": "proceedings/cvpr/2018/6420/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ec/2023/01/09767588",
"title": "PlausMal-GAN: Plausible Malware Training Based on Generative Adversarial Networks for Analogous Zero-Day Malware Detection",
"doi": null,
"abstractUrl": "/journal/ec/2023/01/09767588/1D4SjLGhJT2",
"parentPublication": {
"id": "trans/ec",
"title": "IEEE Transactions on Emerging Topics in Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/avss/2019/0990/0/08909900",
"title": "Image and Video Super Resolution using Recurrent Generative Adversarial Network",
"doi": null,
"abstractUrl": "/proceedings-article/avss/2019/08909900/1febJclnLyM",
"parentPublication": {
"id": "proceedings/avss/2019/0990/0",
"title": "2019 16th IEEE International Conference on Advanced Video and Signal Based Surveillance (AVSS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdar/2019/3014/0/301400a178",
"title": "TH-GAN: Generative Adversarial Network Based Transfer Learning for Historical Chinese Character Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/icdar/2019/301400a178/1h81u6jDzSE",
"parentPublication": {
"id": "proceedings/icdar/2019/3014/0",
"title": "2019 International Conference on Document Analysis and Recognition (ICDAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2022/09/09440692",
"title": "SymReg-GAN: Symmetric Image Registration With Generative Adversarial Networks",
"doi": null,
"abstractUrl": "/journal/tp/2022/09/09440692/1tTp7JF3Mty",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2021/8808/0/09412331",
"title": "Thermal Image Enhancement using Generative Adversarial Network for Pedestrian Detection",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2021/09412331/1tmiz5DG7Qc",
"parentPublication": {
"id": "proceedings/icpr/2021/8808/0",
"title": "2020 25th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2021/8808/0/09412202",
"title": "DFH-GAN: A Deep Face Hashing with Generative Adversarial Network",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2021/09412202/1tmjLtNfwlO",
"parentPublication": {
"id": "proceedings/icpr/2021/8808/0",
"title": "2020 25th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2021/8808/0/09411979",
"title": "MBD-GAN: Model-based image deblurring with a generative adversarial network",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2021/09411979/1tmjO23bJzW",
"parentPublication": {
"id": "proceedings/icpr/2021/8808/0",
"title": "2020 25th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2021/0191/0/019100d228",
"title": "VTGAN: Semi-supervised Retinal Image Synthesis and Disease Prediction using Vision Transformers",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2021/019100d228/1yNhP9mcUFi",
"parentPublication": {
"id": "proceedings/iccvw/2021/0191/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision Workshops (ICCVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1yeHGyRsuys",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"acronym": "cvpr",
"groupId": "1000147",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1yeKfpa1XIQ",
"doi": "10.1109/CVPR46437.2021.01342",
"title": "House-GAN++: Generative Adversarial Layout Refinement Network towards Intelligent Computational Agent for Professional Architects",
"normalizedTitle": "House-GAN++: Generative Adversarial Layout Refinement Network towards Intelligent Computational Agent for Professional Architects",
"abstract": "This paper proposes a generative adversarial layout refinement network for automated floorplan generation. Our architecture is an integration of a graph-constrained relational GAN and a conditional GAN, where a previously generated layout becomes the next input constraint, enabling iterative refinement. A surprising discovery of our research is that a simple non-iterative training process, dubbed component-wise GT-conditioning, is effective in learning such a generator. The iterative generator further allows us to improve a metric of choice via meta-optimization techniques by controlling when to pass which input constraints during iterative refinement. Our qualitative and quantitative evaluation based on the three standard metrics demonstrate that the proposed system makes significant improvements over the current state-of-the-art, even competitive against the ground-truth floorplans, designed by professional architects. Code, model, and data are available at https://ennauata.github.io/houseganpp/page.html.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper proposes a generative adversarial layout refinement network for automated floorplan generation. Our architecture is an integration of a graph-constrained relational GAN and a conditional GAN, where a previously generated layout becomes the next input constraint, enabling iterative refinement. A surprising discovery of our research is that a simple non-iterative training process, dubbed component-wise GT-conditioning, is effective in learning such a generator. The iterative generator further allows us to improve a metric of choice via meta-optimization techniques by controlling when to pass which input constraints during iterative refinement. Our qualitative and quantitative evaluation based on the three standard metrics demonstrate that the proposed system makes significant improvements over the current state-of-the-art, even competitive against the ground-truth floorplans, designed by professional architects. Code, model, and data are available at https://ennauata.github.io/houseganpp/page.html.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper proposes a generative adversarial layout refinement network for automated floorplan generation. Our architecture is an integration of a graph-constrained relational GAN and a conditional GAN, where a previously generated layout becomes the next input constraint, enabling iterative refinement. A surprising discovery of our research is that a simple non-iterative training process, dubbed component-wise GT-conditioning, is effective in learning such a generator. The iterative generator further allows us to improve a metric of choice via meta-optimization techniques by controlling when to pass which input constraints during iterative refinement. Our qualitative and quantitative evaluation based on the three standard metrics demonstrate that the proposed system makes significant improvements over the current state-of-the-art, even competitive against the ground-truth floorplans, designed by professional architects. Code, model, and data are available at https://ennauata.github.io/houseganpp/page.html.",
"fno": "450900n3627",
"keywords": [
"Architecture",
"Civil Engineering Computing",
"Iterative Methods",
"Learning Artificial Intelligence",
"Neural Net Architecture",
"Optimisation",
"Generative Adversarial Layout Refinement Network",
"Intelligent Computational Agent",
"Professional Architects",
"Automated Floorplan Generation",
"Graph Constrained Relational GAN",
"Conditional GAN",
"Input Constraint",
"Iterative Refinement",
"Noniterative Training Process",
"Dubbed Component Wise GT Conditioning",
"Iterative Generator",
"House GAN",
"Meta Optimization Techniques",
"Quantitative Evaluation",
"Qualitative Evaluation",
"Ground Truth Floorplans",
"Measurement",
"Training",
"Layout",
"Refining",
"Generative Adversarial Networks",
"Generators",
"Pattern Recognition"
],
"authors": [
{
"affiliation": "Simon Fraser University",
"fullName": "Nelson Nauata",
"givenName": "Nelson",
"surname": "Nauata",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Simon Fraser University",
"fullName": "Sepidehsadat Hosseini",
"givenName": "Sepidehsadat",
"surname": "Hosseini",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Autodesk Research",
"fullName": "Kai-Hung Chang",
"givenName": "Kai-Hung",
"surname": "Chang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Autodesk Research",
"fullName": "Hang Chu",
"givenName": "Hang",
"surname": "Chu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Autodesk Research",
"fullName": "Chin-Yi Cheng",
"givenName": "Chin-Yi",
"surname": "Cheng",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Simon Fraser University",
"fullName": "Yasutaka Furukawa",
"givenName": "Yasutaka",
"surname": "Furukawa",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-06-01T00:00:00",
"pubType": "proceedings",
"pages": "13627-13636",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-4509-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [
{
"id": "1yeKfl0ibCg",
"name": "pcvpr202145090-09577959s1-mm_450900n3627.zip",
"size": "3.94 MB",
"location": "https://www.computer.org/csdl/api/v1/extra/pcvpr202145090-09577959s1-mm_450900n3627.zip",
"__typename": "WebExtraType"
}
],
"adjacentArticles": {
"previous": {
"fno": "450900n3617",
"articleId": "1yeJaoI4BZ6",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "450900n3637",
"articleId": "1yeHYPqFSQo",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cvpr/2018/6420/0/642000j455",
"title": "ST-GAN: Spatial Transformer Generative Adversarial Networks for Image Compositing",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2018/642000j455/17D45XacGkf",
"parentPublication": {
"id": "proceedings/cvpr/2018/6420/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2018/6420/0/642000a821",
"title": "FaceID-GAN: Learning a Symmetry Three-Player GAN for Identity-Preserving Face Synthesis",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2018/642000a821/17D45Xh13pk",
"parentPublication": {
"id": "proceedings/cvpr/2018/6420/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2023/03/09792208",
"title": "GAN Inversion: A Survey",
"doi": null,
"abstractUrl": "/journal/tp/2023/03/09792208/1E5LCU0gJAk",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2022/8563/0/09859791",
"title": "GR-GAN: Gradual Refinement Text-To-Image Generation",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2022/09859791/1G9ESovhXWw",
"parentPublication": {
"id": "proceedings/icme/2022/8563/0",
"title": "2022 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdar/2019/3014/0/301400a763",
"title": "A GAN-Based Feature Generator for Table Detection",
"doi": null,
"abstractUrl": "/proceedings-article/icdar/2019/301400a763/1h81piYOPiE",
"parentPublication": {
"id": "proceedings/icdar/2019/3014/0",
"title": "2019 International Conference on Document Analysis and Recognition (ICDAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdar/2019/3014/0/301400a178",
"title": "TH-GAN: Generative Adversarial Network Based Transfer Learning for Historical Chinese Character Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/icdar/2019/301400a178/1h81u6jDzSE",
"parentPublication": {
"id": "proceedings/icdar/2019/3014/0",
"title": "2019 International Conference on Document Analysis and Recognition (ICDAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2020/6553/0/09093525",
"title": "FX-GAN: Self-Supervised GAN Learning via Feature Exchange",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2020/09093525/1jPbxvOsk6s",
"parentPublication": {
"id": "proceedings/wacv/2020/6553/0",
"title": "2020 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/10/09106863",
"title": "Attribute-Conditioned Layout GAN for Automatic Graphic Design",
"doi": null,
"abstractUrl": "/journal/tg/2021/10/09106863/1kkFGfMRO36",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2021/4509/0/450900p5735",
"title": "DECOR-GAN: 3D Shape Detailization by Conditional Refinement",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2021/450900p5735/1yeK92zTzgs",
"parentPublication": {
"id": "proceedings/cvpr/2021/4509/0",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iscipt/2021/4137/0/413700a097",
"title": "Stock Market Prediction Based on SF-GAN Network",
"doi": null,
"abstractUrl": "/proceedings-article/iscipt/2021/413700a097/1zzpsCAXZN6",
"parentPublication": {
"id": "proceedings/iscipt/2021/4137/0",
"title": "2021 6th International Symposium on Computer and Information Processing Technology (ISCIPT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNxwWoqU",
"title": "Image Processing, International Conference on",
"acronym": "icip",
"groupId": "1000349",
"volume": "2",
"displayVolume": "2",
"year": "1998",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNBpEeWP",
"doi": "10.1109/ICIP.1998.723625",
"title": "Reconstruction problems in 3D for viral cryo electron microscopy",
"normalizedTitle": "Reconstruction problems in 3D for viral cryo electron microscopy",
"abstract": "Cryo electron microscopy of viruses provides 2D projections of the scattering intensity of the viral particle but the orientation of the projections is not known. We describe an approach to reconstructing the 3D scattering intensity in spite of the unknown projection orientations using nonlinear least squares ideas where the reconstruction is guaranteed to have the icosahedral symmetry known to be present in the viral particle.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Cryo electron microscopy of viruses provides 2D projections of the scattering intensity of the viral particle but the orientation of the projections is not known. We describe an approach to reconstructing the 3D scattering intensity in spite of the unknown projection orientations using nonlinear least squares ideas where the reconstruction is guaranteed to have the icosahedral symmetry known to be present in the viral particle.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Cryo electron microscopy of viruses provides 2D projections of the scattering intensity of the viral particle but the orientation of the projections is not known. We describe an approach to reconstructing the 3D scattering intensity in spite of the unknown projection orientations using nonlinear least squares ideas where the reconstruction is guaranteed to have the icosahedral symmetry known to be present in the viral particle.",
"fno": "882120706",
"keywords": [
"Electron Microscopy",
"Image Reconstruction",
"Least Squares Approximations",
"Biological Techniques",
"Biology Computing",
"Cellular Biophysics",
"Reconstruction",
"3 D",
"Viral Cryo Electron Microscopy",
"Scattering Intensity",
"Orientation",
"3 D Scattering Intensity",
"Nonlinear Least Squares",
"Icosahedral Symmetry",
"Electron Microscopy",
"Particle Scattering",
"Viruses Medical",
"Fourier Transforms",
"Electron Beams",
"Computer Viruses",
"Least Squares Methods",
"Signal Reconstruction",
"Proteins",
"Tires"
],
"authors": [
{
"affiliation": "Sch. of Electr. & Comput. Eng., Purdue Univ., West Lafayette, IN, USA",
"fullName": "Wen Gao",
"givenName": "Wen",
"surname": "Gao",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "P.C. Doerschuk",
"givenName": "P.C.",
"surname": "Doerschuk",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icip",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "1998-01-01T00:00:00",
"pubType": "proceedings",
"pages": "706-708 vol.2",
"year": "1998",
"issn": null,
"isbn": "0-8186-8821-1",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "882120700",
"articleId": "12OmNqGRGiL",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "882120709",
"articleId": "12OmNyaGeMv",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icip/1995/7310/3/73103041",
"title": "How image processing can push electron microscopy to its limits",
"doi": null,
"abstractUrl": "/proceedings-article/icip/1995/73103041/12OmNB0nWc2",
"parentPublication": {
"id": "proceedings/icip/1995/7310/3",
"title": "Image Processing, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2017/0733/0/0733b084",
"title": "Multi-Resolution Data Fusion for Super-Resolution Electron Microscopy",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2017/0733b084/12OmNqNXEsL",
"parentPublication": {
"id": "proceedings/cvprw/2017/0733/0",
"title": "2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icassp/1988/9999/0/00196730",
"title": "3-D electron microscopy and incomplete angular coverage: a restoration scheme based on projections onto convex sets",
"doi": null,
"abstractUrl": "/proceedings-article/icassp/1988/00196730/12OmNvTTcat",
"parentPublication": {
"id": "proceedings/icassp/1988/9999/0",
"title": "ICASSP-88., International Conference on Acoustics, Speech, and Signal Processing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pg/1997/8028/0/80280191",
"title": "A Visualization Environment for Electron Microscopy",
"doi": null,
"abstractUrl": "/proceedings-article/pg/1997/80280191/12OmNviHKbZ",
"parentPublication": {
"id": "proceedings/pg/1997/8028/0",
"title": "Computer Graphics and Applications, Pacific Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2012/2216/0/06460090",
"title": "Watershed merge tree classification for electron microscopy image segmentation",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2012/06460090/12OmNyYm2wU",
"parentPublication": {
"id": "proceedings/icpr/2012/2216/0",
"title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/1992/2910/0/00201559",
"title": "Determination of the relative orientation of projections of asymmetric objects",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/1992/00201559/12OmNzahc03",
"parentPublication": {
"id": "proceedings/icpr/1992/2910/0",
"title": "1992 11th IAPR International Conference on Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icip/1997/8183/1/81831173",
"title": "Three-dimensional reconstruction of noisy electron microscopy virus particle images",
"doi": null,
"abstractUrl": "/proceedings-article/icip/1997/81831173/12OmNzn391t",
"parentPublication": {
"id": "proceedings/icip/1997/8183/1",
"title": "Image Processing, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mlhpc/2018/0180/0/08638633",
"title": "Automated Labeling of Electron Microscopy Images Using Deep Learning",
"doi": null,
"abstractUrl": "/proceedings-article/mlhpc/2018/08638633/18jXU8u0DVS",
"parentPublication": {
"id": "proceedings/mlhpc/2018/0180/0",
"title": "2018 IEEE/ACM Machine Learning in HPC Environments (MLHPC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2021/2812/0/281200c662",
"title": "A Hybrid Frequency-Spatial Domain Model for Sparse Image Reconstruction in Scanning Transmission Electron Microscopy",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2021/281200c662/1BmIoiM5Xag",
"parentPublication": {
"id": "proceedings/iccv/2021/2812/0",
"title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/e-science/2022/6124/0/612400a267",
"title": "Enabling Autonomous Electron Microscopy for Networked Computation and Steering",
"doi": null,
"abstractUrl": "/proceedings-article/e-science/2022/612400a267/1J6horEKDlu",
"parentPublication": {
"id": "proceedings/e-science/2022/6124/0",
"title": "2022 IEEE 18th International Conference on e-Science (e-Science)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNwdbUZX",
"title": "2013 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)",
"acronym": "bibm",
"groupId": "1001586",
"volume": "0",
"displayVolume": "0",
"year": "2013",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNwF0BNI",
"doi": "10.1109/BIBM.2013.6732714",
"title": "A novel seeding method based on spatial sliding volume filter for neuron reconstruction",
"normalizedTitle": "A novel seeding method based on spatial sliding volume filter for neuron reconstruction",
"abstract": "Automatic neuron reconstruction is one of the foremost challenging and important problem in the field of neuroscience. However, none of the prevalent algorithms can automatically reconstruct full anatomy structure. All of these make it is essential of developing new method for the tracing task. This paper introduced a novel seeding method for reconstructing neuron structures from 3-D microscopy images stacks. The protocol was initialized with a set of seeds which were detected by our proposed Sliding Volume Filter. And then the open curve snake was applied to the detected seeds to reconstruct the full structural of neuron cells. Results showed the proposed method exhibited excellent performance with its accuracy compared with traditional method. It is worth noting that the seeding method can clearly benefit for 3-D neuron fiber detection and reconstruction.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Automatic neuron reconstruction is one of the foremost challenging and important problem in the field of neuroscience. However, none of the prevalent algorithms can automatically reconstruct full anatomy structure. All of these make it is essential of developing new method for the tracing task. This paper introduced a novel seeding method for reconstructing neuron structures from 3-D microscopy images stacks. The protocol was initialized with a set of seeds which were detected by our proposed Sliding Volume Filter. And then the open curve snake was applied to the detected seeds to reconstruct the full structural of neuron cells. Results showed the proposed method exhibited excellent performance with its accuracy compared with traditional method. It is worth noting that the seeding method can clearly benefit for 3-D neuron fiber detection and reconstruction.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Automatic neuron reconstruction is one of the foremost challenging and important problem in the field of neuroscience. However, none of the prevalent algorithms can automatically reconstruct full anatomy structure. All of these make it is essential of developing new method for the tracing task. This paper introduced a novel seeding method for reconstructing neuron structures from 3-D microscopy images stacks. The protocol was initialized with a set of seeds which were detected by our proposed Sliding Volume Filter. And then the open curve snake was applied to the detected seeds to reconstruct the full structural of neuron cells. Results showed the proposed method exhibited excellent performance with its accuracy compared with traditional method. It is worth noting that the seeding method can clearly benefit for 3-D neuron fiber detection and reconstruction.",
"fno": "06732714",
"keywords": [
"Neurons",
"Image Reconstruction",
"Microscopy",
"Three Dimensional Displays",
"Olfactory",
"Estimation",
"Protocols",
"Open Curve Snake",
"Spatial Convergence Index Filter",
"Seeding Method",
"Neuron Reconstruction"
],
"authors": [
{
"affiliation": "Biocomputing Research Center, School of Computer Science and Technology, Harbin Institute of Technology, Harbin, China",
"fullName": "Dong Sui",
"givenName": "Dong",
"surname": "Sui",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Biocomputing Research Center, School of Computer Science and Technology, Harbin Institute of Technology, Harbin, China",
"fullName": "Kuanquan Wang",
"givenName": "Kuanquan",
"surname": "Wang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Biocomputing Research Center, School of Computer Science and Technology, Harbin Institute of Technology, Harbin, China",
"fullName": "Yue Zhang",
"givenName": "Yue",
"surname": "Zhang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Biocomputing Research Center, School of Computer Science and Technology, Harbin Institute of Technology, Harbin, China",
"fullName": "Henggui Zhang",
"givenName": "Henggui",
"surname": "Zhang",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "bibm",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2013-12-01T00:00:00",
"pubType": "proceedings",
"pages": "29-34",
"year": "2013",
"issn": null,
"isbn": "978-1-4799-1309-1",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "06732713",
"articleId": "12OmNyL0TEX",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "06732715",
"articleId": "12OmNwDSdz1",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/bibm/2011/1799/0/06120467",
"title": "3D Neuron Tip Detection in Volumetric Microscopy Images",
"doi": null,
"abstractUrl": "/proceedings-article/bibm/2011/06120467/12OmNBK5m7q",
"parentPublication": {
"id": "proceedings/bibm/2011/1799/0",
"title": "2011 IEEE International Conference on Bioinformatics and Biomedicine",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2016/1437/0/1437b324",
"title": "Neuron Segmentation Based on CNN with Semi-Supervised Regularization",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2016/1437b324/12OmNrJAedS",
"parentPublication": {
"id": "proceedings/cvprw/2016/1437/0",
"title": "2016 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cbms/2016/9036/0/9036a130",
"title": "Automatic Neuron Tracing Using a Locally Tunable Approach",
"doi": null,
"abstractUrl": "/proceedings-article/cbms/2016/9036a130/12OmNwdbV1n",
"parentPublication": {
"id": "proceedings/cbms/2016/9036/0",
"title": "2016 IEEE 29th International Symposium on Computer-Based Medical Systems (CBMS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iscc/2017/1629/0/08024696",
"title": "Secure self-seeding with power-up SRAM states",
"doi": null,
"abstractUrl": "/proceedings-article/iscc/2017/08024696/12OmNyaXPRX",
"parentPublication": {
"id": "proceedings/iscc/2017/1629/0",
"title": "2017 IEEE Symposium on Computers and Communications (ISCC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2017/1034/0/1034a126",
"title": "Automatic 3D Single Neuron Reconstruction with Exhaustive Tracing",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2017/1034a126/12OmNyvY9sc",
"parentPublication": {
"id": "proceedings/iccvw/2017/1034/0",
"title": "2017 IEEE International Conference on Computer Vision Workshop (ICCVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmew/2017/0560/0/08026258",
"title": "A distance transform based tip point detection method for neurons in confocal microscopy images",
"doi": null,
"abstractUrl": "/proceedings-article/icmew/2017/08026258/12OmNzmLxU6",
"parentPublication": {
"id": "proceedings/icmew/2017/0560/0",
"title": "2017 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibm/2018/5488/0/08621212",
"title": "Automatic 3D Neuron Tracing Based on Terminations Detection",
"doi": null,
"abstractUrl": "/proceedings-article/bibm/2018/08621212/17D45VVho2h",
"parentPublication": {
"id": "proceedings/bibm/2018/5488/0",
"title": "2018 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2023/9346/0/934600f776",
"title": "PointNeuron: 3D Neuron Reconstruction via Geometry and Topology Learning of Point Clouds",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2023/934600f776/1KxUsxFsCfS",
"parentPublication": {
"id": "proceedings/wacv/2023/9346/0",
"title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ithings-greencom-cpscom-smartdata/2019/2980/0/298000a289",
"title": "Optimization Algorithms in Reconstructions of Neuron Morphology: An Overview",
"doi": null,
"abstractUrl": "/proceedings-article/ithings-greencom-cpscom-smartdata/2019/298000a289/1ehBI4wpbUc",
"parentPublication": {
"id": "proceedings/ithings-greencom-cpscom-smartdata/2019/2980/0",
"title": "2019 International Conference on Internet of Things (iThings) and IEEE Green Computing and Communications (GreenCom) and IEEE Cyber, Physical and Social Computing (CPSCom) and IEEE Smart Data (SmartData)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/02/09226101",
"title": "Improving the Usability of Virtual Reality Neuron Tracing with Topological Elements",
"doi": null,
"abstractUrl": "/journal/tg/2021/02/09226101/1nWKGhzMhb2",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1KaFN97eXLi",
"title": "2022 26th International Conference Information Visualisation (IV)",
"acronym": "iv",
"groupId": "1000370",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1KaH0uXY3i8",
"doi": "10.1109/IV56949.2022.00069",
"title": "Interactive Web-based 3D Viewer for Multidimensional Microscope Imaging Modalities",
"normalizedTitle": "Interactive Web-based 3D Viewer for Multidimensional Microscope Imaging Modalities",
"abstract": "Recent advancements in the acquisition of digital imaging modalities with high-throughput technologies, such as confocal laser scanner microscopy (CLSM) and focused-ion beam scanning electron microscopy (FIB-SEM), are providing researchers with unprecedented opportunities to collect massive amounts of multidimensional datasets. This data can be used to visualize the internal structure of tiny particles (mostly cells and organisms) or to develop analytic algorithms. Visualizing newly obtained multidimensional microscope imaging data is beyond the capabilities of traditional 3D visualization packages, as it carries much information in the form of additional dimensions. Typically, these extra dimensions correspond to space, time, and channels, which has driven the development of new visualization applications. In this article, we describe the design and implementation of an interactive web-based multidimensional 3D visualization tool for CLSM and FIB-SEM microscope imaging modalities. The proposed 3D visualization application accepts DICOM files as input and provides a variety of visualization choices ranging from 3D volume/surface rendering to multiplanar reconstruction approaches. The solution performance was tested by uploading and rendering microscopy images of distinct modalities.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Recent advancements in the acquisition of digital imaging modalities with high-throughput technologies, such as confocal laser scanner microscopy (CLSM) and focused-ion beam scanning electron microscopy (FIB-SEM), are providing researchers with unprecedented opportunities to collect massive amounts of multidimensional datasets. This data can be used to visualize the internal structure of tiny particles (mostly cells and organisms) or to develop analytic algorithms. Visualizing newly obtained multidimensional microscope imaging data is beyond the capabilities of traditional 3D visualization packages, as it carries much information in the form of additional dimensions. Typically, these extra dimensions correspond to space, time, and channels, which has driven the development of new visualization applications. In this article, we describe the design and implementation of an interactive web-based multidimensional 3D visualization tool for CLSM and FIB-SEM microscope imaging modalities. The proposed 3D visualization application accepts DICOM files as input and provides a variety of visualization choices ranging from 3D volume/surface rendering to multiplanar reconstruction approaches. The solution performance was tested by uploading and rendering microscopy images of distinct modalities.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Recent advancements in the acquisition of digital imaging modalities with high-throughput technologies, such as confocal laser scanner microscopy (CLSM) and focused-ion beam scanning electron microscopy (FIB-SEM), are providing researchers with unprecedented opportunities to collect massive amounts of multidimensional datasets. This data can be used to visualize the internal structure of tiny particles (mostly cells and organisms) or to develop analytic algorithms. Visualizing newly obtained multidimensional microscope imaging data is beyond the capabilities of traditional 3D visualization packages, as it carries much information in the form of additional dimensions. Typically, these extra dimensions correspond to space, time, and channels, which has driven the development of new visualization applications. In this article, we describe the design and implementation of an interactive web-based multidimensional 3D visualization tool for CLSM and FIB-SEM microscope imaging modalities. The proposed 3D visualization application accepts DICOM files as input and provides a variety of visualization choices ranging from 3D volume/surface rendering to multiplanar reconstruction approaches. The solution performance was tested by uploading and rendering microscopy images of distinct modalities.",
"fno": "900700a379",
"keywords": [
"Biomedical Optical Imaging",
"Data Visualisation",
"Focused Ion Beam Technology",
"Image Reconstruction",
"Medical Image Processing",
"Microscopy",
"Optical Microscopy",
"Rendering Computer Graphics",
"Scanning Electron Microscopy",
"3 D Visualization Application",
"Additional Dimensions",
"Analytic Algorithms",
"CLSM",
"Confocal Laser Scanner Microscopy",
"Digital Imaging Modalities",
"Distinct Modalities",
"Electron Microscopy",
"Extra Dimensions",
"FIB SEM Microscope Imaging Modalities",
"Focused Ion Beam",
"High Throughput Technologies",
"Interactive Web Based 3 D Viewer",
"Interactive Web Based Multidimensional 3 D Visualization Tool",
"Internal Structure",
"Multidimensional Datasets",
"Multidimensional Microscope Imaging",
"Newly Obtained Multidimensional Microscope",
"Organisms",
"Tiny Particles",
"Traditional 3 D Visualization Packages",
"Unprecedented Opportunities",
"Uploading Rendering Microscopy Images",
"Visualization Applications",
"Visualization Choices",
"Scanning Electron Microscopy",
"Three Dimensional Displays",
"Shape",
"Microscopy",
"Data Visualization",
"Rendering Computer Graphics",
"DICOM",
"Multidimensional",
"3 D Visualization",
"CLSM",
"FIB SEM",
"VTK"
],
"authors": [
{
"affiliation": "University of Aveiro,Department of Computer Engineering,Aveiro,Portugal",
"fullName": "Yubraj Gupta",
"givenName": "Yubraj",
"surname": "Gupta",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Aveiro,Department of Computer Engineering,Aveiro,Portugal",
"fullName": "Rodrigo E. D. Guerrero",
"givenName": "Rodrigo E. D.",
"surname": "Guerrero",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Aveiro,Department of Computer Engineering,Aveiro,Portugal",
"fullName": "Carlos Costa",
"givenName": "Carlos",
"surname": "Costa",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "BMD Software,Aveiro,Portugal",
"fullName": "Rui Jesus",
"givenName": "Rui",
"surname": "Jesus",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "BMD Software,Aveiro,Portugal",
"fullName": "Eduardo Pinho",
"givenName": "Eduardo",
"surname": "Pinho",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "BMD Software,Aveiro,Portugal",
"fullName": "Luís A. Bastião Silva",
"givenName": "Luís A.",
"surname": "Bastião Silva",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iv",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-07-01T00:00:00",
"pubType": "proceedings",
"pages": "379-384",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-9007-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "900700a373",
"articleId": "1KaH2WUL1hm",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "900700a385",
"articleId": "1KaH7zjs47e",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cvprw/2017/0733/0/0733a851",
"title": "Transferring Microscopy Image Modalities with Conditional Generative Adversarial Networks",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2017/0733a851/12OmNwGIcza",
"parentPublication": {
"id": "proceedings/cvprw/2017/0733/0",
"title": "2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/biomedvis/1995/7198/0/71980018",
"title": "Crumbs: a virtual environment tracking tool for biological imaging",
"doi": null,
"abstractUrl": "/proceedings-article/biomedvis/1995/71980018/12OmNx3Zjjf",
"parentPublication": {
"id": "proceedings/biomedvis/1995/7198/0",
"title": "Biomedical Visualization",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dicta/2011/4588/0/4588a319",
"title": "An Evaluation of Multi-resolution Microscope Slide Scanning Algorithms",
"doi": null,
"abstractUrl": "/proceedings-article/dicta/2011/4588a319/12OmNyRxFJK",
"parentPublication": {
"id": "proceedings/dicta/2011/4588/0",
"title": "2011 International Conference on Digital Image Computing: Techniques and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sitis/2015/9721/0/9721a023",
"title": "Wrinkle Image Registration for Serial Microscopy Sections",
"doi": null,
"abstractUrl": "/proceedings-article/sitis/2015/9721a023/12OmNzTYCaI",
"parentPublication": {
"id": "proceedings/sitis/2015/9721/0",
"title": "2015 11th International Conference on Signal-Image Technology & Internet-Based Systems (SITIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2006/05/v1165",
"title": "Visualization of Fibrous and Thread-like Data",
"doi": null,
"abstractUrl": "/journal/tg/2006/05/v1165/13rRUwjXZS4",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/th/2016/04/07482745",
"title": "Two-Photon Imaging of DiO-Labelled Meissner Corpuscle in Living Mouse's Fingertip",
"doi": null,
"abstractUrl": "/journal/th/2016/04/07482745/13rRUxlgxOt",
"parentPublication": {
"id": "trans/th",
"title": "IEEE Transactions on Haptics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mlhpc/2018/0180/0/08638633",
"title": "Automated Labeling of Electron Microscopy Images Using Deep Learning",
"doi": null,
"abstractUrl": "/proceedings-article/mlhpc/2018/08638633/18jXU8u0DVS",
"parentPublication": {
"id": "proceedings/mlhpc/2018/0180/0",
"title": "2018 IEEE/ACM Machine Learning in HPC Environments (MLHPC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icedme/2020/8145/0/09122233",
"title": "Failure Analysis of Planetary Gear in a Geared Rotary Actuator Used in Aircraft Flight Control System",
"doi": null,
"abstractUrl": "/proceedings-article/icedme/2020/09122233/1kRSDtj3wSQ",
"parentPublication": {
"id": "proceedings/icedme/2020/8145/0",
"title": "2020 3rd International Conference on Electron Device and Mechanical Engineering (ICEDME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2021/8808/0/09413098",
"title": "EM-net: Deep learning for electron microscopy image segmentation",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2021/09413098/1tmhO5klgAg",
"parentPublication": {
"id": "proceedings/icpr/2021/8808/0",
"title": "2020 25th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iscc/2021/2744/0/09631529",
"title": "A DICOM Standard Pipeline for Microscope Imaging Modalities",
"doi": null,
"abstractUrl": "/proceedings-article/iscc/2021/09631529/1zmvNdEZd8A",
"parentPublication": {
"id": "proceedings/iscc/2021/2744/0",
"title": "2021 IEEE Symposium on Computers and Communications (ISCC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNqGA5im",
"title": "2014 IEEE Frontiers in Education Conference (FIE)",
"acronym": "fie",
"groupId": "1000297",
"volume": "0",
"displayVolume": "0",
"year": "2014",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNxEBz3P",
"doi": "10.1109/FIE.2014.7044207",
"title": "Interactive visualizations for teaching quantum mechanics and semiconductor physics",
"normalizedTitle": "Interactive visualizations for teaching quantum mechanics and semiconductor physics",
"abstract": "Work in Progress: The theory of Quantum Mechanics (QM) provides a foundation for many fields of science and engineering; however, its abstract nature and technical difficulty make QM a challenging subject for students to approach and grasp. This is partly because complex mathematical concepts involved in QM are difficult to visualize for students and the existing visualization are minimal and limited. We propose that many of these concepts can be communicated and experienced through interactive visualizations and games, drawing on the strengths and affordances of digital media. A game environment can make QM concepts more accessible and understandable by immersing students in nano-sized worlds governed by unique QM rules. Furthermore, replayability of games allows students to experience the probabilistic nature of QM concepts. In this paper, we present a game and a series of interactive visualizations that we are developing to provide students with an experiential environment to learn quantum mechanics. We will discuss how these visualizations and games can enable students to experiment with QM concepts, compare QM with classical physics, and get accustomed to the often counterintuitive laws of QM.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Work in Progress: The theory of Quantum Mechanics (QM) provides a foundation for many fields of science and engineering; however, its abstract nature and technical difficulty make QM a challenging subject for students to approach and grasp. This is partly because complex mathematical concepts involved in QM are difficult to visualize for students and the existing visualization are minimal and limited. We propose that many of these concepts can be communicated and experienced through interactive visualizations and games, drawing on the strengths and affordances of digital media. A game environment can make QM concepts more accessible and understandable by immersing students in nano-sized worlds governed by unique QM rules. Furthermore, replayability of games allows students to experience the probabilistic nature of QM concepts. In this paper, we present a game and a series of interactive visualizations that we are developing to provide students with an experiential environment to learn quantum mechanics. We will discuss how these visualizations and games can enable students to experiment with QM concepts, compare QM with classical physics, and get accustomed to the often counterintuitive laws of QM.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Work in Progress: The theory of Quantum Mechanics (QM) provides a foundation for many fields of science and engineering; however, its abstract nature and technical difficulty make QM a challenging subject for students to approach and grasp. This is partly because complex mathematical concepts involved in QM are difficult to visualize for students and the existing visualization are minimal and limited. We propose that many of these concepts can be communicated and experienced through interactive visualizations and games, drawing on the strengths and affordances of digital media. A game environment can make QM concepts more accessible and understandable by immersing students in nano-sized worlds governed by unique QM rules. Furthermore, replayability of games allows students to experience the probabilistic nature of QM concepts. In this paper, we present a game and a series of interactive visualizations that we are developing to provide students with an experiential environment to learn quantum mechanics. We will discuss how these visualizations and games can enable students to experiment with QM concepts, compare QM with classical physics, and get accustomed to the often counterintuitive laws of QM.",
"fno": "07044207",
"keywords": [
"Games",
"Visualization",
"Quantum Mechanics",
"Education",
"Abstracts",
"Hydrogen",
"Games",
"Quantum Mechanics",
"Semiconductor Physics",
"Education",
"Scientific Visualization"
],
"authors": [
{
"affiliation": "Georgia Institute of Technology, Atlanta, GA",
"fullName": "Rose Peng",
"givenName": "Rose",
"surname": "Peng",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Georgia Institute of Technology, Atlanta, GA",
"fullName": "Bill Dorn",
"givenName": "Bill",
"surname": "Dorn",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Georgia Institute of Technology, Atlanta, GA",
"fullName": "Azad Naeemi",
"givenName": "Azad",
"surname": "Naeemi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Georgia Institute of Technology, Atlanta, GA",
"fullName": "Nassim Jafarinaimi",
"givenName": "Nassim",
"surname": "Jafarinaimi",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "fie",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2014-10-01T00:00:00",
"pubType": "proceedings",
"pages": "1-4",
"year": "2014",
"issn": null,
"isbn": "978-1-4799-3922-0",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "07044206",
"articleId": "12OmNvrMUkV",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "07044208",
"articleId": "12OmNvk7JLh",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icdm/2012/4905/0/4905a900",
"title": "A New Anomaly Detection Algorithm Based on Quantum Mechanics",
"doi": null,
"abstractUrl": "/proceedings-article/icdm/2012/4905a900/12OmNB1wkM5",
"parentPublication": {
"id": "proceedings/icdm/2012/4905/0",
"title": "2012 IEEE 12th International Conference on Data Mining",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icqnm/2009/3524/0/3524a011",
"title": "Does Quantum Mechanics Need Interpretation?",
"doi": null,
"abstractUrl": "/proceedings-article/icqnm/2009/3524a011/12OmNC8uRvM",
"parentPublication": {
"id": "proceedings/icqnm/2009/3524/0",
"title": "Quantum, Nano, and Micro Technologies, First International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sc/2002/1524/0/15240063",
"title": "Collaborative Simulation Grid: Multiscale Quantum-Mechanical/Classical Atomistic Simulations on Distributed PC Clusters in the US and Japan",
"doi": null,
"abstractUrl": "/proceedings-article/sc/2002/15240063/12OmNrHSD2w",
"parentPublication": {
"id": "proceedings/sc/2002/1524/0",
"title": "SC Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pci/2009/3788/0/3788a220",
"title": "Quantum Game Simulator, Using the Circuit Model of Quantum",
"doi": null,
"abstractUrl": "/proceedings-article/pci/2009/3788a220/12OmNrNh0E2",
"parentPublication": {
"id": "proceedings/pci/2009/3788/0",
"title": "2009 13th Panhellenic Conference on Informatics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/lics/2012/4769/0/06280443",
"title": "Strong Complementarity and Non-locality in Categorical Quantum Mechanics",
"doi": null,
"abstractUrl": "/proceedings-article/lics/2012/06280443/12OmNxFsmoR",
"parentPublication": {
"id": "proceedings/lics/2012/4769/0",
"title": "Logic in Computer Science, Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/qce/2022/9113/0/911300a653",
"title": "Updated and Adapted Curriculum and Pedagogy of Physics with the Fourth Industrial Revolution and Quantum Revolution: From Waves Principles to Quantum Mechanics Fundamentals",
"doi": null,
"abstractUrl": "/proceedings-article/qce/2022/911300a653/1IvLWhzEboQ",
"parentPublication": {
"id": "proceedings/qce/2022/9113/0",
"title": "2022 IEEE International Conference on Quantum Computing and Engineering (QCE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/snpd/2022/1041/0/10051767",
"title": "Probabilistic Neural Synapse Based in Quantum Mechanics",
"doi": null,
"abstractUrl": "/proceedings-article/snpd/2022/10051767/1LiNWSqa9Gw",
"parentPublication": {
"id": "proceedings/snpd/2022/1041/0",
"title": "2022 IEEE/ACIS 23rd International Conference on Software Engineering, Artificial Intelligence, Networking and Parallel/Distributed Computing (SNPD)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/igsc/2018/7466/0/08752111",
"title": "Information, Quantum Mechanics, and the Universe",
"doi": null,
"abstractUrl": "/proceedings-article/igsc/2018/08752111/1bhIjqVyYxi",
"parentPublication": {
"id": "proceedings/igsc/2018/7466/0",
"title": "2018 Ninth International Green and Sustainable Computing Conference (IGSC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/qce/2020/8969/0/896900a323",
"title": "Quantum Computing for High-School Students An Experience Report",
"doi": null,
"abstractUrl": "/proceedings-article/qce/2020/896900a323/1p2VnYdbx5e",
"parentPublication": {
"id": "proceedings/qce/2020/8969/0",
"title": "2020 IEEE International Conference on Quantum Computing and Engineering (QCE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismvl/2021/9224/0/922400a093",
"title": "Binary, Multi-Valued and Quantum Board and Computer Games to Teach Synthesis of Classical and Quantum Logic Circuits",
"doi": null,
"abstractUrl": "/proceedings-article/ismvl/2021/922400a093/1uOubi8nPEY",
"parentPublication": {
"id": "proceedings/ismvl/2021/9224/0",
"title": "2021 IEEE 51st International Symposium on Multiple-Valued Logic (ISMVL)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1KzzoaQp2WQ",
"title": "2022 4th International Workshop on Artificial Intelligence and Education (WAIE)",
"acronym": "waie",
"groupId": "1846204",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1KzzolbliEw",
"doi": "10.1109/WAIE57417.2022.00008",
"title": "Design and Implementation of a Teaching Verbal Behavior Analysis Aid in Instructional Videos",
"normalizedTitle": "Design and Implementation of a Teaching Verbal Behavior Analysis Aid in Instructional Videos",
"abstract": "With the rapid development of artificial intelligence technology and its deep integration with education, the use of intelligent means to analyze and study teaching behaviors has become a research hotspot. Teaching behavior can be divided into verbal behavior and non-verbal behavior, among which verbal behavior is the main way of classroom teacher-student interaction, accounting for about 80% of all teaching behaviors. At present, teaching researchers mainly use manual annotation to code and analyze teaching verbal behaviors, which has the problems of low efficiency and individual subjectivity, and it is difficult to collect a large amount of teaching verbal behavior data to analyze teaching patterns. This paper designs and implements a teaching verbal behavior analysis tool using teaching verbal behavior recognition algorithm. It provides the functions of speech transcription of teaching videos, automatic recognition and analysis of teaching verbal behaviors, and analysis of teaching verbal contents, with a view to providing assistance to teaching researchers in analyzing teaching verbal behaviors in instructional videos by using intelligent technology.",
"abstracts": [
{
"abstractType": "Regular",
"content": "With the rapid development of artificial intelligence technology and its deep integration with education, the use of intelligent means to analyze and study teaching behaviors has become a research hotspot. Teaching behavior can be divided into verbal behavior and non-verbal behavior, among which verbal behavior is the main way of classroom teacher-student interaction, accounting for about 80% of all teaching behaviors. At present, teaching researchers mainly use manual annotation to code and analyze teaching verbal behaviors, which has the problems of low efficiency and individual subjectivity, and it is difficult to collect a large amount of teaching verbal behavior data to analyze teaching patterns. This paper designs and implements a teaching verbal behavior analysis tool using teaching verbal behavior recognition algorithm. It provides the functions of speech transcription of teaching videos, automatic recognition and analysis of teaching verbal behaviors, and analysis of teaching verbal contents, with a view to providing assistance to teaching researchers in analyzing teaching verbal behaviors in instructional videos by using intelligent technology.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "With the rapid development of artificial intelligence technology and its deep integration with education, the use of intelligent means to analyze and study teaching behaviors has become a research hotspot. Teaching behavior can be divided into verbal behavior and non-verbal behavior, among which verbal behavior is the main way of classroom teacher-student interaction, accounting for about 80% of all teaching behaviors. At present, teaching researchers mainly use manual annotation to code and analyze teaching verbal behaviors, which has the problems of low efficiency and individual subjectivity, and it is difficult to collect a large amount of teaching verbal behavior data to analyze teaching patterns. This paper designs and implements a teaching verbal behavior analysis tool using teaching verbal behavior recognition algorithm. It provides the functions of speech transcription of teaching videos, automatic recognition and analysis of teaching verbal behaviors, and analysis of teaching verbal contents, with a view to providing assistance to teaching researchers in analyzing teaching verbal behaviors in instructional videos by using intelligent technology.",
"fno": "635100a001",
"keywords": [
"Artificial Intelligence",
"Behavioural Sciences Computing",
"Computer Aided Instruction",
"Data Analysis",
"Teaching",
"Artificial Intelligence",
"Classroom Teacher Student Interaction",
"Instructional Videos",
"Nonverbal Behavior",
"Speech Transcription",
"Teaching Researchers",
"Teaching Verbal Behavior Analysis Aid",
"Teaching Verbal Behavior Analysis Tool",
"Teaching Verbal Behavior Data",
"Teaching Verbal Behavior Recognition",
"Teaching Videos",
"Annotations",
"Conferences",
"Education",
"Speech Recognition",
"Manuals",
"Learning Artificial Intelligence",
"Encoding",
"Teaching Verbal Behavior",
"Smart Analytics",
"Text Analytics",
"Deep Learning"
],
"authors": [
{
"affiliation": "Central China Normal University,Faculty of Artificial Intelligence in Education,Wuhan,China",
"fullName": "Gang Zhao",
"givenName": "Gang",
"surname": "Zhao",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Central China Normal University,Faculty of Artificial Intelligence in Education,Wuhan,China",
"fullName": "Lijun Yang",
"givenName": "Lijun",
"surname": "Yang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Central China Normal University,Faculty of Artificial Intelligence in Education,Wuhan,China",
"fullName": "Jiaojiao Li",
"givenName": "Jiaojiao",
"surname": "Li",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Central China Normal University,Faculty of Artificial Intelligence in Education,Wuhan,China",
"fullName": "Jie Chu",
"givenName": "Jie",
"surname": "Chu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "NO.18 Primary School of Wuhan Optics Valley,Wuhan,China",
"fullName": "Yuheng Qi",
"givenName": "Yuheng",
"surname": "Qi",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "waie",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-11-01T00:00:00",
"pubType": "proceedings",
"pages": "1-5",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-6351-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "635100z011",
"articleId": "1KzzpIpPvdC",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "635100a006",
"articleId": "1KzzpYYf1yU",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iset/2017/3031/0/08005385",
"title": "Learners’ Appeal: An Analysis of Teachers’ Behavior in Online Live Teaching",
"doi": null,
"abstractUrl": "/proceedings-article/iset/2017/08005385/12OmNAndipl",
"parentPublication": {
"id": "proceedings/iset/2017/3031/0",
"title": "2017 International Symposium on Educational Technology (ISET)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iiai-aai/2017/0621/0/0621a633",
"title": "The Role of Visual/Verbal Cognitive Styles and Self-Efficacy in Online Searching Behaviors and Performance in the Text-Based Learning Environment",
"doi": null,
"abstractUrl": "/proceedings-article/iiai-aai/2017/0621a633/12OmNCdk2Za",
"parentPublication": {
"id": "proceedings/iiai-aai/2017/0621/0",
"title": "2017 6th IIAI International Congress on Advanced Applied Informatics (IIAI-AAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wi-iat/2008/3496/3/3496c139",
"title": "Verbal Explaining of the Behavior of Time-Series Data",
"doi": null,
"abstractUrl": "/proceedings-article/wi-iat/2008/3496c139/12OmNrJRPa3",
"parentPublication": {
"id": "proceedings/wi-iat/2008/3496/3",
"title": "Web Intelligence and Intelligent Agent Technology, IEEE/WIC/ACM International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fie/1998/4762/1/00736826",
"title": "Graphical representations of engineering design behavior",
"doi": null,
"abstractUrl": "/proceedings-article/fie/1998/00736826/12OmNx76TQC",
"parentPublication": {
"id": "proceedings/fie/1998/4762/1",
"title": "FIE '98. 28th Annual Frontiers in Education Conference. Moving from 'Teacher-Centered' to 'Learner-Centered' Education. Conference Proceedings (Cat. No.98CH36214)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dasc-picom-datacom-cyberscitech/2016/4065/0/07588847",
"title": "Causal Association Analysis Algorithm for MOOC Learning Behavior and Learning Effect",
"doi": null,
"abstractUrl": "/proceedings-article/dasc-picom-datacom-cyberscitech/2016/07588847/12OmNyqiaNo",
"parentPublication": {
"id": "proceedings/dasc-picom-datacom-cyberscitech/2016/4065/0",
"title": "2016 IEEE 14th Intl Conf on Dependable, Autonomic and Secure Computing, 14th Intl Conf on Pervasive Intelligence and Computing, 2nd Intl Conf on Big Data Intelligence and Computing and Cyber Science and Technology Congress(DASC/PiCom/DataCom/CyberSciTech)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ta/2016/04/07265021",
"title": "Dynamics of Non-Verbal Vocalizations and Hormones during Father-Infant Interaction",
"doi": null,
"abstractUrl": "/journal/ta/2016/04/07265021/13rRUILc8dN",
"parentPublication": {
"id": "trans/ta",
"title": "IEEE Transactions on Affective Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/eitt/2019/4288/0/428800a147",
"title": "Analysis of Instructional Interaction Behaviors Based on OOTIAS in Smart Learning Environment",
"doi": null,
"abstractUrl": "/proceedings-article/eitt/2019/428800a147/1fHkPyoKj4I",
"parentPublication": {
"id": "proceedings/eitt/2019/4288/0",
"title": "2019 Eighth International Conference of Educational Innovation through Technology (EITT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aivr/2019/5604/0/560400a041",
"title": "Non-Verbal Behavior Generation for Virtual Characters in Group Conversations",
"doi": null,
"abstractUrl": "/proceedings-article/aivr/2019/560400a041/1grOjknU1Bm",
"parentPublication": {
"id": "proceedings/aivr/2019/5604/0",
"title": "2019 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a715",
"title": "[DC] Privacy in VR: Empowering Users with Emotional Privacy from Verbal and Non-verbal Behavior of Their Avatars",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a715/1tnXsX6EMBa",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icscde/2021/0142/0/014200a188",
"title": "Interactive Behavior Analysis Based on Social Network",
"doi": null,
"abstractUrl": "/proceedings-article/icscde/2021/014200a188/1xtSDXTFp4s",
"parentPublication": {
"id": "proceedings/icscde/2021/0142/0",
"title": "2021 International Conference of Social Computing and Digital Economy (ICSCDE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNyKJiaV",
"title": "Pattern Recognition, International Conference on",
"acronym": "icpr",
"groupId": "1000545",
"volume": "0",
"displayVolume": "0",
"year": "2010",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNBAqZGn",
"doi": "10.1109/ICPR.2010.683",
"title": "Adaptive Diffusion Flow for Parametric Active Contours",
"normalizedTitle": "Adaptive Diffusion Flow for Parametric Active Contours",
"abstract": "This paper proposes a novel external force for active contours, called adaptive diffusion flow (ADF). We reconsider the generative mechanism of gradient vector flow (GVF) diffusion process from the perspective of image restoration, and exploit a harmonic hyper surface minimal function to substitute smoothness energy term of GVF for alleviating the possible leakage problem. Meanwhile, a ∞- laplacian functional is incorporated in the ADF framework to ensure that the vector flow diffuses mainly along normal direction in homogenous regions of an image. Experiments on synthetic and real images demonstrate the good properties of the ADF snake, including noise robustness, weak edge preserving, and concavity convergence.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper proposes a novel external force for active contours, called adaptive diffusion flow (ADF). We reconsider the generative mechanism of gradient vector flow (GVF) diffusion process from the perspective of image restoration, and exploit a harmonic hyper surface minimal function to substitute smoothness energy term of GVF for alleviating the possible leakage problem. Meanwhile, a ∞- laplacian functional is incorporated in the ADF framework to ensure that the vector flow diffuses mainly along normal direction in homogenous regions of an image. Experiments on synthetic and real images demonstrate the good properties of the ADF snake, including noise robustness, weak edge preserving, and concavity convergence.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper proposes a novel external force for active contours, called adaptive diffusion flow (ADF). We reconsider the generative mechanism of gradient vector flow (GVF) diffusion process from the perspective of image restoration, and exploit a harmonic hyper surface minimal function to substitute smoothness energy term of GVF for alleviating the possible leakage problem. Meanwhile, a ∞- laplacian functional is incorporated in the ADF framework to ensure that the vector flow diffuses mainly along normal direction in homogenous regions of an image. Experiments on synthetic and real images demonstrate the good properties of the ADF snake, including noise robustness, weak edge preserving, and concavity convergence.",
"fno": "4109c788",
"keywords": [
"Adaptive Diffusion Flow",
"Gradient Vector Flow",
"Active Contours",
"Image Segmentation"
],
"authors": [
{
"affiliation": null,
"fullName": "Yuwei Wu",
"givenName": "Yuwei",
"surname": "Wu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Yunde Jia",
"givenName": "Yunde",
"surname": "Jia",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Yuanquan Wang",
"givenName": "Yuanquan",
"surname": "Wang",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2010-08-01T00:00:00",
"pubType": "proceedings",
"pages": "2788-2791",
"year": "2010",
"issn": "1051-4651",
"isbn": "978-0-7695-4109-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "4109c784",
"articleId": "12OmNBUAw0i",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "4109c792",
"articleId": "12OmNzn38Ni",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/avss/2005/9385/0/01577338",
"title": "A parallel hardware design for parametric active contour models",
"doi": null,
"abstractUrl": "/proceedings-article/avss/2005/01577338/12OmNAKLZZN",
"parentPublication": {
"id": "proceedings/avss/2005/9385/0",
"title": "IEEE Conference on Advanced Video and Signal Based Surveillance, 2005.",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2008/2174/0/04761589",
"title": "Accelerating active contour algorithms with the Gradient Diffusion Field",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2008/04761589/12OmNBA9oyI",
"parentPublication": {
"id": "proceedings/icpr/2008/2174/0",
"title": "ICPR 2008 19th International Conference on Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2001/1143/1/00937500",
"title": "Gradient vector flow fast geodesic active contours",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2001/00937500/12OmNC943NW",
"parentPublication": {
"id": "proceedings/iccv/2001/1143/1",
"title": "Computer Vision, IEEE International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/csse/2008/3336/1/3336b017",
"title": "Image Segmentation with GVF Snake and Corner Detection",
"doi": null,
"abstractUrl": "/proceedings-article/csse/2008/3336b017/12OmNvjgWXZ",
"parentPublication": {
"id": "proceedings/csse/2008/3336/1",
"title": "Computer Science and Software Engineering, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/1996/7258/0/72580680",
"title": "Vector-Valued Active Contours",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/1996/72580680/12OmNvjyy1p",
"parentPublication": {
"id": "proceedings/cvpr/1996/7258/0",
"title": "Proceedings CVPR IEEE Computer Society Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cis/2011/4584/0/4584b071",
"title": "Active Contours with Adaptively Normal Biased Gradient Vector Flow External Force",
"doi": null,
"abstractUrl": "/proceedings-article/cis/2011/4584b071/12OmNyYDDIo",
"parentPublication": {
"id": "proceedings/cis/2011/4584/0",
"title": "2011 Seventh International Conference on Computational Intelligence and Security",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iai/2004/8387/0/01300965",
"title": "A DCT based gradient vector flow snake for object boundary detection",
"doi": null,
"abstractUrl": "/proceedings-article/iai/2004/01300965/12OmNzE54Kg",
"parentPublication": {
"id": "proceedings/iai/2004/8387/0",
"title": "2004 Southwest Symposium on Image Analysis and Interpretation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2004/03/i0402",
"title": "Gradient Vector Flow Fast Geometric Active Contours",
"doi": null,
"abstractUrl": "/journal/tp/2004/03/i0402/13rRUwdrdLO",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2008/03/ttp2008030412",
"title": "Finsler Active Contours",
"doi": null,
"abstractUrl": "/journal/tp/2008/03/ttp2008030412/13rRUxZ0o2F",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNxQOjzU",
"title": "2011 Irish Machine Vision and Image Processing Conference (IMVIP)",
"acronym": "imvip",
"groupId": "1001328",
"volume": "0",
"displayVolume": "0",
"year": "2011",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNC4eSyz",
"doi": "10.1109/IMVIP.2011.24",
"title": "Optic Flow Providing External Force for Active Contours in Visually Tracking Dense Cell Population",
"normalizedTitle": "Optic Flow Providing External Force for Active Contours in Visually Tracking Dense Cell Population",
"abstract": "Intense current research requires quantitative analysis of cell behaviours in dense cell populations. The low contrast cellular image quality, diversity of cell shapes, frequent cell interactions, and complex cell motions all pose significant problems to the efficient and robust cell tracking in phase contrast cellular images. We have proposed an automated cell tracking system based on active contours for tracking cell deformation and movement. The pyramidal optic flow scheme is exploited for providing external motion force to guide active contour evolution, and thus helps to address the particular difficulty in tracking relatively fast moving cells in dense cell population. We have evaluated the proposed framework on one real cellular dataset and proved an 80.2% tracking accuracy.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Intense current research requires quantitative analysis of cell behaviours in dense cell populations. The low contrast cellular image quality, diversity of cell shapes, frequent cell interactions, and complex cell motions all pose significant problems to the efficient and robust cell tracking in phase contrast cellular images. We have proposed an automated cell tracking system based on active contours for tracking cell deformation and movement. The pyramidal optic flow scheme is exploited for providing external motion force to guide active contour evolution, and thus helps to address the particular difficulty in tracking relatively fast moving cells in dense cell population. We have evaluated the proposed framework on one real cellular dataset and proved an 80.2% tracking accuracy.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Intense current research requires quantitative analysis of cell behaviours in dense cell populations. The low contrast cellular image quality, diversity of cell shapes, frequent cell interactions, and complex cell motions all pose significant problems to the efficient and robust cell tracking in phase contrast cellular images. We have proposed an automated cell tracking system based on active contours for tracking cell deformation and movement. The pyramidal optic flow scheme is exploited for providing external motion force to guide active contour evolution, and thus helps to address the particular difficulty in tracking relatively fast moving cells in dense cell population. We have evaluated the proposed framework on one real cellular dataset and proved an 80.2% tracking accuracy.",
"fno": "06167845",
"keywords": [
"Optical Imaging",
"Force",
"Tracking",
"Optical Filters",
"Biomedical Optical Imaging",
"Image Segmentation",
"Active Contours",
"Dense Cellular Datasets",
"Active Contour",
"Optic Flow",
"Multi Scale",
"Cell Tracking"
],
"authors": [
{
"affiliation": null,
"fullName": "Sha Yu",
"givenName": "Sha",
"surname": "Yu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Derek Molloy",
"givenName": "Derek",
"surname": "Molloy",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "imvip",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2011-09-01T00:00:00",
"pubType": "proceedings",
"pages": "84-87",
"year": "2011",
"issn": null,
"isbn": "978-1-4673-0230-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "06167844",
"articleId": "12OmNBhpSa2",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "06167846",
"articleId": "12OmNz5JBM5",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/cvpr/1997/7822/0/78221094",
"title": "Stereo Coupled Active Contours",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/1997/78221094/12OmNqBbHCa",
"parentPublication": {
"id": "proceedings/cvpr/1997/7822/0",
"title": "Proceedings of IEEE Computer Society Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ic3/2016/3251/0/07880227",
"title": "Automatic imaging method for optic disc segmentation using morphological techniques and active contour fitting",
"doi": null,
"abstractUrl": "/proceedings-article/ic3/2016/07880227/12OmNvjyxxU",
"parentPublication": {
"id": "proceedings/ic3/2016/3251/0",
"title": "2016 Ninth International Conference on Contemporary Computing (IC3)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isspit/2014/1812/0/07300621",
"title": "Improved cell tracking via automated removal of particulates",
"doi": null,
"abstractUrl": "/proceedings-article/isspit/2014/07300621/12OmNxy4N3Y",
"parentPublication": {
"id": "proceedings/isspit/2014/1812/0",
"title": "2014 IEEE International Symposium on Signal Processing and Information Technology (ISSPIT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aiccsa/2014/7100/0/07073197",
"title": "Fast active contour for object tracking in image sequence",
"doi": null,
"abstractUrl": "/proceedings-article/aiccsa/2014/07073197/12OmNyuya4c",
"parentPublication": {
"id": "proceedings/aiccsa/2014/7100/0",
"title": "2014 IEEE/ACS 11th International Conference on Computer Systems and Applications (AICCSA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2011/0394/0/05995310",
"title": "A Sobolev-type metric for polar active contours",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2011/05995310/12OmNzSQdsp",
"parentPublication": {
"id": "proceedings/cvpr/2011/0394/0",
"title": "CVPR 2011",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2008/2174/0/04761748",
"title": "Automated quantification of cell endocytosis using active contours and wavelets",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2008/04761748/12OmNzzfTma",
"parentPublication": {
"id": "proceedings/icpr/2008/2174/0",
"title": "ICPR 2008 19th International Conference on Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2007/08/i1470",
"title": "Tracking Deforming Objects Using Particle Filtering for Geometric Active Contours",
"doi": null,
"abstractUrl": "/journal/tp/2007/08/i1470/13rRUzpzeC8",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tb/2020/03/08490710",
"title": "Automated Cell Tracking Using Motion Prediction-Based Matching and Event Handling",
"doi": null,
"abstractUrl": "/journal/tb/2020/03/08490710/14jQfOsmjli",
"parentPublication": {
"id": "trans/tb",
"title": "IEEE/ACM Transactions on Computational Biology and Bioinformatics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2018/6420/0/642000i877",
"title": "Learning Deep Structured Active Contours End-to-End",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2018/642000i877/17D45XeKgrq",
"parentPublication": {
"id": "proceedings/cvpr/2018/6420/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNzQhP7U",
"title": "2014 Fifth International Conference on Signal and Image Processing (ICSIP)",
"acronym": "icsip",
"groupId": "1800261",
"volume": "0",
"displayVolume": "0",
"year": "2014",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNwEJ0SD",
"doi": "10.1109/ICSIP.2014.28",
"title": "An Active Contour Method for MR Image Segmentation of Anterior Cruciate Ligament (ACL)",
"normalizedTitle": "An Active Contour Method for MR Image Segmentation of Anterior Cruciate Ligament (ACL)",
"abstract": "Image segmentation is a fundamental task in image analysis which is responsible for partitioning an image into multiple sub-regions based on a desired feature. Active contours have been widely used as attractive image segmentation methods because they always produce sub-regions with continuous boundaries, while the kernel-based edge detection methods, e.g. Sobel edge detectors, often produce discontinuous boundaries. The use of level set theory has provided more flexibility and convenience in the implementation of active contours. However, traditional edge-based active contour models have been applicable to only relatively simple images whose sub-regions are uniform without internal edges. Here in this paper we attempt to brief the taxonomy and current state of the art in Image segmentation and usage of Active Contours. The goal of medical image segmentation is to partition a medical image in to separate regions, usually anatomic structures that are meaningful for a specific task. In many medical applications, such as diagnosis, surgery planning, and radiation treatment planning determining of the volume and position of an anatomic structure is required and plays a critical role in the treatment outcome.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Image segmentation is a fundamental task in image analysis which is responsible for partitioning an image into multiple sub-regions based on a desired feature. Active contours have been widely used as attractive image segmentation methods because they always produce sub-regions with continuous boundaries, while the kernel-based edge detection methods, e.g. Sobel edge detectors, often produce discontinuous boundaries. The use of level set theory has provided more flexibility and convenience in the implementation of active contours. However, traditional edge-based active contour models have been applicable to only relatively simple images whose sub-regions are uniform without internal edges. Here in this paper we attempt to brief the taxonomy and current state of the art in Image segmentation and usage of Active Contours. The goal of medical image segmentation is to partition a medical image in to separate regions, usually anatomic structures that are meaningful for a specific task. In many medical applications, such as diagnosis, surgery planning, and radiation treatment planning determining of the volume and position of an anatomic structure is required and plays a critical role in the treatment outcome.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Image segmentation is a fundamental task in image analysis which is responsible for partitioning an image into multiple sub-regions based on a desired feature. Active contours have been widely used as attractive image segmentation methods because they always produce sub-regions with continuous boundaries, while the kernel-based edge detection methods, e.g. Sobel edge detectors, often produce discontinuous boundaries. The use of level set theory has provided more flexibility and convenience in the implementation of active contours. However, traditional edge-based active contour models have been applicable to only relatively simple images whose sub-regions are uniform without internal edges. Here in this paper we attempt to brief the taxonomy and current state of the art in Image segmentation and usage of Active Contours. The goal of medical image segmentation is to partition a medical image in to separate regions, usually anatomic structures that are meaningful for a specific task. In many medical applications, such as diagnosis, surgery planning, and radiation treatment planning determining of the volume and position of an anatomic structure is required and plays a critical role in the treatment outcome.",
"fno": "5100a142",
"keywords": [
"Image Segmentation",
"Knee",
"Ligaments",
"Joints",
"Active Contours",
"Injuries",
"Biomedical Imaging",
"Level Sets",
"Active Contours",
"Snakes"
],
"authors": [
{
"affiliation": null,
"fullName": "N.A. Vinay",
"givenName": "N.A.",
"surname": "Vinay",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "H.C. Vinay",
"givenName": "H.C.",
"surname": "Vinay",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "T.V. Narendra",
"givenName": "T.V.",
"surname": "Narendra",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icsip",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2014-01-01T00:00:00",
"pubType": "proceedings",
"pages": "142-147",
"year": "2014",
"issn": null,
"isbn": "978-0-7695-5100-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "5100a137",
"articleId": "12OmNqOOrKg",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "5100a151",
"articleId": "12OmNx19jZ2",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icdar/2011/4520/0/4520a222",
"title": "Enhanced Active Contour Method for Locating Text",
"doi": null,
"abstractUrl": "/proceedings-article/icdar/2011/4520a222/12OmNBtUdKB",
"parentPublication": {
"id": "proceedings/icdar/2011/4520/0",
"title": "2011 International Conference on Document Analysis and Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cisim/2008/3184/0/3184a207",
"title": "Verification of Hypothesis about Image Content Using Active Contour Approach",
"doi": null,
"abstractUrl": "/proceedings-article/cisim/2008/3184a207/12OmNC3Xhr3",
"parentPublication": {
"id": "proceedings/cisim/2008/3184/0",
"title": "7th Computer Information Systems and Industrial Management Applications (CISIM 2008)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibm/2011/1799/0/06120477",
"title": "Object Segmentation by Comparison of Active Contour Snake and Level Set in Biomedical Applications",
"doi": null,
"abstractUrl": "/proceedings-article/bibm/2011/06120477/12OmNvk7JSs",
"parentPublication": {
"id": "proceedings/bibm/2011/1799/0",
"title": "2011 IEEE International Conference on Bioinformatics and Biomedicine",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/visapp/2014/8133/1/07294780",
"title": "Active contour segmentation with affine coordinate-based parametrization",
"doi": null,
"abstractUrl": "/proceedings-article/visapp/2014/07294780/12OmNwDACvo",
"parentPublication": {
"id": "proceedings/visapp/2014/8133/1",
"title": "2014 International Conference on Computer Vision Theory and Applications (VISAPP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/at-equal/2010/8842/0/05663596",
"title": "Anterior Cruciate Ligament Reconstruction: Soft Tissue vs. Bone-Tendon-Bone",
"doi": null,
"abstractUrl": "/proceedings-article/at-equal/2010/05663596/12OmNx1qV22",
"parentPublication": {
"id": "proceedings/at-equal/2010/8842/0",
"title": "2010 Advanced Technologies for Enhancing Quality of Life (ATEQUAL 2010)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mvhi/2010/4009/0/4009a006",
"title": "Color Image Segmentation Based on a New Geometric Active Contour Model",
"doi": null,
"abstractUrl": "/proceedings-article/mvhi/2010/4009a006/12OmNyyO8OO",
"parentPublication": {
"id": "proceedings/mvhi/2010/4009/0",
"title": "Machine Vision and Human-machine Interface, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/1997/01/i0063",
"title": "A Robust Snake Implementation; A Dual Active Contour",
"doi": null,
"abstractUrl": "/journal/tp/1997/01/i0063/13rRUNvgzjo",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/chase/2021/3965/0/396500a035",
"title": "RT-ACL: Identification of High-Risk Youth Patients and their Most Significant Risk Factors to Reduce Anterior Cruciate Ligament Reinjury Risk",
"doi": null,
"abstractUrl": "/proceedings-article/chase/2021/396500a035/1AIMKbpCUrm",
"parentPublication": {
"id": "proceedings/chase/2021/3965/0",
"title": "2021 IEEE/ACM Conference on Connected Health: Applications, Systems and Engineering Technologies (CHASE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/arace/2022/5153/0/515300a013",
"title": "Active Contour Model for Image Segmentation",
"doi": null,
"abstractUrl": "/proceedings-article/arace/2022/515300a013/1Ip7ISO7Zqo",
"parentPublication": {
"id": "proceedings/arace/2022/5153/0",
"title": "2022 Asia Conference on Advanced Robotics, Automation, and Control Engineering (ARACE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/chase/2019/4687/0/468700a027",
"title": "Poster Abstract: Examining Cross-Validation Strategies for Predictive Modeling of Anterior Cruciate Ligament Reinjury",
"doi": null,
"abstractUrl": "/proceedings-article/chase/2019/468700a027/1febVQ1jvwc",
"parentPublication": {
"id": "proceedings/chase/2019/4687/0",
"title": "2019 IEEE/ACM International Conference on Connected Health: Applications, Systems and Engineering Technologies (CHASE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNx0A7Jf",
"title": "Digital Image Processing, International Conference on",
"acronym": "icdip",
"groupId": "1002808",
"volume": "0",
"displayVolume": "0",
"year": "2009",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNzgNXZU",
"doi": "10.1109/ICDIP.2009.26",
"title": "A New Algorithm for Human Motion Capture via 3D Active Contours",
"normalizedTitle": "A New Algorithm for Human Motion Capture via 3D Active Contours",
"abstract": "Motion capture is one of the most challenging problems in computer vision. In this paper, we propose a new algorithm for markerless human body motion capture. We compute volume data (voxels) representation from the images using the method of SFS (shape from silhouettes). Then we match a predefined human body model with pose parameter to the volume data, and the calculation of this matching is transformed into energy function minimization. In minimizing the energy function, we use a method of 3D active contours to solve this problem. In the process of curving surface evolution, the curving surface will drive the human model close to the visual hull. On the other hand, when the human model is superposed with the human real pose, the curving surface can create a 3D human body reconstruction based on the visual hull and human model. Promising results on real images demonstrate the potentials of the presented method.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Motion capture is one of the most challenging problems in computer vision. In this paper, we propose a new algorithm for markerless human body motion capture. We compute volume data (voxels) representation from the images using the method of SFS (shape from silhouettes). Then we match a predefined human body model with pose parameter to the volume data, and the calculation of this matching is transformed into energy function minimization. In minimizing the energy function, we use a method of 3D active contours to solve this problem. In the process of curving surface evolution, the curving surface will drive the human model close to the visual hull. On the other hand, when the human model is superposed with the human real pose, the curving surface can create a 3D human body reconstruction based on the visual hull and human model. Promising results on real images demonstrate the potentials of the presented method.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Motion capture is one of the most challenging problems in computer vision. In this paper, we propose a new algorithm for markerless human body motion capture. We compute volume data (voxels) representation from the images using the method of SFS (shape from silhouettes). Then we match a predefined human body model with pose parameter to the volume data, and the calculation of this matching is transformed into energy function minimization. In minimizing the energy function, we use a method of 3D active contours to solve this problem. In the process of curving surface evolution, the curving surface will drive the human model close to the visual hull. On the other hand, when the human model is superposed with the human real pose, the curving surface can create a 3D human body reconstruction based on the visual hull and human model. Promising results on real images demonstrate the potentials of the presented method.",
"fno": "3565a112",
"keywords": [
"Motion Capture",
"Active Contours",
"Level Set"
],
"authors": [
{
"affiliation": null,
"fullName": "Chengkai Wan",
"givenName": "Chengkai",
"surname": "Wan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Baozong Yuan",
"givenName": "Baozong",
"surname": "Yuan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Zhenjiang Miao",
"givenName": "Zhenjiang",
"surname": "Miao",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icdip",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2009-03-01T00:00:00",
"pubType": "proceedings",
"pages": "112-116",
"year": "2009",
"issn": null,
"isbn": "978-0-7695-3565-4",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "3565a107",
"articleId": "12OmNAq3hKP",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "3565a117",
"articleId": "12OmNBKmXtP",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/isspit/2013/4796/0/06781905",
"title": "Automated parameterization of active contours: A brief survey",
"doi": null,
"abstractUrl": "/proceedings-article/isspit/2013/06781905/12OmNAlNixW",
"parentPublication": {
"id": "proceedings/isspit/2013/4796/0",
"title": "2013 IEEE International Symposium on Signal Processing and Information Technology (ISSPIT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2006/2597/1/259710841",
"title": "Neighborhood Aided Implicit Active Contours",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2006/259710841/12OmNBEYzRi",
"parentPublication": {
"id": "proceedings/cvpr/2006/2597/2",
"title": "2006 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'06)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/1997/7822/0/78221094",
"title": "Stereo Coupled Active Contours",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/1997/78221094/12OmNqBbHCa",
"parentPublication": {
"id": "proceedings/cvpr/1997/7822/0",
"title": "Proceedings of IEEE Computer Society Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2007/1016/0/04284846",
"title": "Model-Based Markerless Human Body Motion Capture using Multiple Cameras",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2007/04284846/12OmNvmXJ37",
"parentPublication": {
"id": "proceedings/icme/2007/1016/0",
"title": "2007 International Conference on Multimedia & Expo",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2012/2216/0/06460548",
"title": "Active contours segmentation with edge based and local region based",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2012/06460548/12OmNxEjY8g",
"parentPublication": {
"id": "proceedings/icpr/2012/2216/0",
"title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2010/4109/0/4109c266",
"title": "Active Contours with Thresholding Value for Image Segmentation",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2010/4109c266/12OmNxT56Bc",
"parentPublication": {
"id": "proceedings/icpr/2010/4109/0",
"title": "Pattern Recognition, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2007/01/04016546",
"title": "Local or Global Minima: Flexible Dual-Front Active Contours",
"doi": null,
"abstractUrl": "/journal/tp/2007/01/04016546/13rRUwInvBZ",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2004/03/i0402",
"title": "Gradient Vector Flow Fast Geometric Active Contours",
"doi": null,
"abstractUrl": "/journal/tp/2004/03/i0402/13rRUwdrdLO",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2015/08/06995952",
"title": "Multi-Region Active Contours with a Single Level Set Function",
"doi": null,
"abstractUrl": "/journal/tp/2015/08/06995952/13rRUwjXZKV",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2000/03/i0266",
"title": "Geodesic Active Contours and Level Sets for the Detection and Tracking of Moving Objects",
"doi": null,
"abstractUrl": "/journal/tp/2000/03/i0266/13rRUxAASUh",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1tuAeQeDJja",
"title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1tuArQOYe9q",
"doi": "10.1109/VR50410.2021.00023",
"title": "Revealable Volume Displays: 3D Exploration of Mixed-Reality Public Exhibitions",
"normalizedTitle": "Revealable Volume Displays: 3D Exploration of Mixed-Reality Public Exhibitions",
"abstract": "In this paper, we present a class of mixed-reality displays which allow for the 3D exploration of content in public exhibitions. The shared experience of the exhibition and the preservation of artworks are two very important aspects of these contexts, in particular for museum exhibits. The use of display cases as a protection tool is substantially accepted. It decreases the risks of damages to artworks and cultural materials hosted in museums. In addition, the transparent panels create a reflection of the visitors inside the display case. This reflection can be used to augment and interact in 3D with the exhibited content, by coupling Spatial Augmented-Reality and Optical Combiners. We call such a combination a Revealable Volume Display (RVD). It allows visitors to reveal information placed freely inside or around protected artefacts, visible by all, using their reflection in the panel. However, it may also suffer from unfamiliar gestures and disrupted depth perception cues, making 3D exploration of content difficult. In this paper, we first discuss the implementation of RVDs, providing both projector-based and mobile versions. We then present a design space that describes the interaction possibilities that it offers. Drawing on insights from a field study during a first exhibition, we finally propose and evaluate techniques for facilitating 3D exploration with RVDs.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this paper, we present a class of mixed-reality displays which allow for the 3D exploration of content in public exhibitions. The shared experience of the exhibition and the preservation of artworks are two very important aspects of these contexts, in particular for museum exhibits. The use of display cases as a protection tool is substantially accepted. It decreases the risks of damages to artworks and cultural materials hosted in museums. In addition, the transparent panels create a reflection of the visitors inside the display case. This reflection can be used to augment and interact in 3D with the exhibited content, by coupling Spatial Augmented-Reality and Optical Combiners. We call such a combination a Revealable Volume Display (RVD). It allows visitors to reveal information placed freely inside or around protected artefacts, visible by all, using their reflection in the panel. However, it may also suffer from unfamiliar gestures and disrupted depth perception cues, making 3D exploration of content difficult. In this paper, we first discuss the implementation of RVDs, providing both projector-based and mobile versions. We then present a design space that describes the interaction possibilities that it offers. Drawing on insights from a field study during a first exhibition, we finally propose and evaluate techniques for facilitating 3D exploration with RVDs.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this paper, we present a class of mixed-reality displays which allow for the 3D exploration of content in public exhibitions. The shared experience of the exhibition and the preservation of artworks are two very important aspects of these contexts, in particular for museum exhibits. The use of display cases as a protection tool is substantially accepted. It decreases the risks of damages to artworks and cultural materials hosted in museums. In addition, the transparent panels create a reflection of the visitors inside the display case. This reflection can be used to augment and interact in 3D with the exhibited content, by coupling Spatial Augmented-Reality and Optical Combiners. We call such a combination a Revealable Volume Display (RVD). It allows visitors to reveal information placed freely inside or around protected artefacts, visible by all, using their reflection in the panel. However, it may also suffer from unfamiliar gestures and disrupted depth perception cues, making 3D exploration of content difficult. In this paper, we first discuss the implementation of RVDs, providing both projector-based and mobile versions. We then present a design space that describes the interaction possibilities that it offers. Drawing on insights from a field study during a first exhibition, we finally propose and evaluate techniques for facilitating 3D exploration with RVDs.",
"fno": "255600a031",
"keywords": [
"Augmented Reality",
"Museums",
"Three Dimensional Displays",
"Virtual Reality",
"Visual Perception",
"Display Case",
"Protection Tool",
"Artworks",
"Transparent Panels",
"Exhibited Content",
"Spatial Augmented Reality",
"Revealable Volume Display",
"Protected Artefacts",
"3D Exploration",
"Exhibition",
"Revealable Volume Displays",
"Mixed Reality Public",
"Mixed Reality Displays",
"Public Exhibitions",
"Shared Experience",
"Museum Exhibits",
"Couplings",
"Three Dimensional Displays",
"Input Devices",
"User Interfaces",
"Tools",
"Reflection",
"Optical Coupling",
"Human Centered Computing Human Computer Interaction HCI Interaction Devices Graphics Input Devices Human Centered Computing Human Computer Interaction HCI Interaction Devices Displays And Imagers Human Centered Computing Human Computer Interaction HCI Interaction Paradigms Mixed Augmented Reality"
],
"authors": [
{
"affiliation": "CRISTAL, Université de Lille, France",
"fullName": "Fatma Ben Guefrech",
"givenName": "Fatma Ben",
"surname": "Guefrech",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "CRISTAL, Université de Lille, France",
"fullName": "Florent Berthaut",
"givenName": "Florent",
"surname": "Berthaut",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "CRISTAL, Université de Lille, France",
"fullName": "Patricia Plénacoste",
"givenName": "Patricia",
"surname": "Plénacoste",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "CRISTAL, Université de Lille, France",
"fullName": "Yvan Peter",
"givenName": "Yvan",
"surname": "Peter",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "CRISTAL, Université de Lille, France",
"fullName": "Laurent Grisoni",
"givenName": "Laurent",
"surname": "Grisoni",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-03-01T00:00:00",
"pubType": "proceedings",
"pages": "31-39",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-1838-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [
{
"id": "1tuArDgtY0o",
"name": "pvr202118380-09417691s1-mm_255600a031.zip",
"size": "35.4 MB",
"location": "https://www.computer.org/csdl/api/v1/extra/pvr202118380-09417691s1-mm_255600a031.zip",
"__typename": "WebExtraType"
}
],
"adjacentArticles": {
"previous": {
"fno": "255600a021",
"articleId": "1tuAsajTP8c",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "255600a040",
"articleId": "1tuAUKpERUc",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/mra/2016/1375/0/07858995",
"title": "Node Kara: An Audiovisual Mixed Reality Installation",
"doi": null,
"abstractUrl": "/proceedings-article/mra/2016/07858995/12OmNwB2dVW",
"parentPublication": {
"id": "proceedings/mra/2016/1375/0",
"title": "2016 IEEE International Workshop on Mixed Reality Art (MRA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2010/6237/0/05444815",
"title": "Egocentric space-distorting visualizations for rapid environment exploration in mobile mixed reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2010/05444815/12OmNylsZU8",
"parentPublication": {
"id": "proceedings/vr/2010/6237/0",
"title": "2010 IEEE Virtual Reality Conference (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2008/2174/0/04761299",
"title": "Specularity removal and relighting of 3D object model for virtual exhibition",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2008/04761299/12OmNyuPKTC",
"parentPublication": {
"id": "proceedings/icpr/2008/2174/0",
"title": "ICPR 2008 19th International Conference on Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/co/2005/08/r8031",
"title": "Autostereoscopic 3D Displays",
"doi": null,
"abstractUrl": "/magazine/co/2005/08/r8031/13rRUB7a16j",
"parentPublication": {
"id": "mags/co",
"title": "Computer",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/co/2012/05/mco2012050034",
"title": "Reflections on Long-Term Experiments with Public Displays",
"doi": null,
"abstractUrl": "/magazine/co/2012/05/mco2012050034/13rRUwh80Ml",
"parentPublication": {
"id": "mags/co",
"title": "Computer",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2002/03/mcg2002030056",
"title": "Evaluating Graphics Displays for Complex 3D Models",
"doi": null,
"abstractUrl": "/magazine/cg/2002/03/mcg2002030056/13rRUx0xPvw",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2013/02/mcg2013020080",
"title": "Touch-Based Interfaces for Interacting with 3D Content in Public Exhibitions",
"doi": null,
"abstractUrl": "/magazine/cg/2013/02/mcg2013020080/13rRUxZRbrO",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a928",
"title": "[DC] Mixed Reality Interaction for Mobile Knowledge Work",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a928/1CJdRhDCDTO",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvrv/2019/4752/0/09212896",
"title": "Interactive Modeling of Trees Using VR Devices",
"doi": null,
"abstractUrl": "/proceedings-article/icvrv/2019/09212896/1nHRRssduko",
"parentPublication": {
"id": "proceedings/icvrv/2019/4752/0",
"title": "2019 International Conference on Virtual Reality and Visualization (ICVRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icaie/2020/6659/0/665900a258",
"title": "The Construction and Practice of Blended Learning Mode for the Course Market Research Based on WeChat Public Platform",
"doi": null,
"abstractUrl": "/proceedings-article/icaie/2020/665900a258/1oZBEHbTQxq",
"parentPublication": {
"id": "proceedings/icaie/2020/6659/0",
"title": "2020 International Conference on Artificial Intelligence and Education (ICAIE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNzayN6t",
"title": "2013 8th EUROSIM Congress on Modelling and Simulation (EUROSIM)",
"acronym": "eurosim",
"groupId": "1803000",
"volume": "0",
"displayVolume": "0",
"year": "2013",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNBlXs5Y",
"doi": "10.1109/EUROSIM.2013.83",
"title": "Real-Time Traffic Information System Using Microscopic Traffic Simulation",
"normalizedTitle": "Real-Time Traffic Information System Using Microscopic Traffic Simulation",
"abstract": "This paper describes the online, real-time traffic information system OLSIMv4 which is the updated version of the traffic information platform for the large-scale, real-world highway network of North Rhine-Westphalia. OLSIMv4 gathers its traffic information from microscopic traffic simulations that are based on loop detector data. The simulations take advantage of the topological road traffic network information such as speed limits, lane closings or mergings, and overtaking restrictions. As a result OLSIMv4 is prepared to use dynamic traffic information as provided by variable traffic signs and traffic or road works messages. Additionally, OLSIMv4 exploits thread-level parallelism on multi-core machines using a coarse-grained parallel simulation model. Moreover, it substitutes nonexistent and faulty loop detector data with calculated values in order to provide failure-safety. Its simulation results are available for four varying time horizons and they are in good accordance with empirical findings even in scenarios with larger distances between subsequent loop detectors.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper describes the online, real-time traffic information system OLSIMv4 which is the updated version of the traffic information platform for the large-scale, real-world highway network of North Rhine-Westphalia. OLSIMv4 gathers its traffic information from microscopic traffic simulations that are based on loop detector data. The simulations take advantage of the topological road traffic network information such as speed limits, lane closings or mergings, and overtaking restrictions. As a result OLSIMv4 is prepared to use dynamic traffic information as provided by variable traffic signs and traffic or road works messages. Additionally, OLSIMv4 exploits thread-level parallelism on multi-core machines using a coarse-grained parallel simulation model. Moreover, it substitutes nonexistent and faulty loop detector data with calculated values in order to provide failure-safety. Its simulation results are available for four varying time horizons and they are in good accordance with empirical findings even in scenarios with larger distances between subsequent loop detectors.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper describes the online, real-time traffic information system OLSIMv4 which is the updated version of the traffic information platform for the large-scale, real-world highway network of North Rhine-Westphalia. OLSIMv4 gathers its traffic information from microscopic traffic simulations that are based on loop detector data. The simulations take advantage of the topological road traffic network information such as speed limits, lane closings or mergings, and overtaking restrictions. As a result OLSIMv4 is prepared to use dynamic traffic information as provided by variable traffic signs and traffic or road works messages. Additionally, OLSIMv4 exploits thread-level parallelism on multi-core machines using a coarse-grained parallel simulation model. Moreover, it substitutes nonexistent and faulty loop detector data with calculated values in order to provide failure-safety. Its simulation results are available for four varying time horizons and they are in good accordance with empirical findings even in scenarios with larger distances between subsequent loop detectors.",
"fno": "5073a448",
"keywords": [
"Vehicles",
"Detectors",
"Roads",
"Microscopy",
"Data Models",
"Simulation",
"Parallel Programming Model",
"Traffic Information System",
"Real Time Simulation",
"Microscopic Traffic Simulation"
],
"authors": [
{
"affiliation": null,
"fullName": "Johannes Brugmann",
"givenName": "Johannes",
"surname": "Brugmann",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Michael Schreckenberg",
"givenName": "Michael",
"surname": "Schreckenberg",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Wolfram Luther",
"givenName": "Wolfram",
"surname": "Luther",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "eurosim",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2013-09-01T00:00:00",
"pubType": "proceedings",
"pages": "448-453",
"year": "2013",
"issn": null,
"isbn": "978-0-7695-5073-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "5073a443",
"articleId": "12OmNBzAcl6",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "5073a454",
"articleId": "12OmNyen1t3",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/uic-atc-scalcom-cbdcom-iop-smartworld/2016/2771/0/07816841",
"title": "Using Real Traffic Data for ITS Simulation: Procedure and Validation",
"doi": null,
"abstractUrl": "/proceedings-article/uic-atc-scalcom-cbdcom-iop-smartworld/2016/07816841/12OmNAYoKhy",
"parentPublication": {
"id": "proceedings/uic-atc-scalcom-cbdcom-iop-smartworld/2016/2771/0",
"title": "2016 Intl IEEE Conferences on Ubiquitous Intelligence & Computing, Advanced and Trusted Computing, Scalable Computing and Communications, Cloud and Big Data Computing, Internet of People, and Smart World Congress (UIC/ATC/ScalCom/CBDCom/IoP/SmartWorld)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccps/2014/4931/0/06843714",
"title": "Real-time privacy-preserving model-based estimation of traffic flows",
"doi": null,
"abstractUrl": "/proceedings-article/iccps/2014/06843714/12OmNqFJhG4",
"parentPublication": {
"id": "proceedings/iccps/2014/4931/0",
"title": "2014 ACM/IEEE International Conference on Cyber-Physical Systems (ICCPS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ecbs-eerc/2015/7967/0/7967a111",
"title": "Speedup of the Microscopic Road Traffic Simulation Using Aggregated Vehicle Movement",
"doi": null,
"abstractUrl": "/proceedings-article/ecbs-eerc/2015/7967a111/12OmNvSKNQI",
"parentPublication": {
"id": "proceedings/ecbs-eerc/2015/7967/0",
"title": "2015 4th Eastern European Regional Conference on the Engineering of Computer Based Systems (ECBS-EERC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/candar/2013/2796/0/06726951",
"title": "Road Network Determination by Cellular Automata Traffic Flow Simulation",
"doi": null,
"abstractUrl": "/proceedings-article/candar/2013/06726951/12OmNwCsdEd",
"parentPublication": {
"id": "proceedings/candar/2013/2796/0",
"title": "2013 First International Symposium on Computing and Networking - Across Practical Development and Theoretical Research (CANDAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aiccsa/2016/4320/0/07945719",
"title": "An agile verification framework for traffic sign classification algorithms in heavy vehicles",
"doi": null,
"abstractUrl": "/proceedings-article/aiccsa/2016/07945719/12OmNwErpQE",
"parentPublication": {
"id": "proceedings/aiccsa/2016/4320/0",
"title": "2016 IEEE/ACS 13th International Conference of Computer Systems and Applications (AICCSA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/case/2011/1732/0/06042438",
"title": "Potential field-based microscopic modeling of road networks with heterogeneous vehicular traffic",
"doi": null,
"abstractUrl": "/proceedings-article/case/2011/06042438/12OmNyLiuuB",
"parentPublication": {
"id": "proceedings/case/2011/1732/0",
"title": "2011 IEEE International Conference on Automation Science and Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mdm/2019/3363/0/336300a320",
"title": "Building a Large-Scale Microscopic Road Network Traffic Simulator in Apache Spark",
"doi": null,
"abstractUrl": "/proceedings-article/mdm/2019/336300a320/1ckrPWX3OQ8",
"parentPublication": {
"id": "proceedings/mdm/2019/3363/0",
"title": "2019 20th IEEE International Conference on Mobile Data Management (MDM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wi/2019/6934/0/08909555",
"title": "Implementing an Urban Dynamic Traffic Model",
"doi": null,
"abstractUrl": "/proceedings-article/wi/2019/08909555/1febnlRP2DK",
"parentPublication": {
"id": "proceedings/wi/2019/6934/0",
"title": "2019 IEEE/WIC/ACM International Conference on Web Intelligence (WI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icuems/2020/8832/0/09151610",
"title": "Analysis of the impact of Detector Accuracy on Semi-actuated Traffic Signal Control",
"doi": null,
"abstractUrl": "/proceedings-article/icuems/2020/09151610/1lRlSl9odDG",
"parentPublication": {
"id": "proceedings/icuems/2020/8832/0",
"title": "2020 International Conference on Urban Engineering and Management Science (ICUEMS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/03/09616433",
"title": "A Calibrated Force-Based Model for Mixed Traffic Simulation",
"doi": null,
"abstractUrl": "/journal/tg/2023/03/09616433/1yA76RmrVtK",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNzcxYUW",
"title": "2014 7th International Conference on Intelligent Computation Technology and Automation (ICICTA)",
"acronym": "icicta",
"groupId": "1002487",
"volume": "0",
"displayVolume": "0",
"year": "2014",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNs5rl1t",
"doi": "10.1109/ICICTA.2014.210",
"title": "Traffic Simulation Modeling and Analysis of BRT Based on Vissim",
"normalizedTitle": "Traffic Simulation Modeling and Analysis of BRT Based on Vissim",
"abstract": "A field survey of Guangzhou Bus Rapid Transit system was carried out to obtain data along the road and the junction, and on this basis, simulation modeling for Guangzhou city bus rapid transit (BRT) was carried out by the simulation software Vissim microscopic simulation platform. Combined the simulation data with the relevant definition of HCM, an evaluative analysis of impact of Guangzhou BRT on the intersection was carried out.",
"abstracts": [
{
"abstractType": "Regular",
"content": "A field survey of Guangzhou Bus Rapid Transit system was carried out to obtain data along the road and the junction, and on this basis, simulation modeling for Guangzhou city bus rapid transit (BRT) was carried out by the simulation software Vissim microscopic simulation platform. Combined the simulation data with the relevant definition of HCM, an evaluative analysis of impact of Guangzhou BRT on the intersection was carried out.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "A field survey of Guangzhou Bus Rapid Transit system was carried out to obtain data along the road and the junction, and on this basis, simulation modeling for Guangzhou city bus rapid transit (BRT) was carried out by the simulation software Vissim microscopic simulation platform. Combined the simulation data with the relevant definition of HCM, an evaluative analysis of impact of Guangzhou BRT on the intersection was carried out.",
"fno": "6636a879",
"keywords": [
"Roads",
"Vehicles",
"Data Models",
"Delays",
"Analytical Models",
"Microscopy",
"Computational Modeling",
"BRT Vissim Simulation Evaluation The Level Of Service"
],
"authors": [
{
"affiliation": null,
"fullName": "Wu Xiaodan",
"givenName": "Wu",
"surname": "Xiaodan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Huang Junhao",
"givenName": "Huang",
"surname": "Junhao",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icicta",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2014-10-01T00:00:00",
"pubType": "proceedings",
"pages": "879-882",
"year": "2014",
"issn": null,
"isbn": "978-1-4799-6636-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "6636a875",
"articleId": "12OmNCcbE3L",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "6636a883",
"articleId": "12OmNvk7JNs",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/eurosim/2013/5073/0/5073a448",
"title": "Real-Time Traffic Information System Using Microscopic Traffic Simulation",
"doi": null,
"abstractUrl": "/proceedings-article/eurosim/2013/5073a448/12OmNBlXs5Y",
"parentPublication": {
"id": "proceedings/eurosim/2013/5073/0",
"title": "2013 8th EUROSIM Congress on Modelling and Simulation (EUROSIM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icisa/2014/4443/0/06847475",
"title": "Traffic Simulation Software: Traffic Flow Characteristics in CORSIM",
"doi": null,
"abstractUrl": "/proceedings-article/icisa/2014/06847475/12OmNC8uRBz",
"parentPublication": {
"id": "proceedings/icisa/2014/4443/0",
"title": "2014 International Conference on Information Science and Applications (ICISA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ecbs-eerc/2015/7967/0/7967a111",
"title": "Speedup of the Microscopic Road Traffic Simulation Using Aggregated Vehicle Movement",
"doi": null,
"abstractUrl": "/proceedings-article/ecbs-eerc/2015/7967a111/12OmNvSKNQI",
"parentPublication": {
"id": "proceedings/ecbs-eerc/2015/7967/0",
"title": "2015 4th Eastern European Regional Conference on the Engineering of Computer Based Systems (ECBS-EERC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icisa/2014/4443/0/06847430",
"title": "Modeling Traffic Congestion Using Simulation Software",
"doi": null,
"abstractUrl": "/proceedings-article/icisa/2014/06847430/12OmNvSbBrh",
"parentPublication": {
"id": "proceedings/icisa/2014/4443/0",
"title": "2014 International Conference on Information Science and Applications (ICISA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccnea/2017/3981/0/3981a486",
"title": "Evaluation on Traffic Guidance Plan During Construction Period Based on Vissim Simulation",
"doi": null,
"abstractUrl": "/proceedings-article/iccnea/2017/3981a486/12OmNwqft1g",
"parentPublication": {
"id": "proceedings/iccnea/2017/3981/0",
"title": "2017 International Conference on Computer Network, Electronic and Automation (ICCNEA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/case/2011/1732/0/06042438",
"title": "Potential field-based microscopic modeling of road networks with heterogeneous vehicular traffic",
"doi": null,
"abstractUrl": "/proceedings-article/case/2011/06042438/12OmNyLiuuB",
"parentPublication": {
"id": "proceedings/case/2011/1732/0",
"title": "2011 IEEE International Conference on Automation Science and Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icitbs/2015/0464/0/0464a951",
"title": "Traffic Influence of Road Traffic Fire Based on VISSIM",
"doi": null,
"abstractUrl": "/proceedings-article/icitbs/2015/0464a951/12OmNyaGeIA",
"parentPublication": {
"id": "proceedings/icitbs/2015/0464/0",
"title": "2015 International Conference on Intelligent Transportation, Big Data & Smart City (ICITBS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdma/2013/5016/0/5016b627",
"title": "Transit Network Optimization for Feeder Bus of BRT Based on Genetic Algorithm",
"doi": null,
"abstractUrl": "/proceedings-article/icdma/2013/5016b627/12OmNznCl2f",
"parentPublication": {
"id": "proceedings/icdma/2013/5016/0",
"title": "2013 Fourth International Conference on Digital Manufacturing & Automation (ICDMA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/smartcomp/2021/1252/0/125200a069",
"title": "TRANSIT-GYM: A Simulation and Evaluation Engine for Analysis of Bus Transit Systems",
"doi": null,
"abstractUrl": "/proceedings-article/smartcomp/2021/125200a069/1xxcC8kDBEQ",
"parentPublication": {
"id": "proceedings/smartcomp/2021/1252/0",
"title": "2021 IEEE International Conference on Smart Computing (SMARTCOMP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/03/09616433",
"title": "A Calibrated Force-Based Model for Mixed Traffic Simulation",
"doi": null,
"abstractUrl": "/journal/tg/2023/03/09616433/1yA76RmrVtK",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNvpw7hy",
"title": "2015 4th Eastern European Regional Conference on the Engineering of Computer Based Systems (ECBS-EERC)",
"acronym": "ecbs-eerc",
"groupId": "1002918",
"volume": "0",
"displayVolume": "0",
"year": "2015",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNvSKNQI",
"doi": "10.1109/ECBS-EERC.2015.25",
"title": "Speedup of the Microscopic Road Traffic Simulation Using Aggregated Vehicle Movement",
"normalizedTitle": "Speedup of the Microscopic Road Traffic Simulation Using Aggregated Vehicle Movement",
"abstract": "In this paper, we describe an approach to speedup of the microscopic road traffic simulation. This approach is based on an aggregation of the movement of each vehicle from several time steps into a single long movement. This aggregated vehicle movement (AVM) reduces the number of computations necessary for the movement of the vehicles. This, in turn, leads to the savings of the computation time. The AVM is utilizable for the traffic models commonly used in microscopic road traffic simulations. The performance of the AVM was thoroughly tested using five road traffic networks of various sizes and three microscopic traffic models.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this paper, we describe an approach to speedup of the microscopic road traffic simulation. This approach is based on an aggregation of the movement of each vehicle from several time steps into a single long movement. This aggregated vehicle movement (AVM) reduces the number of computations necessary for the movement of the vehicles. This, in turn, leads to the savings of the computation time. The AVM is utilizable for the traffic models commonly used in microscopic road traffic simulations. The performance of the AVM was thoroughly tested using five road traffic networks of various sizes and three microscopic traffic models.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this paper, we describe an approach to speedup of the microscopic road traffic simulation. This approach is based on an aggregation of the movement of each vehicle from several time steps into a single long movement. This aggregated vehicle movement (AVM) reduces the number of computations necessary for the movement of the vehicles. This, in turn, leads to the savings of the computation time. The AVM is utilizable for the traffic models commonly used in microscopic road traffic simulations. The performance of the AVM was thoroughly tested using five road traffic networks of various sizes and three microscopic traffic models.",
"fno": "7967a111",
"keywords": [
"Vehicles",
"Computational Modeling",
"Roads",
"Microscopy",
"Acceleration",
"Traffic Control",
"Mathematical Model",
"Computation Time Savings",
"Road Traffic Simulation",
"Traffic Model",
"Aggregated Vehicle Movement"
],
"authors": [
{
"affiliation": null,
"fullName": "Tomas Potuzak",
"givenName": "Tomas",
"surname": "Potuzak",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ecbs-eerc",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2015-08-01T00:00:00",
"pubType": "proceedings",
"pages": "111-118",
"year": "2015",
"issn": null,
"isbn": "978-1-4673-7967-0",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "7967a104",
"articleId": "12OmNwp74CX",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "7967a119",
"articleId": "12OmNyQph00",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/eurosim/2013/5073/0/5073a448",
"title": "Real-Time Traffic Information System Using Microscopic Traffic Simulation",
"doi": null,
"abstractUrl": "/proceedings-article/eurosim/2013/5073a448/12OmNBlXs5Y",
"parentPublication": {
"id": "proceedings/eurosim/2013/5073/0",
"title": "2013 8th EUROSIM Congress on Modelling and Simulation (EUROSIM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/uksim/2011/4376/0/4376a409",
"title": "Comparison of Road Traffic Network Division Based on Microscopic and Macroscopic Simulation",
"doi": null,
"abstractUrl": "/proceedings-article/uksim/2011/4376a409/12OmNqESueh",
"parentPublication": {
"id": "proceedings/uksim/2011/4376/0",
"title": "Computer Modeling and Simulation, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dsdis/2015/0214/0/0214a355",
"title": "Impact of Driving Behaviour on Emissions and Road Network Performance",
"doi": null,
"abstractUrl": "/proceedings-article/dsdis/2015/0214a355/12OmNwCsdB5",
"parentPublication": {
"id": "proceedings/dsdis/2015/0214/0",
"title": "2015 IEEE International Conference on Data Science and Data Intensive Systems (DSDIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/case/2011/1732/0/06042438",
"title": "Potential field-based microscopic modeling of road networks with heterogeneous vehicular traffic",
"doi": null,
"abstractUrl": "/proceedings-article/case/2011/06042438/12OmNyLiuuB",
"parentPublication": {
"id": "proceedings/case/2011/1732/0",
"title": "2011 IEEE International Conference on Automation Science and Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isdea/2014/4261/0/4261a889",
"title": "Researching on Microscopic Traffic Flow Characteristics of Freeway under Rainy Environment",
"doi": null,
"abstractUrl": "/proceedings-article/isdea/2014/4261a889/12OmNynJMML",
"parentPublication": {
"id": "proceedings/isdea/2014/4261/0",
"title": "2014 Fifth International Conference on Intelligent Systems Design and Engineering Applications (ISDEA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icicta/2012/4637/0/4637a102",
"title": "Application of Improved Quantitative Theory for Microscopic Prediction of Traffic Accidents",
"doi": null,
"abstractUrl": "/proceedings-article/icicta/2012/4637a102/12OmNyrIas1",
"parentPublication": {
"id": "proceedings/icicta/2012/4637/0",
"title": "Intelligent Computation Technology and Automation, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iiai-aai/2022/9755/0/975500a679",
"title": "A high performance approach with MATSim for traffic road simulation",
"doi": null,
"abstractUrl": "/proceedings-article/iiai-aai/2022/975500a679/1GU762UY6as",
"parentPublication": {
"id": "proceedings/iiai-aai/2022/9755/0",
"title": "2022 12th International Congress on Advanced Applied Informatics (IIAI-AAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mdm/2019/3363/0/336300a320",
"title": "Building a Large-Scale Microscopic Road Network Traffic Simulator in Apache Spark",
"doi": null,
"abstractUrl": "/proceedings-article/mdm/2019/336300a320/1ckrPWX3OQ8",
"parentPublication": {
"id": "proceedings/mdm/2019/3363/0",
"title": "2019 20th IEEE International Conference on Mobile Data Management (MDM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hpcc-dss-smartcity/2020/7649/0/764900b104",
"title": "RONIN: a SUMO Interoperable Mesoscopic Urban Traffic Simulator",
"doi": null,
"abstractUrl": "/proceedings-article/hpcc-dss-smartcity/2020/764900b104/1t7mQqrHmiQ",
"parentPublication": {
"id": "proceedings/hpcc-dss-smartcity/2020/7649/0",
"title": "2020 IEEE 22nd International Conference on High Performance Computing and Communications; IEEE 18th International Conference on Smart City; IEEE 6th International Conference on Data Science and Systems (HPCC/SmartCity/DSS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hpcc-dss-smartcity/2020/7649/0/764900b080",
"title": "Analysis of Urban Traffic Incidents Through Road Network Features",
"doi": null,
"abstractUrl": "/proceedings-article/hpcc-dss-smartcity/2020/764900b080/1t7n2Rdahsk",
"parentPublication": {
"id": "proceedings/hpcc-dss-smartcity/2020/7649/0",
"title": "2020 IEEE 22nd International Conference on High Performance Computing and Communications; IEEE 18th International Conference on Smart City; IEEE 6th International Conference on Data Science and Systems (HPCC/SmartCity/DSS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNyNQSGF",
"title": "Knowledge and Systems Engineering, International Conference on",
"acronym": "kse",
"groupId": "1003010",
"volume": "0",
"displayVolume": "0",
"year": "2010",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNyXMQo0",
"doi": "10.1109/KSE.2010.21",
"title": "Simulation of Mixed Traffic Flow within Intersection",
"normalizedTitle": "Simulation of Mixed Traffic Flow within Intersection",
"abstract": "Although many studies have targeted homogeneous and heterogeneous traffic flow, they are normally little or no attention to driver reaction passing intersection. Understanding complex movement within an intersection is the critical task which can explain the reason of traffic jam especially in big cities (for example, Ho Chi Minh City, Hanoi). This paper describes a microscopic model based on Cellular Automata (CA) for mixed traffic flow and is concerned with how the driver interacting with other vehicles in the intersection. A simulation environment is also built with a set of basic rules integrated to handle the way vehicle passing an intersection. Given an O/D matrix, the simulation results present a strong relation between traffic and control parameters even with the given set of rules. Based on this research, further studies can be performed easily in order to understand the heterogeneous 2-wheel dominated traffic.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Although many studies have targeted homogeneous and heterogeneous traffic flow, they are normally little or no attention to driver reaction passing intersection. Understanding complex movement within an intersection is the critical task which can explain the reason of traffic jam especially in big cities (for example, Ho Chi Minh City, Hanoi). This paper describes a microscopic model based on Cellular Automata (CA) for mixed traffic flow and is concerned with how the driver interacting with other vehicles in the intersection. A simulation environment is also built with a set of basic rules integrated to handle the way vehicle passing an intersection. Given an O/D matrix, the simulation results present a strong relation between traffic and control parameters even with the given set of rules. Based on this research, further studies can be performed easily in order to understand the heterogeneous 2-wheel dominated traffic.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Although many studies have targeted homogeneous and heterogeneous traffic flow, they are normally little or no attention to driver reaction passing intersection. Understanding complex movement within an intersection is the critical task which can explain the reason of traffic jam especially in big cities (for example, Ho Chi Minh City, Hanoi). This paper describes a microscopic model based on Cellular Automata (CA) for mixed traffic flow and is concerned with how the driver interacting with other vehicles in the intersection. A simulation environment is also built with a set of basic rules integrated to handle the way vehicle passing an intersection. Given an O/D matrix, the simulation results present a strong relation between traffic and control parameters even with the given set of rules. Based on this research, further studies can be performed easily in order to understand the heterogeneous 2-wheel dominated traffic.",
"fno": "4213a131",
"keywords": [
"Cellular Automata",
"Driver Information Systems",
"Driver Reaction Passing Intersection",
"Traffic Jam",
"Microscopic Model",
"Cellular Automata",
"Mixed Traffic Flow",
"O D Matrix",
"Driver Circuits",
"Roads",
"Biological System Modeling",
"Motorcycles",
"Cities And Towns",
"Layout",
"CA Model",
"Mixed Traffic Flow",
"Driver Behaviors In Intersection"
],
"authors": [
{
"affiliation": "Fac. of Comput. Sci. & Eng., Ho Chi Minh City Univ. of Technol., Ho Chi Minh City, Vietnam",
"fullName": "Vo Hong Thanh",
"givenName": "Vo Hong",
"surname": "Thanh",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Tran Van Hoai",
"givenName": "Tran Van",
"surname": "Hoai",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "kse",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2010-10-01T00:00:00",
"pubType": "proceedings",
"pages": "131-137",
"year": "2010",
"issn": null,
"isbn": "978-1-4244-8334-1",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "4213a125",
"articleId": "12OmNC3FGoT",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "4213a141",
"articleId": "12OmNAhxjEf",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/iscsct/2008/3498/2/3498b268",
"title": "Research and Implementation of Signal Control Simulation for a Single Intersection Based on 3DS MAX",
"doi": null,
"abstractUrl": "/proceedings-article/iscsct/2008/3498b268/12OmNAfPIQo",
"parentPublication": {
"id": "proceedings/iscsct/2008/3498/1",
"title": "2008 International Symposium on Computer Science and Computational Technology (ISCSCT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icoin/2018/2290/0/08343159",
"title": "Reservation-based cooperative traffic management at an intersection of multi-lane roads",
"doi": null,
"abstractUrl": "/proceedings-article/icoin/2018/08343159/12OmNAndik0",
"parentPublication": {
"id": "proceedings/icoin/2018/2290/0",
"title": "2018 International Conference on Information Networking (ICOIN)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cimsim/2010/4262/0/4262a172",
"title": "Optimization of Traffic Flow within an Urban Traffic Light Intersection with Genetic Algorithm",
"doi": null,
"abstractUrl": "/proceedings-article/cimsim/2010/4262a172/12OmNB1NVPW",
"parentPublication": {
"id": "proceedings/cimsim/2010/4262/0",
"title": "Computational Intelligence, Modelling and Simulation, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/etcs/2009/3557/2/3557c789",
"title": "A Study on the Traffic Intersection Vehicle Emission Base on Urban Microscopic Traffic Simulation Model",
"doi": null,
"abstractUrl": "/proceedings-article/etcs/2009/3557c789/12OmNyXMQll",
"parentPublication": {
"id": null,
"title": null,
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icicta/2009/3804/3/3804c523",
"title": "Chinese Driver Behavior Characteristics Research at Intersection based on Intelligent Vehicle-Infrastructure Integration Experimental Platform",
"doi": null,
"abstractUrl": "/proceedings-article/icicta/2009/3804c523/12OmNyjLoPN",
"parentPublication": {
"id": null,
"title": null,
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ithings/2014/5967/0/5967a009",
"title": "Timing Optimization and Control for Smart Traffic",
"doi": null,
"abstractUrl": "/proceedings-article/ithings/2014/5967a009/12OmNyuPLoQ",
"parentPublication": {
"id": "proceedings/ithings/2014/5967/0",
"title": "2014 IEEE International Conference on Internet of Things(iThings), and IEEE Green Computing and Communications (GreenCom) and IEEE Cyber, Physical and Social Computing(CPSCom)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icicta/2017/1230/0/1230a378",
"title": "Study on Traffic Congestion Pre-Control of Link between Non-Signalized Intersections",
"doi": null,
"abstractUrl": "/proceedings-article/icicta/2017/1230a378/12OmNyz5JTX",
"parentPublication": {
"id": "proceedings/icicta/2017/1230/0",
"title": "2017 10th International Conference on Intelligent Computation Technology and Automation (ICICTA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/rtcsa/2019/3197/0/08864572",
"title": "V2V-based Synchronous Intersection Protocols for Mixed Traffic of Human-Driven and Self-Driving Vehicles",
"doi": null,
"abstractUrl": "/proceedings-article/rtcsa/2019/08864572/1e5ZcBmquWc",
"parentPublication": {
"id": "proceedings/rtcsa/2019/3197/0",
"title": "2019 IEEE 25th International Conference on Embedded and Real-Time Computing Systems and Applications (RTCSA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/rtss/2019/6463/0/09052182",
"title": "Work-in-Progress: Synchronous Intersection Management Protocol for Mixed Traffic Flows",
"doi": null,
"abstractUrl": "/proceedings-article/rtss/2019/09052182/1iHT7L37rpK",
"parentPublication": {
"id": "proceedings/rtss/2019/6463/0",
"title": "2019 IEEE Real-Time Systems Symposium (RTSS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mlbdbi/2020/9638/0/963800a464",
"title": "Timing scheme for intelligent intersection with uncertain traffic flow",
"doi": null,
"abstractUrl": "/proceedings-article/mlbdbi/2020/963800a464/1rxhxuOfhW8",
"parentPublication": {
"id": "proceedings/mlbdbi/2020/9638/0",
"title": "2020 2nd International Conference on Machine Learning, Big Data and Business Intelligence (MLBDBI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "17D45VtKisf",
"title": "2018 IEEE/ACM 22nd International Symposium on Distributed Simulation and Real Time Applications (DS-RT)",
"acronym": "ds-rt",
"groupId": "1000218",
"volume": "0",
"displayVolume": "0",
"year": "2018",
"__typename": "ProceedingType"
},
"article": {
"id": "17D45XdBRR1",
"doi": "10.1109/DISTRA.2018.8601016",
"title": "Exploring Execution Schemes for Agent-Based Traffic Simulation on Heterogeneous Hardware",
"normalizedTitle": "Exploring Execution Schemes for Agent-Based Traffic Simulation on Heterogeneous Hardware",
"abstract": "Microscopic traffic simulation is associated with substantial runtimes, limiting the feasibility of large-scale evaluation of traffic scenarios. Even though today heterogeneous hardware comprised of CPUs, graphics processing units (GPUs) and fused CPU-GPU devices is inexpensive and widely available, common traffic simulators still rely purely on CPU-based execution, leaving substantial acceleration potentials untapped. A number of existing works have considered the execution of traffic simulations on accelerators, but have relied on simplified models of road networks and driver behaviour tailored to the given hardware platform. Thus, the existing approaches cannot directly benefit from the vast body of research on the validity of common traffic simulation models. In this paper, we explore the performance gains achievable through the use of heterogeneous hardware when relying on typical traffic simulation models used in CPU-based simulators. We propose a partial offloading approach that relies either on a dedicated GPU or a fused CPU-GPU device. Further, we present a traffic simulation running fully on a manycore GPU and discuss the challenges of this approach. Our results show that a CPU-based parallelisation closely approaches the results of partial offloading, while full offloading substantially outperforms the other approaches. We achieve a speedup of up to 28.7x over the sequential execution on a CPU.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Microscopic traffic simulation is associated with substantial runtimes, limiting the feasibility of large-scale evaluation of traffic scenarios. Even though today heterogeneous hardware comprised of CPUs, graphics processing units (GPUs) and fused CPU-GPU devices is inexpensive and widely available, common traffic simulators still rely purely on CPU-based execution, leaving substantial acceleration potentials untapped. A number of existing works have considered the execution of traffic simulations on accelerators, but have relied on simplified models of road networks and driver behaviour tailored to the given hardware platform. Thus, the existing approaches cannot directly benefit from the vast body of research on the validity of common traffic simulation models. In this paper, we explore the performance gains achievable through the use of heterogeneous hardware when relying on typical traffic simulation models used in CPU-based simulators. We propose a partial offloading approach that relies either on a dedicated GPU or a fused CPU-GPU device. Further, we present a traffic simulation running fully on a manycore GPU and discuss the challenges of this approach. Our results show that a CPU-based parallelisation closely approaches the results of partial offloading, while full offloading substantially outperforms the other approaches. We achieve a speedup of up to 28.7x over the sequential execution on a CPU.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Microscopic traffic simulation is associated with substantial runtimes, limiting the feasibility of large-scale evaluation of traffic scenarios. Even though today heterogeneous hardware comprised of CPUs, graphics processing units (GPUs) and fused CPU-GPU devices is inexpensive and widely available, common traffic simulators still rely purely on CPU-based execution, leaving substantial acceleration potentials untapped. A number of existing works have considered the execution of traffic simulations on accelerators, but have relied on simplified models of road networks and driver behaviour tailored to the given hardware platform. Thus, the existing approaches cannot directly benefit from the vast body of research on the validity of common traffic simulation models. In this paper, we explore the performance gains achievable through the use of heterogeneous hardware when relying on typical traffic simulation models used in CPU-based simulators. We propose a partial offloading approach that relies either on a dedicated GPU or a fused CPU-GPU device. Further, we present a traffic simulation running fully on a manycore GPU and discuss the challenges of this approach. Our results show that a CPU-based parallelisation closely approaches the results of partial offloading, while full offloading substantially outperforms the other approaches. We achieve a speedup of up to 28.7x over the sequential execution on a CPU.",
"fno": "08601016",
"keywords": [
"Graphics Processing Units",
"Computational Modeling",
"Roads",
"Hardware",
"Acceleration",
"Central Processing Unit",
"Vehicles"
],
"authors": [
{
"affiliation": "TUMCREATE and Technische Universitä t München",
"fullName": "Jiajian Xiao",
"givenName": "Jiajian",
"surname": "Xiao",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "TUMCREATE and Nanyang Technological University",
"fullName": "Philipp Andelfinger",
"givenName": "Philipp",
"surname": "Andelfinger",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "TUMCREATE and Technische Universitä t München",
"fullName": "David Eckhoff",
"givenName": "David",
"surname": "Eckhoff",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Nanyang Technological University Singapore",
"fullName": "Wentong Cai",
"givenName": "Wentong",
"surname": "Cai",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Technische Universität München and Nanyang Technological University",
"fullName": "Alois Knoll",
"givenName": "Alois",
"surname": "Knoll",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ds-rt",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2018-10-01T00:00:00",
"pubType": "proceedings",
"pages": "1-10",
"year": "2018",
"issn": "1550-6525",
"isbn": "978-1-5386-5048-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08601004",
"articleId": "17D45WHONnM",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08601011",
"articleId": "17D45WZZ7CC",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/pact/2012/1182/0/07842955",
"title": "Acceleration of bulk memory operations in a heterogeneous multicore architecture",
"doi": null,
"abstractUrl": "/proceedings-article/pact/2012/07842955/12OmNCfjetc",
"parentPublication": {
"id": "proceedings/pact/2012/1182/0",
"title": "2012 21st International Conference on Parallel Architectures and Compilation Techniques (PACT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ic2e/2016/1961/0/1961a222",
"title": "Exploring GPU Acceleration of Apache Spark",
"doi": null,
"abstractUrl": "/proceedings-article/ic2e/2016/1961a222/12OmNCzKlLZ",
"parentPublication": {
"id": "proceedings/ic2e/2016/1961/0",
"title": "2016 IEEE International Conference on Cloud Engineering (IC2E)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpp/2017/1042/0/1042a151",
"title": "MPI-GDS: High Performance MPI Designs with GPUDirect-aSync for CPU-GPU Control Flow Decoupling",
"doi": null,
"abstractUrl": "/proceedings-article/icpp/2017/1042a151/12OmNwe2IAB",
"parentPublication": {
"id": "proceedings/icpp/2017/1042/0",
"title": "2017 46th International Conference on Parallel Processing (ICPP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hpca/2013/5585/0/06522332",
"title": "Reducing GPU offload latency via fine-grained CPU-GPU synchronization",
"doi": null,
"abstractUrl": "/proceedings-article/hpca/2013/06522332/12OmNyQGRYT",
"parentPublication": {
"id": "proceedings/hpca/2013/5585/0",
"title": "2013 IEEE 19th International Symposium on High Performance Computer Architecture (HPCA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dcc/2011/279/0/05749536",
"title": "Parallel Processing of DCT on GPU",
"doi": null,
"abstractUrl": "/proceedings-article/dcc/2011/05749536/12OmNyVes1L",
"parentPublication": {
"id": "proceedings/dcc/2011/279/0",
"title": "2011 Data Compression Conference (DCC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tc/2016/10/07366552",
"title": "Hardware Support for Concurrent Detection of Multiple Concurrency Bugs on Fused CPU-GPU Architectures",
"doi": null,
"abstractUrl": "/journal/tc/2016/10/07366552/13rRUyYSWkh",
"parentPublication": {
"id": "trans/tc",
"title": "IEEE Transactions on Computers",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/trustcom-bigdatase-icess/2017/4906/0/08029512",
"title": "A Sample-Based Dynamic CPU and GPU LLC Bypassing Method for Heterogeneous CPU-GPU Architectures",
"doi": null,
"abstractUrl": "/proceedings-article/trustcom-bigdatase-icess/2017/08029512/17D45VtKixh",
"parentPublication": {
"id": "proceedings/trustcom-bigdatase-icess/2017/4906/0",
"title": "2017 IEEE Trustcom/BigDataSE/ICESS",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pact/2019/3613/0/361300a337",
"title": "EDGE: Event-Driven GPU Execution",
"doi": null,
"abstractUrl": "/proceedings-article/pact/2019/361300a337/1eLy02geUZa",
"parentPublication": {
"id": "proceedings/pact/2019/3613/0",
"title": "2019 28th International Conference on Parallel Architectures and Compilation Techniques (PACT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icnp/2019/2700/0/08888129",
"title": "FlowShader: a Generalized Framework for GPU-accelerated VNF Flow Processing",
"doi": null,
"abstractUrl": "/proceedings-article/icnp/2019/08888129/1ezRJxUria4",
"parentPublication": {
"id": "proceedings/icnp/2019/2700/0",
"title": "2019 IEEE 27th International Conference on Network Protocols (ICNP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pdp/2020/6582/0/09092314",
"title": "CoopCL: Cooperative Execution of OpenCL Programs on Heterogeneous CPU-GPU Platforms",
"doi": null,
"abstractUrl": "/proceedings-article/pdp/2020/09092314/1jPaZus7afK",
"parentPublication": {
"id": "proceedings/pdp/2020/6582/0",
"title": "2020 28th Euromicro International Conference on Parallel, Distributed and Network-Based Processing (PDP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1GU6OXhOl4k",
"title": "2022 12th International Congress on Advanced Applied Informatics (IIAI-AAI)",
"acronym": "iiai-aai",
"groupId": "1801921",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1GU762UY6as",
"doi": "10.1109/IIAIAAI55812.2022.00140",
"title": "A high performance approach with MATSim for traffic road simulation",
"normalizedTitle": "A high performance approach with MATSim for traffic road simulation",
"abstract": "Road traffic simulation becomes more and more accurate over time. From macroscopic simulations based on fluid equations, for example, to microscopic simulations based on the multi-agent paradigm, innovations have continued to emerge in recent years. Multi-agent models such as MATSim [1], POLARIS [2] or SimMobility [3] have been highlighted lately in response to complex and microscopic simulation requirements. However, this microscopic and large-scale approach requires much higher computing power than that delivered by a home computer. High-performance computing approach can be a relevant response to this kind of problem. MATsim due to its module structure was selected as the basis of our study. We introduce a new concept for the design of parallel algorithm of MATSim. The parallel architectures used as support for the experiments presented are provided by GRID’5000 (or G5K) [4].",
"abstracts": [
{
"abstractType": "Regular",
"content": "Road traffic simulation becomes more and more accurate over time. From macroscopic simulations based on fluid equations, for example, to microscopic simulations based on the multi-agent paradigm, innovations have continued to emerge in recent years. Multi-agent models such as MATSim [1], POLARIS [2] or SimMobility [3] have been highlighted lately in response to complex and microscopic simulation requirements. However, this microscopic and large-scale approach requires much higher computing power than that delivered by a home computer. High-performance computing approach can be a relevant response to this kind of problem. MATsim due to its module structure was selected as the basis of our study. We introduce a new concept for the design of parallel algorithm of MATSim. The parallel architectures used as support for the experiments presented are provided by GRID’5000 (or G5K) [4].",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Road traffic simulation becomes more and more accurate over time. From macroscopic simulations based on fluid equations, for example, to microscopic simulations based on the multi-agent paradigm, innovations have continued to emerge in recent years. Multi-agent models such as MATSim [1], POLARIS [2] or SimMobility [3] have been highlighted lately in response to complex and microscopic simulation requirements. However, this microscopic and large-scale approach requires much higher computing power than that delivered by a home computer. High-performance computing approach can be a relevant response to this kind of problem. MATsim due to its module structure was selected as the basis of our study. We introduce a new concept for the design of parallel algorithm of MATSim. The parallel architectures used as support for the experiments presented are provided by GRID’5000 (or G5K) [4].",
"fno": "975500a679",
"keywords": [
"Multi Agent Systems",
"Parallel Processing",
"Road Traffic",
"Traffic Engineering Computing",
"Road Traffic Simulation",
"Macroscopic Simulations",
"Fluid Equations",
"Microscopic Simulations",
"Multiagent Models",
"MAT Sim",
"Complex Simulation Requirements",
"Microscopic Simulation Requirements",
"Home Computer",
"High Performance Computing Approach",
"POLARIS",
"Sim Mobility",
"Technological Innovation",
"Roads",
"Microscopy",
"Computational Modeling",
"High Performance Computing",
"Traffic Control",
"Mathematical Models",
"Road Traffic Simulation",
"Big Data Analysis",
"High Performance Computing",
"Complex And Heterogeneous Dynamic System"
],
"authors": [
{
"affiliation": null,
"fullName": "Sara Moukir",
"givenName": "Sara",
"surname": "Moukir",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Nahid Emad",
"givenName": "Nahid",
"surname": "Emad",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Stéphane Baudelocq",
"givenName": "Stéphane",
"surname": "Baudelocq",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iiai-aai",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-07-01T00:00:00",
"pubType": "proceedings",
"pages": "679-681",
"year": "2022",
"issn": "2472-0070",
"isbn": "978-1-6654-9755-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "975500a677",
"articleId": "1GU79GCCTD2",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "975500a682",
"articleId": "1GU70nQznVe",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icmtma/2009/3583/3/3583c713",
"title": "An Extensible Multi-agent Based Traffic Simulation System",
"doi": null,
"abstractUrl": "/proceedings-article/icmtma/2009/3583c713/12OmNBRKwBn",
"parentPublication": {
"id": null,
"title": null,
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/eurosim/2013/5073/0/5073a448",
"title": "Real-Time Traffic Information System Using Microscopic Traffic Simulation",
"doi": null,
"abstractUrl": "/proceedings-article/eurosim/2013/5073a448/12OmNBlXs5Y",
"parentPublication": {
"id": "proceedings/eurosim/2013/5073/0",
"title": "2013 8th EUROSIM Congress on Modelling and Simulation (EUROSIM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icisa/2014/4443/0/06847475",
"title": "Traffic Simulation Software: Traffic Flow Characteristics in CORSIM",
"doi": null,
"abstractUrl": "/proceedings-article/icisa/2014/06847475/12OmNC8uRBz",
"parentPublication": {
"id": "proceedings/icisa/2014/4443/0",
"title": "2014 International Conference on Information Science and Applications (ICISA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cso/2010/6812/1/05532914",
"title": "Dynamic Relocating Vehicle Resources Using a Microscopic Traffic Simulation Model for Carsharing Services",
"doi": null,
"abstractUrl": "/proceedings-article/cso/2010/05532914/12OmNqGRGp5",
"parentPublication": {
"id": "proceedings/cso/2010/6812/1",
"title": "2010 Third International Joint Conference on Computational Science and Optimization",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ecbs-eerc/2015/7967/0/7967a111",
"title": "Speedup of the Microscopic Road Traffic Simulation Using Aggregated Vehicle Movement",
"doi": null,
"abstractUrl": "/proceedings-article/ecbs-eerc/2015/7967a111/12OmNvSKNQI",
"parentPublication": {
"id": "proceedings/ecbs-eerc/2015/7967/0",
"title": "2015 4th Eastern European Regional Conference on the Engineering of Computer Based Systems (ECBS-EERC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fskd/2009/3735/4/3735d370",
"title": "Simulation of Signal Control Optimization for Intersections in Large-scale Transport Network",
"doi": null,
"abstractUrl": "/proceedings-article/fskd/2009/3735d370/12OmNvlxJs4",
"parentPublication": {
"id": "proceedings/fskd/2009/3735/4",
"title": "Fuzzy Systems and Knowledge Discovery, Fourth International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mdm/2019/3363/0/336300a320",
"title": "Building a Large-Scale Microscopic Road Network Traffic Simulator in Apache Spark",
"doi": null,
"abstractUrl": "/proceedings-article/mdm/2019/336300a320/1ckrPWX3OQ8",
"parentPublication": {
"id": "proceedings/mdm/2019/3363/0",
"title": "2019 20th IEEE International Conference on Mobile Data Management (MDM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hpcc-dss-smartcity/2020/7649/0/764900b104",
"title": "RONIN: a SUMO Interoperable Mesoscopic Urban Traffic Simulator",
"doi": null,
"abstractUrl": "/proceedings-article/hpcc-dss-smartcity/2020/764900b104/1t7mQqrHmiQ",
"parentPublication": {
"id": "proceedings/hpcc-dss-smartcity/2020/7649/0",
"title": "2020 IEEE 22nd International Conference on High Performance Computing and Communications; IEEE 18th International Conference on Smart City; IEEE 6th International Conference on Data Science and Systems (HPCC/SmartCity/DSS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hpcc-dss-smartcity/2020/7649/0/764900b080",
"title": "Analysis of Urban Traffic Incidents Through Road Network Features",
"doi": null,
"abstractUrl": "/proceedings-article/hpcc-dss-smartcity/2020/764900b080/1t7n2Rdahsk",
"parentPublication": {
"id": "proceedings/hpcc-dss-smartcity/2020/7649/0",
"title": "2020 IEEE 22nd International Conference on High Performance Computing and Communications; IEEE 18th International Conference on Smart City; IEEE 6th International Conference on Data Science and Systems (HPCC/SmartCity/DSS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/03/09616433",
"title": "A Calibrated Force-Based Model for Mixed Traffic Simulation",
"doi": null,
"abstractUrl": "/journal/tg/2023/03/09616433/1yA76RmrVtK",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1ckrJNTI5Ko",
"title": "2019 20th IEEE International Conference on Mobile Data Management (MDM)",
"acronym": "mdm",
"groupId": "1000468",
"volume": "0",
"displayVolume": "0",
"year": "2019",
"__typename": "ProceedingType"
},
"article": {
"id": "1ckrPWX3OQ8",
"doi": "10.1109/MDM.2019.00-42",
"title": "Building a Large-Scale Microscopic Road Network Traffic Simulator in Apache Spark",
"normalizedTitle": "Building a Large-Scale Microscopic Road Network Traffic Simulator in Apache Spark",
"abstract": "Road network traffic data has been widely studied by researchers and practitioners in different areas such as urban planning, traffic prediction, and spatial-temporal databases. For instance, researchers use such data to evaluate the impact of road network changes. Unfortunately, collecting large-scale high-quality urban traffic data requires tremendous efforts because participating vehicles must install GPS receivers and administrators must continuously monitor these devices. There has been a number of urban traffic simulators trying to generate such data with different features. However, they suffer from two critical issues (1) scalability: most of them only offer single-machine solution which is not adequate to produce large-scale data. Some simulators can generate traffic in parallel but do not well balance the load among machines in a cluster. (2) granularity: many simulators do not consider microscopic traffic situations including traffic lights, lane changing, car following. In the paper, we propose GeoSparkSim, a scalable traffic simulator which extends Apache Spark to generate large-scale road network traffic datasets with microscopic traffic simulation. The proposed system seamlessly integrates with a Spark-based spatial data management system, GeoSpark, to deliver a holistic approach that allows data scientists to simulate, analyze and visualize largescale urban traffic data. To implement microscopic traffic models, GeoSparkSim employs a simulation-aware vehicle partitioning method to partition vehicles among different machines such that each machine has a balanced workload. The experimental analysis shows that GeoSparkSim can simulate the movements of 200 thousand vehicles over a very large road network (250 thousand road junctions and 300 thousand road segments).",
"abstracts": [
{
"abstractType": "Regular",
"content": "Road network traffic data has been widely studied by researchers and practitioners in different areas such as urban planning, traffic prediction, and spatial-temporal databases. For instance, researchers use such data to evaluate the impact of road network changes. Unfortunately, collecting large-scale high-quality urban traffic data requires tremendous efforts because participating vehicles must install GPS receivers and administrators must continuously monitor these devices. There has been a number of urban traffic simulators trying to generate such data with different features. However, they suffer from two critical issues (1) scalability: most of them only offer single-machine solution which is not adequate to produce large-scale data. Some simulators can generate traffic in parallel but do not well balance the load among machines in a cluster. (2) granularity: many simulators do not consider microscopic traffic situations including traffic lights, lane changing, car following. In the paper, we propose GeoSparkSim, a scalable traffic simulator which extends Apache Spark to generate large-scale road network traffic datasets with microscopic traffic simulation. The proposed system seamlessly integrates with a Spark-based spatial data management system, GeoSpark, to deliver a holistic approach that allows data scientists to simulate, analyze and visualize largescale urban traffic data. To implement microscopic traffic models, GeoSparkSim employs a simulation-aware vehicle partitioning method to partition vehicles among different machines such that each machine has a balanced workload. The experimental analysis shows that GeoSparkSim can simulate the movements of 200 thousand vehicles over a very large road network (250 thousand road junctions and 300 thousand road segments).",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Road network traffic data has been widely studied by researchers and practitioners in different areas such as urban planning, traffic prediction, and spatial-temporal databases. For instance, researchers use such data to evaluate the impact of road network changes. Unfortunately, collecting large-scale high-quality urban traffic data requires tremendous efforts because participating vehicles must install GPS receivers and administrators must continuously monitor these devices. There has been a number of urban traffic simulators trying to generate such data with different features. However, they suffer from two critical issues (1) scalability: most of them only offer single-machine solution which is not adequate to produce large-scale data. Some simulators can generate traffic in parallel but do not well balance the load among machines in a cluster. (2) granularity: many simulators do not consider microscopic traffic situations including traffic lights, lane changing, car following. In the paper, we propose GeoSparkSim, a scalable traffic simulator which extends Apache Spark to generate large-scale road network traffic datasets with microscopic traffic simulation. The proposed system seamlessly integrates with a Spark-based spatial data management system, GeoSpark, to deliver a holistic approach that allows data scientists to simulate, analyze and visualize largescale urban traffic data. To implement microscopic traffic models, GeoSparkSim employs a simulation-aware vehicle partitioning method to partition vehicles among different machines such that each machine has a balanced workload. The experimental analysis shows that GeoSparkSim can simulate the movements of 200 thousand vehicles over a very large road network (250 thousand road junctions and 300 thousand road segments).",
"fno": "336300a320",
"keywords": [
"Cluster Computing",
"Computer Simulation",
"Data Analysis",
"Data Visualisation",
"Parallel Processing",
"Pattern Clustering",
"Road Traffic",
"Traffic Engineering Computing",
"Urban Traffic Simulators",
"Apache Spark",
"Microscopic Traffic Simulation",
"Spark Based Spatial Data Management System",
"Road Network Traffic Data",
"Simulation Aware Vehicle Partitioning",
"Microscopic Road Network Traffic Simulator",
"Geo Spark Sim Traffic Simulator",
"Geo Spark",
"Urban Traffic Data Analysis",
"Urban Traffic Data Visualization",
"Roads",
"Microscopy",
"Computational Modeling",
"Data Models",
"Cluster Computing",
"Sparks",
"Graphical User Interfaces",
"Spatio Temporal Data",
"Apache Spark",
"Traffic Model",
"Microscopic Traffic Simulation"
],
"authors": [
{
"affiliation": "Arizona State University",
"fullName": "Zishan Fu",
"givenName": "Zishan",
"surname": "Fu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Arizona State University",
"fullName": "Jia Yu",
"givenName": "Jia",
"surname": "Yu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Arizona State University",
"fullName": "Mohamed Sarwat",
"givenName": "Mohamed",
"surname": "Sarwat",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "mdm",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2019-06-01T00:00:00",
"pubType": "proceedings",
"pages": "320-328",
"year": "2019",
"issn": null,
"isbn": "978-1-7281-3363-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "336300a304",
"articleId": "1ckrQynyBby",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "336300a329",
"articleId": "1ckrPkrRiVy",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/eurosim/2013/5073/0/5073a448",
"title": "Real-Time Traffic Information System Using Microscopic Traffic Simulation",
"doi": null,
"abstractUrl": "/proceedings-article/eurosim/2013/5073a448/12OmNBlXs5Y",
"parentPublication": {
"id": "proceedings/eurosim/2013/5073/0",
"title": "2013 8th EUROSIM Congress on Modelling and Simulation (EUROSIM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ecbs-eerc/2015/7967/0/7967a111",
"title": "Speedup of the Microscopic Road Traffic Simulation Using Aggregated Vehicle Movement",
"doi": null,
"abstractUrl": "/proceedings-article/ecbs-eerc/2015/7967a111/12OmNvSKNQI",
"parentPublication": {
"id": "proceedings/ecbs-eerc/2015/7967/0",
"title": "2015 4th Eastern European Regional Conference on the Engineering of Computer Based Systems (ECBS-EERC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/uksim/2016/0888/0/07796714",
"title": "Stochastic Optimization for Macroscopic Urban Traffic Model with Microscopic Elements",
"doi": null,
"abstractUrl": "/proceedings-article/uksim/2016/07796714/12OmNvrMUfy",
"parentPublication": {
"id": "proceedings/uksim/2016/0888/0",
"title": "2016 UKSim-AMSS 18th International Conference on Computer Modelling and Simulation (UKSim)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/case/2011/1732/0/06042438",
"title": "Potential field-based microscopic modeling of road networks with heterogeneous vehicular traffic",
"doi": null,
"abstractUrl": "/proceedings-article/case/2011/06042438/12OmNyLiuuB",
"parentPublication": {
"id": "proceedings/case/2011/1732/0",
"title": "2011 IEEE International Conference on Automation Science and Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iiai-aai/2022/9755/0/975500a679",
"title": "A high performance approach with MATSim for traffic road simulation",
"doi": null,
"abstractUrl": "/proceedings-article/iiai-aai/2022/975500a679/1GU762UY6as",
"parentPublication": {
"id": "proceedings/iiai-aai/2022/9755/0",
"title": "2022 12th International Congress on Advanced Applied Informatics (IIAI-AAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aiotcs/2022/3410/0/341000a441",
"title": "Optimization model of road route selection based on road design and traffic planning",
"doi": null,
"abstractUrl": "/proceedings-article/aiotcs/2022/341000a441/1MuZIBBtXMI",
"parentPublication": {
"id": "proceedings/aiotcs/2022/3410/0",
"title": "2022 International Conference on Artificial Intelligence of Things and Crowdsensing (AIoTCs)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hpcc-dss-smartcity/2020/7649/0/764900b104",
"title": "RONIN: a SUMO Interoperable Mesoscopic Urban Traffic Simulator",
"doi": null,
"abstractUrl": "/proceedings-article/hpcc-dss-smartcity/2020/764900b104/1t7mQqrHmiQ",
"parentPublication": {
"id": "proceedings/hpcc-dss-smartcity/2020/7649/0",
"title": "2020 IEEE 22nd International Conference on High Performance Computing and Communications; IEEE 18th International Conference on Smart City; IEEE 6th International Conference on Data Science and Systems (HPCC/SmartCity/DSS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hpcc-dss-smartcity/2020/7649/0/764900b080",
"title": "Analysis of Urban Traffic Incidents Through Road Network Features",
"doi": null,
"abstractUrl": "/proceedings-article/hpcc-dss-smartcity/2020/764900b080/1t7n2Rdahsk",
"parentPublication": {
"id": "proceedings/hpcc-dss-smartcity/2020/7649/0",
"title": "2020 IEEE 22nd International Conference on High Performance Computing and Communications; IEEE 18th International Conference on Smart City; IEEE 6th International Conference on Data Science and Systems (HPCC/SmartCity/DSS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icicta/2020/8666/0/866600a131",
"title": "Mountain Road Planning and Analysis Model Based on Data Mining",
"doi": null,
"abstractUrl": "/proceedings-article/icicta/2020/866600a131/1wRIsOgqFXy",
"parentPublication": {
"id": "proceedings/icicta/2020/8666/0",
"title": "2020 13th International Conference on Intelligent Computation Technology and Automation (ICICTA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/03/09616433",
"title": "A Calibrated Force-Based Model for Mixed Traffic Simulation",
"doi": null,
"abstractUrl": "/journal/tg/2023/03/09616433/1yA76RmrVtK",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1uGXUBMLmlG",
"title": "2020 8th International Conference on Digital Home (ICDH)",
"acronym": "icdh",
"groupId": "1802037",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1uGXV1Qs8IU",
"doi": "10.1109/ICDH51081.2020.00016",
"title": "A Controllable Spring Force Based Method for Fluid Surface Disturbance Details Simulation",
"normalizedTitle": "A Controllable Spring Force Based Method for Fluid Surface Disturbance Details Simulation",
"abstract": "Particle based simulations are widely used in computer graphics. However, few methods use active control for fluid surface. Thus we present a new controllable method based on the spring force model, which provides active control for the disturbance of the fluid surface. This method modifies the spatial distribution of the particles to generate disturbance. To make it controllable, we design a disturbance equation, using the relationship between the macrostate of fluid and the microscopic particles in the Boltzmann's Entropic Equation. Moreover, we also propose multiple optimizations in order to improve appearance of fluid surface. The correction of fluid splash first detects the target particles by a hybrid method, then adjusts the spring force that acts on these particles. The spring force attenuation model uses a sigmoid function to represent the attenuation of particles at different depths. Experimental results show that our method generates the expected disturbance details. The optimizations enhance the realism of the disturbance simulation.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Particle based simulations are widely used in computer graphics. However, few methods use active control for fluid surface. Thus we present a new controllable method based on the spring force model, which provides active control for the disturbance of the fluid surface. This method modifies the spatial distribution of the particles to generate disturbance. To make it controllable, we design a disturbance equation, using the relationship between the macrostate of fluid and the microscopic particles in the Boltzmann's Entropic Equation. Moreover, we also propose multiple optimizations in order to improve appearance of fluid surface. The correction of fluid splash first detects the target particles by a hybrid method, then adjusts the spring force that acts on these particles. The spring force attenuation model uses a sigmoid function to represent the attenuation of particles at different depths. Experimental results show that our method generates the expected disturbance details. The optimizations enhance the realism of the disturbance simulation.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Particle based simulations are widely used in computer graphics. However, few methods use active control for fluid surface. Thus we present a new controllable method based on the spring force model, which provides active control for the disturbance of the fluid surface. This method modifies the spatial distribution of the particles to generate disturbance. To make it controllable, we design a disturbance equation, using the relationship between the macrostate of fluid and the microscopic particles in the Boltzmann's Entropic Equation. Moreover, we also propose multiple optimizations in order to improve appearance of fluid surface. The correction of fluid splash first detects the target particles by a hybrid method, then adjusts the spring force that acts on these particles. The spring force attenuation model uses a sigmoid function to represent the attenuation of particles at different depths. Experimental results show that our method generates the expected disturbance details. The optimizations enhance the realism of the disturbance simulation.",
"fno": "923400a045",
"keywords": [
"Flow Simulation",
"Optimisation",
"Springs Mechanical",
"Controllable Spring Force Based Method",
"Particle Based Simulations",
"Active Control",
"Spring Force Model",
"Disturbance Equation",
"Microscopic Particles",
"Fluid Splash",
"Hybrid Method",
"Spring Force Attenuation Model",
"Disturbance Simulation",
"Fluid Surface Disturbance",
"Computer Graphics",
"Spatial Distribution",
"Boltzmann Entropic Equation",
"Multiple Optimizations",
"Sigmoid Function",
"Visualization",
"Fluids",
"Graphical Models",
"Computational Modeling",
"Microscopy",
"Force",
"Attenuation",
"Fluid Simulation",
"Active Control",
"Spring Force"
],
"authors": [
{
"affiliation": "Sun Yat-sen University,School of Data and Computer Science,Guangzhou,China",
"fullName": "Languang Gao",
"givenName": "Languang",
"surname": "Gao",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Sun Yat-sen University,School of Data and Computer Science,Guangzhou,China",
"fullName": "Weina Jiang",
"givenName": "Weina",
"surname": "Jiang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Sun Yat-sen University,School of Data and Computer Science,Guangzhou,China",
"fullName": "Chengying Gao",
"givenName": "Chengying",
"surname": "Gao",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icdh",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-09-01T00:00:00",
"pubType": "proceedings",
"pages": "45-50",
"year": "2020",
"issn": null,
"isbn": "978-1-7281-9234-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "923400a039",
"articleId": "1uGXXh2qEko",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "923400a051",
"articleId": "1uGY1CeGJtS",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [
{
"id": "proceedings/icvrv/2015/7673/0/7673a295",
"title": "SPH-based Fluid Simulation with a New Surface Tension Formulation",
"doi": null,
"abstractUrl": "/proceedings-article/icvrv/2015/7673a295/12OmNAS9zo4",
"parentPublication": {
"id": "proceedings/icvrv/2015/7673/0",
"title": "2015 International Conference on Virtual Reality and Visualization (ICVRV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ams/2011/4414/0/4414a117",
"title": "Simulation of Routing in Nano-manipulation for Creating Pattern with Atomic Force Microscopy Using Genetic Algorithm",
"doi": null,
"abstractUrl": "/proceedings-article/ams/2011/4414a117/12OmNC4wtzn",
"parentPublication": {
"id": "proceedings/ams/2011/4414/0",
"title": "Asia International Conference on Modelling & Simulation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cw/2016/2303/0/2303a143",
"title": "Adaptiving Time Steps for SPH Cloth-Fluid Coupling",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2016/2303a143/12OmNrJAe5i",
"parentPublication": {
"id": "proceedings/cw/2016/2303/0",
"title": "2016 International Conference on Cyberworlds (CW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cw/2016/2303/0/2303a235",
"title": "Individual Time-Stepping for Rigid-Fluid Coupling of Particle Based Fluids",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2016/2303a235/12OmNvT2oZL",
"parentPublication": {
"id": "proceedings/cw/2016/2303/0",
"title": "2016 International Conference on Cyberworlds (CW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sbgames/2009/3963/0/3963a032",
"title": "GPU Based Fluid Animation over Elastic Surface Models",
"doi": null,
"abstractUrl": "/proceedings-article/sbgames/2009/3963a032/12OmNvlPkwB",
"parentPublication": {
"id": "proceedings/sbgames/2009/3963/0",
"title": "2009 VIII Brazilian Symposium on Games and Digital Entertainment",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2007/06/mcg2007060087",
"title": "Spring-Bead Animation of Viscoelastic Materials",
"doi": null,
"abstractUrl": "/magazine/cg/2007/06/mcg2007060087/13rRUxjQy6F",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icitbs/2019/1307/0/130700a665",
"title": "Prediction of Quasi - Fluid Performance in Different Volume Concentration of Deep Sea Mining",
"doi": null,
"abstractUrl": "/proceedings-article/icitbs/2019/130700a665/18Av1GBA6ly",
"parentPublication": {
"id": "proceedings/icitbs/2019/1307/0",
"title": "2019 International Conference on Intelligent Transportation, Big Data & Smart City (ICITBS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/5555/01/10015628",
"title": "Full-Volume 3D Fluid Flow Reconstruction With Light Field PIV",
"doi": null,
"abstractUrl": "/journal/tp/5555/01/10015628/1JR6d0EQ2o8",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ifeea/2021/0088/0/008800a530",
"title": "Numerical simulation study on the effect of supercritical carbon dioxide on particles in the microchannel slip zone",
"doi": null,
"abstractUrl": "/proceedings-article/ifeea/2021/008800a530/1LHcPrBq0xy",
"parentPublication": {
"id": "proceedings/ifeea/2021/0088/0",
"title": "2021 8th International Forum on Electrical Engineering and Automation (IFEEA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aiam/2022/6399/0/639900a504",
"title": "Study on the simulation method of fluid-structure interaction in high voltage circuit breaker repulsion device",
"doi": null,
"abstractUrl": "/proceedings-article/aiam/2022/639900a504/1LRlzB91Edi",
"parentPublication": {
"id": "proceedings/aiam/2022/6399/0",
"title": "2022 4th International Conference on Artificial Intelligence and Advanced Manufacturing (AIAM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNzSh1aC",
"title": "2012 Third International Conference on Digital Manufacturing & Automation",
"acronym": "icdma",
"groupId": "1800272",
"volume": "0",
"displayVolume": "0",
"year": "2012",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNs0TKLE",
"doi": "10.1109/ICDMA.2012.1",
"title": "3D Model Retrieval Based on Projected Area at Mesh Vertex",
"normalizedTitle": "3D Model Retrieval Based on Projected Area at Mesh Vertex",
"abstract": "The conventional shape feature-based method did not describe the local feature enough. This paper proposed a new 3D model retrieval method based on projected area at mesh vertex. First, sum the projected area on vertical plane of the normal vector at mesh vertex, then normalized the list of the projected area distributions and transfer these data by Fourier transform method. Then the result is defined as 3D model's feature vector which can be used to calculate the similarity of different models. Experiments were conducted to evaluate the proposed algorithm utilizing the Engineering Shape Benchmark (ESB) database. The experiential results show that the proposed methods effectively reflect the similarity among engineering models, and the match result of the models is accurate and the retrieval performance is significantly improved compared to traditional shape distribution method.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The conventional shape feature-based method did not describe the local feature enough. This paper proposed a new 3D model retrieval method based on projected area at mesh vertex. First, sum the projected area on vertical plane of the normal vector at mesh vertex, then normalized the list of the projected area distributions and transfer these data by Fourier transform method. Then the result is defined as 3D model's feature vector which can be used to calculate the similarity of different models. Experiments were conducted to evaluate the proposed algorithm utilizing the Engineering Shape Benchmark (ESB) database. The experiential results show that the proposed methods effectively reflect the similarity among engineering models, and the match result of the models is accurate and the retrieval performance is significantly improved compared to traditional shape distribution method.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The conventional shape feature-based method did not describe the local feature enough. This paper proposed a new 3D model retrieval method based on projected area at mesh vertex. First, sum the projected area on vertical plane of the normal vector at mesh vertex, then normalized the list of the projected area distributions and transfer these data by Fourier transform method. Then the result is defined as 3D model's feature vector which can be used to calculate the similarity of different models. Experiments were conducted to evaluate the proposed algorithm utilizing the Engineering Shape Benchmark (ESB) database. The experiential results show that the proposed methods effectively reflect the similarity among engineering models, and the match result of the models is accurate and the retrieval performance is significantly improved compared to traditional shape distribution method.",
"fno": "4772a001",
"keywords": [
"Solid Modeling",
"Vectors",
"Shape",
"Computational Modeling",
"Databases",
"Fourier Transforms",
"Feature Extraction",
"3 D Model Retrieval",
"Projected Area",
"Fourier Transform"
],
"authors": [
{
"affiliation": null,
"fullName": "Chen Xiaofeng",
"givenName": "Chen",
"surname": "Xiaofeng",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Gong Chuanwei",
"givenName": "Gong",
"surname": "Chuanwei",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Zhang Xutang",
"givenName": "Zhang",
"surname": "Xutang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Jiang Lijun",
"givenName": "Jiang",
"surname": "Lijun",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icdma",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2012-07-01T00:00:00",
"pubType": "proceedings",
"pages": "1-4",
"year": "2012",
"issn": null,
"isbn": "978-1-4673-2217-1",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "4772z023",
"articleId": "12OmNzlUKEv",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "4772a005",
"articleId": "12OmNBEpnD1",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNySXF3d",
"title": "2013 IEEE 38th Conference on Local Computer Networks Workshops (LCN Workshops)",
"acronym": "lcn-workshops",
"groupId": "1802198",
"volume": "0",
"displayVolume": "0",
"year": "2013",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNCxtyMd",
"doi": "10.1109/LCNW.2013.6758510",
"title": "All eyes on code: Using call graphs for WSN software optimization",
"normalizedTitle": "All eyes on code: Using call graphs for WSN software optimization",
"abstract": "Efficient code is essential for Wireless Sensor Networks. Limited computational resources and low memory capacities require a disciplined and provident programming style. However, optimizing code requires tools to provide a deep insight into where the code may have potential for improvement. In this paper we present a way of generating call graphs of software for standard Wireless Sensor Nodes. We execute the software on the actual nodes to collect profiling information and visualize this data on a PC-based host system. The call graphs are enriched with information about function execution time, execution count and visualize the call chain of the program to allow the programmer to identify room for optimization.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Efficient code is essential for Wireless Sensor Networks. Limited computational resources and low memory capacities require a disciplined and provident programming style. However, optimizing code requires tools to provide a deep insight into where the code may have potential for improvement. In this paper we present a way of generating call graphs of software for standard Wireless Sensor Nodes. We execute the software on the actual nodes to collect profiling information and visualize this data on a PC-based host system. The call graphs are enriched with information about function execution time, execution count and visualize the call chain of the program to allow the programmer to identify room for optimization.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Efficient code is essential for Wireless Sensor Networks. Limited computational resources and low memory capacities require a disciplined and provident programming style. However, optimizing code requires tools to provide a deep insight into where the code may have potential for improvement. In this paper we present a way of generating call graphs of software for standard Wireless Sensor Nodes. We execute the software on the actual nodes to collect profiling information and visualize this data on a PC-based host system. The call graphs are enriched with information about function execution time, execution count and visualize the call chain of the program to allow the programmer to identify room for optimization.",
"fno": "06758510",
"keywords": [
"Conferences",
"Wireless Sensor Networks",
"Hardware",
"Manuals",
"Libraries"
],
"authors": [
{
"affiliation": "Technische Universität Braunschweig, Institute of Operating Systems and Computer Networks, Mühlenpfordtstraße 23, 38106, Germany",
"fullName": "Wolf-Bastian Pottner",
"givenName": "Wolf-Bastian",
"surname": "Pottner",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Technische Universität Braunschweig, Institute of Operating Systems and Computer Networks, Mühlenpfordtstraße 23, 38106, Germany",
"fullName": "Daniel Willmann",
"givenName": "Daniel",
"surname": "Willmann",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Technische Universität Braunschweig, Institute of Operating Systems and Computer Networks, Mühlenpfordtstraße 23, 38106, Germany",
"fullName": "Felix Busching",
"givenName": "Felix",
"surname": "Busching",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Technische Universität Braunschweig, Institute of Operating Systems and Computer Networks, Mühlenpfordtstraße 23, 38106, Germany",
"fullName": "Lars Wolf",
"givenName": "Lars",
"surname": "Wolf",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "lcn-workshops",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2013-10-01T00:00:00",
"pubType": "proceedings",
"pages": "137-145",
"year": "2013",
"issn": null,
"isbn": "978-1-4799-0540-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "06758509",
"articleId": "12OmNyYm2Cb",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "06758511",
"articleId": "12OmNvkpl7e",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNzcxZfq",
"title": "2011 IEEE Symposium on Visual Languages and Human-Centric Computing (VL/HCC 2011)",
"acronym": "vlhcc",
"groupId": "1001007",
"volume": "0",
"displayVolume": "0",
"year": "2011",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNxjjEdP",
"doi": "10.1109/VLHCC.2011.6070388",
"title": "Visualizing call graphs",
"normalizedTitle": "Visualizing call graphs",
"abstract": "Developers navigate and reason about call graphs throughout investigation and debugging activities. This is often difficult: developers can spend tens of minutes answering a single question, get lost and disoriented, and erroneously make assumptions, causing bugs. To address these problems, we designed a new form of interactive call graph visualization - REACHER. Instead of leaving developers to manually traverse the call graph, REACHER lets developers search along control flow. The interactive call graph visualization encodes a number of properties that help developers answer questions about causality, ordering, type membership, repetition, choice, and other relationships. And developers remain oriented while navigating. To evaluate REACHER'S benefits, we conducted a lab study in which 12 participants answered control flow questions. Compared to an existing IDE, participants with REACHER were over 5 times more successful in significantly less time. All enthusiastically preferred REACHER, with many positive comments.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Developers navigate and reason about call graphs throughout investigation and debugging activities. This is often difficult: developers can spend tens of minutes answering a single question, get lost and disoriented, and erroneously make assumptions, causing bugs. To address these problems, we designed a new form of interactive call graph visualization - REACHER. Instead of leaving developers to manually traverse the call graph, REACHER lets developers search along control flow. The interactive call graph visualization encodes a number of properties that help developers answer questions about causality, ordering, type membership, repetition, choice, and other relationships. And developers remain oriented while navigating. To evaluate REACHER'S benefits, we conducted a lab study in which 12 participants answered control flow questions. Compared to an existing IDE, participants with REACHER were over 5 times more successful in significantly less time. All enthusiastically preferred REACHER, with many positive comments.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Developers navigate and reason about call graphs throughout investigation and debugging activities. This is often difficult: developers can spend tens of minutes answering a single question, get lost and disoriented, and erroneously make assumptions, causing bugs. To address these problems, we designed a new form of interactive call graph visualization - REACHER. Instead of leaving developers to manually traverse the call graph, REACHER lets developers search along control flow. The interactive call graph visualization encodes a number of properties that help developers answer questions about causality, ordering, type membership, repetition, choice, and other relationships. And developers remain oriented while navigating. To evaluate REACHER'S benefits, we conducted a lab study in which 12 participants answered control flow questions. Compared to an existing IDE, participants with REACHER were over 5 times more successful in significantly less time. All enthusiastically preferred REACHER, with many positive comments.",
"fno": "06070388",
"keywords": [
"Visualization",
"Navigation",
"Context",
"Joining Processes",
"Debugging",
"Vegetation",
"Upper Bound",
"Program Comprehension",
"Code Exploration",
"Call Graphs",
"Control Flow",
"Program Visualization"
],
"authors": [
{
"affiliation": "Sch. of Comput. Sci., Carnegie Mellon Univ., Pittsburgh, PA, USA",
"fullName": "T. D. LaToza",
"givenName": "T. D.",
"surname": "LaToza",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Sch. of Comput. Sci., Carnegie Mellon Univ., Pittsburgh, PA, USA",
"fullName": "B. A. Myers",
"givenName": "B. A.",
"surname": "Myers",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vlhcc",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2011-09-01T00:00:00",
"pubType": "proceedings",
"pages": "117-124",
"year": "2011",
"issn": "1943-6092",
"isbn": "978-1-4577-1246-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "06070387",
"articleId": "12OmNBhHt5l",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "06070389",
"articleId": "12OmNrJ11zi",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [],
"articleVideos": []
}
|
{
"proceeding": {
"id": "17D45VtKiqy",
"title": "2018 IEEE 18th International Working Conference on Source Code Analysis and Manipulation (SCAM)",
"acronym": "scam",
"groupId": "1000715",
"volume": "0",
"displayVolume": "0",
"year": "2018",
"__typename": "ProceedingType"
},
"article": {
"id": "17D45X0yjW1",
"doi": "10.1109/SCAM.2018.00028",
"title": "[Research Paper] Static JavaScript Call Graphs: A Comparative Study",
"normalizedTitle": "[Research Paper] Static JavaScript Call Graphs: A Comparative Study",
"abstract": "The popularity and wide adoption of JavaScript both at the client and server side makes its code analysis more important than ever before. Most of the algorithms for vulnerability analysis, coding issue detection, or type inference rely on the call graph representation of the underlying program. Despite some obvious advantages of dynamic analysis, static algorithms should also be considered for call graph construction as they do not require extensive test beds for programs and their costly execution and tracing. In this paper, we systematically compare five widely adopted static algorithms - implemented by the npm call graph, IBM WALA, Google Closure Compiler, Approximate Call Graph, and Type Analyzer for JavaScript tools - for building JavaScript call graphs on 26 WebKit SunSpider benchmark programs and 6 real-world Node.js modules. We provide a performance analysis as well as a quantitative and qualitative evaluation of the results. We found that there was a relatively large intersection of the found call edges among the algorithms, which proved to be 100% precise. However, most of the tools found edges that were missed by all others. ACG had the highest precision followed immediately by TAJS, but ACG found significantly more call edges. As for the combination of tools, ACG and TAJS together covered 99% of the found true edges by all algorithms, while maintaining a precision as high as 98%. Only two of the tools were able to analyze up-to-date multi-file Node.js modules due to incomplete language features support. They agreed on almost 60% of the call edges, but each of them found valid edges that the other missed.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The popularity and wide adoption of JavaScript both at the client and server side makes its code analysis more important than ever before. Most of the algorithms for vulnerability analysis, coding issue detection, or type inference rely on the call graph representation of the underlying program. Despite some obvious advantages of dynamic analysis, static algorithms should also be considered for call graph construction as they do not require extensive test beds for programs and their costly execution and tracing. In this paper, we systematically compare five widely adopted static algorithms - implemented by the npm call graph, IBM WALA, Google Closure Compiler, Approximate Call Graph, and Type Analyzer for JavaScript tools - for building JavaScript call graphs on 26 WebKit SunSpider benchmark programs and 6 real-world Node.js modules. We provide a performance analysis as well as a quantitative and qualitative evaluation of the results. We found that there was a relatively large intersection of the found call edges among the algorithms, which proved to be 100% precise. However, most of the tools found edges that were missed by all others. ACG had the highest precision followed immediately by TAJS, but ACG found significantly more call edges. As for the combination of tools, ACG and TAJS together covered 99% of the found true edges by all algorithms, while maintaining a precision as high as 98%. Only two of the tools were able to analyze up-to-date multi-file Node.js modules due to incomplete language features support. They agreed on almost 60% of the call edges, but each of them found valid edges that the other missed.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The popularity and wide adoption of JavaScript both at the client and server side makes its code analysis more important than ever before. Most of the algorithms for vulnerability analysis, coding issue detection, or type inference rely on the call graph representation of the underlying program. Despite some obvious advantages of dynamic analysis, static algorithms should also be considered for call graph construction as they do not require extensive test beds for programs and their costly execution and tracing. In this paper, we systematically compare five widely adopted static algorithms - implemented by the npm call graph, IBM WALA, Google Closure Compiler, Approximate Call Graph, and Type Analyzer for JavaScript tools - for building JavaScript call graphs on 26 WebKit SunSpider benchmark programs and 6 real-world Node.js modules. We provide a performance analysis as well as a quantitative and qualitative evaluation of the results. We found that there was a relatively large intersection of the found call edges among the algorithms, which proved to be 100% precise. However, most of the tools found edges that were missed by all others. ACG had the highest precision followed immediately by TAJS, but ACG found significantly more call edges. As for the combination of tools, ACG and TAJS together covered 99% of the found true edges by all algorithms, while maintaining a precision as high as 98%. Only two of the tools were able to analyze up-to-date multi-file Node.js modules due to incomplete language features support. They agreed on almost 60% of the call edges, but each of them found valid edges that the other missed.",
"fno": "829000a177",
"keywords": [
"Authoring Languages",
"Graph Theory",
"Java",
"Program Compilers",
"Program Diagnostics",
"Software Engineering",
"Vulnerability Analysis",
"Type Inference",
"Extensive Test Beds",
"ACG",
"Approximate Call Graph",
"Type Analyzer",
"Code Issue Detection",
"IBM WALA",
"Java Script Tool",
"Web Kit Sun Spider Benchmark Program",
"Google Closure Compiler",
"Up To Date Multifile Node Js Modules",
"Tools",
"Heuristic Algorithms",
"Performance Analysis",
"Java",
"Buildings",
"Benchmark Testing",
"Static Analysis",
"Java Script",
"Call Graph",
"Static Code Analysis",
"Comparative Study"
],
"authors": [
{
"affiliation": null,
"fullName": "Gábor Antal",
"givenName": "Gábor",
"surname": "Antal",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Péter Hegedus",
"givenName": "Péter",
"surname": "Hegedus",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Zoltán Tóth",
"givenName": "Zoltán",
"surname": "Tóth",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Rudolf Ferenc",
"givenName": "Rudolf",
"surname": "Ferenc",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Tibor Gyimóthy",
"givenName": "Tibor",
"surname": "Gyimóthy",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "scam",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2018-09-01T00:00:00",
"pubType": "proceedings",
"pages": "177-186",
"year": "2018",
"issn": null,
"isbn": "978-1-5386-8290-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "829000a171",
"articleId": "17D45XDIXRf",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "829000a187",
"articleId": "17D45We0UDr",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1KmF2jIF6o0",
"title": "2022 IEEE/ACM Workshop on Programming and Performance Visualization Tools (ProTools)",
"acronym": "protools",
"groupId": "1835044",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1KmF2CjFIK4",
"doi": "10.1109/ProTools56701.2022.00008",
"title": "Generating and Analyzing Program Call Graphs using Ontology",
"normalizedTitle": "Generating and Analyzing Program Call Graphs using Ontology",
"abstract": "Call graph or caller-callee relationships have been used for various kinds of static program analysis, performance analysis and profiling, and for program safety or security analysis such as detecting anomalies of program execution or code injection attacks. However, different tools generate call graphs in different formats, which prevents efficient reuse of call graph results. In this paper, we present an approach of using ontology and resource description framework (RDF) to create knowledge graphs for specifying call graphs to facilitate the construction of full-fledged and complex call graphs of computer programs, realizing more interoperable and scalable program analyses than conventional approaches. We create a formal ontology-based specification of call graph information to capture concepts and properties of both static and dynamic call graphs so different tools can collaboratively contribute to more comprehensive analysis results. Our experiments show that ontology enables merging of call graphs generated from different tools and flexible queries using a standard query interface.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Call graph or caller-callee relationships have been used for various kinds of static program analysis, performance analysis and profiling, and for program safety or security analysis such as detecting anomalies of program execution or code injection attacks. However, different tools generate call graphs in different formats, which prevents efficient reuse of call graph results. In this paper, we present an approach of using ontology and resource description framework (RDF) to create knowledge graphs for specifying call graphs to facilitate the construction of full-fledged and complex call graphs of computer programs, realizing more interoperable and scalable program analyses than conventional approaches. We create a formal ontology-based specification of call graph information to capture concepts and properties of both static and dynamic call graphs so different tools can collaboratively contribute to more comprehensive analysis results. Our experiments show that ontology enables merging of call graphs generated from different tools and flexible queries using a standard query interface.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Call graph or caller-callee relationships have been used for various kinds of static program analysis, performance analysis and profiling, and for program safety or security analysis such as detecting anomalies of program execution or code injection attacks. However, different tools generate call graphs in different formats, which prevents efficient reuse of call graph results. In this paper, we present an approach of using ontology and resource description framework (RDF) to create knowledge graphs for specifying call graphs to facilitate the construction of full-fledged and complex call graphs of computer programs, realizing more interoperable and scalable program analyses than conventional approaches. We create a formal ontology-based specification of call graph information to capture concepts and properties of both static and dynamic call graphs so different tools can collaboratively contribute to more comprehensive analysis results. Our experiments show that ontology enables merging of call graphs generated from different tools and flexible queries using a standard query interface.",
"fno": "756400a011",
"keywords": [
"Graph Theory",
"Ontologies Artificial Intelligence",
"Program Compilers",
"Program Diagnostics",
"Query Processing",
"Call Graph Information",
"Call Graph Results",
"Call Graphs",
"Caller Callee Relationships",
"Comprehensive Analysis Results",
"Computer Programs",
"Formal Ontology Based Specification",
"Interoperable Program Analyses",
"Knowledge Graphs",
"Performance Analysis",
"Program Execution",
"Program Safety",
"Resource Description Framework",
"Scalable Program Analyses",
"Static Program Analysis",
"Visualization",
"Natural Languages",
"Merging",
"Ontologies",
"Resource Description Framework",
"Performance Analysis",
"Safety",
"Callgraph",
"Ontology",
"Knowledge Graph",
"Resource Description Framework",
"Program Analysis"
],
"authors": [
{
"affiliation": "University of North Carolina at Charlotte,Department of Computer Science,Charlotte,North Carolina,USA",
"fullName": "Ethan Dorta",
"givenName": "Ethan",
"surname": "Dorta",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of North Carolina at Charlotte,Department of Computer Science,Charlotte,North Carolina,USA",
"fullName": "Yonghong Yan",
"givenName": "Yonghong",
"surname": "Yan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Center for Applied Scientific Computing,Lawrence Livermore National Laboratory,Livermore,California,USA",
"fullName": "Chunhua Liao",
"givenName": "Chunhua",
"surname": "Liao",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "protools",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-11-01T00:00:00",
"pubType": "proceedings",
"pages": "11-20",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-7564-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "756400a001",
"articleId": "1KmF3yqTNT2",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "756400a021",
"articleId": "1KmF2sgCces",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1hJrHq07uw0",
"title": "2019 IEEE International Conference on Big Data (Big Data)",
"acronym": "big-data",
"groupId": "1802964",
"volume": "0",
"displayVolume": "0",
"year": "2019",
"__typename": "ProceedingType"
},
"article": {
"id": "1hJsp9zjTCU",
"doi": "10.1109/BigData47090.2019.9005560",
"title": "GraphEvo: Characterizing and Understanding Software Evolution using Call Graphs",
"normalizedTitle": "GraphEvo: Characterizing and Understanding Software Evolution using Call Graphs",
"abstract": "Understanding software evolution is an imperative prerequisite for software related activities such as testing, debugging, and maintenance. As a software system evolves, it increases in size and complexity, introducing new challenges of understating the inner system interactions and subsequently hinders the overall system comprehension. While tools that construct and visualize call graphs have been used to facilitate software comprehension, they are still limited to capturing the functionality of a single software system at a time. However, understanding the similarities and differences across multiple releases becomes an imperative and challenging task during software evolution. To this end, we present a tool, named GraphEvo, that focuses on automating the process of quantifying and visualizing the changes across multiple releases of a software system based on an information-theoretic approach to compare the call graphs. Specifically, GraphEvo can automatically (1) construct and visualize the call graph for one or more software releases, (2) calculate and display a set of graph-based metrics, and (3) construct color-coded call graphs to visualize system evolution. The main goal of GraphEvo is to assist software developers and testers in exploring and tracking software changes over time. We demonstrate the functionality of GraphEvo by analyzing and studying five real software systems throughout their entire lifespan. The tool, evaluation results, and a video demo are available at https://goo.gl/8edZ64.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Understanding software evolution is an imperative prerequisite for software related activities such as testing, debugging, and maintenance. As a software system evolves, it increases in size and complexity, introducing new challenges of understating the inner system interactions and subsequently hinders the overall system comprehension. While tools that construct and visualize call graphs have been used to facilitate software comprehension, they are still limited to capturing the functionality of a single software system at a time. However, understanding the similarities and differences across multiple releases becomes an imperative and challenging task during software evolution. To this end, we present a tool, named GraphEvo, that focuses on automating the process of quantifying and visualizing the changes across multiple releases of a software system based on an information-theoretic approach to compare the call graphs. Specifically, GraphEvo can automatically (1) construct and visualize the call graph for one or more software releases, (2) calculate and display a set of graph-based metrics, and (3) construct color-coded call graphs to visualize system evolution. The main goal of GraphEvo is to assist software developers and testers in exploring and tracking software changes over time. We demonstrate the functionality of GraphEvo by analyzing and studying five real software systems throughout their entire lifespan. The tool, evaluation results, and a video demo are available at https://goo.gl/8edZ64.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Understanding software evolution is an imperative prerequisite for software related activities such as testing, debugging, and maintenance. As a software system evolves, it increases in size and complexity, introducing new challenges of understating the inner system interactions and subsequently hinders the overall system comprehension. While tools that construct and visualize call graphs have been used to facilitate software comprehension, they are still limited to capturing the functionality of a single software system at a time. However, understanding the similarities and differences across multiple releases becomes an imperative and challenging task during software evolution. To this end, we present a tool, named GraphEvo, that focuses on automating the process of quantifying and visualizing the changes across multiple releases of a software system based on an information-theoretic approach to compare the call graphs. Specifically, GraphEvo can automatically (1) construct and visualize the call graph for one or more software releases, (2) calculate and display a set of graph-based metrics, and (3) construct color-coded call graphs to visualize system evolution. The main goal of GraphEvo is to assist software developers and testers in exploring and tracking software changes over time. We demonstrate the functionality of GraphEvo by analyzing and studying five real software systems throughout their entire lifespan. The tool, evaluation results, and a video demo are available at https://goo.gl/8edZ64.",
"fno": "09005560",
"keywords": [
"Data Visualisation",
"Graph Theory",
"Program Testing",
"Public Domain Software",
"Software Maintenance",
"Graph Evo",
"Understanding Software Evolution",
"Software Related Activities",
"Inner System Interactions",
"System Comprehension",
"Software Comprehension",
"Single Software System",
"Software Releases",
"Graph Based Metrics",
"System Evolution",
"Software Developers",
"Testers",
"Software Systems",
"Tools",
"Visualization",
"Measurement",
"Java",
"Maintenance Engineering",
"Static Code Analysis",
"Call Graph",
"Program Comprehension",
"Software Evolution"
],
"authors": [
{
"affiliation": "University of Missouri-Kansas City,School of Computing and Engineering",
"fullName": "Vijay Walunj",
"givenName": "Vijay",
"surname": "Walunj",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Missouri-Kansas City,School of Computing and Engineering",
"fullName": "Gharib Gharibi",
"givenName": "Gharib",
"surname": "Gharibi",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Missouri-Kansas City,School of Computing and Engineering",
"fullName": "Duy H. Ho",
"givenName": "Duy H.",
"surname": "Ho",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Missouri-Kansas City,School of Computing and Engineering",
"fullName": "Yugyung Lee",
"givenName": "Yugyung",
"surname": "Lee",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "big-data",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2019-12-01T00:00:00",
"pubType": "proceedings",
"pages": "4799-4807",
"year": "2019",
"issn": null,
"isbn": "978-1-7281-0858-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "09005483",
"articleId": "1hJrRjSwgkE",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09006312",
"articleId": "1hJsBweaD6w",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1sET4vIEs1O",
"title": "2021 IEEE/ACM 43rd International Conference on Software Engineering: Companion Proceedings (ICSE-Companion)",
"acronym": "icse-companion",
"groupId": null,
"volume": "0",
"displayVolume": null,
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1sET629XZaE",
"doi": "10.1109/ICSE-Companion52605.2021.00046",
"title": "Scalable Call Graph Constructor for Maven",
"normalizedTitle": "Scalable Call Graph Constructor for Maven",
"abstract": "As a rich source of data, Call Graphs are used for various applications including security vulnerability detection. Despite multiple studies showing that Call Graphs can drastically improve the accuracy of analysis, existing ecosystem-scale tools like Dependabot do not use Call Graphs and work at the package-level. Using Call Graphs in ecosystem use cases is not practical because of the scalability problems that Call Graph generators have. Call Graph generation is usually considered to be a \"full program analysis\" resulting in large Call Graphs and expensive computation. To make an analysis applicable to ecosystem scale, this pragmatic approach does not work, because the number of possible combinations of how a particular artifact can be combined in a full program explodes. Therefore, it is necessary to make the analysis incremental. There are existing studies on different types of incremental program analysis. However, none of them focuses on Call Graph generation for an entire ecosystem. In this paper, we propose an incremental implementation of the CHA algorithm that can generate Call Graphs on-demand, by stitching together partial Call Graphs that have been extracted for libraries before. Our preliminary evaluation results show that the proposed approach scales well and outperforms the most scalable existing framework called OPAL.",
"abstracts": [
{
"abstractType": "Regular",
"content": "As a rich source of data, Call Graphs are used for various applications including security vulnerability detection. Despite multiple studies showing that Call Graphs can drastically improve the accuracy of analysis, existing ecosystem-scale tools like Dependabot do not use Call Graphs and work at the package-level. Using Call Graphs in ecosystem use cases is not practical because of the scalability problems that Call Graph generators have. Call Graph generation is usually considered to be a \"full program analysis\" resulting in large Call Graphs and expensive computation. To make an analysis applicable to ecosystem scale, this pragmatic approach does not work, because the number of possible combinations of how a particular artifact can be combined in a full program explodes. Therefore, it is necessary to make the analysis incremental. There are existing studies on different types of incremental program analysis. However, none of them focuses on Call Graph generation for an entire ecosystem. In this paper, we propose an incremental implementation of the CHA algorithm that can generate Call Graphs on-demand, by stitching together partial Call Graphs that have been extracted for libraries before. Our preliminary evaluation results show that the proposed approach scales well and outperforms the most scalable existing framework called OPAL.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "As a rich source of data, Call Graphs are used for various applications including security vulnerability detection. Despite multiple studies showing that Call Graphs can drastically improve the accuracy of analysis, existing ecosystem-scale tools like Dependabot do not use Call Graphs and work at the package-level. Using Call Graphs in ecosystem use cases is not practical because of the scalability problems that Call Graph generators have. Call Graph generation is usually considered to be a \"full program analysis\" resulting in large Call Graphs and expensive computation. To make an analysis applicable to ecosystem scale, this pragmatic approach does not work, because the number of possible combinations of how a particular artifact can be combined in a full program explodes. Therefore, it is necessary to make the analysis incremental. There are existing studies on different types of incremental program analysis. However, none of them focuses on Call Graph generation for an entire ecosystem. In this paper, we propose an incremental implementation of the CHA algorithm that can generate Call Graphs on-demand, by stitching together partial Call Graphs that have been extracted for libraries before. Our preliminary evaluation results show that the proposed approach scales well and outperforms the most scalable existing framework called OPAL.",
"fno": "121900a099",
"keywords": [
"Automatic Programming",
"Data Flow Graphs",
"Java",
"Program Compilers",
"Program Diagnostics",
"Security Of Data",
"Security Vulnerability Detection",
"Ecosystem Scale Tools",
"Call Graph Generators",
"Call Graph Generation",
"Incremental Program Analysis",
"Scalable Call Graph Constructor",
"Maven",
"Call Graphs On Demand",
"Full Program Analysis",
"OPAL",
"Scalability",
"Ecosystems",
"Tools",
"Libraries",
"Security",
"Software Engineering",
"Pragmatics",
"Theory Of Computation Logic And Verification Program Analysis"
],
"authors": [
{
"affiliation": "Technical University of Delft",
"fullName": "Mehdi Keshani",
"givenName": "Mehdi",
"surname": "Keshani",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icse-companion",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-05-01T00:00:00",
"pubType": "proceedings",
"pages": "99-101",
"year": "2021",
"issn": "2574-1926",
"isbn": "978-1-6654-1219-3",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "121900a096",
"articleId": "1sET613QfXa",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "121900a102",
"articleId": "1sET638ZK8w",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNC1GueH",
"title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)",
"acronym": "icpr",
"groupId": "1000545",
"volume": "0",
"displayVolume": "0",
"year": "2012",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNC8uRjk",
"doi": "",
"title": "A graph-based method of newspaper article reconstruction",
"normalizedTitle": "A graph-based method of newspaper article reconstruction",
"abstract": "The primary information units in a newspaper are the articles. Article reconstruction from newspapers including article aggregation and reading order recovery is known to be a quite challenging task due to the complexity of the multi-article page layout. In this paper, we propose a novel approach for article reconstruction using a bipartite graph framework, which models the complex relationships between text blocks as one-to-one correspondences, and accomplishes the task by finding the optimal match on this graph. During the optimization process, various information sources, including geometric layout, linguistic and semantic content, are deeply mined in the bipartite graph model to deal with the wide range of complex newspaper layouts. Moreover, quite different from the existing methods, we perform the two sub-tasks of article reconstruction in reverse order, that is, we detect the reading orders of the text blocks first and then use the reading order to aggregate blocks belonging to the same articles. Experimental results on 3312 newspaper pages with 23184 articles demonstrate that our method outperforms the state-of-the-art methods for newspaper article reconstruction. In addition, this method has been adopted in several large-scale newspaper digitalization projects.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The primary information units in a newspaper are the articles. Article reconstruction from newspapers including article aggregation and reading order recovery is known to be a quite challenging task due to the complexity of the multi-article page layout. In this paper, we propose a novel approach for article reconstruction using a bipartite graph framework, which models the complex relationships between text blocks as one-to-one correspondences, and accomplishes the task by finding the optimal match on this graph. During the optimization process, various information sources, including geometric layout, linguistic and semantic content, are deeply mined in the bipartite graph model to deal with the wide range of complex newspaper layouts. Moreover, quite different from the existing methods, we perform the two sub-tasks of article reconstruction in reverse order, that is, we detect the reading orders of the text blocks first and then use the reading order to aggregate blocks belonging to the same articles. Experimental results on 3312 newspaper pages with 23184 articles demonstrate that our method outperforms the state-of-the-art methods for newspaper article reconstruction. In addition, this method has been adopted in several large-scale newspaper digitalization projects.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The primary information units in a newspaper are the articles. Article reconstruction from newspapers including article aggregation and reading order recovery is known to be a quite challenging task due to the complexity of the multi-article page layout. In this paper, we propose a novel approach for article reconstruction using a bipartite graph framework, which models the complex relationships between text blocks as one-to-one correspondences, and accomplishes the task by finding the optimal match on this graph. During the optimization process, various information sources, including geometric layout, linguistic and semantic content, are deeply mined in the bipartite graph model to deal with the wide range of complex newspaper layouts. Moreover, quite different from the existing methods, we perform the two sub-tasks of article reconstruction in reverse order, that is, we detect the reading orders of the text blocks first and then use the reading order to aggregate blocks belonging to the same articles. Experimental results on 3312 newspaper pages with 23184 articles demonstrate that our method outperforms the state-of-the-art methods for newspaper article reconstruction. In addition, this method has been adopted in several large-scale newspaper digitalization projects.",
"fno": "06460443",
"keywords": [
"Graph Theory",
"Information Resources",
"Text Analysis",
"Graph Based Method",
"Newspaper Article Reconstruction",
"Primary Information Units",
"Article Aggregation",
"Reading Order Recovery",
"Multiarticle Page Layout Complexity",
"Bipartite Graph Framework",
"Text Blocks",
"One To One Correspondences",
"Optimization Process",
"Information Sources",
"Geometric Layout",
"Linguistic Content",
"Semantic Content",
"Newspaper Digitalization Projects",
"Bipartite Graph",
"Layout",
"Semantics",
"Visualization",
"Image Edge Detection",
"Complexity Theory",
"Optimal Matching"
],
"authors": [
{
"affiliation": "Institute of Computer Science & Technology, Peking University, Beijing, China",
"fullName": "Liangcai Gao",
"givenName": "Liangcai",
"surname": "Gao",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Institute of Computer Science & Technology, Peking University, Beijing, China",
"fullName": "Zhi Tang",
"givenName": "Zhi",
"surname": "Tang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Institute of Computer Science & Technology, Peking University, Beijing, China",
"fullName": "Xiaoyan Lin",
"givenName": "Xiaoyan",
"surname": "Lin",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Institute of Computer Science & Technology, Peking University, Beijing, China",
"fullName": "Yongtao Wang",
"givenName": "Yongtao",
"surname": "Wang",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2012-11-01T00:00:00",
"pubType": "proceedings",
"pages": "1566-1569",
"year": "2012",
"issn": "1051-4651",
"isbn": "978-1-4673-2216-4",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "06460442",
"articleId": "12OmNwvDQth",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "06460444",
"articleId": "12OmNqIzgUR",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1FUSEgqo6oU",
"title": "2022 IEEE Symposium on Visual Languages and Human-Centric Computing (VL/HCC)",
"acronym": "vl-hcc",
"groupId": "1001007",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1FUSFinOlwc",
"doi": "10.1109/VL/HCC53370.2022.9833103",
"title": "Code-Chips: Interactive Syntax in Visual Programming",
"normalizedTitle": "Code-Chips: Interactive Syntax in Visual Programming",
"abstract": "Visual programming is widely adopted in learning, usually with jigsaw-style blocks that may be freely placed on a canvas. While grammatical correctness is forced by the allowed compositions, syntactic information is not communicated to learners, causing the underlying language grammar to be experientially assimilated. But grammars are crucial for the deeper understanding of languages, since syntax reflects all important semantic aspects and elements. We present a general-purpose syntax-directed visual editor with syntactic tooltips, accepting as input the grammar of the subject language. It adopts a block-based visual style for program elements. However, contrasting to the typical canvas layout, it supports a row-based grid for spatial organization, enabling newlines and indentation. It also allows users view the production chain of any program element for a better understanding of the language. Our early evaluation findings indicate that such a combination of interactive syntax and visual code blocks is very positively received by learners.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Visual programming is widely adopted in learning, usually with jigsaw-style blocks that may be freely placed on a canvas. While grammatical correctness is forced by the allowed compositions, syntactic information is not communicated to learners, causing the underlying language grammar to be experientially assimilated. But grammars are crucial for the deeper understanding of languages, since syntax reflects all important semantic aspects and elements. We present a general-purpose syntax-directed visual editor with syntactic tooltips, accepting as input the grammar of the subject language. It adopts a block-based visual style for program elements. However, contrasting to the typical canvas layout, it supports a row-based grid for spatial organization, enabling newlines and indentation. It also allows users view the production chain of any program element for a better understanding of the language. Our early evaluation findings indicate that such a combination of interactive syntax and visual code blocks is very positively received by learners.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Visual programming is widely adopted in learning, usually with jigsaw-style blocks that may be freely placed on a canvas. While grammatical correctness is forced by the allowed compositions, syntactic information is not communicated to learners, causing the underlying language grammar to be experientially assimilated. But grammars are crucial for the deeper understanding of languages, since syntax reflects all important semantic aspects and elements. We present a general-purpose syntax-directed visual editor with syntactic tooltips, accepting as input the grammar of the subject language. It adopts a block-based visual style for program elements. However, contrasting to the typical canvas layout, it supports a row-based grid for spatial organization, enabling newlines and indentation. It also allows users view the production chain of any program element for a better understanding of the language. Our early evaluation findings indicate that such a combination of interactive syntax and visual code blocks is very positively received by learners.",
"fno": "09833103",
"keywords": [
"Visualization",
"Codes",
"Semantics",
"Layout",
"Production",
"Organizations",
"Syntactics",
"Visual Programming",
"Interactive Programming Environments",
"Syntax Directed Editing"
],
"authors": [
{
"affiliation": "University of Crete,Institute of Computer Science – FORTH, and Computer Science Department,Greece",
"fullName": "Anthony Savidis",
"givenName": "Anthony",
"surname": "Savidis",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Crete,Computer Science Department,Greece",
"fullName": "Emanuel Agapakis",
"givenName": "Emanuel",
"surname": "Agapakis",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vl-hcc",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-09-01T00:00:00",
"pubType": "proceedings",
"pages": "1-10",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-4214-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "09833130",
"articleId": "1FUSLDdNzq0",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09833119",
"articleId": "1FUSJMRIWsw",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1FUSEgqo6oU",
"title": "2022 IEEE Symposium on Visual Languages and Human-Centric Computing (VL/HCC)",
"acronym": "vl-hcc",
"groupId": "1001007",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1FUSKPSqVuE",
"doi": "10.1109/VL/HCC53370.2022.9833128",
"title": "Exploring Organization of Computational Notebook Cells in 2D Space",
"normalizedTitle": "Exploring Organization of Computational Notebook Cells in 2D Space",
"abstract": "Representing branching and comparative analyses in computational notebooks is complicated by the 1-dimensional (1D), top-down list arrangement of cells. Given the ubiquity of these and other non-linear features, their importance to analysis and narrative, and the struggles current 1D computational notebooks have, enabling organization of computational notebook cells in 2 dimensions (2D) may prove valuable. We investigated whether and how users would organize cells in such a \"2D Computational Notebook\" through a user study and gathered feedback from participants through a follow-up survey and optional interviews. Through the user study, we found 3 main design patterns for arranging notebook cells in 2D: Linear, Multi-Column, and Workboard. Through the survey and interviews, we found that users see potential value in 2D Computational Notebooks for branching and comparative analyses, but the expansion from 1D to 2D may necessitate additional navigational and organizational aids.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Representing branching and comparative analyses in computational notebooks is complicated by the 1-dimensional (1D), top-down list arrangement of cells. Given the ubiquity of these and other non-linear features, their importance to analysis and narrative, and the struggles current 1D computational notebooks have, enabling organization of computational notebook cells in 2 dimensions (2D) may prove valuable. We investigated whether and how users would organize cells in such a \"2D Computational Notebook\" through a user study and gathered feedback from participants through a follow-up survey and optional interviews. Through the user study, we found 3 main design patterns for arranging notebook cells in 2D: Linear, Multi-Column, and Workboard. Through the survey and interviews, we found that users see potential value in 2D Computational Notebooks for branching and comparative analyses, but the expansion from 1D to 2D may necessitate additional navigational and organizational aids.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Representing branching and comparative analyses in computational notebooks is complicated by the 1-dimensional (1D), top-down list arrangement of cells. Given the ubiquity of these and other non-linear features, their importance to analysis and narrative, and the struggles current 1D computational notebooks have, enabling organization of computational notebook cells in 2 dimensions (2D) may prove valuable. We investigated whether and how users would organize cells in such a \"2D Computational Notebook\" through a user study and gathered feedback from participants through a follow-up survey and optional interviews. Through the user study, we found 3 main design patterns for arranging notebook cells in 2D: Linear, Multi-Column, and Workboard. Through the survey and interviews, we found that users see potential value in 2D Computational Notebooks for branching and comparative analyses, but the expansion from 1D to 2D may necessitate additional navigational and organizational aids.",
"fno": "09833128",
"keywords": [
"Visualization",
"Navigation",
"Annotations",
"Layout",
"Semantics",
"Directed Graphs",
"Organizations",
"Data Science",
"Computational Notebooks"
],
"authors": [
{
"affiliation": "Virginia Tech,Dept. of Computer Science,Blacksburg,VA,USA",
"fullName": "Jesse Harden",
"givenName": "Jesse",
"surname": "Harden",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Virginia Tech,Dept. of Computer Science,Blacksburg,VA,USA",
"fullName": "Elizabeth Christman",
"givenName": "Elizabeth",
"surname": "Christman",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Hawaii at Manoa,Dept. of Information and Computer Sciences,Honolulu,HI,USA",
"fullName": "Nurit Kirshenbaum",
"givenName": "Nurit",
"surname": "Kirshenbaum",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Virginia Tech,Dept. of Computer Science,Blacksburg,VA,USA",
"fullName": "John Wenskovitch",
"givenName": "John",
"surname": "Wenskovitch",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Hawaii at Manoa,Dept. of Information and Computer Sciences,Honolulu,HI,USA",
"fullName": "Jason Leigh",
"givenName": "Jason",
"surname": "Leigh",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Virginia Tech,Dept. of Computer Science,Blacksburg,VA,USA",
"fullName": "Chris North",
"givenName": "Chris",
"surname": "North",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vl-hcc",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-09-01T00:00:00",
"pubType": "proceedings",
"pages": "1-6",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-4214-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "09833117",
"articleId": "1FUSIPqKnQY",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09833104",
"articleId": "1FUSI3ppOwg",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1MIgQdQKHtu",
"title": "2022 Workshop on Visual Analytics in Healthcare (VAHC)",
"acronym": "vahc",
"groupId": "1826204",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1MIgSVcHS9O",
"doi": "10.1109/VAHC57815.2022.10108526",
"title": "Browser-based Infographic Tailoring Self-service Interface (BITSI)",
"normalizedTitle": "Browser-based Infographic Tailoring Self-service Interface (BITSI)",
"abstract": "Tailored infographics are useful tools for communicating health information to patients and research participants, particularly those with low health literacy, but software is required to automate the tailoring. The Browser-based Infographic Tailoring Self-service Interface (BITSI) is a bespoke software solution for tailoring infographics. BITSI produces batches or single PDFs of tailored infographics in English and Spanish of Asthma Control Test scores at one or two time points using a number line format. This open-source software uses R and a LaTeX compiler; development of a Shiny web application supported a user-friendly, browser-based interface. We improved upon previous infographic tailoring solutions by streamlining installation and creating a user-friendly point-and-click data entry interface. Due to its interface, BITSI is amenable to interfacing with other systems through application programming interfaces, such as with electronic health record systems. These improvements make deployment of tailored infographics in clinical and research settings feasible and practical.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Tailored infographics are useful tools for communicating health information to patients and research participants, particularly those with low health literacy, but software is required to automate the tailoring. The Browser-based Infographic Tailoring Self-service Interface (BITSI) is a bespoke software solution for tailoring infographics. BITSI produces batches or single PDFs of tailored infographics in English and Spanish of Asthma Control Test scores at one or two time points using a number line format. This open-source software uses R and a LaTeX compiler; development of a Shiny web application supported a user-friendly, browser-based interface. We improved upon previous infographic tailoring solutions by streamlining installation and creating a user-friendly point-and-click data entry interface. Due to its interface, BITSI is amenable to interfacing with other systems through application programming interfaces, such as with electronic health record systems. These improvements make deployment of tailored infographics in clinical and research settings feasible and practical.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Tailored infographics are useful tools for communicating health information to patients and research participants, particularly those with low health literacy, but software is required to automate the tailoring. The Browser-based Infographic Tailoring Self-service Interface (BITSI) is a bespoke software solution for tailoring infographics. BITSI produces batches or single PDFs of tailored infographics in English and Spanish of Asthma Control Test scores at one or two time points using a number line format. This open-source software uses R and a LaTeX compiler; development of a Shiny web application supported a user-friendly, browser-based interface. We improved upon previous infographic tailoring solutions by streamlining installation and creating a user-friendly point-and-click data entry interface. Due to its interface, BITSI is amenable to interfacing with other systems through application programming interfaces, such as with electronic health record systems. These improvements make deployment of tailored infographics in clinical and research settings feasible and practical.",
"fno": "10108526",
"keywords": [
"Visual Analytics",
"Conferences",
"Data Visualization",
"Medical Services",
"Browsers",
"Electronic Medical Records",
"Open Source Software",
"Comprehension",
"Audiovisual Aids",
"Information Visualization",
"Patient Reported Outcomes"
],
"authors": [
{
"affiliation": "Columbia University University of San Diego",
"fullName": "Adriana Arcia",
"givenName": "Adriana",
"surname": "Arcia",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of British Columbia RStudio PBC",
"fullName": "Daniel Chen",
"givenName": "Daniel",
"surname": "Chen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Columbia University School of Nursing",
"fullName": "Katherine South",
"givenName": "Katherine",
"surname": "South",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vahc",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-11-01T00:00:00",
"pubType": "proceedings",
"pages": "1-7",
"year": "2022",
"issn": null,
"isbn": "979-8-3503-0103-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "10108527",
"articleId": "1MIgQoBeQSY",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "10108522",
"articleId": "1MIgSA5zSgg",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1J2XO4LxJkc",
"title": "2022 International Conference on Artificial Intelligence and Autonomous Robot Systems (AIARS)",
"acronym": "aiars",
"groupId": "9942816",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1J2XPikx7b2",
"doi": "10.1109/AIARS57204.2022.00038",
"title": "Intelligent Repair System of Table Tennis Server Based on Data Analysis Algorithm",
"normalizedTitle": "Intelligent Repair System of Table Tennis Server Based on Data Analysis Algorithm",
"abstract": "Billiards are important equipment for table tennis players or fans. When the table tennis ball machine fails, the sensor in the ball machine sends out an error signal, automatically transmits the built-in tool, damages the ball machine according to the planned maintenance method, and automatically completes the maintenance. The purpose of this paper is to study the intelligent repair system of table tennis server based on data analysis algorithm. This paper introduces the research background, significance and application scenarios of the intelligent repair system of table tennis ball server, summarizes the research status of fault diagnosis and genetic algorithm at home and abroad and some representative research results, and proposes the main work and research content of this paper. Using genetic algorithm can automatically correct the internal combination error of the ball machine, the effectiveness of the debugging method, and the proposed automatic debugging method is verified by the test. Through the experimental comparison of the self-repair method based on genetic algorithm implemented in this paper, it is found that the proposed error correction method based on genetic algorithm has the fastest repair speed, the success rate is 87.6%, and the confidence is high.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Billiards are important equipment for table tennis players or fans. When the table tennis ball machine fails, the sensor in the ball machine sends out an error signal, automatically transmits the built-in tool, damages the ball machine according to the planned maintenance method, and automatically completes the maintenance. The purpose of this paper is to study the intelligent repair system of table tennis server based on data analysis algorithm. This paper introduces the research background, significance and application scenarios of the intelligent repair system of table tennis ball server, summarizes the research status of fault diagnosis and genetic algorithm at home and abroad and some representative research results, and proposes the main work and research content of this paper. Using genetic algorithm can automatically correct the internal combination error of the ball machine, the effectiveness of the debugging method, and the proposed automatic debugging method is verified by the test. Through the experimental comparison of the self-repair method based on genetic algorithm implemented in this paper, it is found that the proposed error correction method based on genetic algorithm has the fastest repair speed, the success rate is 87.6%, and the confidence is high.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Billiards are important equipment for table tennis players or fans. When the table tennis ball machine fails, the sensor in the ball machine sends out an error signal, automatically transmits the built-in tool, damages the ball machine according to the planned maintenance method, and automatically completes the maintenance. The purpose of this paper is to study the intelligent repair system of table tennis server based on data analysis algorithm. This paper introduces the research background, significance and application scenarios of the intelligent repair system of table tennis ball server, summarizes the research status of fault diagnosis and genetic algorithm at home and abroad and some representative research results, and proposes the main work and research content of this paper. Using genetic algorithm can automatically correct the internal combination error of the ball machine, the effectiveness of the debugging method, and the proposed automatic debugging method is verified by the test. Through the experimental comparison of the self-repair method based on genetic algorithm implemented in this paper, it is found that the proposed error correction method based on genetic algorithm has the fastest repair speed, the success rate is 87.6%, and the confidence is high.",
"fno": "545700a135",
"keywords": [
"Data Analysis",
"Error Correction",
"Fault Diagnosis",
"Genetic Algorithms",
"Maintenance Engineering",
"Program Debugging",
"Sport",
"Automatic Debugging Method",
"Data Analysis Algorithm",
"Error Correction Method",
"Error Signal",
"Fans",
"Fastest Repair Speed",
"Genetic Algorithm",
"Intelligent Repair System",
"Main Work",
"Planned Maintenance Method",
"Representative Research Results",
"Research Background",
"Research Content",
"Research Status",
"Self Repair Method",
"Table Tennis Ball Machine",
"Table Tennis Ball Server",
"Table Tennis Players",
"Table Tennis Server",
"Fault Diagnosis",
"Training",
"Data Analysis",
"Sports Equipment",
"Simulation",
"Debugging",
"Maintenance Engineering",
"Data Analysis Algorithm",
"Table Tennis Ball Machine",
"Intelligent Repair",
"Repair System"
],
"authors": [
{
"affiliation": "Jiangxi Vocational Technical College of Industry and Trade,Nanchang,China",
"fullName": "Jianjun Cheng",
"givenName": "Jianjun",
"surname": "Cheng",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "aiars",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-07-01T00:00:00",
"pubType": "proceedings",
"pages": "135-138",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-5457-5",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "545700a131",
"articleId": "1J2XWl8mjao",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "545700a139",
"articleId": "1JeAfUGOI6c",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1pZ151d1sOc",
"title": "2020 International Conference on Pervasive Artificial Intelligence (ICPAI)",
"acronym": "icpai",
"groupId": "1839564",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1pZ17Yuv70Y",
"doi": "10.1109/ICPAI51961.2020.00019",
"title": "Stress Level Classifier: Taiwanese College Table Tennis Athletes’ Electroencephalography Analysis Based on Decision Trees",
"normalizedTitle": "Stress Level Classifier: Taiwanese College Table Tennis Athletes’ Electroencephalography Analysis Based on Decision Trees",
"abstract": "This study aims to provide a method to quantify the stress level with numerical EEG values, identify key features of brainwave and assess the level of stress for table tennis players. The data of College's Division 1 and Division 2 are collected and analyzed by the decision tree algorithms C4.5, CART, Random Forest and Random Tree. The result of Random Forest obtains the highest accuracy rate among each algorithm, which is 79.21% in all players, 79.3% in Division 1, and 80.68% in Division 2. According to the result of decision trees, the top attribute of the Division 1 players was Theta wave, which was different from the result of other data in the Division 2 players. Also reveal the deference of brainwaves between the Division 2 players and the Division 1 players while they are in high stressed competitions.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This study aims to provide a method to quantify the stress level with numerical EEG values, identify key features of brainwave and assess the level of stress for table tennis players. The data of College's Division 1 and Division 2 are collected and analyzed by the decision tree algorithms C4.5, CART, Random Forest and Random Tree. The result of Random Forest obtains the highest accuracy rate among each algorithm, which is 79.21% in all players, 79.3% in Division 1, and 80.68% in Division 2. According to the result of decision trees, the top attribute of the Division 1 players was Theta wave, which was different from the result of other data in the Division 2 players. Also reveal the deference of brainwaves between the Division 2 players and the Division 1 players while they are in high stressed competitions.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This study aims to provide a method to quantify the stress level with numerical EEG values, identify key features of brainwave and assess the level of stress for table tennis players. The data of College's Division 1 and Division 2 are collected and analyzed by the decision tree algorithms C4.5, CART, Random Forest and Random Tree. The result of Random Forest obtains the highest accuracy rate among each algorithm, which is 79.21% in all players, 79.3% in Division 1, and 80.68% in Division 2. According to the result of decision trees, the top attribute of the Division 1 players was Theta wave, which was different from the result of other data in the Division 2 players. Also reveal the deference of brainwaves between the Division 2 players and the Division 1 players while they are in high stressed competitions.",
"fno": "426200a058",
"keywords": [
"Decision Trees",
"Electroencephalography",
"Medical Signal Processing",
"Random Forests",
"Sport",
"Stress Level Classifier",
"Taiwanese College Table Tennis Athletes",
"Decision Trees",
"Table Tennis Players",
"Decision Tree Algorithms",
"Random Forest",
"Random Tree",
"Electroencephalography Analysis",
"Numerical EEG Values",
"Brainwave",
"Stress",
"Electroencephalography",
"Physiology",
"Sports",
"Games",
"Decision Trees",
"Vegetation",
"Table Tennis",
"Stress Level",
"Electroencephalogram",
"C 4 5",
"Random Forest"
],
"authors": [
{
"affiliation": "National Chung Hsing University,Department of Management Information Systems,Taichung City,Taiwan",
"fullName": "Ping-Sung Cheng",
"givenName": "Ping-Sung",
"surname": "Cheng",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "National Chung Hsing University,AI and Data Science Program Institute of Genomic and Boioinformatics,Department of Management Information Systems,Taichung City,Taiwan",
"fullName": "Meng-Hsiun Tsai",
"givenName": "Meng-Hsiun",
"surname": "Tsai",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "National Chung Hsing University,Department of Management Information Systems,Taichung City,Taiwan",
"fullName": "Chung-Hao Hsueh",
"givenName": "Chung-Hao",
"surname": "Hsueh",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "National Taiwan University of Sport,Department of Sport Performance,Taichung City,Taiwan",
"fullName": "Sheng Kuang Wu",
"givenName": "Sheng",
"surname": "Kuang Wu",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icpai",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-12-01T00:00:00",
"pubType": "proceedings",
"pages": "58-63",
"year": "2020",
"issn": null,
"isbn": "978-1-6654-0483-9",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "426200a054",
"articleId": "1pZ16h0fv0s",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "426200a064",
"articleId": "1pZ15BdObXG",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1zktbPUzbeE",
"title": "2021 Thirteenth International Conference on Mobile Computing and Ubiquitous Network (ICMU)",
"acronym": "icmu",
"groupId": "1803606",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1zktfg0C87u",
"doi": "10.23919/ICMU50196.2021.9638855",
"title": "Toward the Perfect Stroke: A Multimodal Approach for Table Tennis Stroke Evaluation",
"normalizedTitle": "Toward the Perfect Stroke: A Multimodal Approach for Table Tennis Stroke Evaluation",
"abstract": "In table tennis, developing a consistent and proper stroke is quite challenging, perhaps even more so for nonprofessional players. To build such consistency in beginner players, there is a need to understand how the stroke differs between beginner and standard players. So far, prior works have used video, accelerometers embedded in the table tennis rackets themselves, or infrared (IR) depth sensors for capturing and evaluating the stroke. However, these methods face certain challenges such as having insufficient data to analyse complete strokes, time-consuming and costly data collection, and use of non-prevalent equipment. Hence, to improve the beginner player’s performance, an ubiquitous method using readily accessible commercial devices is essential for stroke evaluation. To achieve the goal of this study, we (i) recorded video and accelerometer data from standard and beginner players using consumer-grade products, and (ii) analysed the stroke consistency between both groups. The results of both video-based and accelerometer-based data show the differences in the strokes between both kinds of players. These findings motivate us to further examine methods to help beginner players improve by providing guidance through procedural knowledge of a standard player’s stroke, and implement applications for motor-skill instruction.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In table tennis, developing a consistent and proper stroke is quite challenging, perhaps even more so for nonprofessional players. To build such consistency in beginner players, there is a need to understand how the stroke differs between beginner and standard players. So far, prior works have used video, accelerometers embedded in the table tennis rackets themselves, or infrared (IR) depth sensors for capturing and evaluating the stroke. However, these methods face certain challenges such as having insufficient data to analyse complete strokes, time-consuming and costly data collection, and use of non-prevalent equipment. Hence, to improve the beginner player’s performance, an ubiquitous method using readily accessible commercial devices is essential for stroke evaluation. To achieve the goal of this study, we (i) recorded video and accelerometer data from standard and beginner players using consumer-grade products, and (ii) analysed the stroke consistency between both groups. The results of both video-based and accelerometer-based data show the differences in the strokes between both kinds of players. These findings motivate us to further examine methods to help beginner players improve by providing guidance through procedural knowledge of a standard player’s stroke, and implement applications for motor-skill instruction.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In table tennis, developing a consistent and proper stroke is quite challenging, perhaps even more so for nonprofessional players. To build such consistency in beginner players, there is a need to understand how the stroke differs between beginner and standard players. So far, prior works have used video, accelerometers embedded in the table tennis rackets themselves, or infrared (IR) depth sensors for capturing and evaluating the stroke. However, these methods face certain challenges such as having insufficient data to analyse complete strokes, time-consuming and costly data collection, and use of non-prevalent equipment. Hence, to improve the beginner player’s performance, an ubiquitous method using readily accessible commercial devices is essential for stroke evaluation. To achieve the goal of this study, we (i) recorded video and accelerometer data from standard and beginner players using consumer-grade products, and (ii) analysed the stroke consistency between both groups. The results of both video-based and accelerometer-based data show the differences in the strokes between both kinds of players. These findings motivate us to further examine methods to help beginner players improve by providing guidance through procedural knowledge of a standard player’s stroke, and implement applications for motor-skill instruction.",
"fno": "09638855",
"keywords": [
"Accelerometers",
"Biomechanics",
"Sport",
"Video Signal Processing",
"Consumer Grade Products",
"Motor Skill Instruction",
"Video Based Data",
"IR Depth Sensors",
"Infrared Depth Sensors",
"Accelerometer Based Data",
"Stroke Consistency",
"Beginner Player",
"Table Tennis Rackets",
"Beginner Players",
"Nonprofessional Players",
"Table Tennis Stroke Evaluation",
"Multimodal Approach",
"Perfect Stroke",
"Standard Player",
"Accelerometers",
"Performance Evaluation",
"Sports Equipment",
"Data Collection",
"Sensors",
"Standards",
"Mobile Computing",
"Table Tennis",
"Stroke Detection",
"Motion Capture",
"Joint Kinematics",
"Sports Analytics"
],
"authors": [
{
"affiliation": "Nara Institute of Science and Technology,Nara,Japan",
"fullName": "Panyawut Sri-Iesaranusorn",
"givenName": "Panyawut",
"surname": "Sri-Iesaranusorn",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Nara Institute of Science and Technology,Nara,Japan",
"fullName": "Felan Carlo Garcia",
"givenName": "Felan Carlo",
"surname": "Garcia",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Nara Institute of Science and Technology,Nara,Japan",
"fullName": "Francis Tiausas",
"givenName": "Francis",
"surname": "Tiausas",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Nara Institute of Science and Technology,Nara,Japan",
"fullName": "Supatsara Wattanakriengkrai",
"givenName": "Supatsara",
"surname": "Wattanakriengkrai",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Nara Institute of Science and Technology,Nara,Japan",
"fullName": "Kazushi Ikeda",
"givenName": "Kazushi",
"surname": "Ikeda",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Nara Institute of Science and Technology,Nara,Japan",
"fullName": "Junichiro Yoshimoto",
"givenName": "Junichiro",
"surname": "Yoshimoto",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icmu",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-11-01T00:00:00",
"pubType": "proceedings",
"pages": "1-5",
"year": "2021",
"issn": null,
"isbn": "978-4-907626-48-8",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "09638856",
"articleId": "1zktd8mIa0o",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "09638853",
"articleId": "1zkthDUc5ZC",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNBqMDAT",
"title": "Wearable and Implantable Body Sensor Networks, International Workshop on",
"acronym": "bsn",
"groupId": "1002053",
"volume": "0",
"displayVolume": "0",
"year": "2011",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNBAIAOf",
"doi": "10.1109/BSN.2011.42",
"title": "Wireless Hand Gesture Capture through Wearable Passive Tag Sensing",
"normalizedTitle": "Wireless Hand Gesture Capture through Wearable Passive Tag Sensing",
"abstract": "For wearable computing to become more widely accepted, the associated Human-Computer Interface must move past today's keyboard, keypad, touch screen, or other bulky hand-held interfaces to allow a user to specify input through their fingers without taking their eyes and attention off their immediate focus. Accordingly, we have developed a wearable system to track hand gestures with passive RFID sensor tags. This system is composed of an ultra-high frequency (UHF) reader and small, passive, finger-worn tags powered by transmit RF energy, each equipped with a variety of sensors that could be used to detect gestures. The primary physical goals of the system were to be comfortable and wearable without interfering with other everyday activities while tracking particular hand movements that could be used to control a wearable computer or aid in interaction with ubiquitous or other wearable devices. This paper first introduces our hardware, then gives some example user interface implementations, such as a mouse scrolled by hand position and a click specified by finger proximity, entering input by touching fingers, setting options when moving the hand to a particular spot of the user's apparel labeled with a passive RFID tag, and otherwise mapping control onto motion of the hand, arm, and fingers. The overall system was fully functional, but as this is an early implementation, it was still very much limited by transmit power and antenna efficiency, due to the constraints on the size of the passive tags. Means of scaling to lower power and smaller size are suggested.",
"abstracts": [
{
"abstractType": "Regular",
"content": "For wearable computing to become more widely accepted, the associated Human-Computer Interface must move past today's keyboard, keypad, touch screen, or other bulky hand-held interfaces to allow a user to specify input through their fingers without taking their eyes and attention off their immediate focus. Accordingly, we have developed a wearable system to track hand gestures with passive RFID sensor tags. This system is composed of an ultra-high frequency (UHF) reader and small, passive, finger-worn tags powered by transmit RF energy, each equipped with a variety of sensors that could be used to detect gestures. The primary physical goals of the system were to be comfortable and wearable without interfering with other everyday activities while tracking particular hand movements that could be used to control a wearable computer or aid in interaction with ubiquitous or other wearable devices. This paper first introduces our hardware, then gives some example user interface implementations, such as a mouse scrolled by hand position and a click specified by finger proximity, entering input by touching fingers, setting options when moving the hand to a particular spot of the user's apparel labeled with a passive RFID tag, and otherwise mapping control onto motion of the hand, arm, and fingers. The overall system was fully functional, but as this is an early implementation, it was still very much limited by transmit power and antenna efficiency, due to the constraints on the size of the passive tags. Means of scaling to lower power and smaller size are suggested.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "For wearable computing to become more widely accepted, the associated Human-Computer Interface must move past today's keyboard, keypad, touch screen, or other bulky hand-held interfaces to allow a user to specify input through their fingers without taking their eyes and attention off their immediate focus. Accordingly, we have developed a wearable system to track hand gestures with passive RFID sensor tags. This system is composed of an ultra-high frequency (UHF) reader and small, passive, finger-worn tags powered by transmit RF energy, each equipped with a variety of sensors that could be used to detect gestures. The primary physical goals of the system were to be comfortable and wearable without interfering with other everyday activities while tracking particular hand movements that could be used to control a wearable computer or aid in interaction with ubiquitous or other wearable devices. This paper first introduces our hardware, then gives some example user interface implementations, such as a mouse scrolled by hand position and a click specified by finger proximity, entering input by touching fingers, setting options when moving the hand to a particular spot of the user's apparel labeled with a passive RFID tag, and otherwise mapping control onto motion of the hand, arm, and fingers. The overall system was fully functional, but as this is an early implementation, it was still very much limited by transmit power and antenna efficiency, due to the constraints on the size of the passive tags. Means of scaling to lower power and smaller size are suggested.",
"fno": "4431a200",
"keywords": [
"HCI",
"Wireless Finger Tracker",
"Wearable Computing",
"Passive Sensing",
"RFID"
],
"authors": [
{
"affiliation": null,
"fullName": "Rachel Bainbridge",
"givenName": "Rachel",
"surname": "Bainbridge",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Joseph A. Paradiso",
"givenName": "Joseph A.",
"surname": "Paradiso",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "bsn",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2011-05-01T00:00:00",
"pubType": "proceedings",
"pages": "200-204",
"year": "2011",
"issn": null,
"isbn": "978-0-7695-4431-1",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "4431a194",
"articleId": "12OmNzV70GV",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "4431a205",
"articleId": "12OmNC4wtyb",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNyKJiwN",
"title": "2013 IEEE International Conference on Computer Vision Workshops (ICCVW)",
"acronym": "iccvw",
"groupId": "1800041",
"volume": "0",
"displayVolume": "0",
"year": "2013",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNqIQSbm",
"doi": "10.1109/ICCVW.2013.64",
"title": "Wearable Smartphone: Wearable Hybrid Framework for Hand and Foot Gesture Interaction on Smartphone",
"normalizedTitle": "Wearable Smartphone: Wearable Hybrid Framework for Hand and Foot Gesture Interaction on Smartphone",
"abstract": "There is a increasing interest in creating wearable smartphone interaction technologies. A novel smartphone wearable hybrid interaction framework based on mixed low-cost hardware and software is proposed in this work. The software application renders touch-less application graphics and status information using smart phone's screen. Augmented reality image rendering technology is employed for a convenient hand/foot and phone interaction. The users interact with the application using hand/foot gesture motion behind the camera, which trigger the interaction event and generate activity sequences for interactive buffers. The touch-less algorithm provides a core support for hand/foot-gesture interaction by accurately detecting and tracking the hand/foot gesture. Four proof-of-concept application prototypes are developed based on the wearable hybrid framework. The user study evaluation demonstrates the social acceptability of the designed hand/foot gestures and the usability of the applications on proposed wearable hybrid framework with touch-less interaction approach.",
"abstracts": [
{
"abstractType": "Regular",
"content": "There is a increasing interest in creating wearable smartphone interaction technologies. A novel smartphone wearable hybrid interaction framework based on mixed low-cost hardware and software is proposed in this work. The software application renders touch-less application graphics and status information using smart phone's screen. Augmented reality image rendering technology is employed for a convenient hand/foot and phone interaction. The users interact with the application using hand/foot gesture motion behind the camera, which trigger the interaction event and generate activity sequences for interactive buffers. The touch-less algorithm provides a core support for hand/foot-gesture interaction by accurately detecting and tracking the hand/foot gesture. Four proof-of-concept application prototypes are developed based on the wearable hybrid framework. The user study evaluation demonstrates the social acceptability of the designed hand/foot gestures and the usability of the applications on proposed wearable hybrid framework with touch-less interaction approach.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "There is a increasing interest in creating wearable smartphone interaction technologies. A novel smartphone wearable hybrid interaction framework based on mixed low-cost hardware and software is proposed in this work. The software application renders touch-less application graphics and status information using smart phone's screen. Augmented reality image rendering technology is employed for a convenient hand/foot and phone interaction. The users interact with the application using hand/foot gesture motion behind the camera, which trigger the interaction event and generate activity sequences for interactive buffers. The touch-less algorithm provides a core support for hand/foot-gesture interaction by accurately detecting and tracking the hand/foot gesture. Four proof-of-concept application prototypes are developed based on the wearable hybrid framework. The user study evaluation demonstrates the social acceptability of the designed hand/foot gestures and the usability of the applications on proposed wearable hybrid framework with touch-less interaction approach.",
"fno": "3022a436",
"keywords": [
"Foot",
"Games",
"Cameras",
"Smart Phones",
"Mobile Communication",
"Hardware",
"Real Time Systems",
"Gesture Interaction",
"Wearable Smartphone",
"Touchless",
"Computer Vision"
],
"authors": [
{
"affiliation": null,
"fullName": "Zhihan Lv",
"givenName": "Zhihan",
"surname": "Lv",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "iccvw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2013-12-01T00:00:00",
"pubType": "proceedings",
"pages": "436-443",
"year": "2013",
"issn": null,
"isbn": "978-1-4799-3022-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "3022a428",
"articleId": "12OmNvAiSAe",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "3022a444",
"articleId": "12OmNvIfDOS",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNAsTgX3",
"title": "2009 IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops",
"acronym": "cvprw",
"groupId": "1001809",
"volume": "0",
"displayVolume": "0",
"year": "2009",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNvT2p2e",
"doi": "10.1109/CVPRW.2009.5204355",
"title": "High level activity recognition using low resolution wearable vision",
"normalizedTitle": "High level activity recognition using low resolution wearable vision",
"abstract": "This paper presents a system aimed to serve as the enabling platform for a wearable assistant. The method observes manipulations from a wearable camera and classifies activities from roughly stabilized low resolution images (160 × 120 pixels) with the help of a 3-level Dynamic Bayesian Network and adapted temporal templates. Our motivation is to explore robust but computationally inexpensive visual methods to perform as much activity inference as possible without resorting to more complex object or hand detectors. The description of the method and results obtained are presented, as well as the motivation for further work in the area of wearable visual sensing.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper presents a system aimed to serve as the enabling platform for a wearable assistant. The method observes manipulations from a wearable camera and classifies activities from roughly stabilized low resolution images (160 × 120 pixels) with the help of a 3-level Dynamic Bayesian Network and adapted temporal templates. Our motivation is to explore robust but computationally inexpensive visual methods to perform as much activity inference as possible without resorting to more complex object or hand detectors. The description of the method and results obtained are presented, as well as the motivation for further work in the area of wearable visual sensing.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper presents a system aimed to serve as the enabling platform for a wearable assistant. The method observes manipulations from a wearable camera and classifies activities from roughly stabilized low resolution images (160 × 120 pixels) with the help of a 3-level Dynamic Bayesian Network and adapted temporal templates. Our motivation is to explore robust but computationally inexpensive visual methods to perform as much activity inference as possible without resorting to more complex object or hand detectors. The description of the method and results obtained are presented, as well as the motivation for further work in the area of wearable visual sensing.",
"fno": "05204355",
"keywords": [
"Belief Networks",
"Cameras",
"Computer Vision",
"Image Classification",
"Image Registration",
"Image Resolution",
"Object Recognition",
"Wearable Computers",
"High Level Activity Recognition",
"Low Resolution Wearable Vision",
"Wearable Camera",
"Activity Classification",
"Low Resolution Image Registration",
"3 Level Dynamic Bayesian Network",
"Temporal Template",
"Activity Inference",
"Cameras",
"Wearable Sensors",
"Bayesian Methods",
"Manipulator Dynamics",
"Robustness",
"Sensor Arrays",
"Image Resolution",
"Pixel",
"Object Detection",
"Wearable Computers"
],
"authors": [
{
"affiliation": "Department of Computer Science, University of Bristol, UK",
"fullName": "Sudeep Sundaram",
"givenName": "Sudeep",
"surname": "Sundaram",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Department of Computer Science, University of Bristol, UK",
"fullName": "Walterio W. Mayol Cuevas",
"givenName": "Walterio W.",
"surname": "Mayol Cuevas",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvprw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2009-06-01T00:00:00",
"pubType": "proceedings",
"pages": "25-32",
"year": "2009",
"issn": "2160-7508",
"isbn": "978-1-4244-3994-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "05204354",
"articleId": "12OmNC2xhzk",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "05204353",
"articleId": "12OmNzahc2V",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNzSh1bk",
"title": "Proceedings. Third International Conference on Image and Graphics",
"acronym": "icig",
"groupId": "1001790",
"volume": "0",
"displayVolume": "0",
"year": "2004",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNvlxJpQ",
"doi": "10.1109/ICIG.2004.24",
"title": "A robust hand tracking and gesture recognition method for wearable visual interfaces and its applications",
"normalizedTitle": "A robust hand tracking and gesture recognition method for wearable visual interfaces and its applications",
"abstract": "Gesture-based interface is one of the most promising modes of human-computer interaction for wearable computers. This paper proposes a robust hand tracking and gesture recognition method for wearable visual interfaces, which is an extension of ICONDENSATION algorithm. The method integrates shape and depth information for robust hand tracking. Gesture recognition is realized through the maximum posterior estimation of several pre-defined gestures. The experimental results show that the proposed method works well in dynamic and complex background. Several promising applications in wearable computers are also discussed.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Gesture-based interface is one of the most promising modes of human-computer interaction for wearable computers. This paper proposes a robust hand tracking and gesture recognition method for wearable visual interfaces, which is an extension of ICONDENSATION algorithm. The method integrates shape and depth information for robust hand tracking. Gesture recognition is realized through the maximum posterior estimation of several pre-defined gestures. The experimental results show that the proposed method works well in dynamic and complex background. Several promising applications in wearable computers are also discussed.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Gesture-based interface is one of the most promising modes of human-computer interaction for wearable computers. This paper proposes a robust hand tracking and gesture recognition method for wearable visual interfaces, which is an extension of ICONDENSATION algorithm. The method integrates shape and depth information for robust hand tracking. Gesture recognition is realized through the maximum posterior estimation of several pre-defined gestures. The experimental results show that the proposed method works well in dynamic and complex background. Several promising applications in wearable computers are also discussed.",
"fno": "01410485",
"keywords": [
"Robustness",
"Wearable Computers",
"Application Software",
"Tracking",
"Shape Measurement",
"Wireless LAN",
"Skin",
"Image Segmentation",
"Cameras",
"Area Measurement"
],
"authors": [
{
"affiliation": "Dept. of Comput. Sci. & Eng., Beijing Inst. of Technol., China",
"fullName": "Yang Liu",
"givenName": null,
"surname": "Yang Liu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Dept. of Comput. Sci. & Eng., Beijing Inst. of Technol., China",
"fullName": "Yunde Jia",
"givenName": null,
"surname": "Yunde Jia",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icig",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2004-01-01T00:00:00",
"pubType": "proceedings",
"pages": "472-475",
"year": "2004",
"issn": null,
"isbn": "0-7695-2244-0",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "01410496",
"articleId": "12OmNwCaCxX",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "01410486",
"articleId": "12OmNzw8jdD",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNzcxYUb",
"title": "2012 IEEE International Conference on Multimedia and Expo",
"acronym": "icme",
"groupId": "1000477",
"volume": "0",
"displayVolume": "0",
"year": "2012",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNzlly47",
"doi": "10.1109/ICME.2012.48",
"title": "Real-Time Hand Pose Estimation from RGB-D Sensor",
"normalizedTitle": "Real-Time Hand Pose Estimation from RGB-D Sensor",
"abstract": "Hand pose estimation in cluttered environment is always challenging. In this paper, we address the problem of hand pose estimation from RGB-D sensor. To achieve robust real-time usability, we first design a data acquisition strategy, using a color glove to label different hand parts, and collect a new training data set. Then a novel hand pose estimation framework is presented, so that feature fusion drives hand localization and hand parts classification. Moreover, instead of using articulated model, a simplified and efficient 3D contour model is designed to assist real-time implementation, which does not require a large amount of training data. Experiments show that our approach can handle real-time hand interaction in a desktop environments with cluttered background.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Hand pose estimation in cluttered environment is always challenging. In this paper, we address the problem of hand pose estimation from RGB-D sensor. To achieve robust real-time usability, we first design a data acquisition strategy, using a color glove to label different hand parts, and collect a new training data set. Then a novel hand pose estimation framework is presented, so that feature fusion drives hand localization and hand parts classification. Moreover, instead of using articulated model, a simplified and efficient 3D contour model is designed to assist real-time implementation, which does not require a large amount of training data. Experiments show that our approach can handle real-time hand interaction in a desktop environments with cluttered background.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Hand pose estimation in cluttered environment is always challenging. In this paper, we address the problem of hand pose estimation from RGB-D sensor. To achieve robust real-time usability, we first design a data acquisition strategy, using a color glove to label different hand parts, and collect a new training data set. Then a novel hand pose estimation framework is presented, so that feature fusion drives hand localization and hand parts classification. Moreover, instead of using articulated model, a simplified and efficient 3D contour model is designed to assist real-time implementation, which does not require a large amount of training data. Experiments show that our approach can handle real-time hand interaction in a desktop environments with cluttered background.",
"fno": "4711a705",
"keywords": [
"Estimation",
"Shape",
"Training",
"Accuracy",
"Cameras",
"Image Color Analysis",
"Solid Modeling",
"Contour Model",
"Hand Pose Estimation",
"RGB D",
"Feature Fusion"
],
"authors": [
{
"affiliation": null,
"fullName": "Yuan Yao",
"givenName": "Yuan",
"surname": "Yao",
"__typename": "ArticleAuthorType"
},
{
"affiliation": null,
"fullName": "Yun Fu",
"givenName": "Yun",
"surname": "Fu",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "icme",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2012-07-01T00:00:00",
"pubType": "proceedings",
"pages": "705-710",
"year": "2012",
"issn": "1945-7871",
"isbn": "978-1-4673-1659-0",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "4711a699",
"articleId": "12OmNBE7MtC",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "4711a711",
"articleId": "12OmNyGbIiN",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1Hcn4kmUyR2",
"title": "2022 Global Conference on Robotics, Artificial Intelligence and Information Technology (GCRAIT)",
"acronym": "gcrait",
"groupId": "1847864",
"volume": "0",
"displayVolume": "0",
"year": "2022",
"__typename": "ProceedingType"
},
"article": {
"id": "1Hcnbghxs5i",
"doi": "10.1109/GCRAIT55928.2022.00056",
"title": "Artificial Intelligence Hand Spatial Position Predictor Based on Data Gloves and Jetson Xavier NX",
"normalizedTitle": "Artificial Intelligence Hand Spatial Position Predictor Based on Data Gloves and Jetson Xavier NX",
"abstract": "Based on the functional design of the hand rehabilitation index evaluator for patients with clinical hemiplegia and hand dysfunction, as well as the design based on the function of gesture recognition and sign language translation for the deaf, this paper applies a wireless wearable data glove based on inertial sensors, combined with the current NVIDIA most advanced edge computing development platform Jetson Xavier conducts artificial intelligence development and applications. A database of sensor data and hand joint angle and spatial position information is established by using the sensor data obtained by the data glove. A recurrent neural network with continuous prediction function is then trained using deep learning based on this database and deployed on the Jetson Xavier development platform. Finally, the sensor data is transmitted to the platform through the wireless communication module for real-time reasoning based on deep learning neural network, and the role of artificial intelligence in predicting the angle and spatial position of hand joints is realized.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Based on the functional design of the hand rehabilitation index evaluator for patients with clinical hemiplegia and hand dysfunction, as well as the design based on the function of gesture recognition and sign language translation for the deaf, this paper applies a wireless wearable data glove based on inertial sensors, combined with the current NVIDIA most advanced edge computing development platform Jetson Xavier conducts artificial intelligence development and applications. A database of sensor data and hand joint angle and spatial position information is established by using the sensor data obtained by the data glove. A recurrent neural network with continuous prediction function is then trained using deep learning based on this database and deployed on the Jetson Xavier development platform. Finally, the sensor data is transmitted to the platform through the wireless communication module for real-time reasoning based on deep learning neural network, and the role of artificial intelligence in predicting the angle and spatial position of hand joints is realized.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Based on the functional design of the hand rehabilitation index evaluator for patients with clinical hemiplegia and hand dysfunction, as well as the design based on the function of gesture recognition and sign language translation for the deaf, this paper applies a wireless wearable data glove based on inertial sensors, combined with the current NVIDIA most advanced edge computing development platform Jetson Xavier conducts artificial intelligence development and applications. A database of sensor data and hand joint angle and spatial position information is established by using the sensor data obtained by the data glove. A recurrent neural network with continuous prediction function is then trained using deep learning based on this database and deployed on the Jetson Xavier development platform. Finally, the sensor data is transmitted to the platform through the wireless communication module for real-time reasoning based on deep learning neural network, and the role of artificial intelligence in predicting the angle and spatial position of hand joints is realized.",
"fno": "819200a232",
"keywords": [
"Data Gloves",
"Deep Learning Artificial Intelligence",
"Distributed Processing",
"Gesture Recognition",
"Patient Diagnosis",
"Patient Rehabilitation",
"Recurrent Neural Nets",
"Sensors",
"Artificial Intelligence Hand Spatial Position Predictor",
"Hand Rehabilitation Index Evaluator",
"Clinical Hemiplegia",
"Hand Dysfunction",
"Gesture Recognition",
"Wireless Wearable Data Glove",
"Inertial Sensors",
"NVIDIA",
"Sensor Data",
"Jetson Xavier Development Platform",
"Deep Learning Neural Network",
"Edge Computing",
"Recurrent Neural Network",
"Wireless Communication Module",
"Wireless Communication",
"Deep Learning",
"Wireless Sensor Networks",
"Wearable Computers",
"Gesture Recognition",
"Data Gloves",
"Robot Sensing Systems",
"Data Gloves",
"Artificial Intelligence",
"Jetson Xavier",
"Inertial Sensors",
"Wireless Communication"
],
"authors": [
{
"affiliation": "School of Information Science and Engineering, Southeast University,Nanjing,China",
"fullName": "Yueqi Pan",
"givenName": "Yueqi",
"surname": "Pan",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "School of Information Science and Engineering, Southeast University,Nanjing,China",
"fullName": "Wanzhu Wang",
"givenName": "Wanzhu",
"surname": "Wang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "School of Information Science and Engineering, Southeast University,Nanjing,China",
"fullName": "Pengxiong Zhang",
"givenName": "Pengxiong",
"surname": "Zhang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "School of Information Science and Engineering, Southeast University,Nanjing,China",
"fullName": "Yuelong Liang",
"givenName": "Yuelong",
"surname": "Liang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Institute of Radio Frequency and Optoelectronic Integrated Circuits, Southeast University,Nanjing,China",
"fullName": "Fei Li",
"givenName": "Fei",
"surname": "Li",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Institute of Radio Frequency and Optoelectronic Integrated Circuits, Southeast University,Nanjing,China",
"fullName": "Haipeng Wang",
"givenName": "Haipeng",
"surname": "Wang",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "gcrait",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2022-07-01T00:00:00",
"pubType": "proceedings",
"pages": "232-236",
"year": "2022",
"issn": null,
"isbn": "978-1-6654-8192-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "819200a225",
"articleId": "1Hcnk90tnG0",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "819200a237",
"articleId": "1HcncaXV5w4",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1cI6akLvAuQ",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"acronym": "vr",
"groupId": "1000791",
"volume": "0",
"displayVolume": "0",
"year": "2019",
"__typename": "ProceedingType"
},
"article": {
"id": "1cJ1ahxGiWc",
"doi": "10.1109/VR.2019.8798296",
"title": "Development of Sensitive Glove Type Wearable Robot System",
"normalizedTitle": "Development of Sensitive Glove Type Wearable Robot System",
"abstract": "In this paper, a sensitive glove type wearable robot is designed, and its remote-control system is developed for operating a robot hand remotely. By sensing the motions of human fingers through pressure sensors in the glove and sending the information to the robot hand, the robot hand can accurately repeat the motions of users.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this paper, a sensitive glove type wearable robot is designed, and its remote-control system is developed for operating a robot hand remotely. By sensing the motions of human fingers through pressure sensors in the glove and sending the information to the robot hand, the robot hand can accurately repeat the motions of users.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this paper, a sensitive glove type wearable robot is designed, and its remote-control system is developed for operating a robot hand remotely. By sensing the motions of human fingers through pressure sensors in the glove and sending the information to the robot hand, the robot hand can accurately repeat the motions of users.",
"fno": "08798296",
"keywords": [
"Control Engineering Computing",
"Data Gloves",
"Dexterous Manipulators",
"Pressure Sensors",
"Telerobotics",
"Wearable Robots",
"Sensitive Glove Type Wearable Robot System",
"Remote Control System",
"Robot Hand",
"Human Fingers",
"Pressure Sensors",
"Robot Sensing Systems",
"Wearable Robots",
"Immune System",
"Pressure Sensors",
"Monitoring",
"Wearable Sensors",
"Human Augmentation",
"Glove Type Wearable Robot",
"Remote Control System"
],
"authors": [
{
"affiliation": "Kanagawa University",
"fullName": "Bin Zhang",
"givenName": "Bin",
"surname": "Zhang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Kanagawa University",
"fullName": "Atsufumi Suzuki",
"givenName": "Atsufumi",
"surname": "Suzuki",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Kanagawa University",
"fullName": "Hunok Lim",
"givenName": "Hunok",
"surname": "Lim",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "vr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2019-03-01T00:00:00",
"pubType": "proceedings",
"pages": "1581-1582",
"year": "2019",
"issn": null,
"isbn": "978-1-7281-1377-7",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "08798054",
"articleId": "1cJ17KUNi12",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "08798101",
"articleId": "1cJ0RtUtRgk",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1qpzz6dhLLq",
"title": "2020 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)",
"acronym": "aivr",
"groupId": "1830004",
"volume": "0",
"displayVolume": "0",
"year": "2020",
"__typename": "ProceedingType"
},
"article": {
"id": "1qpzzXysef6",
"doi": "10.1109/AIVR50618.2020.00079",
"title": "A Preliminary Investigation into a Deep Learning Implementation for Hand Tracking on Mobile Devices",
"normalizedTitle": "A Preliminary Investigation into a Deep Learning Implementation for Hand Tracking on Mobile Devices",
"abstract": "Hand tracking is an essential component of computer graphics and human-computer interaction applications. The use of RGB camera without specific hardware and sensors (e.g., depth cameras) allows developing solutions for a plethora of devices and platforms. Although various methods were proposed, hand tracking from a single RGB camera is still a challenging research area due to occlusions, complex backgrounds, and various hand poses and gestures. We present a mobile application for 2D hand tracking from RGB images captured by the smartphone camera. The images are processed by a deep neural network, modified specifically to tackle this task and run on mobile devices, looking for a compromise between performance and computational time. Network output is used to show a 2D skeleton on the user's hand. We tested our system on several scenarios, showing an interactive hand tracking level and achieving promising results in the case of variable brightness and backgrounds and small occlusions.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Hand tracking is an essential component of computer graphics and human-computer interaction applications. The use of RGB camera without specific hardware and sensors (e.g., depth cameras) allows developing solutions for a plethora of devices and platforms. Although various methods were proposed, hand tracking from a single RGB camera is still a challenging research area due to occlusions, complex backgrounds, and various hand poses and gestures. We present a mobile application for 2D hand tracking from RGB images captured by the smartphone camera. The images are processed by a deep neural network, modified specifically to tackle this task and run on mobile devices, looking for a compromise between performance and computational time. Network output is used to show a 2D skeleton on the user's hand. We tested our system on several scenarios, showing an interactive hand tracking level and achieving promising results in the case of variable brightness and backgrounds and small occlusions.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Hand tracking is an essential component of computer graphics and human-computer interaction applications. The use of RGB camera without specific hardware and sensors (e.g., depth cameras) allows developing solutions for a plethora of devices and platforms. Although various methods were proposed, hand tracking from a single RGB camera is still a challenging research area due to occlusions, complex backgrounds, and various hand poses and gestures. We present a mobile application for 2D hand tracking from RGB images captured by the smartphone camera. The images are processed by a deep neural network, modified specifically to tackle this task and run on mobile devices, looking for a compromise between performance and computational time. Network output is used to show a 2D skeleton on the user's hand. We tested our system on several scenarios, showing an interactive hand tracking level and achieving promising results in the case of variable brightness and backgrounds and small occlusions.",
"fno": "746300a380",
"keywords": [
"Cameras",
"Deep Learning Artificial Intelligence",
"Gesture Recognition",
"Graphical User Interfaces",
"Human Computer Interaction",
"Image Colour Analysis",
"Image Sensors",
"Interactive Systems",
"Mobile Computing",
"Object Tracking",
"Pose Estimation",
"Smart Phones",
"Depth Cameras",
"Single RGB Camera",
"Hand Poses",
"Mobile Application",
"RGB Images",
"Smartphone Camera",
"Deep Neural Network",
"Mobile Devices",
"Computational Time",
"Interactive Hand Tracking Level",
"Deep Learning Implementation",
"Computer Graphics",
"Human Computer Interaction Applications",
"2 D Hand Tracking",
"Hand Gestures",
"Image Processing",
"2 D Skeleton",
"Two Dimensional Displays",
"Cameras",
"Three Dimensional Displays",
"Neural Networks",
"Heating Systems",
"Computational Modeling",
"Performance Evaluation",
"Deep Learning",
"Human Computer Interaction",
"Image Processing",
"Hand Tracking"
],
"authors": [
{
"affiliation": "University of Basilicata,Department of Mathematics, Computer Science, and Economics,Potenza,Italy,85100",
"fullName": "Monica Gruosso",
"givenName": "Monica",
"surname": "Gruosso",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Basilicata,School of Engineering,Potenza,Italy,85100",
"fullName": "Nicola Capece",
"givenName": "Nicola",
"surname": "Capece",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Basilicata,Department of Mathematics, Computer Science, and Economics,Potenza,Italy,85100",
"fullName": "Ugo Erra",
"givenName": "Ugo",
"surname": "Erra",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "University of Basilicata,Department of Mathematics, Computer Science, and Economics,Potenza,Italy,85100",
"fullName": "Francesco Angiolillo",
"givenName": "Francesco",
"surname": "Angiolillo",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "aivr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2020-12-01T00:00:00",
"pubType": "proceedings",
"pages": "380-385",
"year": "2020",
"issn": null,
"isbn": "978-1-7281-7463-1",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "746300a375",
"articleId": "1qpzDhur636",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "746300a386",
"articleId": "1qpzCRIlrJS",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1yeCSUXkdhu",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"acronym": "ismar",
"groupId": "1000465",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1yeD59qEabK",
"doi": "10.1109/ISMAR52148.2021.00055",
"title": "Detection-Guided 3D Hand Tracking for Mobile AR Applications",
"normalizedTitle": "Detection-Guided 3D Hand Tracking for Mobile AR Applications",
"abstract": "Interaction using bare hands is experiencing a growing interest in mobile-based Augmented Reality (AR). Existing RGB-based works fail to provide a practical solution to identifying rich details of the hand. In this paper, we present a detection-guided method capable of recovery 3D hand posture with a color camera. The proposed method consists of key-point detectors and 3D pose optimizer. The detectors first locate the 2D hand bounding box and then apply a lightweight network on the hand region to provide a pixel-wise like-hood of hand joints. The optimizer lifts the 3D pose from the estimated 2D joints in a model-fitting manner. To ensure the result plausibly, we encode the hand shape into the objective function. The estimated 3D posture allows flexible hand-to-mobile interaction in AR applications. We extensively evaluate the proposed approach on several challenging public datasets. The experimental results indicate the efficiency and effectiveness of the proposed method.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Interaction using bare hands is experiencing a growing interest in mobile-based Augmented Reality (AR). Existing RGB-based works fail to provide a practical solution to identifying rich details of the hand. In this paper, we present a detection-guided method capable of recovery 3D hand posture with a color camera. The proposed method consists of key-point detectors and 3D pose optimizer. The detectors first locate the 2D hand bounding box and then apply a lightweight network on the hand region to provide a pixel-wise like-hood of hand joints. The optimizer lifts the 3D pose from the estimated 2D joints in a model-fitting manner. To ensure the result plausibly, we encode the hand shape into the objective function. The estimated 3D posture allows flexible hand-to-mobile interaction in AR applications. We extensively evaluate the proposed approach on several challenging public datasets. The experimental results indicate the efficiency and effectiveness of the proposed method.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Interaction using bare hands is experiencing a growing interest in mobile-based Augmented Reality (AR). Existing RGB-based works fail to provide a practical solution to identifying rich details of the hand. In this paper, we present a detection-guided method capable of recovery 3D hand posture with a color camera. The proposed method consists of key-point detectors and 3D pose optimizer. The detectors first locate the 2D hand bounding box and then apply a lightweight network on the hand region to provide a pixel-wise like-hood of hand joints. The optimizer lifts the 3D pose from the estimated 2D joints in a model-fitting manner. To ensure the result plausibly, we encode the hand shape into the objective function. The estimated 3D posture allows flexible hand-to-mobile interaction in AR applications. We extensively evaluate the proposed approach on several challenging public datasets. The experimental results indicate the efficiency and effectiveness of the proposed method.",
"fno": "015800a386",
"keywords": [
"Augmented Reality",
"Cameras",
"Feature Extraction",
"Gesture Recognition",
"Human Computer Interaction",
"Image Colour Analysis",
"Optimisation",
"Pose Estimation",
"Lightweight Network",
"Hand Region",
"Hand Joints",
"Estimated 2 D Joints",
"Model Fitting Manner",
"Hand Shape",
"Estimated 3 D Posture",
"Hand To Mobile Interaction",
"Detection Guided 3 D",
"Mobile AR Applications",
"Bare Hands",
"Mobile Based Augmented Reality",
"RGB Based Works",
"Rich Details",
"Detection Guided Method Capable",
"Color Camera",
"Key Point Detectors",
"Performance Evaluation",
"Solid Modeling",
"Three Dimensional Displays",
"Shape",
"Detectors",
"Real Time Systems",
"Mobile Handsets",
"Mobile AR",
"3 D Hand Pose Estimation",
"Hand Tracking"
],
"authors": [
{
"affiliation": "OPPO Research Institute,Beijing",
"fullName": "Yunlong Che",
"givenName": "Yunlong",
"surname": "Che",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "State Key Laboratory of Virtual Reality Technology and Systems,Beijing",
"fullName": "Yue Qi",
"givenName": "Yue",
"surname": "Qi",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "ismar",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-10-01T00:00:00",
"pubType": "proceedings",
"pages": "386-392",
"year": "2021",
"issn": "1554-7868",
"isbn": "978-1-6654-0158-6",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [
{
"id": "1yeD4LjSve0",
"name": "pismar202101580-09583794s1-mm_015800a386.zip",
"size": "69.9 MB",
"location": "https://www.computer.org/csdl/api/v1/extra/pismar202101580-09583794s1-mm_015800a386.zip",
"__typename": "WebExtraType"
}
],
"adjacentArticles": {
"previous": {
"fno": "015800a376",
"articleId": "1yeCUnrXwiI",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "015800a393",
"articleId": "1yeCVRK9bri",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [],
"articleVideos": []
}
|
{
"proceeding": {
"id": "1yeHGyRsuys",
"title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"acronym": "cvpr",
"groupId": "1000147",
"volume": "0",
"displayVolume": "0",
"year": "2021",
"__typename": "ProceedingType"
},
"article": {
"id": "1yeJWXqdk3u",
"doi": "10.1109/CVPR46437.2021.01031",
"title": "Model-based 3D Hand Reconstruction via Self-Supervised Learning",
"normalizedTitle": "Model-based 3D Hand Reconstruction via Self-Supervised Learning",
"abstract": "Reconstructing a 3D hand from a single-view RGB image is challenging due to various hand configurations and depth ambiguity. To reliably reconstruct a 3D hand from a monocular image, most state-of-the-art methods heavily rely on 3D annotations at the training stage, but obtaining 3D annotations is expensive. To alleviate reliance on labeled training data, we propose S<sup>2</sup>HAND, a self-supervised 3D hand reconstruction network that can jointly estimate pose, shape, texture, and the camera viewpoint. Specifically, we obtain geometric cues from the input image through easily accessible 2D detected keypoints. To learn an accurate hand reconstruction model from these noisy geometric cues, we utilize the consistency between 2D and 3D representations and propose a set of novel losses to rationalize outputs of the neural network. For the first time, we demonstrate the feasibility of training an accurate 3D hand reconstruction network without relying on manual annotations. Our experiments show that the proposed self-supervised method achieves comparable performance with recent fully-supervised methods. The code is available at https://github.com/TerenceCYJ/S2HAND.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Reconstructing a 3D hand from a single-view RGB image is challenging due to various hand configurations and depth ambiguity. To reliably reconstruct a 3D hand from a monocular image, most state-of-the-art methods heavily rely on 3D annotations at the training stage, but obtaining 3D annotations is expensive. To alleviate reliance on labeled training data, we propose S<sup>2</sup>HAND, a self-supervised 3D hand reconstruction network that can jointly estimate pose, shape, texture, and the camera viewpoint. Specifically, we obtain geometric cues from the input image through easily accessible 2D detected keypoints. To learn an accurate hand reconstruction model from these noisy geometric cues, we utilize the consistency between 2D and 3D representations and propose a set of novel losses to rationalize outputs of the neural network. For the first time, we demonstrate the feasibility of training an accurate 3D hand reconstruction network without relying on manual annotations. Our experiments show that the proposed self-supervised method achieves comparable performance with recent fully-supervised methods. The code is available at https://github.com/TerenceCYJ/S2HAND.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Reconstructing a 3D hand from a single-view RGB image is challenging due to various hand configurations and depth ambiguity. To reliably reconstruct a 3D hand from a monocular image, most state-of-the-art methods heavily rely on 3D annotations at the training stage, but obtaining 3D annotations is expensive. To alleviate reliance on labeled training data, we propose S2HAND, a self-supervised 3D hand reconstruction network that can jointly estimate pose, shape, texture, and the camera viewpoint. Specifically, we obtain geometric cues from the input image through easily accessible 2D detected keypoints. To learn an accurate hand reconstruction model from these noisy geometric cues, we utilize the consistency between 2D and 3D representations and propose a set of novel losses to rationalize outputs of the neural network. For the first time, we demonstrate the feasibility of training an accurate 3D hand reconstruction network without relying on manual annotations. Our experiments show that the proposed self-supervised method achieves comparable performance with recent fully-supervised methods. The code is available at https://github.com/TerenceCYJ/S2HAND.",
"fno": "450900k0446",
"keywords": [
"Cameras",
"Feature Extraction",
"Image Classification",
"Image Colour Analysis",
"Image Motion Analysis",
"Image Reconstruction",
"Image Segmentation",
"Image Sensors",
"Learning Artificial Intelligence",
"Pose Estimation",
"Robot Vision",
"Self Supervised Method",
"Fully Supervised Methods",
"Model Based 3 D Hand Reconstruction",
"Single View RGB Image",
"Hand Configurations",
"Monocular Image",
"Training Stage",
"Labeled Training Data",
"2 HAND",
"Self Supervised 3 D Hand Reconstruction Network",
"Input Image",
"Easily Accessible 2 D Detected Keypoints",
"Accurate Hand Reconstruction Model",
"Noisy Geometric Cues",
"Accurate 3 D Hand Reconstruction Network",
"Manual Annotations",
"Training",
"Solid Modeling",
"Surface Reconstruction",
"Three Dimensional Displays",
"Annotations",
"Shape",
"Training Data"
],
"authors": [
{
"affiliation": "Wuhan University",
"fullName": "Yujin Chen",
"givenName": "Yujin",
"surname": "Chen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Wuhan University",
"fullName": "Zhigang Tu",
"givenName": "Zhigang",
"surname": "Tu",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Tencent AI Lab",
"fullName": "Di Kang",
"givenName": "Di",
"surname": "Kang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Tencent AI Lab",
"fullName": "Linchao Bao",
"givenName": "Linchao",
"surname": "Bao",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Tencent",
"fullName": "Ying Zhang",
"givenName": "Ying",
"surname": "Zhang",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Tencent AI Lab",
"fullName": "Xuefei Zhe",
"givenName": "Xuefei",
"surname": "Zhe",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Wuhan University",
"fullName": "Ruizhi Chen",
"givenName": "Ruizhi",
"surname": "Chen",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "State University of New York at Buffalo",
"fullName": "Junsong Yuan",
"givenName": "Junsong",
"surname": "Yuan",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvpr",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2021-06-01T00:00:00",
"pubType": "proceedings",
"pages": "10446-10455",
"year": "2021",
"issn": null,
"isbn": "978-1-6654-4509-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [
{
"id": "1yeJWSwuwSs",
"name": "pcvpr202145090-09577060s1-mm_450900k0446.zip",
"size": "6.82 MB",
"location": "https://www.computer.org/csdl/api/v1/extra/pcvpr202145090-09577060s1-mm_450900k0446.zip",
"__typename": "WebExtraType"
}
],
"adjacentArticles": {
"previous": {
"fno": "450900k0435",
"articleId": "1yeKkTgSYzC",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "450900k0456",
"articleId": "1yeI08QZPbO",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [],
"articleVideos": []
}
|
{
"proceeding": {
"id": "12OmNxETa7W",
"title": "2006 Conference on Computer Vision and Pattern Recognition Workshop (CVPRW'06)",
"acronym": "cvprw",
"groupId": "1001809",
"volume": "0",
"displayVolume": "0",
"year": "2006",
"__typename": "ProceedingType"
},
"article": {
"id": "12OmNBkfRla",
"doi": "10.1109/CVPRW.2006.147",
"title": "Octree-Based Topology-Preserving Isosurface Simplification",
"normalizedTitle": "Octree-Based Topology-Preserving Isosurface Simplification",
"abstract": "Isosurface generation has many important applications in medical imaging. Standard isosurface algorithms generate very large triangle meshes when high resolution volumetric data is available, which increases rendering time and storage requirements. Most existing mesh simplification algorithms either do not guarantee non-intersecting meshes or require large cost to prevent self-intersection. We present an octree-based isosurface generation and simplification method that preserves topology, guarantees no selfintersections, and generates a surface that approximates the true isosurface of the underlying data. Rather than focusing on directly simplifying the surface mesh, the new strategy is to generate an octree grid from the original volumetric grid in a way that guarantees these desired properties of the generated isosurface. The new method demonstrates savings of 70% in mesh nodes for real 3D medical data with highly complicated shapes such as the human brain cortex and the pelvis. The simplified surface stays within a userspecified distance bound from the original finest resolution surface, preserves the original topology and has no selfintersections.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Isosurface generation has many important applications in medical imaging. Standard isosurface algorithms generate very large triangle meshes when high resolution volumetric data is available, which increases rendering time and storage requirements. Most existing mesh simplification algorithms either do not guarantee non-intersecting meshes or require large cost to prevent self-intersection. We present an octree-based isosurface generation and simplification method that preserves topology, guarantees no selfintersections, and generates a surface that approximates the true isosurface of the underlying data. Rather than focusing on directly simplifying the surface mesh, the new strategy is to generate an octree grid from the original volumetric grid in a way that guarantees these desired properties of the generated isosurface. The new method demonstrates savings of 70% in mesh nodes for real 3D medical data with highly complicated shapes such as the human brain cortex and the pelvis. The simplified surface stays within a userspecified distance bound from the original finest resolution surface, preserves the original topology and has no selfintersections.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Isosurface generation has many important applications in medical imaging. Standard isosurface algorithms generate very large triangle meshes when high resolution volumetric data is available, which increases rendering time and storage requirements. Most existing mesh simplification algorithms either do not guarantee non-intersecting meshes or require large cost to prevent self-intersection. We present an octree-based isosurface generation and simplification method that preserves topology, guarantees no selfintersections, and generates a surface that approximates the true isosurface of the underlying data. Rather than focusing on directly simplifying the surface mesh, the new strategy is to generate an octree grid from the original volumetric grid in a way that guarantees these desired properties of the generated isosurface. The new method demonstrates savings of 70% in mesh nodes for real 3D medical data with highly complicated shapes such as the human brain cortex and the pelvis. The simplified surface stays within a userspecified distance bound from the original finest resolution surface, preserves the original topology and has no selfintersections.",
"fno": "26460081",
"keywords": [],
"authors": [
{
"affiliation": "Johns Hopkins University, USA",
"fullName": "Ying Bai",
"givenName": "Ying",
"surname": "Bai",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "CMS, Inc., USA",
"fullName": "Xiao Han",
"givenName": "Xiao",
"surname": "Han",
"__typename": "ArticleAuthorType"
},
{
"affiliation": "Johns Hopkins University, USA",
"fullName": "Jerry L. Prince",
"givenName": "Jerry L.",
"surname": "Prince",
"__typename": "ArticleAuthorType"
}
],
"idPrefix": "cvprw",
"isOpenAccess": false,
"showRecommendedArticles": true,
"showBuyMe": true,
"hasPdf": true,
"pubDate": "2006-06-01T00:00:00",
"pubType": "proceedings",
"pages": "81",
"year": "2006",
"issn": null,
"isbn": "0-7695-2646-2",
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"webExtras": [],
"adjacentArticles": {
"previous": {
"fno": "26460080",
"articleId": "12OmNyGbIkh",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "26460093",
"articleId": "12OmNBLdKDD",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"recommendedArticles": [],
"articleVideos": []
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.